hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
dd5f35ddf0cf30481ac845d1a5c2a575ff4e55fd.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <err_common.hpp>
#include <solve.hpp>
#if defined(WITH_CUDA_LINEAR_ALGEBRA)
#include <cusolverDnManager.hpp>
#include <rocblas.h>
#include <identity.hpp>
#include <iostream>
#include <memory.hpp>
#include <copy.hpp>
#include <transpose.hpp>
#include <math.hpp>
#include <err_common.hpp>
#include <blas.hpp>
#include <lu.hpp>
#include <qr.hpp>
#include <handle.hpp>
#include <cstdio>
namespace cuda
{
using cusolver::getDnHandle;
//cusolverStatus_t cusolverDn<>getrs(
// hipsolverDnHandle_t handle,
// hipblasOperation_t trans,
// int n, int nrhs,
// const <> *A, int lda,
// const int *devIpiv,
// <> *B, int ldb,
// int *devInfo );
// Function-pointer signature for cusolverDn<S|D|C|Z>getrs, parameterized on the
// element type so a single template can dispatch to the correct typed entry point.
template<typename T>
struct getrs_func_def_t
{
typedef cusolverStatus_t (*getrs_func_def) (
hipsolverDnHandle_t,
hipblasOperation_t,
int, int,
const T *, int,
const int *,
T *, int,
int *);
};
#define SOLVE_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func();
#define SOLVE_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \
SOLVE_FUNC_DEF( getrs )
SOLVE_FUNC(getrs , float , S)
SOLVE_FUNC(getrs , double , D)
SOLVE_FUNC(getrs , cfloat , C)
SOLVE_FUNC(getrs , cdouble, Z)
//cusolverStatus_t cusolverDn<>geqrf_bufferSize(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// int *Lwork );
//
//cusolverStatus_t cusolverDn<>geqrf(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A, int lda,
// <> *TAU,
// <> *Workspace,
// int Lwork, int *devInfo );
//
//cusolverStatus_t cusolverDn<>mqr(
// hipsolverDnHandle_t handle,
// hipblasSideMode_t side, hipblasOperation_t trans,
// int m, int n, int k,
// const double *A, int lda,
// const double *tau,
// double *C, int ldc,
// double *work,
// int lwork, int *devInfo);
// Function-pointer signature for cusolverDn<t>geqrf (in-place QR factorization).
template<typename T>
struct geqrf_solve_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_func_def) (
hipsolverDnHandle_t, int, int,
T *, int,
T *,
T *,
int, int *);
};
// Function-pointer signature for cusolverDn<t>geqrf_bufferSize (workspace query).
template<typename T>
struct geqrf_solve_buf_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_buf_func_def) (
hipsolverDnHandle_t, int, int,
T *, int, int *);
};
// Function-pointer signature for cusolverDn<t>ormqr/unmqr (apply Q from a QR).
template<typename T>
struct mqr_solve_func_def_t
{
typedef cusolverStatus_t (*mqr_solve_func_def) (
hipsolverDnHandle_t,
hipblasSideMode_t, hipblasOperation_t,
int, int, int,
const T *, int,
const T *,
T *, int,
T *, int,
int *);
};
#define QR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func(); \
\
template<typename T> \
static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func(); \
#define QR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX##FUNC; } \
\
template<> typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def FUNC##_solve_buf_func<TYPE>() \
{ return (FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; }
QR_FUNC_DEF( geqrf )
QR_FUNC(geqrf , float , S)
QR_FUNC(geqrf , double , D)
QR_FUNC(geqrf , cfloat , C)
QR_FUNC(geqrf , cdouble, Z)
#define MQR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func();
#define MQR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX; } \
MQR_FUNC_DEF( mqr )
MQR_FUNC(mqr , float , Sormqr)
MQR_FUNC(mqr , double , Dormqr)
MQR_FUNC(mqr , cfloat , Cunmqr)
MQR_FUNC(mqr , cdouble, Zunmqr)
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
int N = A.dims()[0];
int NRHS = b.dims()[1];
Array< T > B = copyArray<T>(b);
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(getDnHandle(),
HIPBLAS_OP_N,
N, NRHS,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info);
return B;
}
// Solve the square system a*X = b: LU-factorize a copy of `a` in place, then
// back-substitute with getrs. Both inputs are copied, so callers' arrays are
// untouched.
template<typename T>
Array<T> generalSolve(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0]; // NOTE(review): M is unused; square path assumes M == N
int N = a.dims()[1];
int K = b.dims()[1]; // number of right-hand sides
Array<T> A = copyArray<T>(a);
Array<T> B = copyArray<T>(b);
Array<int> pivot = lu_inplace(A, false); // A now holds the LU factors
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(getDnHandle(),
HIPBLAS_OP_N,
N, K,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info); // NOTE(review): *info never checked on the host
return B;
}
// BLAS op code used when applying Q^T/Q^H from a QR factorization:
// plain transpose for real types, conjugate transpose for complex types.
template<typename T>
hipblasOperation_t trans() { return HIPBLAS_OP_T; }
template<> hipblasOperation_t trans<cfloat>() { return HIPBLAS_OP_C; }
template<> hipblasOperation_t trans<cdouble>() { return HIPBLAS_OP_C; }
// Least-squares solve of a*x = b for non-square a (M x N) with K right-hand
// sides, via QR factorization (cusolverDn geqrf + ormqr/unmqr) and triangular
// solves. Underdetermined (M < N): minimum-norm solution through QR of
// transpose(a). Overdetermined (M > N): classic QR least squares.
// NOTE(review): the M == N case falls through and returns the empty array B;
// callers are expected to route square systems to generalSolve instead.
template<typename T>
Array<T> leastSquares(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> B = createEmptyArray<T>(dim4());
if (M < N) {
// Least squares for this case is solved using the following
// solve(A, B) == matmul(Q, Xpad);
// Where:
// Xpad == pad(Xt, N - M, 1);
// Xt == tri_solve(R1, B);
// R1 == R(seq(M), seq(M));
// transpose(A) == matmul(Q, R);
// QR is performed on the transpose of A
Array<T> A = transpose<T>(a, true);
B = padArray<T, T>(b, dim4(N, K), scalar<T>(0));
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); // Householder scalars (tau)
int *info = memAlloc<int>(1);
// Perform in-place QR factorization of A
CUSOLVER_CHECK(geqrf_solve_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// R1 = R(seq(M), seq(M));
A.resetDims(dim4(M, M));
// Bt = tri_solve(R1, B);
B.resetDims(dim4(M, K));
trsm<T>(A, B, AF_MAT_CTRANS, true, true, false);
// Bpad = pad(Bt, ..)
B.resetDims(dim4(N, K));
// matmul(Q, Bpad)
CUSOLVER_CHECK(mqr_solve_func<T>()(getDnHandle(),
HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N,
B.dims()[0],
B.dims()[1],
A.dims()[0],
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
memFree(workspace); // NOTE(review): leaks if a CUSOLVER_CHECK above throws
memFree(info);
} else if (M > N) {
// Least squares for this case is solved using the following
// solve(A, B) == tri_solve(R1, Bt);
// Where:
// R1 == R(seq(N), seq(N));
// Bt == matmul(transpose(Q1), B);
// Q1 == Q(span, seq(N));
// A == matmul(Q, R);
Array<T> A = copyArray<T>(a);
B = copyArray(b);
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); // Householder scalars (tau)
int *info = memAlloc<int>(1);
// Perform in-place QR factorization of A
CUSOLVER_CHECK(geqrf_solve_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// matmul(Q1, B)
CUSOLVER_CHECK(mqr_solve_func<T>()(getDnHandle(),
HIPBLAS_SIDE_LEFT,
trans<T>(),
M, K, N,
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
// tri_solve(R1, Bt)
A.resetDims(dim4(N, N));
B.resetDims(dim4(N, K));
trsm(A, B, AF_MAT_NONE, true, true, false);
memFree(workspace); // NOTE(review): leaks if a CUSOLVER_CHECK above throws
memFree(info);
}
return B;
}
// Solve a triangular system A*X = b with trsm. `options` selects the
// upper/lower triangle and whether the diagonal is implicitly unit.
template<typename T>
Array<T> triangleSolve(const Array<T> &A, const Array<T> &b, const af_mat_prop options)
{
Array<T> B = copyArray<T>(b); // trsm solves in place, so keep b intact
trsm(A, B,
AF_MAT_NONE, // transpose flag
options & AF_MAT_UPPER ? true : false,
true, // is_left
options & AF_MAT_DIAG_UNIT ? true : false);
return B;
}
// Entry point: dispatch on matrix properties and shape.
// Triangular flag -> direct trsm; square -> LU solve; rectangular -> QR
// least squares.
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
if (options & AF_MAT_UPPER ||
options & AF_MAT_LOWER) {
return triangleSolve<T>(a, b, options);
}
if(a.dims()[0] == a.dims()[1]) {
return generalSolve<T>(a, b);
} else {
return leastSquares<T>(a, b);
}
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
#elif defined(WITH_CPU_LINEAR_ALGEBRA)
#include<cpu_lapack/cpu_solve.hpp>
namespace cuda
{
// CPU-LAPACK fallback: forward LU-based solve to the CPU backend.
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
return cpu::solveLU(A, pivot, b, options);
}
// CPU-LAPACK fallback: forward the general solve to the CPU backend.
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
return cpu::solve(a, b, options);
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
#else
namespace cuda
{
// Stub used when no linear-algebra backend was configured at build time:
// always raises AF_ERR_NOT_CONFIGURED.
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
AF_ERROR("Linear Algebra is disabled on CUDA", // fixed typo: "diabled"
AF_ERR_NOT_CONFIGURED);
}
// Stub used when no linear-algebra backend was configured at build time:
// always raises AF_ERR_NOT_CONFIGURED.
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
AF_ERROR("Linear Algebra is disabled on CUDA", // fixed typo: "diabled"
AF_ERR_NOT_CONFIGURED);
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
#endif
| dd5f35ddf0cf30481ac845d1a5c2a575ff4e55fd.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <err_common.hpp>
#include <solve.hpp>
#if defined(WITH_CUDA_LINEAR_ALGEBRA)
#include <cusolverDnManager.hpp>
#include <cublas_v2.h>
#include <identity.hpp>
#include <iostream>
#include <memory.hpp>
#include <copy.hpp>
#include <transpose.hpp>
#include <math.hpp>
#include <err_common.hpp>
#include <blas.hpp>
#include <lu.hpp>
#include <qr.hpp>
#include <handle.hpp>
#include <cstdio>
namespace cuda
{
using cusolver::getDnHandle;
//cusolverStatus_t cusolverDn<>getrs(
// cusolverDnHandle_t handle,
// cublasOperation_t trans,
// int n, int nrhs,
// const <> *A, int lda,
// const int *devIpiv,
// <> *B, int ldb,
// int *devInfo );
// Function-pointer signature for cusolverDn<S|D|C|Z>getrs, parameterized on the
// element type so a single template can dispatch to the correct typed entry point.
template<typename T>
struct getrs_func_def_t
{
typedef cusolverStatus_t (*getrs_func_def) (
cusolverDnHandle_t,
cublasOperation_t,
int, int,
const T *, int,
const int *,
T *, int,
int *);
};
#define SOLVE_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func();
#define SOLVE_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \
SOLVE_FUNC_DEF( getrs )
SOLVE_FUNC(getrs , float , S)
SOLVE_FUNC(getrs , double , D)
SOLVE_FUNC(getrs , cfloat , C)
SOLVE_FUNC(getrs , cdouble, Z)
//cusolverStatus_t cusolverDn<>geqrf_bufferSize(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// int *Lwork );
//
//cusolverStatus_t cusolverDn<>geqrf(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A, int lda,
// <> *TAU,
// <> *Workspace,
// int Lwork, int *devInfo );
//
//cusolverStatus_t cusolverDn<>mqr(
// cusolverDnHandle_t handle,
// cublasSideMode_t side, cublasOperation_t trans,
// int m, int n, int k,
// const double *A, int lda,
// const double *tau,
// double *C, int ldc,
// double *work,
// int lwork, int *devInfo);
// Function-pointer signature for cusolverDn<t>geqrf (in-place QR factorization).
template<typename T>
struct geqrf_solve_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_func_def) (
cusolverDnHandle_t, int, int,
T *, int,
T *,
T *,
int, int *);
};
// Function-pointer signature for cusolverDn<t>geqrf_bufferSize (workspace query).
template<typename T>
struct geqrf_solve_buf_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_buf_func_def) (
cusolverDnHandle_t, int, int,
T *, int, int *);
};
// Function-pointer signature for cusolverDn<t>ormqr/unmqr (apply Q from a QR).
template<typename T>
struct mqr_solve_func_def_t
{
typedef cusolverStatus_t (*mqr_solve_func_def) (
cusolverDnHandle_t,
cublasSideMode_t, cublasOperation_t,
int, int, int,
const T *, int,
const T *,
T *, int,
T *, int,
int *);
};
#define QR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func(); \
\
template<typename T> \
static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func(); \
#define QR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX##FUNC; } \
\
template<> typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def FUNC##_solve_buf_func<TYPE>() \
{ return (FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; }
QR_FUNC_DEF( geqrf )
QR_FUNC(geqrf , float , S)
QR_FUNC(geqrf , double , D)
QR_FUNC(geqrf , cfloat , C)
QR_FUNC(geqrf , cdouble, Z)
#define MQR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func();
#define MQR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX; } \
MQR_FUNC_DEF( mqr )
MQR_FUNC(mqr , float , Sormqr)
MQR_FUNC(mqr , double , Dormqr)
MQR_FUNC(mqr , cfloat , Cunmqr)
MQR_FUNC(mqr , cdouble, Zunmqr)
// Solve A*X = B given an LU factorization of A and its pivot vector, via
// cusolverDn *getrs with no transpose. `options` is accepted for interface
// parity but unused on this path.
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
int N = A.dims()[0]; // order of the square system
int NRHS = b.dims()[1]; // number of right-hand sides
Array< T > B = copyArray<T>(b); // getrs overwrites B, so solve on a copy
int *info = memAlloc<int>(1); // device-side status word from cusolver
CUSOLVER_CHECK(getrs_func<T>()(getDnHandle(),
CUBLAS_OP_N,
N, NRHS,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info); // NOTE(review): *info is never copied back or inspected on the host
return B;
}
// Solve the square system a*X = b: LU-factorize a copy of `a` in place, then
// back-substitute with getrs. Both inputs are copied, so callers' arrays are
// untouched.
template<typename T>
Array<T> generalSolve(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0]; // NOTE(review): M is unused; square path assumes M == N
int N = a.dims()[1];
int K = b.dims()[1]; // number of right-hand sides
Array<T> A = copyArray<T>(a);
Array<T> B = copyArray<T>(b);
Array<int> pivot = lu_inplace(A, false); // A now holds the LU factors
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(getDnHandle(),
CUBLAS_OP_N,
N, K,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info); // NOTE(review): *info never checked on the host
return B;
}
// BLAS op code used when applying Q^T/Q^H from a QR factorization:
// plain transpose for real types, conjugate transpose for complex types.
template<typename T>
cublasOperation_t trans() { return CUBLAS_OP_T; }
template<> cublasOperation_t trans<cfloat>() { return CUBLAS_OP_C; }
template<> cublasOperation_t trans<cdouble>() { return CUBLAS_OP_C; }
// Least-squares solve of a*x = b for non-square a (M x N) with K right-hand
// sides, via QR factorization (cusolverDn geqrf + ormqr/unmqr) and triangular
// solves. Underdetermined (M < N): minimum-norm solution through QR of
// transpose(a). Overdetermined (M > N): classic QR least squares.
// NOTE(review): the M == N case falls through and returns the empty array B;
// callers are expected to route square systems to generalSolve instead.
template<typename T>
Array<T> leastSquares(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> B = createEmptyArray<T>(dim4());
if (M < N) {
// Least squares for this case is solved using the following
// solve(A, B) == matmul(Q, Xpad);
// Where:
// Xpad == pad(Xt, N - M, 1);
// Xt == tri_solve(R1, B);
// R1 == R(seq(M), seq(M));
// transpose(A) == matmul(Q, R);
// QR is performed on the transpose of A
Array<T> A = transpose<T>(a, true);
B = padArray<T, T>(b, dim4(N, K), scalar<T>(0));
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); // Householder scalars (tau)
int *info = memAlloc<int>(1);
// Perform in-place QR factorization of A
CUSOLVER_CHECK(geqrf_solve_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// R1 = R(seq(M), seq(M));
A.resetDims(dim4(M, M));
// Bt = tri_solve(R1, B);
B.resetDims(dim4(M, K));
trsm<T>(A, B, AF_MAT_CTRANS, true, true, false);
// Bpad = pad(Bt, ..)
B.resetDims(dim4(N, K));
// matmul(Q, Bpad)
CUSOLVER_CHECK(mqr_solve_func<T>()(getDnHandle(),
CUBLAS_SIDE_LEFT, CUBLAS_OP_N,
B.dims()[0],
B.dims()[1],
A.dims()[0],
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
memFree(workspace); // NOTE(review): leaks if a CUSOLVER_CHECK above throws
memFree(info);
} else if (M > N) {
// Least squares for this case is solved using the following
// solve(A, B) == tri_solve(R1, Bt);
// Where:
// R1 == R(seq(N), seq(N));
// Bt == matmul(transpose(Q1), B);
// Q1 == Q(span, seq(N));
// A == matmul(Q, R);
Array<T> A = copyArray<T>(a);
B = copyArray(b);
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); // Householder scalars (tau)
int *info = memAlloc<int>(1);
// Perform in-place QR factorization of A
CUSOLVER_CHECK(geqrf_solve_func<T>()(getDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// matmul(Q1, B)
CUSOLVER_CHECK(mqr_solve_func<T>()(getDnHandle(),
CUBLAS_SIDE_LEFT,
trans<T>(),
M, K, N,
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
// tri_solve(R1, Bt)
A.resetDims(dim4(N, N));
B.resetDims(dim4(N, K));
trsm(A, B, AF_MAT_NONE, true, true, false);
memFree(workspace); // NOTE(review): leaks if a CUSOLVER_CHECK above throws
memFree(info);
}
return B;
}
// Solve a triangular system A*X = b with trsm. `options` selects the
// upper/lower triangle and whether the diagonal is implicitly unit.
template<typename T>
Array<T> triangleSolve(const Array<T> &A, const Array<T> &b, const af_mat_prop options)
{
Array<T> B = copyArray<T>(b); // trsm solves in place, so keep b intact
trsm(A, B,
AF_MAT_NONE, // transpose flag
options & AF_MAT_UPPER ? true : false,
true, // is_left
options & AF_MAT_DIAG_UNIT ? true : false);
return B;
}
// Entry point: dispatch on matrix properties and shape.
// Triangular flag -> direct trsm; square -> LU solve; rectangular -> QR
// least squares.
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
if (options & AF_MAT_UPPER ||
options & AF_MAT_LOWER) {
return triangleSolve<T>(a, b, options);
}
if(a.dims()[0] == a.dims()[1]) {
return generalSolve<T>(a, b);
} else {
return leastSquares<T>(a, b);
}
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
#elif defined(WITH_CPU_LINEAR_ALGEBRA)
#include<cpu_lapack/cpu_solve.hpp>
namespace cuda
{
// CPU-LAPACK fallback: forward LU-based solve to the CPU backend.
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
return cpu::solveLU(A, pivot, b, options);
}
// CPU-LAPACK fallback: forward the general solve to the CPU backend.
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
return cpu::solve(a, b, options);
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
#else
namespace cuda
{
// Stub used when no linear-algebra backend was configured at build time:
// always raises AF_ERR_NOT_CONFIGURED.
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
AF_ERROR("Linear Algebra is disabled on CUDA", // fixed typo: "diabled"
AF_ERR_NOT_CONFIGURED);
}
// Stub used when no linear-algebra backend was configured at build time:
// always raises AF_ERR_NOT_CONFIGURED.
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
AF_ERROR("Linear Algebra is disabled on CUDA", // fixed typo: "diabled"
AF_ERR_NOT_CONFIGURED);
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
#endif
|
d6d9594512af0ba678d2dab3a56c00f09abdb031.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define SIZE 1024
#define TILE_WIDTH 64
#include <iostream>
#include <cassert>
#include <stdio.h>
// Owning pair of host/device buffers for an n x n matrix, with blocking
// copy helpers. NOTE(review): malloc/hipMalloc results are unchecked, and the
// struct defines a destructor but no copy ctor/assignment, so copying it
// would double-free both buffers — confirm it is never copied.
template <typename T>
struct Mat{
public:
T *h_x; // host buffer, n*n elements
T *d_x; // device buffer, n*n elements
int n; // matrix dimension (square)
Mat(size_t n_): n(n_)
{
// assert(typename(T)==int or double or float)
// figure this out
h_x = (T *) malloc(sizeof(T)*n*n);
hipMalloc((void**)&d_x, sizeof(T)*n*n);
}
// Blocking copy host -> device.
void cudaMemcpyH2D(){
hipMemcpy(d_x, h_x, sizeof(T)*n*n, hipMemcpyHostToDevice);
}
// Blocking copy device -> host.
void cudaMemcpyD2H(){
hipMemcpy(h_x, d_x, sizeof(T)*n*n, hipMemcpyDeviceToHost);
}
~Mat(){
free(h_x);
hipFree(d_x);
}
};
// Fill a and b with pseudo-random values in [0, 1] and zero-initialize c.
// All three buffers must hold size*size floats; rand() is consumed in
// row-major order, a then b for each element.
void initialize(float *a, float *b, float *c, int size){
assert((a!=NULL)&&(b!=NULL)&&(c!=NULL));
assert((a+size*size-1!=NULL)&&(b+size*size-1!=NULL)&&(c+size*size-1!=NULL));
const int total = size*size;
for(int idx=0; idx<total; idx++){
a[idx] = ((float)rand())/RAND_MAX;
b[idx] = ((float)rand())/RAND_MAX;
c[idx] = 0.0f;
}
}
// CPU reference: c += a * b for size x size row-major matrices.
// Note this ACCUMULATES into c; callers are expected to zero it first.
void matmul_host(float *a, float *b, float *c, int size){
assert((a!=NULL)&&(b!=NULL)&&(c!=NULL));
assert((a+size*size-1!=NULL)&&(b+size*size-1!=NULL)&&(c+size*size-1!=NULL));
for(int row=0; row<size; row++){
for(int col=0; col<size; col++){
float &acc = c[size*row+col]; // accumulate in place, same rounding as before
for(int k=0; k<size; k++){
acc += a[size*row+k]*b[size*k+col];
}
}
}
}
// Print a size x size row-major matrix to stdout, space-separated, one row
// per line.
void print(float *A, int size){
assert((A!=NULL) && (A+size*size-1)!=NULL);
for (int i=0; i<size; i++){
for(int j=0; j<size; j++){
std::cout << A[size*i+j];
if(j!=size-1){
std::cout << " ";
}
}
std::cout << std::endl;
}
}
// Print all three size x size matrices (A, B, C) with labels, for debugging.
void print(float *a, float *b, float *c, int size){
assert((a!=NULL)&&(b!=NULL)&&(c!=NULL));
assert((a+size*size-1!=NULL)&&(b+size*size-1!=NULL)&&(c+size*size-1!=NULL));
std::cout << "A=" << std::endl;
print(a, size);
std::cout << "B=" << std::endl;
print(b, size);
std::cout << "C=" << std::endl;
print(c, size);
}
//naive
__global__ void matmul_device(float *a, float *b, float *c, int size){
assert((a!=NULL)&&(b!=NULL)&&(c!=NULL));
assert((a+size*size-1!=NULL)&&(b+size*size-1!=NULL)&&(c+size*size-1!=NULL));
int tx = blockIdx.x*TILE_WIDTH + threadIdx.x;
int ty = blockIdx.y*TILE_WIDTH + threadIdx.y;
float p = 0;
while((tx<size)&&(ty<size)){
for(int k=0; k<size; k++){
p += a[size*ty+k]*b[size*k+tx];
}
c[ty+size*tx]=p;
}
}
// Driver: allocate host/device SIZE x SIZE matrices, run the GPU matmul,
// copy the result back, and free everything.
int main(int argc, char **argv){
float *a, *b, *c;
float *a_d, *b_d, *c_d;
a = (float *) malloc(SIZE*SIZE*sizeof(float));
b = (float *) malloc(SIZE*SIZE*sizeof(float));
c = (float *) malloc(SIZE*SIZE*sizeof(float));
initialize(a,b,c,SIZE);
// matmul_host(a, b, c, SIZE);
std::cout << "HOST SUCCESS " << std::endl;
// print(c, SIZE);
hipMalloc((void **)&a_d,SIZE*SIZE*sizeof(float));
hipMalloc((void **)&b_d,SIZE*SIZE*sizeof(float));
hipMalloc((void **)&c_d,SIZE*SIZE*sizeof(float));
hipMemcpy(a_d, a, SIZE*SIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(b_d, b, SIZE*SIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(c_d, c, SIZE*SIZE*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(SIZE/TILE_WIDTH, SIZE/TILE_WIDTH);
// NOTE(review): TILE_WIDTH is 64, so this requests 64*64 = 4096 threads per
// block, above the usual 1024-thread limit — the launch likely fails, and no
// error check or device synchronization follows, so the failure is silent.
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
hipLaunchKernelGGL(( matmul_device), dim3(dimGrid), dim3(dimBlock), 0, 0, a_d, b_d, c_d, SIZE);
std::cout << "DEVICE SUCCESS " << std::endl;
unsigned int data_size;
// NOTE(review): pow() returns double; the truncating conversion here is
// intentional-looking but loses sub-MB precision.
data_size = sizeof(float)*SIZE*SIZE/pow(2,20);
std::cout << "Size of matrix in MB " << data_size << std::endl;
hipMemcpy(c, c_d, SIZE*SIZE*sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(a, a_d, SIZE*SIZE*sizeof(float), hipMemcpyDeviceToHost);
// print(c,SIZE);
free(a);
free(b);
free(c);
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
return 0;
}
| d6d9594512af0ba678d2dab3a56c00f09abdb031.cu | #define SIZE 1024
#define TILE_WIDTH 64
#include <iostream>
#include <cassert>
#include <stdio.h>
// Owning pair of host/device buffers for an n x n matrix, with blocking
// copy helpers. NOTE(review): malloc/cudaMalloc results are unchecked, and the
// struct defines a destructor but no copy ctor/assignment, so copying it
// would double-free both buffers — confirm it is never copied.
template <typename T>
struct Mat{
public:
T *h_x; // host buffer, n*n elements
T *d_x; // device buffer, n*n elements
int n; // matrix dimension (square)
Mat(size_t n_): n(n_)
{
// assert(typename(T)==int or double or float)
// figure this out
h_x = (T *) malloc(sizeof(T)*n*n);
cudaMalloc((void**)&d_x, sizeof(T)*n*n);
}
// Blocking copy host -> device.
void cudaMemcpyH2D(){
cudaMemcpy(d_x, h_x, sizeof(T)*n*n, cudaMemcpyHostToDevice);
}
// Blocking copy device -> host.
void cudaMemcpyD2H(){
cudaMemcpy(h_x, d_x, sizeof(T)*n*n, cudaMemcpyDeviceToHost);
}
~Mat(){
free(h_x);
cudaFree(d_x);
}
};
// Fill a and b with pseudo-random values in [0, 1] and zero-initialize c.
// All three buffers must hold size*size floats; rand() is consumed in
// row-major order, a then b for each element.
void initialize(float *a, float *b, float *c, int size){
assert((a!=NULL)&&(b!=NULL)&&(c!=NULL));
assert((a+size*size-1!=NULL)&&(b+size*size-1!=NULL)&&(c+size*size-1!=NULL));
const int total = size*size;
for(int idx=0; idx<total; idx++){
a[idx] = ((float)rand())/RAND_MAX;
b[idx] = ((float)rand())/RAND_MAX;
c[idx] = 0.0f;
}
}
// CPU reference: c += a * b for size x size row-major matrices.
// Note this ACCUMULATES into c; callers are expected to zero it first.
void matmul_host(float *a, float *b, float *c, int size){
assert((a!=NULL)&&(b!=NULL)&&(c!=NULL));
assert((a+size*size-1!=NULL)&&(b+size*size-1!=NULL)&&(c+size*size-1!=NULL));
for(int row=0; row<size; row++){
for(int col=0; col<size; col++){
float &acc = c[size*row+col]; // accumulate in place, same rounding as before
for(int k=0; k<size; k++){
acc += a[size*row+k]*b[size*k+col];
}
}
}
}
// Print a size x size row-major matrix to stdout, space-separated, one row
// per line.
void print(float *A, int size){
assert((A!=NULL) && (A+size*size-1)!=NULL);
for (int i=0; i<size; i++){
for(int j=0; j<size; j++){
std::cout << A[size*i+j];
if(j!=size-1){
std::cout << " ";
}
}
std::cout << std::endl;
}
}
// Print all three size x size matrices (A, B, C) with labels, for debugging.
void print(float *a, float *b, float *c, int size){
assert((a!=NULL)&&(b!=NULL)&&(c!=NULL));
assert((a+size*size-1!=NULL)&&(b+size*size-1!=NULL)&&(c+size*size-1!=NULL));
std::cout << "A=" << std::endl;
print(a, size);
std::cout << "B=" << std::endl;
print(b, size);
std::cout << "C=" << std::endl;
print(c, size);
}
//naive
// Naive matmul kernel: one thread computes one element of C = A * B
// (row-major, size x size). Expects a 2D launch whose blocks are
// TILE_WIDTH x TILE_WIDTH threads.
__global__ void matmul_device(float *a, float *b, float *c, int size){
int col = blockIdx.x*TILE_WIDTH + threadIdx.x;
int row = blockIdx.y*TILE_WIDTH + threadIdx.y;
// Fix: this was `while(...)` with no exit, hanging every in-bounds thread.
if((col<size)&&(row<size)){
float p = 0;
for(int k=0; k<size; k++){
p += a[size*row+k]*b[size*k+col];
}
// Fix: was c[row + size*col], which stored the TRANSPOSE of the
// CPU reference result (matmul_host writes c[size*i+j]).
c[size*row+col] = p;
}
}
// Driver: allocate host/device SIZE x SIZE matrices, run the GPU matmul,
// copy the result back, and free everything.
int main(int argc, char **argv){
float *a, *b, *c;
float *a_d, *b_d, *c_d;
a = (float *) malloc(SIZE*SIZE*sizeof(float));
b = (float *) malloc(SIZE*SIZE*sizeof(float));
c = (float *) malloc(SIZE*SIZE*sizeof(float));
initialize(a,b,c,SIZE);
// matmul_host(a, b, c, SIZE);
std::cout << "HOST SUCCESS " << std::endl;
// print(c, SIZE);
cudaMalloc((void **)&a_d,SIZE*SIZE*sizeof(float));
cudaMalloc((void **)&b_d,SIZE*SIZE*sizeof(float));
cudaMalloc((void **)&c_d,SIZE*SIZE*sizeof(float));
cudaMemcpy(a_d, a, SIZE*SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, SIZE*SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c, SIZE*SIZE*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(SIZE/TILE_WIDTH, SIZE/TILE_WIDTH);
// NOTE(review): TILE_WIDTH is 64, so this requests 64*64 = 4096 threads per
// block, above the usual 1024-thread limit — the launch likely fails, and no
// cudaGetLastError/cudaDeviceSynchronize follows, so the failure is silent.
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
matmul_device<<<dimGrid, dimBlock>>>(a_d, b_d, c_d, SIZE);
std::cout << "DEVICE SUCCESS " << std::endl;
unsigned int data_size;
// NOTE(review): pow() returns double; the truncating conversion here is
// intentional-looking but loses sub-MB precision.
data_size = sizeof(float)*SIZE*SIZE/pow(2,20);
std::cout << "Size of matrix in MB " << data_size << std::endl;
cudaMemcpy(c, c_d, SIZE*SIZE*sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(a, a_d, SIZE*SIZE*sizeof(float), cudaMemcpyDeviceToHost);
// print(c,SIZE);
free(a);
free(b);
free(c);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
return 0;
}
|
158b8e280d9476a5f12538a1bc072e8e618064fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Filters
//
// Includes: system
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/io.h>
#include <cutil_inline.h>
// Includes: local
#include "bmp.h"
enum {SOBEL_FILTER, AVERAGE_FILTER, HIGH_BOOST_FILTER};
#define CLAMP_8bit(x) max(0, min(1023, (x)))//1023
char *BMPInFile = "dublin.bmp";//lena/dublin
char *BMPOutFile = "output.bmp";
char *Filter = "average";
int FilterMode = AVERAGE_FILTER;
// create and stat timer as unsigned interger
unsigned int timer_CPU =0;
unsigned int timer_GPU =0;
// Functions
void Cleanup(void);
void ParseArguments(int, char**);
void FilterWrapper(unsigned char* pImageIn, int Width, int Height);
// Kernels
__global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
//__global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
//__global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
/* Device Memory */
unsigned char *d_In;
unsigned char *d_Out;
// Setup for kernel size
const int TILE_WIDTH = 6;
const int TILE_HEIGHT = 6;
const int FILTER_RADIUS = 1;
// const int FILTER_RADIUS = 3;
const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1;
const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER;
const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS;
const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS;
const int EDGE_VALUE_THRESHOLD = 70;
const int HIGH_BOOST_FACTOR = 10;
#include "filter_kernel.hip"
// Read an 8bpp BMP: fills *bmp and *dib headers, mallocs and fills *palete
// (when the header says one exists) and *data (pixel bytes). The caller owns
// both malloc'd buffers. FATAL() aborts on any short read or open failure.
void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete)
{
size_t palete_size;
int fd;
if((fd = open(file, O_RDONLY )) < 0)
FATAL("Open Source");
if(read(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Read BMP Header");
if(read(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Read DIB Header");
assert(dib->bpp == 8); // only 8-bit grayscale/paletted images are supported
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; // bytes between headers and pixels
if(palete_size > 0) {
*palete = (unsigned char *)malloc(palete_size);
int go = read(fd, *palete, palete_size); // NOTE(review): int vs size_t comparison below
if (go != palete_size) {
FATAL("Read Palete");
}
}
*data = (unsigned char *)malloc(dib->image_size);
if(read(fd, *data, dib->image_size) != dib->image_size)
FATAL("Read Image");
close(fd);
}
// Write a BMP: emits the bmp/dib headers previously read by BitMapRead, the
// palette (if the headers imply one), then the pixel data. FATAL() aborts on
// any short write or open failure.
void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete)
{
size_t palete_size;
int fd;
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; // same layout math as BitMapRead
if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR |S_IRGRP)) < 0)
FATAL("Open Destination");
if(write(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Write BMP Header");
if(write(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Write BMP Header");
if(palete_size != 0) {
if(write(fd, palete, palete_size) != palete_size)
FATAL("Write Palete");
}
if(write(fd, data, dib->image_size) != dib->image_size)
FATAL("Write Image");
close(fd);
}
// CPU reference: box (average) filter with radius FILTER_RADIUS over a
// width x height 8-bit grayscale image. Border pixels without a complete
// neighborhood are left at 0. imageIn/imageOut each hold width*height bytes.
// Changes vs. original: removed the unused SobelMatrix and dead locals, and
// generalized the hard-coded 1-pixel border to FILTER_RADIUS (identical
// output while FILTER_RADIUS == 1, correct for larger radii too).
void CPU_Avg(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
// Zero the whole output so the untouched border is well-defined.
for(int i=0; i<height; i++) {
for(int j=0; j<width; j++) {
imageOut[i*width + j] = 0;
}
}
// Filter only positions whose full neighborhood lies inside the image.
for(int i=FILTER_RADIUS; i<height-FILTER_RADIUS; i++) {
for(int j=FILTER_RADIUS; j<width-FILTER_RADIUS; j++) {
float sum = 0.0f;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
sum += (float)(imageIn[i*width + j + (dy * width + dx)]);
}
}
imageOut[i*width + j] = static_cast<unsigned char>(sum / FILTER_AREA);
}
}
}
// Host code
int main(int argc, char** argv)
{
ParseArguments(argc, argv);
struct bmp_header bmp;
struct dib_header dib;
unsigned char *palete = NULL;
unsigned char *data = NULL, *out = NULL;
printf("Running %s filter\n", Filter);
BitMapRead(BMPInFile, &bmp, &dib, &data, &palete);
out = (unsigned char *)malloc(dib.image_size);
printf("Computing the CPU output\n");
printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Intialize the timer to zero cycles
cutilCheckError(cutCreateTimer(&timer_CPU));
cutilCheckError(cutCreateTimer(&timer_GPU));
/////////////////////////////// cpu sobel call start here /////////////////////////////////////////////////
// Start the CPU timer////
cutilCheckError(cutStartTimer(timer_CPU));
CPU_Avg(data, out, dib.width, dib.height);
// stop CPU timer ///
cutilCheckError(cutStopTimer(timer_CPU));
////////////// cpu sobel call end here ///////////////////////////////////////////////
BitMapWrite("CPU_Avg.bmp", &bmp, &dib, out, palete);
printf("Done with CPU output\n");
printf("Allocating %d bytes for image \n", dib.image_size);
cutilSafeCall( hipMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) );
cutilSafeCall( hipMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) );
hipMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), hipMemcpyHostToDevice);
/////////////////////// calling kernel here ////////////////////////////////////////////////////
// Start the GPU timer////
cutilCheckError(cutStartTimer(timer_GPU));
FilterWrapper(data, dib.width, dib.height);
// stop GPU timer ///
cutilCheckError(cutStopTimer(timer_GPU));
//////////////////////////////// kernel call end //////////////////////////////////////////////
// Copy image back to host
hipMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), hipMemcpyDeviceToHost);
// Write output image
BitMapWrite(BMPOutFile, &bmp, &dib, out, palete);
// print timers
printf ("CPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_CPU));
printf ("GPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_GPU));
Cleanup();
}
void Cleanup(void)
{
//Destroy (Free) timer
cutilCheckError(cutDeleteTimer(timer_CPU));
cutilCheckError(cutDeleteTimer(timer_GPU));
cutilSafeCall( hipDeviceReset() );
exit(0);
}
void FilterWrapper(unsigned char* pImageIn, int Width, int Height)
{
// Design grid disection around tile size
int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT;
dim3 dimGrid(gridWidth, gridHeight);
// But actually invoke larger blocks to take care of surrounding shared memory
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
switch(FilterMode) {
/* case SOBEL_FILTER:
printf("Sobel Filter \n");
SobelFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;*/
case AVERAGE_FILTER:
printf("Average Filter \n");
hipLaunchKernelGGL(( AverageFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
/* case HIGH_BOOST_FILTER:
printf("Boost Filter \n");
HighBoostFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;*/
}
cutilSafeCall( hipDeviceSynchronize() );
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) {
BMPInFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) {
BMPOutFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) {
Filter = argv[i+1];
i = i + 1;
if (strcmp(Filter, "sobel") == 0)
FilterMode = SOBEL_FILTER;
else if (strcmp(Filter, "average") == 0)
FilterMode = AVERAGE_FILTER;
else if (strcmp(Filter, "boost") == 0)
FilterMode = HIGH_BOOST_FILTER;
}
}
}
| 158b8e280d9476a5f12538a1bc072e8e618064fb.cu | //
// Filters
//
// Includes: system
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/io.h>
#include <cutil_inline.h>
// Includes: local
#include "bmp.h"
enum {SOBEL_FILTER, AVERAGE_FILTER, HIGH_BOOST_FILTER};
#define CLAMP_8bit(x) max(0, min(1023, (x)))//1023
char *BMPInFile = "dublin.bmp";//lena/dublin
char *BMPOutFile = "output.bmp";
char *Filter = "average";
int FilterMode = AVERAGE_FILTER;
// create and stat timer as unsigned interger
unsigned int timer_CPU =0;
unsigned int timer_GPU =0;
// Functions
void Cleanup(void);
void ParseArguments(int, char**);
void FilterWrapper(unsigned char* pImageIn, int Width, int Height);
// Kernels
__global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
//__global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
//__global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
/* Device Memory */
unsigned char *d_In;
unsigned char *d_Out;
// Setup for kernel size
const int TILE_WIDTH = 6;
const int TILE_HEIGHT = 6;
const int FILTER_RADIUS = 1;
// const int FILTER_RADIUS = 3;
const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1;
const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER;
const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS;
const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS;
const int EDGE_VALUE_THRESHOLD = 70;
const int HIGH_BOOST_FACTOR = 10;
#include "filter_kernel.cu"
void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete)
{
size_t palete_size;
int fd;
if((fd = open(file, O_RDONLY )) < 0)
FATAL("Open Source");
if(read(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Read BMP Header");
if(read(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Read DIB Header");
assert(dib->bpp == 8);
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if(palete_size > 0) {
*palete = (unsigned char *)malloc(palete_size);
int go = read(fd, *palete, palete_size);
if (go != palete_size) {
FATAL("Read Palete");
}
}
*data = (unsigned char *)malloc(dib->image_size);
if(read(fd, *data, dib->image_size) != dib->image_size)
FATAL("Read Image");
close(fd);
}
void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete)
{
size_t palete_size;
int fd;
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR |S_IRGRP)) < 0)
FATAL("Open Destination");
if(write(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Write BMP Header");
if(write(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Write BMP Header");
if(palete_size != 0) {
if(write(fd, palete, palete_size) != palete_size)
FATAL("Write Palete");
}
if(write(fd, data, dib->image_size) != dib->image_size)
FATAL("Write Image");
close(fd);
}
void CPU_Avg(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
int i, j, rows, cols, startCol, endCol, startRow, endRow;
const float SobelMatrix[9] = {-1,0,1,-2,0,2,-1,0,1};
rows = height;
cols = width;
// Initialize all output pixels to zero
for(i=0; i<rows; i++) {
for(j=0; j<cols; j++) {
imageOut[i*width + j] = 0;
}
}
startCol = 1;
endCol = cols - 1;
startRow = 1;
endRow = rows - 1;
// Go through all inner pizel positions
for(i=startRow; i<endRow; i++) {
for(j=startCol; j<endCol; j++) {
// sum up the 9 values to calculate both the direction x and direction y
//float sumX = 0, sumY=0;
float sum =0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]);
// sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)];
// sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)];
sum += Pixel;
}
}
// imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 1023 : 0;//255: 0/ 1023:0
imageOut[i*width + j] =static_cast<unsigned char>( sum/FILTER_AREA);//255: 0/ 1023:0
}
}
}
// Host code
int main(int argc, char** argv)
{
ParseArguments(argc, argv);
struct bmp_header bmp;
struct dib_header dib;
unsigned char *palete = NULL;
unsigned char *data = NULL, *out = NULL;
printf("Running %s filter\n", Filter);
BitMapRead(BMPInFile, &bmp, &dib, &data, &palete);
out = (unsigned char *)malloc(dib.image_size);
printf("Computing the CPU output\n");
printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Intialize the timer to zero cycles
cutilCheckError(cutCreateTimer(&timer_CPU));
cutilCheckError(cutCreateTimer(&timer_GPU));
/////////////////////////////// cpu sobel call start here /////////////////////////////////////////////////
// Start the CPU timer////
cutilCheckError(cutStartTimer(timer_CPU));
CPU_Avg(data, out, dib.width, dib.height);
// stop CPU timer ///
cutilCheckError(cutStopTimer(timer_CPU));
////////////// cpu sobel call end here ///////////////////////////////////////////////
BitMapWrite("CPU_Avg.bmp", &bmp, &dib, out, palete);
printf("Done with CPU output\n");
printf("Allocating %d bytes for image \n", dib.image_size);
cutilSafeCall( cudaMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) );
cutilSafeCall( cudaMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) );
cudaMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), cudaMemcpyHostToDevice);
/////////////////////// calling kernel here ////////////////////////////////////////////////////
// Start the GPU timer////
cutilCheckError(cutStartTimer(timer_GPU));
FilterWrapper(data, dib.width, dib.height);
// stop GPU timer ///
cutilCheckError(cutStopTimer(timer_GPU));
//////////////////////////////// kernel call end //////////////////////////////////////////////
// Copy image back to host
cudaMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// Write output image
BitMapWrite(BMPOutFile, &bmp, &dib, out, palete);
// print timers
printf ("CPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_CPU));
printf ("GPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_GPU));
Cleanup();
}
void Cleanup(void)
{
//Destroy (Free) timer
cutilCheckError(cutDeleteTimer(timer_CPU));
cutilCheckError(cutDeleteTimer(timer_GPU));
cutilSafeCall( cudaThreadExit() );
exit(0);
}
void FilterWrapper(unsigned char* pImageIn, int Width, int Height)
{
// Design grid disection around tile size
int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT;
dim3 dimGrid(gridWidth, gridHeight);
// But actually invoke larger blocks to take care of surrounding shared memory
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
switch(FilterMode) {
/* case SOBEL_FILTER:
printf("Sobel Filter \n");
SobelFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;*/
case AVERAGE_FILTER:
printf("Average Filter \n");
AverageFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
/* case HIGH_BOOST_FILTER:
printf("Boost Filter \n");
HighBoostFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;*/
}
cutilSafeCall( cudaThreadSynchronize() );
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) {
BMPInFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) {
BMPOutFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) {
Filter = argv[i+1];
i = i + 1;
if (strcmp(Filter, "sobel") == 0)
FilterMode = SOBEL_FILTER;
else if (strcmp(Filter, "average") == 0)
FilterMode = AVERAGE_FILTER;
else if (strcmp(Filter, "boost") == 0)
FilterMode = HIGH_BOOST_FILTER;
}
}
}
|
3d04a16cd30854dee63115f2ef7a2c72bd4f5688.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "solver.cuh"
__device__ void update_score(Individu *individu) {
double score = 0.f;
int prev_index = individu->pathIndexes[0];
for(int i = 1; i < N_CITIES; i++) {
int current_index = individu->pathIndexes[i];
score += pow(cities[current_index][0] - cities[prev_index][0], 2) + pow(cities[current_index][1] - cities[prev_index][1], 2);
prev_index = current_index;
}
individu->score = (float)score;
}
__device__ bool is_gonna_die(hiprandState_t *state){
float position = 1 - ((float)(threadIdx.x) / (blockDim.x - 1)); //first thread is 1.0, last is 0.0
float powK = pow(position, PROBA_K);
float probaToDie = 0.75f * powK;//(powK - (powK / (PROBA_K))) / PROBA_K;
return hiprand_uniform(state) < probaToDie;
}
__device__ bool is_mutating(hiprandState_t *state){
return hiprand_uniform(state) < PROBA_MUTATION;
}
__device__ void random_init(Individu *individu, hiprandState_t *state){
bool used[N_CITIES] = {false};
for (int i = 0 ; i < N_CITIES ; i++) {
unsigned short index = (unsigned short)(hiprand_uniform(state) * N_CITIES);
while (used[index])
index = (unsigned short)((index + 1) % N_CITIES);
used[index] = true;
individu->pathIndexes[i] = index;
}
}
__device__ Individu select_migrant(Individu *migrants, hiprandState_t *state) {
unsigned short index = (unsigned short)(hiprand_uniform(state) * N_ISLAND);
if (index == blockIdx.x)
index = (unsigned short)((index + 1) % N_ISLAND);
return migrants[index];
}
__device__ void select_mutation(hiprandState_t *state, unsigned short *mutation) {
mutation[0] = (unsigned short)(hiprand_uniform(state) * N_CITIES);
mutation[1] = (unsigned short)(hiprand_uniform(state) * N_CITIES);
if (mutation[1] == mutation[0])
mutation[1] = (unsigned short)((mutation[1] + 1) % N_CITIES);
}
__device__ void select_parents(hiprandState_t *state, int *parents, int numbersOfParents) {
int current_parent = 0;
while (current_parent < numbersOfParents) {
for(int i = blockDim.x - 1; i >= 0; --i) {
if(hiprand_uniform(state) < PROBA_SELECTION) {
parents[current_parent++] = i;
break;
}
}
}
}
__device__ void mix_parents(Individu *population, hiprandState_t *state, int replacedIndex, int *parents, int numbersOfParents) {
int chunkSize = ceil((float)N_CITIES / numbersOfParents);
int taken;
for (int citiesCount = 0 ; citiesCount < N_CITIES ; citiesCount += taken) {
int selected_parent = parents[hiprand(state) % numbersOfParents];//(chunkSize * 2)
taken = hiprand(state) % (chunkSize * 2);
if(citiesCount + taken > N_CITIES)
taken = N_CITIES - citiesCount; // si on dpasse, on prend le reste
for(int i = citiesCount; i < citiesCount + taken; ++i) {
population[replacedIndex].pathIndexes[i] = population[selected_parent].pathIndexes[i];
}
}
}
__device__ void swap_cities(Individu *ind, unsigned short *citiesIndex){
ind->pathIndexes[citiesIndex[0]] ^= ind->pathIndexes[citiesIndex[1]];
ind->pathIndexes[citiesIndex[1]] ^= ind->pathIndexes[citiesIndex[0]];
ind->pathIndexes[citiesIndex[0]] ^= ind->pathIndexes[citiesIndex[1]];
}
__device__ void print_path(Individu ind) {
for(int i = 0; i < N_CITIES; i++) {
printf("%2hu ", ind.pathIndexes[i]);
}
printf("\n");
}
__device__ void delete_doublons(Individu *population, bool *isDoublon, int *isUnseen, int tailleBloc, int indexDebutBloc) {
__shared__ int sem;
for(int currentIndividu = 0; currentIndividu < blockDim.x; ++currentIndividu) {
if(!population[currentIndividu].isGonnaDie)
continue;
if(threadIdx.x == 0)
sem = 0;
__syncthreads();
// Rinitialisation de isDoublon
for(int cityToCheck = indexDebutBloc; cityToCheck < indexDebutBloc + tailleBloc && cityToCheck < N_CITIES; ++cityToCheck) {
isDoublon[cityToCheck] = false;
}
__syncthreads(); // Every thread delete double in currentIndividu
for(int cityToCheck = indexDebutBloc; cityToCheck < indexDebutBloc + tailleBloc && cityToCheck < N_CITIES; ++cityToCheck) {
bool seen = false;
for(int currentCity = 0; currentCity < N_CITIES; ++currentCity) {
if(population[currentIndividu].pathIndexes[currentCity] == cityToCheck) {
if(seen)
isDoublon[currentCity] = true;
else
seen = true;
}
}
// Les threads peuvent s'occuper de 0 ou plusieurs villes
if(seen == false) {
int it = atomicAdd(&sem, 1);
isUnseen[it] = cityToCheck;
}
}
//TODO : shuffle unSeen ?
__syncthreads();
/*
//AFFICHAGE
if(threadIdx.x == 0) {
printf("\nIndividu %d\n", currentIndividu);
printPath(population[currentIndividu]);
for(int i = 0; i < N_CITIES; ++i) {
printf("%2d ", isDoublon[i]);
}
printf("\n");
for(int i = 0; i < N_CITIES; ++i) {
printf("%2d ", isUnseen[i]);
}
}
*/
// Both table are fully initialized, we replace all double
if(threadIdx.x == 0)
sem = 0;
__syncthreads();
for(int cityToCheck = indexDebutBloc; cityToCheck < indexDebutBloc + tailleBloc && cityToCheck < N_CITIES; ++cityToCheck) {
if(isDoublon[cityToCheck]) {
int it = atomicAdd(&sem, 1);
population[currentIndividu].pathIndexes[cityToCheck] = isUnseen[it];
}
}
}
}
__device__ void loop_generations(Individu *population, Individu *migrants, hiprandState_t *state, bool *isDoublon, int *isUnseen) {
int tailleBloc = ceil((float)N_CITIES / blockDim.x);
int indexDebutBloc = threadIdx.x * tailleBloc;
// Main generation loop
for(int i = 0; i < N_GENERATION ; i++) {
__syncthreads();
if (threadIdx.x == 0) {
migrants[blockIdx.x] = population[blockDim.x-1]; //export migrant
population[0] = select_migrant(migrants, state); //import migrant
}
population[threadIdx.x].isGonnaDie = false;
if(is_gonna_die(state)) {
population[threadIdx.x].isGonnaDie = true; // TODO : sync with atomicadd instead of struct member
int parents[3];
select_parents(state, parents, 3);
mix_parents(population, state, threadIdx.x, parents, 3);
} else if(is_mutating(state)) {
// printf("%d is mutating.\n", threadIdx.x);
unsigned short citiesToBeExchanged[2];
select_mutation(state, citiesToBeExchanged);
swap_cities(population + threadIdx.x, citiesToBeExchanged);
update_score(&population[threadIdx.x]);
}
__syncthreads();
delete_doublons(population, isDoublon, isUnseen, tailleBloc, indexDebutBloc);
if(population[threadIdx.x].isGonnaDie)
update_score(&population[threadIdx.x]);
__syncthreads();
merge_sort(population);
if (threadIdx.x == blockDim.x-1) {
printf("Best individual for island %d, generation %d, scores %f\n", blockIdx.x, i, population[blockDim.x-1].score);
}
//TODO replace with better specialized sort
}
}
__global__ void solve(Individu *migrants, int *g_paths) {
extern __shared__ Individu mem[];
Individu *population = mem;
int *isUnseen = (int *)&population[blockDim.x];
bool *isDoublon = (bool *)&isUnseen[N_CITIES];
hiprandState_t state;
hiprand_init(blockIdx.x * blockDim.x + threadIdx.x, 0, 0, &state);
random_init(population + threadIdx.x, &state);
update_score(population + threadIdx.x);
if (threadIdx.x == 0) {
//fill this block's migrant as soon as possible to be sure first migrant selection from another island won't get an uninitialized individual
migrants[blockIdx.x] = population[0];
}
__syncthreads();
merge_sort(population);
loop_generations(population, migrants, &state, isDoublon, isUnseen);
__syncthreads();
if(threadIdx.x == 0) {
for(int i = 0; i < N_CITIES; ++i) {
g_paths[blockIdx.x * N_CITIES + i] = population[blockDim.x - 1].pathIndexes[i];
}
}
} | 3d04a16cd30854dee63115f2ef7a2c72bd4f5688.cu | #include "solver.cuh"
__device__ void update_score(Individu *individu) {
double score = 0.f;
int prev_index = individu->pathIndexes[0];
for(int i = 1; i < N_CITIES; i++) {
int current_index = individu->pathIndexes[i];
score += pow(cities[current_index][0] - cities[prev_index][0], 2) + pow(cities[current_index][1] - cities[prev_index][1], 2);
prev_index = current_index;
}
individu->score = (float)score;
}
__device__ bool is_gonna_die(curandState_t *state){
float position = 1 - ((float)(threadIdx.x) / (blockDim.x - 1)); //first thread is 1.0, last is 0.0
float powK = pow(position, PROBA_K);
float probaToDie = 0.75f * powK;//(powK - (powK / (PROBA_K))) / PROBA_K;
return curand_uniform(state) < probaToDie;
}
__device__ bool is_mutating(curandState_t *state){
return curand_uniform(state) < PROBA_MUTATION;
}
__device__ void random_init(Individu *individu, curandState_t *state){
bool used[N_CITIES] = {false};
for (int i = 0 ; i < N_CITIES ; i++) {
unsigned short index = (unsigned short)(curand_uniform(state) * N_CITIES);
while (used[index])
index = (unsigned short)((index + 1) % N_CITIES);
used[index] = true;
individu->pathIndexes[i] = index;
}
}
__device__ Individu select_migrant(Individu *migrants, curandState_t *state) {
unsigned short index = (unsigned short)(curand_uniform(state) * N_ISLAND);
if (index == blockIdx.x)
index = (unsigned short)((index + 1) % N_ISLAND);
return migrants[index];
}
__device__ void select_mutation(curandState_t *state, unsigned short *mutation) {
mutation[0] = (unsigned short)(curand_uniform(state) * N_CITIES);
mutation[1] = (unsigned short)(curand_uniform(state) * N_CITIES);
if (mutation[1] == mutation[0])
mutation[1] = (unsigned short)((mutation[1] + 1) % N_CITIES);
}
__device__ void select_parents(curandState_t *state, int *parents, int numbersOfParents) {
int current_parent = 0;
while (current_parent < numbersOfParents) {
for(int i = blockDim.x - 1; i >= 0; --i) {
if(curand_uniform(state) < PROBA_SELECTION) {
parents[current_parent++] = i;
break;
}
}
}
}
__device__ void mix_parents(Individu *population, curandState_t *state, int replacedIndex, int *parents, int numbersOfParents) {
int chunkSize = ceil((float)N_CITIES / numbersOfParents);
int taken;
for (int citiesCount = 0 ; citiesCount < N_CITIES ; citiesCount += taken) {
int selected_parent = parents[curand(state) % numbersOfParents];//(chunkSize * 2)
taken = curand(state) % (chunkSize * 2);
if(citiesCount + taken > N_CITIES)
taken = N_CITIES - citiesCount; // si on dépasse, on prend le reste
for(int i = citiesCount; i < citiesCount + taken; ++i) {
population[replacedIndex].pathIndexes[i] = population[selected_parent].pathIndexes[i];
}
}
}
__device__ void swap_cities(Individu *ind, unsigned short *citiesIndex){
ind->pathIndexes[citiesIndex[0]] ^= ind->pathIndexes[citiesIndex[1]];
ind->pathIndexes[citiesIndex[1]] ^= ind->pathIndexes[citiesIndex[0]];
ind->pathIndexes[citiesIndex[0]] ^= ind->pathIndexes[citiesIndex[1]];
}
__device__ void print_path(Individu ind) {
for(int i = 0; i < N_CITIES; i++) {
printf("%2hu ", ind.pathIndexes[i]);
}
printf("\n");
}
__device__ void delete_doublons(Individu *population, bool *isDoublon, int *isUnseen, int tailleBloc, int indexDebutBloc) {
__shared__ int sem;
for(int currentIndividu = 0; currentIndividu < blockDim.x; ++currentIndividu) {
if(!population[currentIndividu].isGonnaDie)
continue;
if(threadIdx.x == 0)
sem = 0;
__syncthreads();
// Réinitialisation de isDoublon
for(int cityToCheck = indexDebutBloc; cityToCheck < indexDebutBloc + tailleBloc && cityToCheck < N_CITIES; ++cityToCheck) {
isDoublon[cityToCheck] = false;
}
__syncthreads(); // Every thread delete double in currentIndividu
for(int cityToCheck = indexDebutBloc; cityToCheck < indexDebutBloc + tailleBloc && cityToCheck < N_CITIES; ++cityToCheck) {
bool seen = false;
for(int currentCity = 0; currentCity < N_CITIES; ++currentCity) {
if(population[currentIndividu].pathIndexes[currentCity] == cityToCheck) {
if(seen)
isDoublon[currentCity] = true;
else
seen = true;
}
}
// Les threads peuvent s'occuper de 0 ou plusieurs villes
if(seen == false) {
int it = atomicAdd(&sem, 1);
isUnseen[it] = cityToCheck;
}
}
//TODO : shuffle unSeen ?
__syncthreads();
/*
//AFFICHAGE
if(threadIdx.x == 0) {
printf("\nIndividu %d\n", currentIndividu);
printPath(population[currentIndividu]);
for(int i = 0; i < N_CITIES; ++i) {
printf("%2d ", isDoublon[i]);
}
printf("\n");
for(int i = 0; i < N_CITIES; ++i) {
printf("%2d ", isUnseen[i]);
}
}
*/
// Both table are fully initialized, we replace all double
if(threadIdx.x == 0)
sem = 0;
__syncthreads();
for(int cityToCheck = indexDebutBloc; cityToCheck < indexDebutBloc + tailleBloc && cityToCheck < N_CITIES; ++cityToCheck) {
if(isDoublon[cityToCheck]) {
int it = atomicAdd(&sem, 1);
population[currentIndividu].pathIndexes[cityToCheck] = isUnseen[it];
}
}
}
}
__device__ void loop_generations(Individu *population, Individu *migrants, curandState_t *state, bool *isDoublon, int *isUnseen) {
int tailleBloc = ceil((float)N_CITIES / blockDim.x);
int indexDebutBloc = threadIdx.x * tailleBloc;
// Main generation loop
for(int i = 0; i < N_GENERATION ; i++) {
__syncthreads();
if (threadIdx.x == 0) {
migrants[blockIdx.x] = population[blockDim.x-1]; //export migrant
population[0] = select_migrant(migrants, state); //import migrant
}
population[threadIdx.x].isGonnaDie = false;
if(is_gonna_die(state)) {
population[threadIdx.x].isGonnaDie = true; // TODO : sync with atomicadd instead of struct member
int parents[3];
select_parents(state, parents, 3);
mix_parents(population, state, threadIdx.x, parents, 3);
} else if(is_mutating(state)) {
// printf("%d is mutating.\n", threadIdx.x);
unsigned short citiesToBeExchanged[2];
select_mutation(state, citiesToBeExchanged);
swap_cities(population + threadIdx.x, citiesToBeExchanged);
update_score(&population[threadIdx.x]);
}
__syncthreads();
delete_doublons(population, isDoublon, isUnseen, tailleBloc, indexDebutBloc);
if(population[threadIdx.x].isGonnaDie)
update_score(&population[threadIdx.x]);
__syncthreads();
merge_sort(population);
if (threadIdx.x == blockDim.x-1) {
printf("Best individual for island %d, generation %d, scores %f\n", blockIdx.x, i, population[blockDim.x-1].score);
}
//TODO replace with better specialized sort
}
}
__global__ void solve(Individu *migrants, int *g_paths) {
extern __shared__ Individu mem[];
Individu *population = mem;
int *isUnseen = (int *)&population[blockDim.x];
bool *isDoublon = (bool *)&isUnseen[N_CITIES];
curandState_t state;
curand_init(blockIdx.x * blockDim.x + threadIdx.x, 0, 0, &state);
random_init(population + threadIdx.x, &state);
update_score(population + threadIdx.x);
if (threadIdx.x == 0) {
//fill this block's migrant as soon as possible to be sure first migrant selection from another island won't get an uninitialized individual
migrants[blockIdx.x] = population[0];
}
__syncthreads();
merge_sort(population);
loop_generations(population, migrants, &state, isDoublon, isUnseen);
__syncthreads();
if(threadIdx.x == 0) {
for(int i = 0; i < N_CITIES; ++i) {
g_paths[blockIdx.x * N_CITIES + i] = population[blockDim.x - 1].pathIndexes[i];
}
}
} |
ccbda628b7eb8f6c1244a2f1c9af654b3836b828.hip | // !!! This is a file automatically generated by hipify!!!
#include "mex.h"
#include <hip/hip_runtime.h>
#include "kcDefs.h" //see for info on anything starting with KC_
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
int currentDevice, newDevice;
hipError_t ce;
hipGetDevice(¤tDevice);
mexPrintf("Current GPU device: %d\n",currentDevice);
if(nrhs == 0) {
ce = hipSetDevice(KC_GPU_DEVICE);
}
else {
ce = hipSetDevice((int)mxGetScalar(prhs[0]));
}
if(ce != hipSuccess) {
mexPrintf("Error selecting device ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
hipGetDevice(&newDevice);
mexPrintf("Changed to GPU device: %d\n",newDevice);
}
| ccbda628b7eb8f6c1244a2f1c9af654b3836b828.cu | #include "mex.h"
#include <cuda_runtime.h>
#include "kcDefs.h" //see for info on anything starting with KC_
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
int currentDevice, newDevice;
cudaError_t ce;
cudaGetDevice(¤tDevice);
mexPrintf("Current GPU device: %d\n",currentDevice);
if(nrhs == 0) {
ce = cudaSetDevice(KC_GPU_DEVICE);
}
else {
ce = cudaSetDevice((int)mxGetScalar(prhs[0]));
}
if(ce != cudaSuccess) {
mexPrintf("Error selecting device ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
cudaGetDevice(&newDevice);
mexPrintf("Changed to GPU device: %d\n",newDevice);
}
|
0077f73c41730c5e817e0d223cdc9bc47597e4f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/conv_shift_op.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
using framework::Tensor;
namespace {
inline int div_up(int x, int y) { return (x + y - 1) / y; }
// Some notes on the design:
//
// Each thread is responsible for computing a single output out[k, i].
// Thread blocks are based on tiles of x with height 1 in the batch dimension.
//
// This design is based on the typical use case where the filter
// y is fairly small. For large y, it would probably be more efficient
// to also tile across y.
template <typename T>
__global__ void conv_shift_forward(const T *x, const T *y, T *out, int x_width,
int y_width, int y_half_width,
int batch_size) {
extern __shared__ T mem[];
int tx = threadIdx.x;
int i = blockIdx.x * blockDim.x + tx; // global x index
int k = blockIdx.y; // batch index
// Check if we are in a boundary block with fewer x's to process than
// blockDim.x.
int num_x =
(blockIdx.x == gridDim.x - 1) ? (x_width % blockDim.x) : blockDim.x;
T *sx = mem;
T *sx_pad = &mem[num_x];
T *sy = &mem[blockDim.x + y_width];
// Collaboratively load y[k, :] and length-y padding of x into shared memory.
int pad_start = blockIdx.x * blockDim.x + num_x + x_width - y_half_width;
for (int j = tx; j < y_width; j += blockDim.x) {
sy[j] = y[k * y_width + j];
sx_pad[j] = x[k * x_width + (pad_start + j) % x_width];
}
// Load a cyclically shifted slice of x into shared memory.
if (tx < num_x) {
int load_i = (i - y_half_width + x_width) % x_width;
sx[tx] = x[k * x_width + load_i];
} else {
return;
}
__syncthreads();
// Compute dot product of sx[tx:tx + y_width] and sy.
T sum = 0;
for (int j = 0; j < y_width; ++j) {
sum += sx[tx + j] * sy[j];
}
// Save to out[k, i].
out[k * x_width + i] = sum;
}
// Compute x gradient - initial naive implementation with atomic add.
template <typename T>
__global__ void conv_shift_dx(const T *dout, const T *y, T *dx, int x_width,
int y_width, int y_half_width, int batch_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // x index
int j = blockIdx.y; // y index
int k = blockIdx.z; // batch index
if (i < x_width) {
int index = (i + j - y_half_width + x_width) % x_width;
atomicAdd(&dx[k * x_width + index],
dout[k * x_width + i] * y[k * y_width + j]);
}
}
// Compute y gradient - initial naive implementation with atomic add.
// Gradient w.r.t. y:
//   dy[k, j] += x[k, (i + j - y_half_width) mod x_width] * dout[k, i]
// Launch: grid = (ceil(x_width / threads), y_width, batch_size). dy must be
// zeroed before the launch; atomicAdd accumulates over all i.
template <typename T>
__global__ void conv_shift_dy(const T *x, const T *dout, T *dy, int x_width,
                              int y_width, int y_half_width, int batch_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // x index
  int j = blockIdx.y;  // y index
  int k = blockIdx.z;  // batch index
  if (i < x_width) {
    // Same wrapped index as the forward pass.
    int index = (i + j - y_half_width + x_width) % x_width;
    atomicAdd(&dy[k * y_width + j],
              x[k * x_width + index] * dout[k * x_width + i]);
  }
}
} // namespace
// GPU forward kernel wrapper for conv_shift: reads X (batch, x_width) and
// Y (batch, y_width), launches conv_shift_forward on the context's stream.
template <typename T>
class ConvShiftKernel<platform::GPUPlace, T> : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    const Tensor *X = context.Input<Tensor>("X");
    const Tensor *Y = context.Input<Tensor>("Y");
    Tensor *Out = context.Output<Tensor>("Out");
    const T *x_data = X->data<T>();
    const T *y_data = Y->data<T>();
    T *out_data = Out->mutable_data<T>(context.GetPlace());
    int batch_size = X->dims()[0];
    int x_width = X->dims()[1];
    int y_width = Y->dims()[1];
    int y_half_width = (y_width - 1) / 2;
    // One thread per output x element; one grid row per batch item.
    const int x_per_block = 256;
    int num_x_blocks = div_up(x_width, x_per_block);
    // Dynamic shared memory: x tile + wrap padding + filter row (see kernel).
    int mem_per_block = (x_per_block + 2 * y_width) * sizeof(T);
    dim3 grid_dim(num_x_blocks, batch_size);
    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
                      context.device_context())
                      .stream();
    hipLaunchKernelGGL(( conv_shift_forward<T>), dim3(grid_dim), dim3(x_per_block), mem_per_block, stream,
        x_data, y_data, out_data, x_width, y_width, y_half_width, batch_size);
  }
};
// GPU backward kernel wrapper for conv_shift: computes dX and/or dY (either
// may be absent) from dOut using naive atomicAdd kernels. Gradient buffers
// are zeroed asynchronously on the same stream before accumulation.
template <typename T>
class ConvShiftGradKernel<platform::GPUPlace, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    const Tensor *X = context.Input<Tensor>("X");
    const Tensor *Y = context.Input<Tensor>("Y");
    const Tensor *dOut = context.Input<Tensor>(framework::GradVarName("Out"));
    const T *x_data = X->data<T>();
    const T *y_data = Y->data<T>();
    const T *dout_data = dOut->data<T>();
    Tensor *dX = context.Output<Tensor>(framework::GradVarName("X"));
    Tensor *dY = context.Output<Tensor>(framework::GradVarName("Y"));
    int batch_size = X->dims()[0];
    int x_width = X->dims()[1];
    int y_width = Y->dims()[1];
    int y_half_width = (y_width - 1) / 2;
    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
                      context.device_context())
                      .stream();
    // 3D grid: x tiles * filter taps * batch; each thread does one atomicAdd.
    const int x_per_block = 256;
    int num_x_blocks = div_up(x_width, x_per_block);
    dim3 grid_dim(num_x_blocks, y_width, batch_size);
    if (dX) {
      T *dx_data = dX->mutable_data<T>(context.GetPlace());
      // Zero the accumulator on the stream so it is ordered before the kernel.
      hipMemsetAsync(dx_data, 0, dX->numel() * sizeof(T), stream);
      hipLaunchKernelGGL(( conv_shift_dx<T>), dim3(grid_dim), dim3(x_per_block), 0, stream,
          dout_data, y_data, dx_data, x_width, y_width, y_half_width,
          batch_size);
    }
    if (dY) {
      T *dy_data = dY->mutable_data<T>(context.GetPlace());
      hipMemsetAsync(dy_data, 0, dY->numel() * sizeof(T), stream);
      hipLaunchKernelGGL(( conv_shift_dy<T>), dim3(grid_dim), dim3(x_per_block), 0, stream,
          x_data, dout_data, dy_data, x_width, y_width, y_half_width,
          batch_size);
    }
  }
};
} // namespace operators
} // namespace paddle
// Register the float32 GPU kernels for the conv_shift op and its gradient.
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(conv_shift,
                       ops::ConvShiftKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    conv_shift_grad,
    ops::ConvShiftGradKernel<paddle::platform::GPUPlace, float>);
| 0077f73c41730c5e817e0d223cdc9bc47597e4f9.cu | /* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/conv_shift_op.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
using framework::Tensor;
namespace {
inline int div_up(int x, int y) { return (x + y - 1) / y; }
// Some notes on the design:
//
// Each thread is responsible for computing a single output out[k, i].
// Thread blocks are based on tiles of x with height 1 in the batch dimension.
//
// This design is based on the typical use case where the filter
// y is fairly small. For large y, it would probably be more efficient
// to also tile across y.
// Forward kernel of circular ("shift") convolution:
//   out[k, i] = sum_{j=0..y_width-1} x[k, (i + j - y_half_width) mod x_width] * y[k, j]
// Launch contract: grid = (ceil(x_width / blockDim.x), batch_size), one
// thread per output element, dynamic shared memory of
// (blockDim.x + 2 * y_width) * sizeof(T) bytes.
template <typename T>
__global__ void conv_shift_forward(const T *x, const T *y, T *out, int x_width,
                                   int y_width, int y_half_width,
                                   int batch_size) {
  extern __shared__ T mem[];  // layout: [sx | sx_pad | sy]
  int tx = threadIdx.x;
  int i = blockIdx.x * blockDim.x + tx;  // global x index
  int k = blockIdx.y;                    // batch index
  // Number of x's this block processes; only the last block can be partial.
  // Fix: the previous `x_width % blockDim.x` evaluated to 0 for the last
  // block whenever x_width was an exact multiple of blockDim.x, so that
  // block's outputs were never written.
  int block_start = blockIdx.x * blockDim.x;
  int num_x = min(x_width - block_start, (int)blockDim.x);
  T *sx = mem;                         // cyclically shifted slice of x
  T *sx_pad = &mem[num_x];             // wrap-around padding, length y_width
  T *sy = &mem[blockDim.x + y_width];  // filter row y[k, :]
  // Collaboratively load y[k, :] and length-y padding of x into shared memory.
  int pad_start = block_start + num_x + x_width - y_half_width;
  for (int j = tx; j < y_width; j += blockDim.x) {
    sy[j] = y[k * y_width + j];
    sx_pad[j] = x[k * x_width + (pad_start + j) % x_width];
  }
  // Load a cyclically shifted slice of x into shared memory.
  if (tx < num_x) {
    int load_i = (i - y_half_width + x_width) % x_width;
    sx[tx] = x[k * x_width + load_i];
  }
  // Fix: all threads reach the barrier; the old code returned excess threads
  // *before* __syncthreads(), a divergent-exit hazard.
  __syncthreads();
  if (tx >= num_x) {
    return;
  }
  // Compute dot product of sx[tx:tx + y_width] (spilling into sx_pad) and sy.
  T sum = 0;
  for (int j = 0; j < y_width; ++j) {
    sum += sx[tx + j] * sy[j];
  }
  // Save to out[k, i].
  out[k * x_width + i] = sum;
}
// Compute x gradient - initial naive implementation with atomic add.
// Gradient w.r.t. x:
//   dx[k, (i + j - y_half_width) mod x_width] += dout[k, i] * y[k, j]
// Launch: grid = (ceil(x_width / threads), y_width, batch_size). dx must be
// zeroed before the launch; atomicAdd accumulates across the j dimension.
template <typename T>
__global__ void conv_shift_dx(const T *dout, const T *y, T *dx, int x_width,
                              int y_width, int y_half_width, int batch_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // x index
  int j = blockIdx.y;  // y index
  int k = blockIdx.z;  // batch index
  if (i < x_width) {
    // Wrap the shifted index back into [0, x_width).
    int index = (i + j - y_half_width + x_width) % x_width;
    atomicAdd(&dx[k * x_width + index],
              dout[k * x_width + i] * y[k * y_width + j]);
  }
}
// Compute y gradient - initial naive implementation with atomic add.
// Gradient w.r.t. y:
//   dy[k, j] += x[k, (i + j - y_half_width) mod x_width] * dout[k, i]
// Launch: grid = (ceil(x_width / threads), y_width, batch_size). dy must be
// zeroed before the launch; atomicAdd accumulates over all i.
template <typename T>
__global__ void conv_shift_dy(const T *x, const T *dout, T *dy, int x_width,
                              int y_width, int y_half_width, int batch_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // x index
  int j = blockIdx.y;  // y index
  int k = blockIdx.z;  // batch index
  if (i < x_width) {
    // Same wrapped index as the forward pass.
    int index = (i + j - y_half_width + x_width) % x_width;
    atomicAdd(&dy[k * y_width + j],
              x[k * x_width + index] * dout[k * x_width + i]);
  }
}
} // namespace
// GPU forward kernel wrapper for conv_shift: reads X (batch, x_width) and
// Y (batch, y_width), launches conv_shift_forward on the context's stream.
template <typename T>
class ConvShiftKernel<platform::GPUPlace, T> : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    const Tensor *X = context.Input<Tensor>("X");
    const Tensor *Y = context.Input<Tensor>("Y");
    Tensor *Out = context.Output<Tensor>("Out");
    const T *x_data = X->data<T>();
    const T *y_data = Y->data<T>();
    T *out_data = Out->mutable_data<T>(context.GetPlace());
    int batch_size = X->dims()[0];
    int x_width = X->dims()[1];
    int y_width = Y->dims()[1];
    int y_half_width = (y_width - 1) / 2;
    // One thread per output x element; one grid row per batch item.
    const int x_per_block = 256;
    int num_x_blocks = div_up(x_width, x_per_block);
    // Dynamic shared memory: x tile + wrap padding + filter row (see kernel).
    int mem_per_block = (x_per_block + 2 * y_width) * sizeof(T);
    dim3 grid_dim(num_x_blocks, batch_size);
    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
                      context.device_context())
                      .stream();
    conv_shift_forward<T><<<grid_dim, x_per_block, mem_per_block, stream>>>(
        x_data, y_data, out_data, x_width, y_width, y_half_width, batch_size);
  }
};
// GPU backward kernel wrapper for conv_shift: computes dX and/or dY (either
// may be absent) from dOut using naive atomicAdd kernels. Gradient buffers
// are zeroed asynchronously on the same stream before accumulation.
template <typename T>
class ConvShiftGradKernel<platform::GPUPlace, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    const Tensor *X = context.Input<Tensor>("X");
    const Tensor *Y = context.Input<Tensor>("Y");
    const Tensor *dOut = context.Input<Tensor>(framework::GradVarName("Out"));
    const T *x_data = X->data<T>();
    const T *y_data = Y->data<T>();
    const T *dout_data = dOut->data<T>();
    Tensor *dX = context.Output<Tensor>(framework::GradVarName("X"));
    Tensor *dY = context.Output<Tensor>(framework::GradVarName("Y"));
    int batch_size = X->dims()[0];
    int x_width = X->dims()[1];
    int y_width = Y->dims()[1];
    int y_half_width = (y_width - 1) / 2;
    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
                      context.device_context())
                      .stream();
    // 3D grid: x tiles * filter taps * batch; each thread does one atomicAdd.
    const int x_per_block = 256;
    int num_x_blocks = div_up(x_width, x_per_block);
    dim3 grid_dim(num_x_blocks, y_width, batch_size);
    if (dX) {
      T *dx_data = dX->mutable_data<T>(context.GetPlace());
      // Zero the accumulator on the stream so it is ordered before the kernel.
      cudaMemsetAsync(dx_data, 0, dX->numel() * sizeof(T), stream);
      conv_shift_dx<T><<<grid_dim, x_per_block, 0, stream>>>(
          dout_data, y_data, dx_data, x_width, y_width, y_half_width,
          batch_size);
    }
    if (dY) {
      T *dy_data = dY->mutable_data<T>(context.GetPlace());
      cudaMemsetAsync(dy_data, 0, dY->numel() * sizeof(T), stream);
      conv_shift_dy<T><<<grid_dim, x_per_block, 0, stream>>>(
          x_data, dout_data, dy_data, x_width, y_width, y_half_width,
          batch_size);
    }
  }
};
} // namespace operators
} // namespace paddle
// Register the float32 GPU kernels for the conv_shift op and its gradient.
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(conv_shift,
                       ops::ConvShiftKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    conv_shift_grad,
    ops::ConvShiftGradKernel<paddle::platform::GPUPlace, float>);
|
75bc10155cb76bd66606cec6859b6a2b7c0e9bcd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <rocblas.h>
#include "helper_cuda.h"
#include "utils.cuh"
// Demo: mixed-precision GEMM (C = alpha * A * B + beta * C) via hipblasGemmEx
// with half-precision inputs and float accumulation/output, timed with events.
// Returns 0 on success, -1 for unsupported buffer precisions.
int main()
{
    // Problem size: A is M x K, B is K x N, C is M x N (column-major BLAS).
    int M, N, K;
    M = 4;
    N = 5;
    K = 6;
    srand(2019);  // fixed seed for reproducible random matrices
    // initialize host buffers
    helper::CBuffer<half> inputMatrix1, inputMatrix2;
    helper::CBuffer<float> outputMatrix;
    float alpha, beta;
    inputMatrix1.init(K * M, true);
    inputMatrix2.init(N * K, true);
    outputMatrix.init(N * M, true);
    bool tensor_core = false;  // plain FMA path; flip to use tensor cores
    // create cuda event handles
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // create cublas handle
    hipblasHandle_t cublas_handle;
    checkCudaErrors(
        hipblasCreate(&cublas_handle));
    // Only print small problems so the output stays readable.
    // NOTE(review): printMatrix is called with (K, M) / (N, K) / (N, M) while
    // GemmEx uses lda=M, ldb=K, ldc=M — confirm the helper's (rows, cols)
    // convention against utils.cuh.
    int print_threshold = 12;
    if (M < print_threshold && N < print_threshold && K < print_threshold) {
        std::cout << "inputMatrix1:" << std::endl;
        helper::printMatrix(inputMatrix1.h_ptr_, K, M);
        std::cout << "inputMatrix2:" << std::endl;
        helper::printMatrix(inputMatrix2.h_ptr_, N, K);
        std::cout << "outputMatrix:" << std::endl;
        helper::printMatrix(outputMatrix.h_ptr_, N, M);
    }
    alpha = 1.f;
    beta = 0.f;
    // determine data type information for GemmEx()
    hipDataType TYPE_A, TYPE_B, TYPE_C;
    if (typeid(*inputMatrix1.h_ptr_) == typeid(float)) {
        TYPE_A = TYPE_B = HIP_R_32F;
    }
    else if (typeid(*inputMatrix1.h_ptr_) == typeid(half)) {
        TYPE_A = TYPE_B = HIP_R_16F;
    }
    else if (typeid(*inputMatrix1.h_ptr_) == typeid(int8_t)) {
        TYPE_A = TYPE_B = HIP_R_8I;
    }
    else {
        printf("Not supported precision\n");
        return -1;
    }
    if (typeid(*outputMatrix.h_ptr_) == typeid(float)) {
        TYPE_C = HIP_R_32F;
    }
    else if (typeid(*outputMatrix.h_ptr_) == typeid(int)) {
        TYPE_C = HIP_R_32I;
    }
    else {
        printf("Not supported precision\n");
        return -1;
    }
    // allocate GPU memory and copy the data
    inputMatrix1.cuda(true);
    inputMatrix2.cuda(true);
    outputMatrix.cuda(true);
    // enables tensorcore operation when it is possible
    // checkCudaErrors(
    //     cublasSetMathMode(cublas_handle, CUBLAS_TENSOR_OP_MATH));
    hipEventRecord(start);
    checkCudaErrors(
        hipblasGemmEx(cublas_handle,
                     HIPBLAS_OP_N,
                     HIPBLAS_OP_N,
                     M, N, K,
                     &alpha,
                     inputMatrix1.d_ptr_, TYPE_A, M,
                     inputMatrix2.d_ptr_, TYPE_B, K,
                     &beta,
                     outputMatrix.d_ptr_, TYPE_C, M,
                     TYPE_C,
                     (tensor_core) ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : HIPBLAS_GEMM_DEFAULT));
    hipEventRecord(stop);
    outputMatrix.copyToHost();
    if (M < print_threshold && N < print_threshold && K < print_threshold) {
        std::cout << "outputMatrix out:" << std::endl;
        helper::printMatrix(outputMatrix.h_ptr_, N, M);
    }
    // print out elapsed time
    // NOTE(review): relies on copyToHost() having synchronized past `stop`;
    // an explicit hipEventSynchronize(stop) would be safer. The events are
    // also never destroyed before exit.
    float cudaElapsedTime;
    hipEventElapsedTime(&cudaElapsedTime, start, stop);
    std::cout << std::setw(4) << cudaElapsedTime << " ms" << std::endl;
    checkCudaErrors(
        hipblasDestroy(cublas_handle));
    return 0;
}
| 75bc10155cb76bd66606cec6859b6a2b7c0e9bcd.cu | #include <iostream>
#include <iomanip>
#include <cublas_v2.h>
#include "helper_cuda.h"
#include "utils.cuh"
// Demo: mixed-precision GEMM (C = alpha * A * B + beta * C) via cublasGemmEx
// with half-precision inputs and float accumulation/output, timed with CUDA
// events. Returns 0 on success, -1 for unsupported buffer precisions.
int main()
{
    // Problem size: A is M x K, B is K x N, C is M x N (column-major BLAS).
    int M, N, K;
    M = 4;
    N = 5;
    K = 6;
    srand(2019);  // fixed seed for reproducible random matrices
    // initialize host buffers
    helper::CBuffer<half> inputMatrix1, inputMatrix2;
    helper::CBuffer<float> outputMatrix;
    float alpha, beta;
    inputMatrix1.init(K * M, true);
    inputMatrix2.init(N * K, true);
    outputMatrix.init(N * M, true);
    bool tensor_core = false;  // plain FMA path; flip to use tensor cores
    // create cuda event handles
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // create cublas handle
    cublasHandle_t cublas_handle;
    checkCudaErrors(
        cublasCreate(&cublas_handle));
    // Only print small problems so the output stays readable.
    int print_threshold = 12;
    if (M < print_threshold && N < print_threshold && K < print_threshold) {
        std::cout << "inputMatrix1:" << std::endl;
        helper::printMatrix(inputMatrix1.h_ptr_, K, M);
        std::cout << "inputMatrix2:" << std::endl;
        helper::printMatrix(inputMatrix2.h_ptr_, N, K);
        std::cout << "outputMatrix:" << std::endl;
        helper::printMatrix(outputMatrix.h_ptr_, N, M);
    }
    alpha = 1.f;
    beta = 0.f;
    // determine data type information for GemmEx()
    cudaDataType TYPE_A, TYPE_B, TYPE_C;
    if (typeid(*inputMatrix1.h_ptr_) == typeid(float)) {
        TYPE_A = TYPE_B = CUDA_R_32F;
    }
    else if (typeid(*inputMatrix1.h_ptr_) == typeid(half)) {
        TYPE_A = TYPE_B = CUDA_R_16F;
    }
    else if (typeid(*inputMatrix1.h_ptr_) == typeid(int8_t)) {
        TYPE_A = TYPE_B = CUDA_R_8I;
    }
    else {
        printf("Not supported precision\n");
        return -1;
    }
    if (typeid(*outputMatrix.h_ptr_) == typeid(float)) {
        TYPE_C = CUDA_R_32F;
    }
    else if (typeid(*outputMatrix.h_ptr_) == typeid(int)) {
        TYPE_C = CUDA_R_32I;
    }
    else {
        printf("Not supported precision\n");
        return -1;
    }
    // allocate GPU memory and copy the data
    inputMatrix1.cuda(true);
    inputMatrix2.cuda(true);
    outputMatrix.cuda(true);
    // enables tensorcore operation when it is possible
    // checkCudaErrors(
    //     cublasSetMathMode(cublas_handle, CUBLAS_TENSOR_OP_MATH));
    cudaEventRecord(start);
    checkCudaErrors(
        cublasGemmEx(cublas_handle,
                     CUBLAS_OP_N,
                     CUBLAS_OP_N,
                     M, N, K,
                     &alpha,
                     inputMatrix1.d_ptr_, TYPE_A, M,
                     inputMatrix2.d_ptr_, TYPE_B, K,
                     &beta,
                     outputMatrix.d_ptr_, TYPE_C, M,
                     TYPE_C,
                     (tensor_core) ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : CUBLAS_GEMM_DEFAULT));
    cudaEventRecord(stop);
    outputMatrix.copyToHost();
    if (M < print_threshold && N < print_threshold && K < print_threshold) {
        std::cout << "outputMatrix out:" << std::endl;
        helper::printMatrix(outputMatrix.h_ptr_, N, M);
    }
    // print out elapsed time
    // Fix: explicitly wait for `stop` before reading the timer — previously
    // this relied on copyToHost() happening to synchronize past the event.
    cudaEventSynchronize(stop);
    float cudaElapsedTime;
    cudaEventElapsedTime(&cudaElapsedTime, start, stop);
    std::cout << std::setw(4) << cudaElapsedTime << " ms" << std::endl;
    checkCudaErrors(
        cublasDestroy(cublas_handle));
    // Fix: release the timing events (previously leaked until process exit).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
26d9860abedeb870f9340b93756d89ad6665ab08.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
  const hipError_t status = hipGetLastError();
  if (status == hipSuccess) {
    return;  // no pending error
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos_coherent;
glm::vec3 *dev_vel_coherent;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer bit-mixer (alternating add/xor with shifts) used to derive
// per-thread RNG seeds; deterministic for a given input.
__host__ __device__ unsigned int hash(unsigned int a) {
  unsigned int h = a;
  h = (h + 0x7ed55d16) + (h << 12);
  h = (h ^ 0xc761c23c) ^ (h >> 19);
  h = (h + 0x165667b1) + (h << 5);
  h = (h + 0xd3a2646c) ^ (h << 9);
  h = (h + 0xfd7046c5) + (h << 3);
  h = (h ^ 0xb55a4f09) ^ (h >> 16);
  return h;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  // Seed a thrust RNG from (time, index). Note (int)(index * time) maps many
  // (time, index) pairs to the same seed (e.g. time == 0 gives seed 0 for
  // every thread); callers here pass time = 1 so the seed is just index.
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  // Each component drawn independently, uniform in [-1, 1).
  return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
// Fill arr[0..N) with random positions in the cube [-scale, scale]^3.
// `time` only perturbs the RNG seed; one thread per element.
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index < N) {
    glm::vec3 rand = generateRandomVec3(time, index);
    arr[index].x = scale * rand.x;
    arr[index].y = scale * rand.y;
    arr[index].z = scale * rand.z;
  }
}
/**
* Initialize memory, update some globals
*/
// Allocate all device buffers for N boids, seed random starting positions,
// and derive the uniform-grid parameters. Buffers allocated here are
// released in Boids::endSimulation.
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  // LOOK-1.2 - This is basic CUDA memory management and error checking.
  // Don't forget to hipFree in Boids::endSimulation.
  hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
  hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
  hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
  // LOOK-1.2 - This is a typical CUDA kernel invocation.
  hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // LOOK-2.1 computing grid params
  // Cell width = 2 * max rule radius, so a boid's neighborhood spans at most
  // 8 cells (2 per axis).
  gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  // Center the grid on the origin.
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // TODO-2.1 TODO-2.3 - Allocate additional buffers here.
  // Per-boid sort keys/values for the uniform-grid neighbor search.
  hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
  hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
  // Per-cell [start, end] ranges into the sorted particle arrays.
  hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
  hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
  // Thrust views over the sort key/value buffers.
  dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
  dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
  // Cell-sorted copies of pos/vel for the coherent (2.3) path.
  hipMalloc((void**)&dev_pos_coherent, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_pos_coherent failed!");
  hipMalloc((void**)&dev_vel_coherent, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_vel_coherent failed!");
  hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
// Write boid positions into the float4-per-boid VBO, scaled by -1/s_scale
// to map simulation space into the render volume. w is fixed to 1.
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  float c_scale = -1.0f / s_scale;
  if (index < N) {
    vbo[4 * index + 0] = pos[index].x * c_scale;
    vbo[4 * index + 1] = pos[index].y * c_scale;
    vbo[4 * index + 2] = pos[index].z * c_scale;
    vbo[4 * index + 3] = 1.0f;
  }
}
// Write boid velocities into the float4-per-boid VBO, offset by +0.3 per
// component (used as a display color); w is fixed to 1. s_scale is unused
// but kept for signature symmetry with kernCopyPositionsToVBO.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 v = vel[index];
  float *dst = vbo + 4 * index;
  dst[0] = v.x + 0.3f;
  dst[1] = v.y + 0.3f;
  dst[2] = v.z + 0.3f;
  dst[3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
// Host wrapper: copy current positions and velocities into the two mapped
// VBO pointers, then block until both kernels finish.
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
  // Rule 2: boids try to stay a distance d away from each other
  // Rule 3: boids try to match the speed of surrounding boids
  glm::vec3 perceivedCen = glm::vec3(0.0f, 0.0f, 0.0f);  // rule 1 accumulator
  glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f);             // rule 2 accumulator
  glm::vec3 perceivedVel = glm::vec3(0.0f, 0.0f, 0.0f);  // rule 3 accumulator
  glm::vec3 change = glm::vec3(0.0f, 0.0f, 0.0f);
  glm::vec3 thisPos = pos[iSelf];
  glm::vec3 thisVel = vel[iSelf];
  int neighbor1 = 0;  // count within rule1Distance
  int neighbor2 = 0;  // count within rule3Distance
  // O(N) scan over every other boid; each rule uses its own radius.
  for (int i = 0; i < N; i++) {
    if (iSelf == i) {
      continue;
    }
    float dist = glm::distance(pos[i], thisPos);
    if (dist < rule1Distance) {
      neighbor1++;
      perceivedCen += pos[i];
    }
    if (dist < rule2Distance) {
      c -= pos[i] - thisPos;
    }
    if (dist < rule3Distance) {
      neighbor2++;
      perceivedVel += vel[i];
    }
  }
  // Only average when neighbors exist (avoids divide-by-zero).
  if (neighbor1 > 0) {
    perceivedCen /= neighbor1;
    change += (perceivedCen - thisPos) * rule1Scale;
  }
  if (neighbor2 > 0) {
    perceivedVel /= neighbor2;
    change += perceivedVel * rule3Scale;
  }
  change += c * rule2Scale;
  return change;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its velocity based on its current velocity and position.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
  glm::vec3 *vel1, glm::vec3 *vel2) {
  // One thread per boid: apply the flocking delta from all N boids.
  int self = threadIdx.x + (blockIdx.x * blockDim.x);
  if (self >= N) {
    return;
  }
  // Write into vel2, not vel1: every thread must read a consistent snapshot
  // of vel1 while neighbors are still being processed (ping-pong buffers).
  glm::vec3 updated = computeVelocityChange(N, self, pos, vel1) + vel1[self];
  // Clamp the speed to maxSpeed while preserving direction.
  float speed = glm::length(updated);
  if (speed > maxSpeed) {
    updated = updated / speed * maxSpeed;
  }
  vel2[self] = updated;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Euler-integrate each boid's position by dt, then wrap positions that left
// the [-scene_scale, scene_scale] cube back to the opposite face (torus).
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  // Update position by velocity
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index];
  thisPos += vel[index] * dt;
  // Wrap the boids around so we don't lose them
  thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
  thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
  thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
  thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
  thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
  thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
  pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate into a 1D index, x fastest-varying: cells
// adjacent in x are adjacent in memory, so neighbor loops should vary x
// innermost for coalesced access.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
// Label each boid with the 1D index of the grid cell containing it, and
// record the identity mapping boid -> data slot (both arrays are sorted
// together afterwards by cell index).
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  indices[index] = index;
  // Fix: the scale by inverseCellWidth belongs *inside* floor(). The old
  // code floored the raw world-space offset first and multiplied afterwards,
  // which only coincidentally matches when the cell width is an integer and
  // is wrong for fractional cell widths.
  glm::vec3 offset = (pos[index] - gridMin) * inverseCellWidth;
  int x = (int)floor(offset.x);
  int y = (int)floor(offset.y);
  int z = (int)floor(offset.z);
  gridIndices[index] = gridIndex3Dto1D(x, y, z, gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fill intBuffer[0..N) with `value` — e.g. -1 to mark grid cells that
// enclose no boids before kernIdentifyCellStartEnd runs.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int i = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (i >= N) {
    return;
  }
  intBuffer[i] = value;
}
// Given the sorted per-boid cell indices, record for each occupied cell the
// inclusive [start, end] range of its run ("this index doesn't match the one
// before it, must be a new cell!"). Cells with no boids are left untouched,
// so both output buffers must be pre-reset (e.g. to -1) before this launch.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  // Fix: the bounds check must precede the array read — the old code indexed
  // particleGridIndices[index] out of range for tail threads.
  if (index >= N) {
    return;
  }
  int current = particleGridIndices[index];
  if (index == 0) {
    gridCellStartIndices[current] = 0;
  } else {
    int previous = particleGridIndices[index - 1];
    if (previous != current) {
      // Run boundary: this thread opens `current` and closes `previous`.
      gridCellStartIndices[current] = index;
      gridCellEndIndices[previous] = index - 1;
    }
  }
  // Fix: close the final run unconditionally — the old branch structure
  // never wrote an end index when N == 1.
  if (index == N - 1) {
    gridCellEndIndices[current] = index;
  }
}
// Uniform-grid neighbor search (scattered layout): each thread updates one
// boid's velocity, examining only boids in grid cells overlapping its
// neighborhood sphere (radius <= cellWidth / 2), reached through the
// particleArrayIndices indirection. Clamped result goes into vel2.
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index];
  glm::vec3 perceivedCen(0.0f, 0.0f, 0.0f);  // rule 1 accumulator
  glm::vec3 c(0.0f, 0.0f, 0.0f);             // rule 2 accumulator
  glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);  // rule 3 accumulator
  glm::vec3 newVel(0.0f, 0.0f, 0.0f);
  float halfCellWidth = cellWidth / 2;  // == max rule radius by construction
  // Cell range covering [thisPos - halfCellWidth, thisPos + halfCellWidth],
  // clamped to the grid. Fix: scale by inverseCellWidth *inside* floor();
  // the old code floored the raw world-space offset first, which is only
  // coincidentally equivalent when the cell width is an integer.
  int left  = imax((int)floor((thisPos.x - gridMin.x - halfCellWidth) * inverseCellWidth), 0);
  int right = imin((int)floor((thisPos.x - gridMin.x + halfCellWidth) * inverseCellWidth), gridResolution - 1);
  int down  = imax((int)floor((thisPos.y - gridMin.y - halfCellWidth) * inverseCellWidth), 0);
  int top   = imin((int)floor((thisPos.y - gridMin.y + halfCellWidth) * inverseCellWidth), gridResolution - 1);
  int back  = imax((int)floor((thisPos.z - gridMin.z - halfCellWidth) * inverseCellWidth), 0);
  int front = imin((int)floor((thisPos.z - gridMin.z + halfCellWidth) * inverseCellWidth), gridResolution - 1);
  int neighbor1 = 0;
  int neighbor2 = 0;
  // z outermost, x innermost: matches gridIndex3Dto1D's layout so successive
  // idx values are adjacent in memory.
  for (int i = back; i <= front; i++) {
    for (int j = down; j <= top; j++) {
      for (int k = left; k <= right; k++) {
        int idx = gridIndex3Dto1D(k, j, i, gridResolution);
        int start = gridCellStartIndices[idx];
        if (start < 0) {
          continue;  // cell kept its -1 reset value: no boids inside
        }
        int end = gridCellEndIndices[idx];
        for (int m = start; m <= end; m++) {
          int boid = particleArrayIndices[m];
          if (boid == index) { continue; }
          glm::vec3 thatPos = pos[boid];
          float dist = glm::distance(thisPos, thatPos);
          // rule 1: cohesion
          if (dist < rule1Distance) {
            neighbor1++;
            perceivedCen += thatPos;
          }
          // rule 2: separation
          if (dist < rule2Distance) {
            c -= thatPos - thisPos;
          }
          // rule 3: alignment
          if (dist < rule3Distance) {
            neighbor2++;
            perceivedVel += vel1[boid];
          }
        }
      }
    }
  }
  if (neighbor1 > 0) {
    perceivedCen /= neighbor1;
    newVel += (perceivedCen - thisPos) * rule1Scale;
  }
  if (neighbor2 > 0) {
    perceivedVel /= neighbor2;
    newVel += perceivedVel * rule3Scale;
  }
  newVel += c * rule2Scale + vel1[index];
  // Clamp the speed to maxSpeed while preserving direction.
  float speed = glm::length(newVel);
  if (speed > maxSpeed) {
    newVel = newVel / speed * maxSpeed;
  }
  vel2[index] = newVel;
}
// Gather pos/vel into cell-sorted order: slot `dst` of the coherent buffers
// receives the data of the boid that sorted into position `dst`, so the
// coherent neighbor-search kernel reads contiguous memory per cell.
__global__ void kernReorder(int N,
  int *particleArrayIndices, glm::vec3 *pos,
  glm::vec3 *vel, glm::vec3 *pos_coherent, glm::vec3 *vel_coherent) {
  int dst = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (dst >= N) {
    return;
  }
  int src = particleArrayIndices[dst];
  pos_coherent[dst] = pos[src];
  vel_coherent[dst] = vel[src];
}
// Uniform-grid neighbor search (coherent layout): same as the scattered
// version, but pos/vel1 are already cell-sorted, so gridCellStart/End index
// into them directly — one less indirection and contiguous reads per cell.
// Clamped result goes into vel2 (also in coherent order).
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index];
  glm::vec3 perceivedCen(0.0f, 0.0f, 0.0f);  // rule 1 accumulator
  glm::vec3 c(0.0f, 0.0f, 0.0f);             // rule 2 accumulator
  glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);  // rule 3 accumulator
  glm::vec3 newVel(0.0f, 0.0f, 0.0f);
  float halfCellWidth = cellWidth / 2;  // == max rule radius by construction
  // Cell range covering the neighborhood, clamped to the grid. Fix: scale by
  // inverseCellWidth *inside* floor(); the old code floored the raw offset
  // first, which is only coincidentally right for integer cell widths.
  int left  = imax((int)floor((thisPos.x - gridMin.x - halfCellWidth) * inverseCellWidth), 0);
  int right = imin((int)floor((thisPos.x - gridMin.x + halfCellWidth) * inverseCellWidth), gridResolution - 1);
  int down  = imax((int)floor((thisPos.y - gridMin.y - halfCellWidth) * inverseCellWidth), 0);
  int top   = imin((int)floor((thisPos.y - gridMin.y + halfCellWidth) * inverseCellWidth), gridResolution - 1);
  int back  = imax((int)floor((thisPos.z - gridMin.z - halfCellWidth) * inverseCellWidth), 0);
  int front = imin((int)floor((thisPos.z - gridMin.z + halfCellWidth) * inverseCellWidth), gridResolution - 1);
  int neighbor1 = 0;
  int neighbor2 = 0;
  // z outermost, x innermost to walk cells in memory order.
  for (int i = back; i <= front; i++) {
    for (int j = down; j <= top; j++) {
      for (int k = left; k <= right; k++) {
        int idx = gridIndex3Dto1D(k, j, i, gridResolution);
        int start = gridCellStartIndices[idx];
        if (start < 0) {
          continue;  // cell kept its -1 reset value: no boids inside
        }
        int end = gridCellEndIndices[idx];
        for (int m = start; m <= end; m++) {
          if (m == index) {
            continue;  // skip self (data is coherent: m indexes pos directly)
          }
          glm::vec3 thatPos = pos[m];
          float dist = glm::distance(thisPos, thatPos);
          // rule 1: cohesion
          if (dist < rule1Distance) {
            neighbor1++;
            perceivedCen += thatPos;
          }
          // rule 2: separation
          if (dist < rule2Distance) {
            c -= thatPos - thisPos;
          }
          // rule 3: alignment
          if (dist < rule3Distance) {
            neighbor2++;
            perceivedVel += vel1[m];
          }
        }
      }
    }
  }
  if (neighbor1 > 0) {
    perceivedCen /= neighbor1;
    newVel += (perceivedCen - thisPos) * rule1Scale;
  }
  if (neighbor2 > 0) {
    perceivedVel /= neighbor2;
    newVel += perceivedVel * rule3Scale;
  }
  newVel += c * rule2Scale + vel1[index];
  // Clamp the speed to maxSpeed while preserving direction.
  float speed = glm::length(newVel);
  if (speed > maxSpeed) {
    newVel = newVel / speed * maxSpeed;
  }
  vel2[index] = newVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
* Brute-force O(N^2) variant: every boid scans every other boid.
*/
void Boids::stepSimulationNaive(float dt) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// New velocities are written to dev_vel2 so dev_vel1 stays a stable input.
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Ping-pong the velocity buffers by swapping pointers instead of paying
// for a device-to-device hipMemcpy every frame; dev_vel1 now holds the
// new velocities and the stale buffer becomes next frame's scratch.
glm::vec3 *tmp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = tmp;
}
/**
* Step the simulation using the scattered uniform grid:
* label -> sort by cell key -> bucket -> neighbor-search update -> integrate.
*/
void Boids::stepSimulationScatteredGrid(float dt) {
// 1) Reset cell bookkeeping; -1 marks "cell contains no boids".
dim3 fullBlocksPerGrid1((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<fullBlocksPerGrid1, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
kernResetIntBuffer<<<fullBlocksPerGrid1, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
// 2) Label each boid with its grid-cell key and its own array index.
dim3 fullBlocksPerGrid2((numObjects + blockSize - 1) / blockSize);
kernComputeIndices<<<fullBlocksPerGrid2, blockSize>>>(numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// 3) Sort boid indices by cell key so each cell is a contiguous run,
// then record each cell's [start, end] range.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernIdentifyCellStartEnd<<<fullBlocksPerGrid2, blockSize>>>(numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// 4) Velocity update via per-cell neighbor search, then integrate positions.
kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGrid2, blockSize>>>(
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos<<<fullBlocksPerGrid2, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// 5) Ping-pong velocities by pointer swap instead of a per-frame
// device-to-device copy (same observable state, no transfer cost).
glm::vec3 *tmp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = tmp;
}
/**
* Step the simulation using the cell-coherent uniform grid. Identical to the
* scattered variant except boid data is reshuffled into cell-sorted order
* before the neighbor search, removing one level of indirection.
*/
void Boids::stepSimulationCoherentGrid(float dt) {
// 1) Reset cell bookkeeping; -1 marks "cell contains no boids".
dim3 fullBlocksPerGrid1((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<fullBlocksPerGrid1, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
kernResetIntBuffer<<<fullBlocksPerGrid1, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
// 2) Label and sort boids by grid-cell key; record per-cell ranges.
dim3 fullBlocksPerGrid2((numObjects + blockSize - 1) / blockSize);
kernComputeIndices<<<fullBlocksPerGrid2, blockSize>>>(numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernIdentifyCellStartEnd<<<fullBlocksPerGrid2, blockSize>>>(numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// 3) Reshuffle positions/velocities into cell-sorted (coherent) order.
kernReorder<<<fullBlocksPerGrid2, blockSize>>>(numObjects, dev_particleArrayIndices, dev_pos,
dev_vel1, dev_pos_coherent, dev_vel_coherent);
checkCUDAErrorWithLine("kernReorder failed!");
// 4) Neighbor-search velocity update on the coherent buffers, then integrate.
kernUpdateVelNeighborSearchCoherent<<<fullBlocksPerGrid2, blockSize>>>(
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos_coherent, dev_vel_coherent, dev_vel2);
// BUGFIX: error label previously said "kernUpdateVelNeighborSearchScattered
// failed!" (copy-paste from the scattered path), which misattributed failures.
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
kernUpdatePos<<<fullBlocksPerGrid2, blockSize>>>(numObjects, dt, dev_pos_coherent, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// 5) Ping-pong by pointer swap instead of two device-to-device copies.
// The updated coherent positions become dev_pos and the new velocities
// become dev_vel1; both are in cell-sorted order, which is fine because
// the next frame re-labels and re-sorts from scratch.
glm::vec3 *tmp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = tmp;
tmp = dev_pos;
dev_pos = dev_pos_coherent;
dev_pos_coherent = tmp;
}
// Release every device buffer allocated in Boids::initSimulation.
void Boids::endSimulation() {
// Core simulation state.
hipFree(dev_pos);
hipFree(dev_vel1);
hipFree(dev_vel2);
// Uniform-grid bookkeeping (2.1).
hipFree(dev_particleGridIndices);
hipFree(dev_particleArrayIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
// Cell-coherent shuffle buffers (2.3).
hipFree(dev_pos_coherent);
hipFree(dev_vel_coherent);
}
// LOOK-1.2 Feel free to write additional tests here.
// Smoke-test for thrust::sort_by_key on a small fixed key/value set,
// printing the arrays before and after the (unstable) sort.
void Boids::unitTest() {
const int N = 10;
// Fixture: duplicate keys exercise the "unstable" aspect of the sort.
const int srcKeys[N]   = { 0, 1, 0, 3, 0, 2, 2, 0, 5, 6 };
const int srcValues[N] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
std::unique_ptr<int[]> intKeys{ new int[N] };
std::unique_ptr<int[]> intValues{ new int[N] };
for (int i = 0; i < N; i++) {
intKeys[i] = srcKeys[i];
intValues[i] = srcValues[i];
}
int *dev_intKeys = nullptr;
int *dev_intValues = nullptr;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| 26d9860abedeb870f9340b93756d89ad6665ab08.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
* `line` < 0 suppresses the line-number prefix; the checkCUDAErrorWithLine
* macro supplies __LINE__ automatically.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return; // nothing pending — fast path
}
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;   // velocities read by the current step
glm::vec3 *dev_vel2;   // velocities written by the current step
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell? (-1 means the cell is empty)
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos_coherent;   // positions reshuffled into cell-sorted order
glm::vec3 *dev_vel_coherent;   // velocities reshuffled into cell-sorted order
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Bit-mixing integer hash used to seed the per-boid RNG below.
// __host__ __device__ so both sides produce identical seeds for the
// same (time, index) pair.
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
* Each component is drawn uniformly from [-1, 1]; the RNG is seeded from
* hash(index * time), so results are deterministic per (time, index).
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
* Fills arr[0..N) with uniform random points in [-scale, scale]^3.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// One random point in [-1, 1]^3, scaled to the starting area
// (glm scalar * vec3 multiplies each component).
glm::vec3 rand = generateRandomVec3(time, index);
arr[index] = scale * rand;
}
/**
* Initialize memory, update some globals
* Allocates all device buffers, seeds random boid positions, and derives
* the uniform-grid parameters (2x the largest rule radius per cell).
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
// Cell width is twice the largest interaction radius, so a boid's
// neighborhood spans at most 8 cells.
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
// gridMinimum starts at the origin (zero-initialized global) and is
// shifted so the grid is centered on the scene.
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
// Thrust wrappers over the raw index buffers, used by sort_by_key.
dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
cudaMalloc((void**)&dev_pos_coherent, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos_coherent failed!");
cudaMalloc((void**)&dev_vel_coherent, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel_coherent failed!");
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
* Each boid occupies one vec4 slot; w is fixed at 1 for the rasterizer.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index >= N) {
return;
}
glm::vec3 p = pos[index];
vbo[4 * index + 0] = p.x * c_scale;
vbo[4 * index + 1] = p.y * c_scale;
vbo[4 * index + 2] = p.z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
// Copy boid velocities into the color VBO; the +0.3f bias keeps
// near-zero velocities from rendering fully black.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 v = vel[index];
vbo[4 * index + 0] = v.x + 0.3f;
vbo[4 * index + 1] = v.y + 0.3f;
vbo[4 * index + 2] = v.z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
* Streams positions and velocities into their respective VBOs and blocks
* until both copies have finished.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
const dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <blocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <blocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
* Returns the velocity *delta* from the three boid rules (caller adds the
* boid's current velocity and clamps the speed).
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 perceivedCen = glm::vec3(0.0f, 0.0f, 0.0f); // rule-1 accumulator
glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f);            // rule-2 accumulator
glm::vec3 perceivedVel = glm::vec3(0.0f, 0.0f, 0.0f); // rule-3 accumulator
glm::vec3 change = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 thisPos = pos[iSelf];
glm::vec3 thisVel = vel[iSelf]; // NOTE(review): unused in this function
int neighbor1 = 0; // boids counted for rule 1
int neighbor2 = 0; // boids counted for rule 3
// Brute-force scan over every other boid.
for (int i = 0; i < N; i++) {
if (iSelf == i) {
continue;
}
float dist = glm::distance(pos[i], thisPos);
if (dist < rule1Distance) {
neighbor1++;
perceivedCen += pos[i];
}
if (dist < rule2Distance) {
c -= pos[i] - thisPos;
}
if (dist < rule3Distance) {
neighbor2++;
perceivedVel += vel[i];
}
}
// Averages only apply when at least one neighbor was in range.
if (neighbor1 > 0) {
perceivedCen /= neighbor1;
change += (perceivedCen - thisPos) * rule1Scale;
}
if (neighbor2 > 0) {
perceivedVel /= neighbor2;
change += perceivedVel * rule3Scale;
}
change += c * rule2Scale;
return change;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its velocity based on its current velocity and position.
* Reads vel1, writes vel2 (ping-pong) so every thread sees a consistent
* snapshot of the previous step's velocities.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 newVel = computeVelocityChange(N, index, pos, vel1) + vel1[index];
// Clamp speed to maxSpeed while preserving direction.
float speed = glm::length(newVel);
if (speed > maxSpeed) {
newVel = newVel / speed * maxSpeed;
}
vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
* Explicit Euler step followed by toroidal wrap-around at +/- scene_scale.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flattens (x, y, z) with x varying fastest and z slowest (row-major in x).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + gridResolution * (y + gridResolution * z);
}
// Label each boid with the 1D index of the grid cell containing it, and
// record the identity mapping (boid -> array slot) that the subsequent
// key sort will permute into cell order.
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
indices[index] = index;
// BUGFIX: scale the world-space offset into cell units BEFORE taking
// floor. The original floored first and scaled afterwards, which only
// yields the correct cell because cellWidth >= 1 in this project; for
// cellWidth < 1 it would collapse several cells into one.
int x = (int)floor((pos[index].x - gridMin.x) * inverseCellWidth);
int y = (int)floor((pos[index].y - gridMin.y) * inverseCellWidth);
int z = (int)floor((pos[index].z - gridMin.z) * inverseCellWidth);
gridIndices[index] = gridIndex3Dto1D(x, y, z, gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fills intBuffer[0..N) with `value` (used to reset cell start/end to -1).
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
intBuffer[index] = value;
}
// Record, for every occupied grid cell, the first and last (inclusive) slot
// it occupies in the cell-key-sorted boid array. A cell boundary exists
// wherever the sorted key changes. Cells with no boids keep their -1 marker.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// BUGFIX: the bounds check must come before touching global memory; the
// original read particleGridIndices[index] first, an out-of-bounds read
// for the tail threads of the final block.
if (index >= N) {
return;
}
int current = particleGridIndices[index];
if (index == 0) {
// First sorted slot always opens its cell.
gridCellStartIndices[current] = 0;
} else {
int previous = particleGridIndices[index - 1];
if (previous != current) {
// Key changed: this slot opens a cell and closes the previous one.
gridCellStartIndices[current] = index;
gridCellEndIndices[previous] = index - 1;
}
}
// Last sorted slot always closes its cell. (Also fixes the original's
// N == 1 case, which left the single cell's end index unset.)
if (index == N - 1) {
gridCellEndIndices[current] = index;
}
}
// Velocity update using the uniform grid with scattered boid data: cell
// ranges index into particleArrayIndices, which in turn indexes pos/vel1.
// Writes the clamped new velocity for boid `index` into vel2[index].
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
// Accumulators for rule 1 (cohesion), rule 2 (separation), rule 3 (alignment).
glm::vec3 perceivedCen(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
glm::vec3 newVel(0.0f, 0.0f, 0.0f);
float halfCellWidth = cellWidth / 2;
// Candidate-cell bounds, clamped to the grid. NOTE(review): floor() is
// applied to the world-space offset BEFORE scaling by inverseCellWidth;
// this matches the intended cell only because cellWidth >= 1 here —
// confirm against kernComputeIndices if the cell width ever changes.
int left = imax(floor(thisPos.x - gridMin.x - halfCellWidth) * inverseCellWidth, 0);
int right = imin(floor(thisPos.x - gridMin.x + halfCellWidth) * inverseCellWidth, gridResolution-1);
int down = imax(floor(thisPos.y - gridMin.y - halfCellWidth) * inverseCellWidth, 0);
int top = imin(floor(thisPos.y - gridMin.y + halfCellWidth) * inverseCellWidth, gridResolution - 1);
int back = imax(floor(thisPos.z - gridMin.z - halfCellWidth) * inverseCellWidth, 0);
int front = imin(floor(thisPos.z - gridMin.z + halfCellWidth) * inverseCellWidth, gridResolution - 1);
int neighbor1 = 0; // boids counted for rule 1
int neighbor2 = 0; // boids counted for rule 3
for (int i = back; i <= front; i++) {
for (int j = down; j <= top; j++) {
for (int k = left; k <= right; k++) {
int idx = gridIndex3Dto1D(k, j, i, gridResolution);
int start = gridCellStartIndices[idx];
if (start < 0) {
continue; // -1 marks an empty cell
}
int end = gridCellEndIndices[idx]; // inclusive
for (int m = start; m <= end;m++) {
// Extra indirection: sorted slot -> actual boid index.
int boid = particleArrayIndices[m];
if (boid == index) {continue;}
glm::vec3 thatPos = pos[boid];
float dist = glm::distance(thisPos, thatPos);
// rule 1
if (dist < rule1Distance) {
neighbor1++;
perceivedCen += thatPos;
}
// rule 2
if (dist < rule2Distance) {
c -= thatPos - thisPos;
}
// rule 3
if (dist < rule3Distance) {
neighbor2++;
perceivedVel += vel1[boid];
}
}
}
}
}
if (neighbor1 > 0) {
perceivedCen /= neighbor1;
newVel += (perceivedCen - thisPos) * rule1Scale;
}
if (neighbor2 > 0) {
perceivedVel /= neighbor2;
newVel += perceivedVel * rule3Scale;
}
newVel += c * rule2Scale + vel1[index];
// Clamp speed (magnitude only; direction preserved).
float speed = glm::length(newVel);
if (speed > maxSpeed) {
newVel = newVel / speed * maxSpeed;
}
vel2[index] = newVel;
}
// Gather pos/vel into cell-sorted order using the sorted boid-index map:
// after this, boids that share a grid cell are contiguous in the
// *_coherent buffers, so the coherent search kernel needs no indirection.
__global__ void kernReorder(int N,
int *particleArrayIndices, glm::vec3 *pos,
glm::vec3 *vel, glm::vec3 *pos_coherent, glm::vec3 *vel_coherent) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// One read of the gather index serves both copies.
int boid = particleArrayIndices[index];
pos_coherent[index] = pos[boid];
vel_coherent[index] = vel[boid];
}
// Velocity update using the uniform grid on cell-coherent data:
// gridCellStartIndices/gridCellEndIndices index directly into pos/vel1
// (no particleArrayIndices indirection). Writes the clamped new velocity
// for boid `index` into vel2[index]; vel1 is read-only here (ping-pong).
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
// Accumulators for rule 1 (cohesion), rule 2 (separation), rule 3 (alignment).
glm::vec3 perceivedCen(0.0f, 0.0f, 0.0f);
glm::vec3 c(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
glm::vec3 newVel(0.0f, 0.0f, 0.0f);
float halfCellWidth = cellWidth / 2;
// Candidate-cell bounds, clamped to the grid. NOTE(review): floor() is
// applied to the world-space offset BEFORE scaling by inverseCellWidth;
// this matches the intended cell only because cellWidth >= 1 here —
// confirm against kernComputeIndices if the cell width ever changes.
int left = imax(floor(thisPos.x - gridMin.x - halfCellWidth) * inverseCellWidth, 0);
int right = imin(floor(thisPos.x - gridMin.x + halfCellWidth) * inverseCellWidth, gridResolution - 1);
int down = imax(floor(thisPos.y - gridMin.y - halfCellWidth) * inverseCellWidth, 0);
int top = imin(floor(thisPos.y - gridMin.y + halfCellWidth) * inverseCellWidth, gridResolution - 1);
int back = imax(floor(thisPos.z - gridMin.z - halfCellWidth) * inverseCellWidth, 0);
int front = imin(floor(thisPos.z - gridMin.z + halfCellWidth) * inverseCellWidth, gridResolution - 1);
int neighbor1 = 0; // boids counted for rule 1
int neighbor2 = 0; // boids counted for rule 3
// z outermost / x innermost matches gridIndex3Dto1D's x-fastest layout,
// so consecutive inner iterations touch consecutive (coherent) cells.
for (int i = back; i <= front; i++) {
for (int j = down; j <= top; j++) {
for (int k = left; k <= right; k++) {
int idx = gridIndex3Dto1D(k, j, i, gridResolution);
int start = gridCellStartIndices[idx];
if (start < 0) {
continue; // -1 marks an empty cell
}
int end = gridCellEndIndices[idx]; // inclusive
for (int m = start; m <= end; m++) {
if (m == index) {
continue; // skip self
}
glm::vec3 thatPos = pos[m];
float dist = glm::distance(thisPos, thatPos);
// rule 1
if (dist < rule1Distance) {
neighbor1++;
perceivedCen += thatPos;
}
// rule 2
if (dist < rule2Distance) {
c -= thatPos - thisPos;
}
// rule 3
if (dist < rule3Distance) {
neighbor2++;
perceivedVel += vel1[m];
}
}
}
}
}
if (neighbor1 > 0) {
perceivedCen /= neighbor1;
newVel += (perceivedCen - thisPos) * rule1Scale;
}
if (neighbor2 > 0) {
perceivedVel /= neighbor2;
newVel += perceivedVel * rule3Scale;
}
newVel += c * rule2Scale + vel1[index];
// Clamp speed (magnitude only; direction preserved).
float speed = glm::length(newVel);
if (speed > maxSpeed) {
newVel = newVel / speed * maxSpeed;
}
vel2[index] = newVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
* Brute-force O(N^2) variant: every boid scans every other boid.
*/
void Boids::stepSimulationNaive(float dt) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// New velocities go to dev_vel2 so dev_vel1 stays a stable input.
kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Ping-pong the velocity buffers by swapping pointers instead of paying
// for a device-to-device cudaMemcpy every frame; dev_vel1 now holds the
// new velocities and the stale buffer becomes next frame's scratch.
glm::vec3 *tmp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = tmp;
}
/**
* Step the simulation using the scattered uniform grid:
* label -> sort by cell key -> bucket -> neighbor-search update -> integrate.
*/
void Boids::stepSimulationScatteredGrid(float dt) {
// 1) Reset cell bookkeeping; -1 marks "cell contains no boids".
dim3 fullBlocksPerGrid1((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<fullBlocksPerGrid1, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
kernResetIntBuffer<<<fullBlocksPerGrid1, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
// 2) Label each boid with its grid-cell key and its own array index.
dim3 fullBlocksPerGrid2((numObjects + blockSize - 1) / blockSize);
kernComputeIndices<<<fullBlocksPerGrid2, blockSize>>>(numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// 3) Sort boid indices by cell key so each cell is a contiguous run,
// then record each cell's [start, end] range.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernIdentifyCellStartEnd<<<fullBlocksPerGrid2, blockSize>>>(numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// 4) Velocity update via per-cell neighbor search, then integrate positions.
kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGrid2, blockSize>>>(
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos<<<fullBlocksPerGrid2, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// 5) Ping-pong velocities by pointer swap instead of a per-frame
// device-to-device copy (same observable state, no transfer cost).
glm::vec3 *tmp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = tmp;
}
void Boids::stepSimulationCoherentGrid(float dt) {
    // Uniform-grid neighbor search on cell-coherent data:
    //  1. reset per-cell start/end markers,
    //  2. label each particle with its grid cell and array index,
    //  3. sort particle indices by cell (unstable thrust sort),
    //  4. find each cell's start/end offsets in the sorted list,
    //  5. reshuffle pos/vel into cell-coherent buffers (the "BIG DIFFERENCE"
    //     vs. the scattered version: neighbor reads become contiguous),
    //  6. velocity update via neighbor search on the coherent data,
    //  7. integrate positions,
    //  8. ping-pong results back into dev_vel1/dev_pos.
    dim3 fullBlocksPerGrid1((gridCellCount + blockSize - 1) / blockSize);
    kernResetIntBuffer << <fullBlocksPerGrid1, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");
    kernResetIntBuffer << <fullBlocksPerGrid1, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");
    dim3 fullBlocksPerGrid2((numObjects + blockSize - 1) / blockSize);
    kernComputeIndices << <fullBlocksPerGrid2, blockSize >> > (numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
    checkCUDAErrorWithLine("kernComputeIndices failed!");
    thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
    kernIdentifyCellStartEnd << <fullBlocksPerGrid2, blockSize >> > (numObjects, dev_particleGridIndices,
        dev_gridCellStartIndices, dev_gridCellEndIndices);
    checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
    kernReorder << <fullBlocksPerGrid2, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos,
        dev_vel1, dev_pos_coherent, dev_vel_coherent);
    checkCUDAErrorWithLine("kernReorder failed!");
    kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid2, blockSize >> > (
        numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth, gridCellWidth,
        dev_gridCellStartIndices, dev_gridCellEndIndices,
        dev_pos_coherent, dev_vel_coherent, dev_vel2);
    // BUG FIX: this message previously named the *scattered* kernel, which
    // would mislead anyone reading the error output for this code path.
    checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
    kernUpdatePos << <fullBlocksPerGrid2, blockSize >> > (numObjects, dt, dev_pos_coherent, dev_vel2);
    checkCUDAErrorWithLine("kernUpdatePos failed!");
    cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
    cudaMemcpy(dev_pos, dev_pos_coherent, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
    // Release every device buffer allocated during simulation setup.
    // Grid bookkeeping and coherent scratch buffers first, then the
    // primary particle arrays; the order is not significant.
    cudaFree(dev_particleGridIndices);
    cudaFree(dev_particleArrayIndices);
    cudaFree(dev_gridCellStartIndices);
    cudaFree(dev_gridCellEndIndices);
    cudaFree(dev_pos_coherent);
    cudaFree(dev_vel_coherent);
    cudaFree(dev_vel1);
    cudaFree(dev_vel2);
    cudaFree(dev_pos);
}
void Boids::unitTest() {
    // LOOK-1.2 Exercises thrust::sort_by_key on a small fixed key/value set
    // and prints the pairs before and after the (unstable) GPU sort.
    int *dev_intKeys;
    int *dev_intValues;
    int N = 10;
    std::unique_ptr<int[]> intKeys{ new int[N] };
    std::unique_ptr<int[]> intValues{ new int[N] };
    // Fixture: values are simply 0..9; keys contain duplicates so the
    // unstable sort has something interesting to do.
    const int initKeys[10] = { 0, 1, 0, 3, 0, 2, 2, 0, 5, 6 };
    for (int i = 0; i < N; i++) {
        intKeys[i] = initKeys[i];
        intValues[i] = i;
    }
    cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
    cudaMalloc((void**)&dev_intValues, N * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
    dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
    std::cout << "before unstable sort: " << std::endl;
    for (int i = 0; i < N; i++) {
        std::cout << " key: " << intKeys[i];
        std::cout << " value: " << intValues[i] << std::endl;
    }
    // Upload the fixture, sort on the GPU through thrust device pointers,
    // then download the result.
    cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
    thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
    thrust::device_ptr<int> dev_thrust_values(dev_intValues);
    // LOOK-2.1 Example for using thrust::sort_by_key
    thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
    cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
    checkCUDAErrorWithLine("memcpy back failed!");
    std::cout << "after unstable sort: " << std::endl;
    for (int i = 0; i < N; i++) {
        std::cout << " key: " << intKeys[i];
        std::cout << " value: " << intValues[i] << std::endl;
    }
    // cleanup
    cudaFree(dev_intKeys);
    cudaFree(dev_intValues);
    checkCUDAErrorWithLine("cudaFree failed!");
}
|
40d6b76c686d5a1a245ef0c285574cc169c57090.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "additionally.h"
#include "gpu.h"
extern int gpu_index;
#define BLOCK 512
// Intentionally empty host<->device transfer stubs: these layer types are not
// used by this (inference-only) build, but the symbols are kept so callers
// still link.
void pull_batchnorm_layer(layer l) {} // not required now
void push_batchnorm_layer(layer l) {} // not required now
void pull_local_layer(local_layer l) {} // not required now
void push_local_layer(local_layer l) {} // not required now
void pull_connected_layer(local_layer l) {} // not required now
void push_connected_layer(local_layer l) {} // not required now
// Aborts with a diagnostic if `status` (the result of an API call) or any
// previously recorded asynchronous error (fetched via hipGetLastError) is
// not hipSuccess.
// NOTE: assert(0) fires before error() in debug builds; when compiled with
// NDEBUG the buffered message is passed to error() instead.
void check_error(hipError_t status)
{
    //hipDeviceSynchronize();
    hipError_t status2 = hipGetLastError();
    if (status != hipSuccess)
    {
        const char *s = hipGetErrorString(status);
        char buffer[256];
        printf("CUDA Error: %s\n", s);
        assert(0);
        snprintf(buffer, 256, "CUDA Error: %s", s);
        error(buffer);
    }
    if (status2 != hipSuccess)
    {
        // BUG FIX: this branch previously formatted hipGetErrorString(status)
        // — the *current* call's (successful) status — so the message for the
        // "previous" asynchronous error was wrong. Use status2.
        const char *s = hipGetErrorString(status2);
        char buffer[256];
        printf("CUDA Error Prev: %s\n", s);
        assert(0);
        snprintf(buffer, 256, "CUDA Error Prev: %s", s);
        error(buffer);
    }
}
// Selects device `n` and remembers the choice in the global gpu_index.
void cuda_set_device(int n)
{
    gpu_index = n;
    check_error(hipSetDevice(n));
}
// Returns the ordinal of the currently active device.
int cuda_get_device()
{
    int device = 0;
    check_error(hipGetDevice(&device));
    return device;
}
#ifdef CUDNN
// Returns a lazily-created cuDNN handle for the current device, cached for
// up to 16 devices (indexed by device ordinal).
// NOTE(review): the init[]/handle[] statics are written without any
// synchronization — presumably single-threaded use only; confirm.
cudnnHandle_t cudnn_handle()
{
static int init[16] = { 0 };
static cudnnHandle_t handle[16];
int i = cuda_get_device();
if (!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
}
return handle[i];
}
#endif
// Allocates n floats on the device. If `x` is non-NULL its contents are
// uploaded into the new buffer. Aborts (via check_error/error) on failure.
float *cuda_make_array(float *x, size_t n)
{
    const size_t bytes = sizeof(float) * n;
    float *device_ptr;
    check_error(hipMalloc((void **)&device_ptr, bytes));
    if (x) {
        check_error(hipMemcpy(device_ptr, x, bytes, hipMemcpyHostToDevice));
    }
    if (!device_ptr) error("Cuda malloc failed\n");
    return device_ptr;
}
// Allocates an uninitialized device buffer of n ints.
int *cuda_make_int_array(size_t n)
{
    int *device_ptr;
    check_error(hipMalloc((void **)&device_ptr, sizeof(int) * n));
    return device_ptr;
}
// Releases a device allocation obtained from cuda_make_array().
void cuda_free(float *x_gpu)
{
    check_error(hipFree(x_gpu));
}
// Uploads n floats from host array `x` to device array `x_gpu`.
void cuda_push_array(float *x_gpu, float *x, size_t n)
{
    const size_t bytes = sizeof(float) * n;
    check_error(hipMemcpy(x_gpu, x, bytes, hipMemcpyHostToDevice));
}
// Downloads n floats from device array `x_gpu` into host array `x`.
void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
    const size_t bytes = sizeof(float) * n;
    check_error(hipMemcpy(x, x_gpu, bytes, hipMemcpyDeviceToHost));
}
// Copies layer i's output from device to host and returns the host pointer.
// (REGION layers are not pulled — presumably their output already lives on
// the host; verify against the REGION forward pass.)
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
// Returns the host-visible output of the last non-COST layer of the network.
float *get_network_output_gpu(network net)
{
int i;
// Walk backwards past any trailing COST layers.
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
// Computes a launch grid covering n threads at BLOCK threads per block.
// When a 1D grid would exceed the 65535-block limit in x, the grid is
// split into a roughly-square 2D (x, y) shape instead.
dim3 cuda_gridsize(size_t n) {
    size_t blocks = (n - 1) / BLOCK + 1;
    size_t x = blocks;
    size_t y = 1;
    if (x > 65535) {
        x = ceil(sqrtf(blocks));
        y = (n - 1) / (x * BLOCK) + 1;
    }
    dim3 d;
    d.x = x;
    d.y = y;
    d.z = 1;
    return d;
}
// Downloads a convolutional layer's parameters (weights, biases, and the
// batch-norm statistics when enabled) from the device into the host arrays.
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// Uploads a convolutional layer's parameters (weights, biases, and the
// batch-norm statistics when enabled) from the host arrays to the device.
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// -------------------- CUDA functions -------------------
// add BIAS
// Adds biases[filter] to every spatial element of each filter's output map.
// Launch layout: blockIdx.x covers `size`, blockIdx.y = filter, blockIdx.z = batch.
__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
{
    int spatial = blockIdx.x * blockDim.x + threadIdx.x;
    int filter = blockIdx.y;
    int batch = blockIdx.z;
    if (spatial >= size) return;
    output[(batch * n + filter) * size + spatial] += biases[filter];
}
// Launches add_bias_kernel: grid x covers spatial size, y = n filters, z = batch.
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    dim3 grid((size - 1) / BLOCK + 1, n, batch);
    dim3 block(BLOCK, 1, 1);
    add_bias_kernel << <grid, block >> >(output, biases, n, size);
    check_error(hipPeekAtLastError());
}
// normalization
// In-place per-filter normalization: x = (x - mean[f]) / (sqrt(var[f]) + eps).
// Note the epsilon is added to the std-dev, not the variance.
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < N) {
        int f = (idx / spatial) % filters;
        x[idx] = (x[idx] - mean[f]) / (sqrtf(variance[f]) + .000001f);
    }
}
// Normalizes every element of x in place using per-filter mean/variance.
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    size_t total = batch*filters*spatial;
    normalize_kernel << <cuda_gridsize(total), BLOCK >> >(total, x, mean, variance, batch, filters, spatial);
    check_error(hipPeekAtLastError());
}
// fill array
// Strided fill: X[i*INCX] = ALPHA for every i in [0, N).
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    X[idx * INCX] = ALPHA;
}
// Fills the (strided) device array X with the constant ALPHA.
void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
    fill_kernel << <cuda_gridsize(N), BLOCK >> >(N, ALPHA, X, INCX);
    check_error(hipPeekAtLastError());
}
// scale BIAS
// Multiplies every spatial element of each filter's output map by biases[filter].
// Launch layout mirrors add_bias_kernel: y = filter, z = batch.
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
    int spatial = blockIdx.x * blockDim.x + threadIdx.x;
    int filter = blockIdx.y;
    int batch = blockIdx.z;
    if (spatial >= size) return;
    output[(batch * n + filter) * size + spatial] *= biases[filter];
}
// Launches scale_bias_kernel: grid x covers spatial size, y = n filters, z = batch.
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    dim3 grid((size - 1) / BLOCK + 1, n, batch);
    dim3 block(BLOCK, 1, 1);
    scale_bias_kernel << <grid, block >> >(output, biases, n, size);
    check_error(hipPeekAtLastError());
}
// max-pool layer
// Max-pool forward pass, one thread per output element: scans a size x size
// window of `input`, writes the maximum to `output` and the argmax (flat
// input index, or -1 if the whole window is out of bounds) to `indexes`.
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
// Pooled output dimensions.
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
// Decompose the flat thread id into (b, k, i, j) output coordinates.
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
// The pooling window origin is shifted left/up by half the padding.
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
// Out-of-bounds taps contribute -INF so they never win the max.
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
// Max-pool forward dispatch: non-overlapping pooling (stride == size) goes
// through cuDNN's pooling routine; overlapping pooling falls back to the
// custom kernel above.
// NOTE(review): the cuDNN branch is not wrapped in #ifdef CUDNN even though
// cudnn_handle() is — this file presumably always builds with CUDNN; verify.
void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.stride == layer.size) {
//if(1) {
// maxpool_status is assigned but never inspected.
cudnnStatus_t maxpool_status;
float alpha = 1, beta = 0;
maxpool_status = cudnnPoolingForward(
cudnn_handle(),
layer.poolingDesc,
&alpha,
layer.srcTensorDesc,
state.input,
&beta,
layer.dstTensorDesc,
layer.output_gpu);
//maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc);
//cudnnDestroyTensorDescriptor(layer.srcTensorDesc);
//cudnnDestroyTensorDescriptor(layer.dstTensorDesc);
}
else {
// One thread per pooled output element.
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> > (n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
}
// flatten
// Converts between channel-major (b, c, s) and interleaved (b, s, c)
// layouts. `forward` selects the direction of the copy.
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    int s = idx % spatial;
    int rest = idx / spatial;
    int c = rest % layers;
    int b = rest / layers;
    int planar = b * layers * spatial + c * spatial + s;
    int interleaved = b * layers * spatial + s * layers + c;
    if (forward) out[interleaved] = x[planar];
    else out[planar] = x[interleaved];
}
// Launches flatten_kernel over the full (batch x layers x spatial) volume.
void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
    int total = spatial * batch * layers;
    flatten_kernel << <cuda_gridsize(total), BLOCK >> >(total, x, spatial, layers, batch, forward, out);
    check_error(hipPeekAtLastError());
}
// activations
// Scalar activation functions (and one gradient) evaluated per element on
// the device. Several use double-precision literals and exp(); kept as-is
// to preserve upstream numerics.
// lhtan: identity on (0,1), slope .001 outside.
__device__ float lhtan_activate_kernel(float x)
{
if (x < 0) return .001*x;
if (x > 1) return .001*(x - 1) + 1;
return x;
}
// Gradient of lhtan: 1 inside (0,1), .001 outside.
__device__ float lhtan_gradient_kernel(float x)
{
if (x > 0 && x < 1) return 1;
return .001;
}
// Hard clamp to [-1, 1].
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x) { return x; }
__device__ float logistic_activate_kernel(float x) { return 1. / (1. + exp(-x)); }
__device__ float loggy_activate_kernel(float x) { return 2. / (1. + exp(-x)) - 1; }
__device__ float relu_activate_kernel(float x) { return x*(x>0); }
__device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(exp(x) - 1); }
__device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01*x; }
__device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1*x; }
__device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1*x; }
__device__ float tanh_activate_kernel(float x) { return (2 / (1 + exp(-2 * x)) - 1); }
// Piecewise-linear sigmoid approximation.
__device__ float plse_activate_kernel(float x)
{
if (x < -4) return .01 * (x + 4);
if (x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
// Staircase: flat on even integer intervals, linear ramp on odd ones.
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n % 2 == 0) return floor(x / 2.);
else return (x - n) + floor(x / 2.);
}
// Dispatches one scalar activation evaluation by enum tag.
// Falls through to 0 for unknown tags.
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
// Applies the activation `a` to every element of x in place.
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    x[idx] = activate_kernel(x[idx], a);
}
// Specialized LEAKY activation: f(v) = v if v > 0, else v / 10.
__global__ void activate_array_leaky_kernel(float *x, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    float v = x[idx];
    x[idx] = (v > 0) ? v : v / 10;
}
// Applies activation `a` to the device array x. LEAKY has a dedicated fast
// kernel; all other activations go through the generic dispatch kernel.
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
    if (a == LEAKY) {
        activate_array_leaky_kernel << <(n / BLOCK + 1), BLOCK, 0, 0 >> >(x, n);
    }
    else {
        activate_array_kernel << <cuda_gridsize(n), BLOCK, 0, 0 >> >(x, n, a);
    }
    check_error(hipPeekAtLastError());
}
// softmax layer
// Numerically-stable softmax over input[0..n) with temperature `temp`,
// written to `output` (the max is subtracted before exponentiation).
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for (i = 0; i < n; ++i) {
        // BUG FIX: `val` was declared `int`, truncating each input toward
        // zero when computing the maximum. That produces a wrong max for
        // non-integer logits and defeats the overflow protection.
        float val = input[i];
        largest = (val > largest) ? val : largest;
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i] /= sum;
    }
}
// One thread per batch element; each computes an n-wide softmax over its
// own `offset`-strided slice.
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
    int b = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (b < batch) {
        softmax_device(n, input + b * offset, temp, output + b * offset);
    }
}
// Computes `groups` independent length-n softmaxes (one device thread each).
void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
    softmax_kernel << <cuda_gridsize(groups), BLOCK >> >(n, offset, groups, input, temp, output);
    check_error(hipPeekAtLastError());
}
// reorg layer
// Reorg (space-to-depth) gather: each output element at flat index i reads
// from a strided source location in the expanded (w*stride, h*stride,
// c/stride^2) view of x.
// NOTE(review): the `forward` parameter is accepted but never used in this
// kernel — confirm callers expect gather-only behavior.
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_index = i;
// Decompose i into (b, c, h, w) coordinates of the output tensor.
int in_w = i%w;
i = i / w;
int in_h = i%h;
i = i / h;
int in_c = i%c;
i = i / c;
int b = i%batch;
// Source coordinates: channel offset selects the (row, col) sub-position
// inside each stride x stride cell.
int out_c = c / (stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
out[in_index] = x[out_index];
}
// Launches reorg_kernel over the full (batch x c x h x w) output volume.
void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int total = w * h * c * batch;
    reorg_kernel << <cuda_gridsize(total), BLOCK >> >(total, x, w, h, c, batch, stride, forward, out);
    check_error(hipPeekAtLastError());
}
// upsample layer
// Nearest-neighbor upsample by `stride`, one thread per output element.
// forward != 0: out += scale * x (gather from the smaller input).
// forward == 0: atomically accumulates scale * out back into x (backward
// pass — many output elements map to one input, hence atomicAdd).
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int out_index = i;
// Decompose i into output-space (b, c, h, w) with upscaled h/w extents.
int out_w = i % (w*stride);
i = i / (w*stride);
int out_h = i % (h*stride);
i = i / (h*stride);
int out_c = i%c;
i = i / c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if (forward) out[out_index] += scale * x[in_index];
else atomicAdd(x + in_index, scale * out[out_index]);
}
// Launches upsample_kernel over the full upscaled output volume.
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    size_t total = w*h*c*batch*stride*stride;
    upsample_kernel << <cuda_gridsize(total), BLOCK >> >(total, in, w, h, c, batch, stride, forward, scale, out);
    check_error(hipPeekAtLastError());
}
// Strided copy with offsets: Y[OFFY + i*INCY] = X[OFFX + i*INCX].
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    Y[idx * INCY + OFFY] = X[idx * INCX + OFFX];
}
// BLAS-style strided device copy with explicit start offsets.
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
    copy_kernel << <cuda_gridsize(N), BLOCK >> >(N, X, OFFX, INCX, Y, OFFY, INCY);
    check_error(hipPeekAtLastError());
}
// Convenience wrapper: strided copy starting at offset 0 in both arrays.
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
    copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
// shortcut layer
// Shortcut (residual) addition over the overlapping (minw, minh, minc)
// region of two tensors: `stride` subsamples the (larger) source `add`,
// `sample` spreads writes into the (larger) destination `out`.
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
// Decompose id into (b, k, j, i) over the overlap region.
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
// Adds tensor `add` (w1 x h1 x c1) into `out` (w2 x h2 x c2) over their
// overlap. The integer width/height ratios must agree in both dimensions
// (checked by the asserts, which run before the <1 clamping).
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
// stride downsamples a larger source; sample upsamples into a larger dest.
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
// Integer division yields 0 for the smaller tensor; clamp to 1.
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int size = batch * minw * minh * minc;
shortcut_kernel << <cuda_gridsize(size), BLOCK>> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
check_error(hipPeekAtLastError());
}
// ----------- Quantinization --------------
// Clamps src to the symmetric range [-max_val, max_val], preserving sign.
__host__ __device__ int max_abs(int src, int max_val) {
    if (abs(src) > abs(max_val)) {
        return (src > 0) ? max_val : -max_val;
    }
    return src;
}
// Quantize: scale each float by `multipler`, clamp to +/- max_val, store as
// int8 ("7-bit + sign").
__global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val); // 7-bit (1-bit sign)
}
// Host wrapper for cuda_f32_to_int8.
void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) {
    cuda_f32_to_int8 << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler, max_val);
}
// Quantize without clamping: scale by `multipler` and let the int8 store
// truncate naturally.
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    output_int8[idx] = input_f32[idx] * multipler; // 7-bit (1-bit sign)
}
// Host wrapper for cuda_f32_to_int8_nomax.
void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) {
    cuda_f32_to_int8_nomax << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler);
}
// Dequantize: expand each int8 back to float, scaled by `multipler`.
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    output_f32[idx] = input_int8[idx] * multipler; // 7-bit (1-bit sign)
}
// Host wrapper for cuda_int8_to_f32.
void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) {
    cuda_int8_to_f32 << < size / BLOCK + 1, BLOCK >> >(input_int8, size, output_f32, multipler);
}
// In-place elementwise scale of a float array by `multipler`.
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    input_output[idx] = input_output[idx] * multipler; // 7-bit (1-bit sign)
}
// Host wrapper for cuda_multiply_f32.
void cuda_do_multiply_f32(float *input_output, size_t size, float multipler) {
    cuda_multiply_f32 << < size / BLOCK + 1, BLOCK >> >(input_output, size, multipler);
}
// --------------------------------
// ------------- XNOR -------------
// --------------------------------
// XNOR-net weight binarization: one thread per filter. Each weight is
// replaced by +/- (mean absolute value of its filter), preserving sign.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    float mean = 0;
    for (int i = 0; i < size; ++i) {
        mean += fabs(weights[f * size + i]);
    }
    mean = mean / size;
    for (int i = 0; i < size; ++i) {
        binary[f * size + i] = (weights[f * size + i] > 0) ? mean : -mean;
    }
}
// Host wrapper: binarizes n filters of `size` weights each.
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary);
    check_error(hipPeekAtLastError());
}
// --------------------------------
// Sign binarization: binary[i] = +1 when x[i] >= 0, otherwise -1.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < n) {
        binary[idx] = (x[idx] >= 0) ? 1 : -1;
    }
}
// Host wrapper for binarize_kernel.
void binarize_gpu(float *x, int n, float *binary)
{
    binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary);
    check_error(hipPeekAtLastError());
}
// --------------------------------
// Exchanges the layer's real and binarized weight pointers — host copies
// always, device copies when built with GPU support.
void swap_binary(convolutional_layer *l)
{
    float *tmp = l->weights;
    l->weights = l->binary_weights;
    l->binary_weights = tmp;
#ifdef GPU
    tmp = l->weights_gpu;
    l->weights_gpu = l->binary_weights_gpu;
    l->binary_weights_gpu = tmp;
#endif
}
// --------------------------------
#define WARP_SIZE 32
// im2col with row alignment: like standard im2col, but each output row
// (one per channel_out element) is padded to `bit_align` entries so rows
// can later be bit-packed. One thread per (channel, output pixel), using a
// grid-stride loop; out-of-image taps are written as 0.
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
// Decompose index into (channel, h_out, w_out).
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
// Top-left corner of this output pixel's receptive field in the image.
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
//data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
// Rows are bit_align elements apart (instead of height_col*width_col).
data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
// data_col_ptr_32 is left over from a bit-packing experiment; unused.
float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
//if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask;
//data_col_ptr_32 += bit_align / 32;
//data_col_ptr += height_col * width_col;
data_col_ptr += bit_align;
}
}
}
}
// Host wrapper for im2col_align_gpu_kernel: launches one thread per
// (channel, output pixel).
void im2col_align_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align)
{
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * height_col * width_col;
    int num_blocks = (num_kernels + BLOCK - 1) / BLOCK;
    im2col_align_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
        num_kernels, im, height, width, ksize, pad,
        stride, height_col,
        width_col, data_col, bit_align);
}
// --------------------------------
// binary im2col - stride=1
// Binarizing im2col (stride must be 1): one thread per (channel, ky, kx)
// kernel tap. Each thread walks all output pixels in groups of 32 and the
// warp cooperates via __shfl/__ballot to pack 32 sign bits of data_col per
// store (bit i set iff the sampled value > 0).
// NOTE(review): uses mask-less __shfl/__ballot, which are removed on
// Volta+ CUDA — presumably valid in this HIP / pre-Volta build; verify.
__global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize, const int channels,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
// Leftover scratch declarations from experiments; unused below.
__shared__ float tmp_s[1];
__shared__ ulonglong4 tmp256_s[1];
//#define SHRED_VALS ((BLOCK / 169) * )
//__shared__ float dst_s[1024];
//__shared__ float dst_s[1024];
//__shared__ uint32_t bit_s[32];
//__shared__ uint8_t bit_s[128];
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (; index < n; index += blockDim.x*gridDim.x)
{
// Decompose the thread id into (channel, kernel col j, kernel row i).
int c_index = index;
int channel_in = c_index % channels;
//int h_out = index % height_col;
//int c_index = index / height_col;
//int channel_in = c_index % channels;
int channel_out = channel_in * ksize * ksize;
int j_index = c_index / channels;
int j = j_index % ksize;
int i = j_index / ksize;
// Output row base for this (channel, i, j) tap; rows are bit_align apart.
int pre_out_index = (channel_out + i*ksize + j) * bit_align;
int j_pad = (j - pad);
int i_pad = (i - pad);
// Process the output pixels 32 at a time (one warp-width per iteration).
for (int wh_index = 0; wh_index < (height_col*width_col); wh_index += 32)
//for (int h_out = 0; h_out < height_col; ++h_out)
{
// the end of padding
//if(0)
//for (int w_out = 0; w_out < (width_col); w_out += 32)
{
const int w_out = wh_index % width_col;
const int h_out = wh_index / width_col;
const int w = w_out + j_pad;
const int h = h_out + i_pad;
int pre_in_index = channel_in * height * width;
int pre_in_wh_index = h * width + w;
int send_wh_index = wh_index;
// Threads past the last kernel row broadcast an out-of-range index
// so their lanes are skipped below.
if (i >= ksize) send_wh_index = height_col*width_col;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t)
{
// Each iteration, lane t's work item is broadcast to the whole
// warp; the 32 lanes then handle 32 consecutive output pixels.
const int lane_id = threadIdx.x % WARP_SIZE;
const int cur_wh_index = __shfl(send_wh_index, t) + lane_id;
if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize)
{
const int cur_pre_out_index = __shfl(pre_out_index, t);
const int cur_pre_in_index = __shfl(pre_in_index, t);
const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id;
int w = cur_pre_in_wh_index % width;
int h = cur_pre_in_wh_index / width;
int in_index = cur_pre_in_index + cur_pre_in_wh_index;
int out_index = cur_pre_out_index + cur_wh_index;
// Out-of-image taps read as 0 (so their bit stays clear).
float val = (w >= 0 && w < width && h >= 0 && h < height) ?
data_im[in_index] : float();
//data_col[out_index] = val;
//tmp_s[0] = val;
// Pack the warp's 32 sign bits; lane 0 stores the 32-bit word.
uint32_t bit_mask = __ballot(val > 0);
if (lane_id == 0) {
uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]);
uint32_t *bit32_ptr = (uint32_t *)bit8_ptr;
*bit32_ptr = bit_mask;
}
}
}
}// w_out
}
}
}
// Host wrapper for im2col_align_bin_gpu_kernel: one thread per
// (channel, kernel-row, kernel-col) tap; each thread walks all output
// pixels and bit-packs the results into data_col.
void im2col_align_bin_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align) {
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * ksize * ksize;
    int num_blocks = num_kernels / BLOCK + 1;
    im2col_align_bin_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
        num_kernels, im, height, width, ksize, channels, pad,
        stride, height_col,
        width_col, data_col, bit_align);
}
// --------------------------------
// Packs floats into a bitmask: bit i of dst is set iff src[i] > 0. Each warp
// produces one 32-bit word via __ballot; lane 0 stores it, so dst must be
// sized/padded to a multiple of 32 bits — confirm at call sites.
// NOTE(review): mask-less __ballot is removed on Volta+ CUDA — presumably
// valid in this HIP / pre-Volta build; verify.
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
//const int size_aligned = size + (WARP_SIZE - size % WARP_SIZE);
int index = blockIdx.x*blockDim.x + threadIdx.x;
float src_val;
//for (; index < size_aligned; index += blockDim.x*gridDim.x)
{
//src_val = src[index];
// Threads past the end contribute a 0 bit.
if (index < size) src_val = src[index];
else src_val = 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
unsigned int bit_mask = __ballot(src_val > 0);
if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask;
}
}
// Host wrapper: bit-packs `size` floats (1 where src > 0) into dst.
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size);
}
// --------------------------------
// Bit accessors over a byte array: bit `index` lives in byte index/8 at
// shift index%8 (LSB-first within each byte).
// Clears bit `index` of dst.
__device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) {
size_t dst_i = index / 8;
int dst_shift = index % 8;
dst[dst_i] &= ~(1 << dst_shift);
}
// Sets bit `index` of dst.
__device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) {
size_t dst_i = index / 8;
int dst_shift = index % 8;
dst[dst_i] |= 1 << dst_shift;
//dst[dst_i] |= 1 << (8 - dst_shift);
}
// Returns bit `index` of src as 0 or 1.
__device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
size_t src_i = index / 8;
int src_shift = index % 8;
unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
//unsigned char val = (src[src_i] & (1 << (8 - src_shift))) > 0;
return val;
}
// Intel CPUs and nVidia CUDA GPU are little endian
// Reverses the bit order of a byte using explicit shift-and-mask pairs
// (portable host/device version).
__device__ __host__ unsigned char reverse_byte(unsigned char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
// Device-only byte reversal via the 32-bit bit-reverse intrinsic: __brev
// reverses all 32 bits, so the byte of interest ends up in the top 8.
__device__ unsigned char reverse_byte_CUDA(unsigned char a)
{
uint32_t tmp = __brev(a);
return tmp >> 24;
}
// Byte reversal via the classic multiply/mask bit-twiddling trick.
__device__ __host__ unsigned char reverse_byte_2(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
// Transposes an 8x8 bit tile (Hacker's Delight style transpose8), reading
// rows of A with byte stride m and writing rows of B with byte stride n.
// Output bytes are bit-reversed and written in reversed row order
// ("reversed diagonale"), matching the packing used by transpose_bin.
__device__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
{
unsigned x, y, t;
// Load the array and pack it into x and y.
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
// Three rounds of the standard bit-matrix transpose: swap 1-, 2-, then
// 4-bit blocks across the diagonal.
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
// Store with per-byte bit reversal, rows in reverse order.
B[7 * n] = reverse_byte_CUDA(x >> 24); B[6 * n] = reverse_byte_CUDA(x >> 16); B[5 * n] = reverse_byte_CUDA(x >> 8); B[4 * n] = reverse_byte_CUDA(x);
B[3 * n] = reverse_byte_CUDA(y >> 24); B[2 * n] = reverse_byte_CUDA(y >> 16); B[1 * n] = reverse_byte_CUDA(y >> 8); B[0 * n] = reverse_byte_CUDA(y);
}
// Transposes an n x m bit matrix A (row stride lda bits) into B (row stride
// ldb bits). Each thread owns one 8x8 bit tile at (i, j); the ragged tail of
// a row (j >= m - 8) falls back to a bit-by-bit copy. block_size is unused.
__global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (i = 0; i < n; i += 8)
{
// Map the flat thread index to a tile origin: i walks columns of 8 bits,
// j walks rows of 8 bits.
i = (index * 8) % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = ((index * 8) / n) * 8;
if (j < m - 8) {
// Full 8x8 tile: use the fast word transpose. Bit offsets are
// converted to byte offsets with /8.
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]);
}
else if (j < m) {
// Partial tail tile: copy the remaining bits one at a time.
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
else remove_bit(B, j*ldb + i);
}
}
}
}
}
// Bit-reverses an 8-bit value (same multiply/mask trick as reverse_byte_2).
__device__ __host__ uint8_t reverse_8_bit(uint8_t a) {
    const unsigned long even = (a * 0x0802LU) & 0x22110LU;
    const unsigned long odd = (a * 0x8020LU) & 0x88440LU;
    return (uint8_t)(((even | odd) * 0x10101LU) >> 16);
}
// Reverses the bit order of a 32-bit word using the hardware intrinsic
// (device-only). The commented fallback composes the result from four
// byte reversals for platforms without __brev.
__device__ uint32_t reverse_32_bit(uint32_t a)
{
// __device__ unsigned int __brev(unsigned int x) // CUDA
// unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
return __brev(a);
//return (reverse_8_bit(a >> 24) << 0) |
// (reverse_8_bit(a >> 16) << 8) |
// (reverse_8_bit(a >> 8) << 16) |
// (reverse_8_bit(a >> 0) << 24);
}
// swap(a0, a1, j, m): exchanges the bit groups selected by mask m between
// words a0 and a1 at shift distance j (requires a temporary `t` in scope).
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
// In-place transpose of a 32x32 bit matrix stored as 32 uint32 rows
// (Hacker's Delight), fully unrolled over the five stride levels, followed
// by a row reversal combined with per-word bit reversal ("reverse Y").
__device__ void transpose32_optimized(uint32_t A[32]) {
int j, k;
unsigned m, t;
// Reference loop form of the five unrolled stages below:
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y: swap row pairs (j, 31-j) while bit-reversing each word,
// producing the reversed-diagonal layout the callers expect.
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
// Threads per block for the 32x32 bit-transpose kernel.
#define BLOCK_TRANSPOSE32 256
// Transposes one 32x32 bit tile: gathers 32 uint32 rows of A (row stride m
// words) into this thread's private slice of shared memory, transposes them
// in place, and scatters to B (row stride n words).
// Each thread uses only its own 32-word slice (indexed by threadIdx.x), so
// no __syncthreads() is needed inside this helper.
__device__ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
//unsigned A_tmp[32];
//int i;
//#pragma unroll
//for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
//transpose32_optimized(A_tmp);
//#pragma unroll
//for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
// Shared scratch: 32 words per thread for BLOCK_TRANSPOSE32 threads.
__shared__ uint32_t A_shared[32 * BLOCK_TRANSPOSE32];
uint32_t *A_tmp = &A_shared[32 * threadIdx.x];
int i;
#pragma unroll 32
for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
transpose32_optimized(A_tmp);
#pragma unroll 32
for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
// transpose 32x32 bit
// transpose 32x32 bit
// Bit-matrix transpose: each thread transposes one 32x32 bit tile of the
// n x m bit matrix A (row stride lda bits) into B (row stride ldb bits).
// block_size is unused. NOTE(review): unlike the 8x8 kernel there is no
// bit-by-bit tail path, so n and m appear to be assumed multiples of 32 --
// confirm callers pad accordingly.
__global__ void transpose_bin_gpu_kernel_32(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
// Each thread owns a tile whose origin is a multiple of 32 bits.
int index = (blockIdx.x*blockDim.x + threadIdx.x) * 32;
//for (i = 0; i < n; i += 8)
{
i = index % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = (index / n) * 32;
if (j < m) {
// Bit offsets converted to 32-bit word offsets with /32.
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
}
}
}
}
// Transposes an n x m bit-packed matrix A (row stride lda bits) into B
// (row stride ldb bits) using the 32x32-tile kernel; block_size is passed
// through unused. One thread per 32x32 bit tile.
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    // Cleanup: the original also computed `size`/`num_blocks` for the 8x8-tile
    // kernel, but that launch is disabled, so those locals were dead code.
    size_t size32 = n*m / (32 * 32) + 1;
    const int num_blocks32 = size32 / BLOCK_TRANSPOSE32 + 1;
    transpose_bin_gpu_kernel_32 << <num_blocks32, BLOCK_TRANSPOSE32, 0, 0 >> > ((uint32_t *)A, (uint32_t *)B, n, m, lda, ldb, block_size);
    //transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(A, B, n, m, lda, ldb, block_size);
}
// --------------------------------
// Fills `size` bytes of device memory `src` with the byte value `val`.
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) {
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: the original wrote the constant 0 and ignored `val`, so any
    // non-zero fill request silently zeroed the buffer instead.
    if (index < size) src[index] = val;
}
// Host launcher: one thread per byte.
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> > (src, val, size);
}
// --------------------------------
//typedef unsigned long long int uint64_t;
//typedef unsigned int uint32_t;
//typedef unsigned char uint8_t;
//typedef char int8_t;
// Expands a boolean byte into a 64-bit mask: non-zero -> all ones, zero -> 0.
__device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) {
    return src ? ~(uint64_t)0 : (uint64_t)0;
}
// XNOR of the lowest bit of a and b; result is 0 or 1.
__device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) {
    return (uint8_t)(((a ^ b) ^ 1u) & 1u);
}
// Bitwise XNOR of two 32-bit words.
__device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    return (a ^ b) ^ 0xFFFFFFFFu;
}
// Bitwise XNOR of two 64-bit words.
__device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    return (a ^ b) ^ ~(uint64_t)0;
}
// Component-wise XNOR of two 128-bit values (four 32-bit lanes).
__device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) {
    uint4 r;
    r.x = ~(a.x ^ b.x);
    r.y = ~(a.y ^ b.y);
    r.z = ~(a.z ^ b.z);
    r.w = ~(a.w ^ b.w);
    return r;
}
// Component-wise XNOR of two 256-bit values (four 64-bit lanes).
__device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) {
    ulonglong4 r;
    r.x = ~(a.x ^ b.x);
    r.y = ~(a.y ^ b.y);
    r.z = ~(a.z ^ b.z);
    r.w = ~(a.w ^ b.w);
    return r;
}
/*
// A (weights) in the shared_memory
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i;
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
//if (i_cur < M && (index % N == 0 || threadIdx.x == 0)) {
//for (int k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//*((uint64_t *)(A_s + (local_i*lda + k) / 8)) = *((uint64_t *)(A + (i_cur*lda + k) / 8)); // weights
// }
//}
__syncthreads();
int i, j, k, h;
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
if (i < M) // l.n - filters [16 - 55 - 1024]
{
float mean_val = mean_arr[i];
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
#include <cstdio>
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
size_t size = M*N;
const int num_blocks = size / BLOCK + 1;
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr);
}
*/
// --------------------------------
// Butterfly (XOR-pattern) reduction: after log2(WARP_SIZE) exchange steps,
// every lane in the warp holds the sum of `val` over all lanes.
// NOTE(review): uses the legacy mask-less __shfl_xor; on CUDA 9+/Volta this
// would need __shfl_xor_sync(0xffffffff, ...) -- confirm the target
// toolchain (this source appears HIP-flavored, where the mask-less form is
// the supported one).
__inline__ __device__
int warpAllReduceSum(int val) {
for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
val += __shfl_xor(val, mask);
return val;
}
// Coalesced memory access
// A (weights) in the shared_memory - GOOD
// Binary (XNOR) GEMM with bit-packed, transposed input:
//   C[i*ldc + j] = (2 * popcount(xnor(A_row_i, B_row_j)) - K) * mean_arr[i] + bias_arr[i]
// A: M x K bit-packed weights (row stride lda bits), staged in shared memory.
// B: N x K bit-packed transposed input (row stride ldb bits).
// One thread per output element: i = index / N (filter), j = index % N (pixel).
// The K loop is split into a 2048-bit warp-cooperative pass, a 1024-bit pass,
// and a 256-bit per-thread tail; the final bit_step correction removes the
// popcount contribution of alignment padding in the last 256-bit chunk.
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Shared staging buffer for this block's slice of A (12 KB).
// NOTE(review): assumes lda * (end_i - start_i) fits in A_s -- confirm.
__shared__ uint8_t A_s[6144 * 8 / 4];
//__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
// Range of weight rows [start_i, end_i) used by any thread of this block.
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i;
// Cooperative 64-bit-wide copy of the needed A rows into shared memory.
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
__syncthreads();
int i, j, k, h;
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
//if (i < M) // l.n - filters [16 - 55 - 1024]
{
int count = 0;
k = 0;
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit
// Warp-cooperative pass: each lane t broadcasts its row offsets via __shfl
// so all 32 lanes load 8 consecutive bytes of lane t's row (coalesced),
// then the warp-wide popcount sum is folded back into lane t's `count`.
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
// Out-of-range filters read slot 0 so the whole warp can still shuffle.
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit
// Same cooperative scheme with 32-bit loads for the next-smaller chunk.
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights
uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
// Per-thread 256-bit tail over the remaining K bits.
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
//ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights
ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) *mean_val + bias_val;
}
}
}
}
/*
// Coalescing
// B (input) in the shared_memory - GOOD
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint8_t B_s[4096*8]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint64_t B_s[4096]; // 32 KB // [ldb x N`] // max = 262 144 bits
int start_j = blockIdx.x*blockDim.x / M;
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / M + 1;
size_t shared_size = ldb * (end_j - start_j);
int j_cur = index / M;
int local_j = j_cur - start_j;
for (int k = threadIdx.x * 256; k < shared_size; k += blockDim.x * 256) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((ulonglong4 *)(B_s + k / 8)) = *((ulonglong4 *)(B + x / 8));
}
__syncthreads();
int i, j, k;
i = index % M; // l.n - filters [16 - 55 - 1024]
{
j = index / M; // out_h*out_w - one channel output size [169 - 173056]
if (j < N)
{
int count = 0;
k = 0;
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
{
uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + B_i)); // input
c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
{
uint32_t a_bit32 = *((uint32_t *)(A + A_i)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + B_i)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
//ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 b_bit256 = *((ulonglong4 *)(B_s + (local_j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) * mean_val + bias_val;
}
}
}
}
*/
// Coalesced memory access - GOOD
// Coalesced memory access - GOOD
// Launches the XNOR-GEMM kernel: one thread per C element (M*N total).
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr, float *bias)
{
    size_t output_count = M*N;
    const int num_blocks = output_count / BLOCK + 1;
    gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> > (
        M, N, K,
        A, lda,
        B, ldb,
        C, ldc,
        mean_arr, bias);
}
// --------------------------------
// --------------------------------
// --------------------------------
// sequentially - B (input) in the shared_memory - BAD
// --------------------------------
// Sequential-variant XNOR GEMM (kept for reference; labeled BAD by the
// author): a full warp (K_items = WARP_SIZE lanes) cooperates on one output
// element C[i][j], each lane popcounting a strided 32-bit slice of K, then
// a warp shuffle-down reduction folds the partial counts into lane 0.
// B rows for the block are staged in shared memory (B_s, 16 KB).
__global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
//__shared__ float mean_shared[32];
//__shared__ uint32_t B_s[8192]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint32_t B_s[4096]; // 16 KB // [ldb x N`] // max = 131 072 bits
__shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits
const int K_items = WARP_SIZE;
// Range of input rows [start_j, end_j) used by this block.
int start_j = blockIdx.x*blockDim.x / (K_items * M);
{
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1;
if (end_j > N) end_j = N;
size_t shared_size = ldb * (end_j - start_j);
if (shared_size != 0) {
//if(threadIdx.x == 0) printf(" start_j = %d, end_j = %d, shared_size = %d \n", start_j, end_j, shared_size);
// NOTE(review): this outer `int k;` is shadowed by the loop's own k.
int k;
// Cooperative 32-bit-wide copy of the needed B rows into shared memory.
for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8));
}
}
}
__syncthreads();
int index = blockIdx.x*blockDim.x + threadIdx.x;
{
int i; // l.n
int j; // out_h*out_w
int k; // l.size * l.size * l.c
// One warp per output element: index2 identifies the (i, j) pair.
const int index2 = index / K_items;
i = index2 % M; // max M
j = index2 / M; // max N
int local_j = j - start_j;
//if (i <= 1 && j <= 1 ) printf(" k = %d, K = %d, K_items = %d, i = %d, j = %d, lda = %d, ldb = %d, ldc = %d \n",
// k, K, K_items, i, j, lda, ldb, ldc);
{ // l.n - filters [16 - 55 - 1024]
// further improvements: for (l.n == 1024) iterate several (j)
if (j < N)
{ // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
const int bit_step = 32;
// Each lane handles every WARP_SIZE-th 32-bit chunk of the K axis.
for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE)
{ // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + (j*ldb + k) / 8)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
count += __popc(c_bit32);
}
// Warp reduction: lane 0 ends up with the total popcount.
for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2)
count += __shfl_down(count, offset);
if (threadIdx.x % WARP_SIZE == 0) {
// Remove popcount contribution of the 32-bit alignment padding.
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1;
float mean_val = mean_arr[i];
C[i*ldc + j] = (2 * count - K) * mean_val;
//B_s[threadIdx.x / WARP_SIZE] = (2 * count - K) * mean_val;
}
}
}
}
}
// sequentially - BAD
// sequentially - BAD
// Launches the warp-per-output XNOR GEMM variant: 32 threads (one warp)
// cooperate on each of the M*N output elements, hence size = M*N*32.
void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    size_t size = M*N * 32;
    const int num_blocks = size / BLOCK + 1;
    gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> > (
        M, N, K,
        A, lda,
        B, ldb,
        C, ldc,
        mean_arr);
}
// --------------------------------
| 40d6b76c686d5a1a245ef0c285574cc169c57090.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "additionally.h"
#include "gpu.h"
extern int gpu_index;
#define BLOCK 512
// GPU<->CPU parameter-transfer hooks for layer types that need none in this
// build; kept as empty stubs so the generic layer interface stays uniform.
void pull_batchnorm_layer(layer l) {} // not required now
void push_batchnorm_layer(layer l) {} // not required now
void pull_local_layer(local_layer l) {} // not required now
void push_local_layer(local_layer l) {} // not required now
void pull_connected_layer(local_layer l) {} // not required now
void push_connected_layer(local_layer l) {} // not required now
// Aborts with a diagnostic if `status` (a CUDA API result) or the sticky
// last-error state reports a failure. In debug builds assert(0) traps; with
// NDEBUG the error() call terminates instead.
void check_error(cudaError_t status)
{
    //cudaDeviceSynchronize();
    cudaError_t status2 = cudaGetLastError();
    if (status != cudaSuccess)
    {
        const char *s = cudaGetErrorString(status);
        char buffer[256];
        printf("CUDA Error: %s\n", s);
        snprintf(buffer, 256, "CUDA Error: %s", s);
        assert(0);
        error(buffer);
    }
    if (status2 != cudaSuccess)
    {
        // BUG FIX: the original formatted `status` here, so the "Prev" message
        // reported the wrong (often success) error string instead of status2.
        const char *s = cudaGetErrorString(status2);
        char buffer[256];
        printf("CUDA Error Prev: %s\n", s);
        snprintf(buffer, 256, "CUDA Error Prev: %s", s);
        assert(0);
        error(buffer);
    }
}
// Binds CUDA device `n` to this host thread and records it in gpu_index.
void cuda_set_device(int n)
{
    gpu_index = n;
    check_error(cudaSetDevice(n));
}
// Returns the CUDA device currently bound to this host thread.
int cuda_get_device()
{
    int device_id = 0;
    check_error(cudaGetDevice(&device_id));
    return device_id;
}
#ifdef CUDNN
// Returns a lazily-created cuDNN handle for the current device (one handle
// per device id, up to 16 devices). The init flags and handle array are
// unsynchronized statics -- NOTE(review): not thread-safe; confirm handles
// are only created from a single host thread.
cudnnHandle_t cudnn_handle()
{
static int init[16] = { 0 };
static cudnnHandle_t handle[16];
int i = cuda_get_device();
if (!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
}
return handle[i];
}
#endif
// Allocates n floats on the current device; when x is non-NULL its contents
// are copied up. Aborts via check_error()/error() on failure.
float *cuda_make_array(float *x, size_t n)
{
    float *x_gpu;
    const size_t bytes = sizeof(float)*n;
    check_error(cudaMalloc((void **)&x_gpu, bytes));
    if (x) {
        check_error(cudaMemcpy(x_gpu, x, bytes, cudaMemcpyHostToDevice));
    }
    if (!x_gpu) error("Cuda malloc failed\n");
    return x_gpu;
}
// Allocates n ints on the current device (contents uninitialized).
int *cuda_make_int_array(size_t n)
{
    int *x_gpu;
    check_error(cudaMalloc((void **)&x_gpu, sizeof(int)*n));
    return x_gpu;
}
// Releases a device allocation made by cuda_make_array().
void cuda_free(float *x_gpu)
{
    check_error(cudaFree(x_gpu));
}
// Copies n floats host -> device (blocking).
void cuda_push_array(float *x_gpu, float *x, size_t n)
{
    check_error(cudaMemcpy(x_gpu, x, sizeof(float)*n, cudaMemcpyHostToDevice));
}
// Copies n floats device -> host (blocking).
void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
    check_error(cudaMemcpy(x, x_gpu, sizeof(float)*n, cudaMemcpyDeviceToHost));
}
// Copies layer i's device output to its host buffer and returns the host
// pointer. REGION layers are not pulled -- presumably their output is
// already produced on the host; TODO confirm.
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
// Returns the host-side output of the last non-COST layer of the network.
float *get_network_output_gpu(network net)
{
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
// Computes a launch grid covering n threads at BLOCK threads per block.
// Grids wider than 65535 blocks are folded into a roughly square (x, y)
// shape to stay within the per-dimension limit.
dim3 cuda_gridsize(size_t n) {
    size_t blocks = (n - 1) / BLOCK + 1;
    size_t grid_x = blocks;
    size_t grid_y = 1;
    if (grid_x > 65535) {
        grid_x = ceil(sqrtf(blocks));
        grid_y = (n - 1) / (grid_x*BLOCK) + 1;
    }
    dim3 d;
    d.x = grid_x;
    d.y = grid_y;
    d.z = 1;
    //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
    return d;
}
// Copies a convolutional layer's parameters device -> host: weights, biases,
// and (when batch-normalized) scales plus rolling mean/variance.
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// Mirror of pull_convolutional_layer: copies the same parameters host -> device.
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// -------------------- CUDA functions -------------------
// add BIAS
// Adds biases[f] to every spatial element of feature map f for each batch
// item; grid.y enumerates the n filters, grid.z the batch.
__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
{
    int spatial_pos = blockIdx.x * blockDim.x + threadIdx.x;
    int filter = blockIdx.y;
    int batch_item = blockIdx.z;
    if (spatial_pos < size) output[(batch_item*n + filter)*size + spatial_pos] += biases[filter];
}
// Host launcher for add_bias_kernel.
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    dim3 grid((size - 1) / BLOCK + 1, n, batch);
    dim3 block(BLOCK, 1, 1);
    add_bias_kernel << <grid, block >> > (output, biases, n, size);
    check_error(cudaPeekAtLastError());
}
// normalization
// Normalizes x in place: each element is shifted by its filter's mean and
// divided by (sqrt(variance) + 1e-6) -- note the epsilon is added to the
// square root, matching the layout batch*filters*spatial.
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    int filter = (idx / spatial) % filters;
    x[idx] = (x[idx] - mean[filter]) / (sqrtf(variance[filter]) + .000001f);
}
// Host launcher for normalize_kernel over batch*filters*spatial elements.
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    size_t total = batch*filters*spatial;
    normalize_kernel << <cuda_gridsize(total), BLOCK >> > (total, x, mean, variance, batch, filters, spatial);
    check_error(cudaPeekAtLastError());
}
// fill array
// Writes ALPHA into every INCX-strided element of X (N elements total).
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
    int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < N) X[idx*INCX] = ALPHA;
}
// Host launcher for fill_kernel.
void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
    fill_kernel << <cuda_gridsize(N), BLOCK >> > (N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}
// scale BIAS
// Multiplies every spatial element of feature map f by biases[f] (per-filter
// scale); grid.y enumerates filters, grid.z the batch.
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
    int spatial_pos = blockIdx.x * blockDim.x + threadIdx.x;
    int filter = blockIdx.y;
    int batch_item = blockIdx.z;
    if (spatial_pos < size) output[(batch_item*n + filter)*size + spatial_pos] *= biases[filter];
}
// Host launcher for scale_bias_kernel.
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    dim3 grid((size - 1) / BLOCK + 1, n, batch);
    dim3 block(BLOCK, 1, 1);
    scale_bias_kernel << <grid, block >> > (output, biases, n, size);
    check_error(cudaPeekAtLastError());
}
// max-pool layer
// Max-pooling forward pass: one thread per output element. Decodes the flat
// thread id into (b, k, i, j) = (batch, channel, out row, out col), scans the
// size x size window with out-of-bounds taps treated as -INFINITY, and writes
// both the max value and the argmax input index (used by the backward pass).
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
// Output spatial dimensions implied by stride/size/pad.
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
// Decode flat id: fastest-varying is the output column.
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
// Window origin, shifted up-left by half the padding.
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
// Branch-free max update; invalid taps can never win.
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
// Max-pool forward dispatcher: when stride == size (non-overlapping windows)
// it uses cuDNN pooling, otherwise the custom kernel.
// NOTE(review): the cuDNN path assumes layer.poolingDesc / srcTensorDesc /
// dstTensorDesc were initialized elsewhere and does not fill
// layer.indexes_gpu (argmax) -- confirm the backward pass tolerates that;
// maxpool_status is also assigned but never checked.
void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.stride == layer.size) {
//if(1) {
cudnnStatus_t maxpool_status;
float alpha = 1, beta = 0;
maxpool_status = cudnnPoolingForward(
cudnn_handle(),
layer.poolingDesc,
&alpha,
layer.srcTensorDesc,
state.input,
&beta,
layer.dstTensorDesc,
layer.output_gpu);
//maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc);
//cudnnDestroyTensorDescriptor(layer.srcTensorDesc);
//cudnnDestroyTensorDescriptor(layer.dstTensorDesc);
}
else {
// Custom path: one thread per output element.
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> > (n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
}
// flatten
// Reorders a (batch, layers, spatial) tensor between channel-major (i1) and
// spatial-major/interleaved (i2) layouts: forward != 0 copies x[i1] -> out[i2],
// otherwise out[i1] = x[i2]. One thread per element.
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
// Decode the flat index into (b, in_c, in_s).
int in_s = i%spatial;
i = i / spatial;
int in_c = i%layers;
i = i / layers;
int b = i;
// i1: channel-major offset; i2: spatial-major (interleaved-channel) offset.
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
// Host launcher for flatten_kernel over spatial*batch*layers elements.
void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, spatial, layers, batch, forward, out);
check_error(cudaPeekAtLastError());
}
// activations
// Per-element activation functions evaluated on the device.
// NOTE(review): several bodies use double literals / double exp() and floor(),
// forcing double-precision math on the GPU; left as-is since switching to
// float intrinsics would perturb results.
// Leaky hard-tanh: slope 0.001 outside [0, 1], identity inside.
__device__ float lhtan_activate_kernel(float x)
{
if (x < 0) return .001*x;
if (x > 1) return .001*(x - 1) + 1;
return x;
}
// Gradient of lhtan: 1 inside (0, 1), 0.001 outside.
__device__ float lhtan_gradient_kernel(float x)
{
if (x > 0 && x < 1) return 1;
return .001;
}
// Hard tanh: clamp to [-1, 1].
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x) { return x; }
__device__ float logistic_activate_kernel(float x) { return 1. / (1. + exp(-x)); }
__device__ float loggy_activate_kernel(float x) { return 2. / (1. + exp(-x)) - 1; }
__device__ float relu_activate_kernel(float x) { return x*(x>0); }
__device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(exp(x) - 1); }
__device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01*x; }
__device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1*x; }
__device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1*x; }
__device__ float tanh_activate_kernel(float x) { return (2 / (1 + exp(-2 * x)) - 1); }
// Piecewise-linear sigmoid approximation.
__device__ float plse_activate_kernel(float x)
{
if (x < -4) return .01 * (x + 4);
if (x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
// Staircase activation: flat on even integer steps, linear ramp on odd ones.
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n % 2 == 0) return floor(x / 2.);
else return (x - n) + floor(x / 2.);
}
// Dispatches to the activation helper selected by `a`; returns 0 for any
// value outside the known ACTIVATION enumerators.
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
// Applies activation `a` in place to each of the n elements of x.
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
    int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < n) x[idx] = activate_kernel(x[idx], a);
}
// Specialized in-place leaky ReLU (slope 1/10 for negatives).
__global__ void activate_array_leaky_kernel(float *x, int n)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < n) {
        const float v = x[idx];
        x[idx] = (v > 0) ? v : v / 10;
    }
}
// Host dispatcher: LEAKY gets the dedicated kernel, everything else goes
// through the generic switch-based kernel.
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
    if (a == LEAKY) activate_array_leaky_kernel << <(n / BLOCK + 1), BLOCK, 0, 0 >> > (x, n);
    else activate_array_kernel << <cuda_gridsize(n), BLOCK, 0, 0 >> > (x, n, a);
    check_error(cudaPeekAtLastError());
}
// softmax layer
// Numerically-stable softmax over n values:
//   output[i] = exp(input[i]/temp) / sum_j exp(input[j]/temp)
// The running maximum (divided by temp) is subtracted inside expf to avoid
// overflow for large logits.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for (i = 0; i < n; ++i) {
        // BUG FIX: the maximum must be tracked as float. The original declared
        // `int val = input[i];`, truncating the fractional part, so the
        // stability shift was wrong for non-integer logits (e.g. all inputs in
        // (-1, 1) collapsed to largest == 0).
        float val = input[i];
        largest = (val > largest) ? val : largest;
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    // Normalize so the outputs sum to 1.
    for (i = 0; i < n; ++i) {
        output[i] /= sum;
    }
}
// One thread per batch element: runs a full softmax over that element's
// n-value slice, located `offset` floats apart per batch index.
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
    int bidx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (bidx >= batch) return;
    float *src = input + bidx*offset;
    float *dst = output + bidx*offset;
    softmax_device(n, src, temp, dst);
}
// Host launcher: one softmax per group, `groups` threads total.
void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
    softmax_kernel << <cuda_gridsize(groups), BLOCK >> >(n, offset, groups, input, temp, output);
    check_error(cudaPeekAtLastError());
}
// reorg layer
// Reorg layer (YOLOv2 passthrough): shuffles data between spatial and channel
// dimensions by `stride`. One thread per element; the flat index is
// decomposed into (w, h, c, batch) input coordinates, then gathered from the
// matching strided location.
// NOTE(review): the `forward` flag is accepted but never used in this kernel —
// both directions execute the same gather; confirm against the CPU reorg path.
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= N) return;
    int in_index = i;
    // Decompose the flat index into input coordinates (w fastest).
    int in_w = i%w;
    i = i / w;
    int in_h = i%h;
    i = i / h;
    int in_c = i%c;
    i = i / c;
    int b = i%batch;
    // Each group of stride*stride input channels maps to one output channel
    // plus a (stride x stride) spatial offset.
    int out_c = c / (stride*stride);
    int c2 = in_c % out_c;
    int offset = in_c / out_c;
    int w2 = in_w*stride + offset % stride;
    int h2 = in_h*stride + offset / stride;
    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
    out[in_index] = x[out_index];
}
// Host launcher: one thread per element of the w*h*c*batch tensor.
void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int size = w*h*c*batch;
    reorg_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, w, h, c, batch, stride, forward, out);
    check_error(cudaPeekAtLastError());
}
// upsample layer
// Nearest-neighbor upsample by `stride`, scaled by `scale`.
// One thread per OUTPUT element (N = w*h*c*batch*stride*stride).
// forward != 0: out += scale * x (accumulates into out).
// forward == 0: backward pass — gradients from out are atomically summed into
// x, since stride*stride output cells map to the same input cell.
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= N) return;
    int out_index = i;
    // Decompose the flat output index into (w, h, c, batch) coordinates
    // of the upsampled tensor.
    int out_w = i % (w*stride);
    i = i / (w*stride);
    int out_h = i % (h*stride);
    i = i / (h*stride);
    int out_c = i%c;
    i = i / c;
    int b = i%batch;
    // Source cell: integer downscale of the output coordinates.
    int in_w = out_w / stride;
    int in_h = out_h / stride;
    int in_c = out_c;
    int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
    if (forward) out[out_index] += scale * x[in_index];
    else atomicAdd(x + in_index, scale * out[out_index]);
}
// Host launcher for upsample_kernel: one thread per output element.
// See upsample_kernel for forward/backward semantics.
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    // FIX: widen before multiplying — the original computed the product in
    // 32-bit int and only then assigned to size_t, overflowing for large
    // tensors (e.g. w*h*c*batch*stride^2 > 2^31).
    size_t size = (size_t)w * h * c * batch * stride * stride;
    upsample_kernel << <cuda_gridsize(size), BLOCK >> >(size, in, w, h, c, batch, stride, forward, scale, out);
    check_error(cudaPeekAtLastError());
}
// Strided device copy: Y[i*INCY + OFFY] = X[i*INCX + OFFX] for i in [0, N).
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int idx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y*gridDim.x);
    if (idx >= N) return;
    Y[idx*INCY + OFFY] = X[idx*INCX + OFFX];
}
// Host launcher for the strided copy with explicit offsets.
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
    copy_kernel << <cuda_gridsize(N), BLOCK>> >(N, X, OFFX, INCX, Y, OFFY, INCY);
    check_error(cudaPeekAtLastError());
}
// Convenience wrapper: strided copy starting at offset 0 on both sides.
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
    copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
// shortcut layer
// Shortcut (residual) layer: out += add, where the two tensors may differ in
// spatial size/channels. Each thread handles one element of the overlapping
// (minw, minh, minc) region; `stride` subsamples the larger `add`, `sample`
// subsamples the larger `out`.
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= size) return;
    // Decompose flat id into (i=w, j=h, k=c, b=batch) over the min region.
    int i = id % minw;
    id /= minw;
    int j = id % minh;
    id /= minh;
    int k = id % minc;
    id /= minc;
    int b = id % batch;
    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
    out[out_index] += add[add_index];
}
// Host launcher: derives the overlap region and the integer stride/sample
// ratios between the two tensors, then adds `add` into `out`.
// NOTE(review): the asserts run on the raw integer ratios BEFORE the <1
// clamping — so mismatched aspect ratios trap in debug builds; confirm that
// is intended before reordering.
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;
    int stride = w1 / w2;
    int sample = w2 / w1;
    assert(stride == h1 / h2);
    assert(sample == h2 / h1);
    if (stride < 1) stride = 1;
    if (sample < 1) sample = 1;
    int size = batch * minw * minh * minc;
    shortcut_kernel << <cuda_gridsize(size), BLOCK>> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
    check_error(cudaPeekAtLastError());
}
// ----------- Quantinization --------------
// Clamps src into the symmetric range [-max_val, max_val] by magnitude,
// preserving its sign. Values already within range pass through unchanged.
__host__ __device__ int max_abs(int src, int max_val) {
    int limit = abs(max_val);
    if (abs(src) <= limit) return src;
    return (src > 0) ? max_val : -max_val;
}
// Quantize with saturation: output_int8[i] = clamp(input_f32[i] * multipler,
// -max_val, +max_val). The float product is implicitly truncated toward zero
// when passed to max_abs's int parameter, then narrowed to int8
// (callers keep max_val <= 127, i.e. 7 magnitude bits + sign).
__global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val); // 7-bit (1-bit sign)
}
// Host launcher (default stream; no launch-error check).
void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) {
    cuda_f32_to_int8 << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler, max_val);
}
// Quantize WITHOUT saturation: output_int8[i] = (int8_t)(input_f32[i] * multipler).
// NOTE(review): a product outside int8 range is an out-of-range float->int
// conversion (undefined behavior) — callers are presumably expected to
// pre-scale so every value fits; confirm at the call sites.
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_int8[idx] = input_f32[idx] * multipler; // 7-bit (1-bit sign)
}
// Host launcher (default stream; no launch-error check).
void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) {
    cuda_f32_to_int8_nomax << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler);
}
// De-quantize: output_f32[i] = input_int8[i] * multipler.
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    float value = input_int8[i] * multipler;
    output_f32[i] = value;
}
// Host launcher (default stream; no launch-error check).
void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) {
    size_t blocks = size / BLOCK + 1;
    cuda_int8_to_f32 << < blocks, BLOCK >> >(input_int8, size, output_f32, multipler);
}
// In-place scale: input_output[i] *= multipler for i in [0, size).
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    input_output[i] = input_output[i] * multipler;
}
// Host launcher (default stream; no launch-error check).
void cuda_do_multiply_f32(float *input_output, size_t size, float multipler) {
    size_t blocks = size / BLOCK + 1;
    cuda_multiply_f32 << < blocks, BLOCK >> >(input_output, size, multipler);
}
// --------------------------------
// ------------- XNOR -------------
// --------------------------------
// XNOR-net weight binarization: for each of n filters of `size` weights,
// writes sign(weight) * mean(|weights of that filter|) into `binary`.
// One thread per filter.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    const float *src = weights + f*size;
    float *dst = binary + f*size;
    // Mean absolute value of this filter (same accumulation order as before).
    float mean = 0;
    for (int k = 0; k < size; ++k) {
        mean += fabs(src[k]);
    }
    mean = mean / size;
    // Binarize: positive weights -> +mean, others -> -mean.
    for (int k = 0; k < size; ++k) {
        dst[k] = (src[k] > 0) ? mean : -mean;
    }
}
// Host launcher: one thread per filter.
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary);
    check_error(cudaPeekAtLastError());
}
// --------------------------------
// Element-wise sign binarization: binary[i] = +1 if x[i] >= 0, else -1.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y*gridDim.x);
    if (idx >= n) return;
    binary[idx] = (x[idx] >= 0) ? 1 : -1;
}
// Host launcher: one thread per element.
void binarize_gpu(float *x, int n, float *binary)
{
    binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary);
    check_error(cudaPeekAtLastError());
}
// --------------------------------
// Exchanges the layer's real-valued and binarized weight buffers so the
// binary copy can be used transparently during the forward pass.
// With GPU support compiled in, the device-side pointers are swapped too.
void swap_binary(convolutional_layer *l)
{
    float *tmp = l->weights;
    l->weights = l->binary_weights;
    l->binary_weights = tmp;
#ifdef GPU
    tmp = l->weights_gpu;
    l->weights_gpu = l->binary_weights_gpu;
    l->binary_weights_gpu = tmp;
#endif
}
// --------------------------------
#define WARP_SIZE 32
// im2col with aligned rows: like classic im2col, but each of the
// ksize*ksize*channels output rows starts at a multiple of `bit_align` floats
// (instead of height_col*width_col), so each row can later be bit-packed on
// 32-bit boundaries. One thread per output column (channel, h_out, w_out);
// grid-stride loop over n = channels*height_col*width_col.
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im,
    const int height, const int width, const int ksize,
    const int pad,
    const int stride,
    const int height_col, const int width_col,
    float *data_col, const int bit_align)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    for (; index < n; index += blockDim.x*gridDim.x) {
        // Decompose the flat index into (channel_in, h_out, w_out).
        int w_out = index % width_col;
        int h_index = index / width_col;
        int h_out = h_index % height_col;
        int channel_in = h_index / height_col;
        int channel_out = channel_in * ksize * ksize;
        // Top-left corner of the receptive field in the input image.
        int h_in = h_out * stride - pad;
        int w_in = w_out * stride - pad;
        float* data_col_ptr = data_col;
        //data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
        data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
        // Kept for the disabled bit-packing variant below.
        float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
        const float* data_im_ptr = data_im;
        data_im_ptr += (channel_in * height + h_in) * width + w_in;
        for (int i = 0; i < ksize; ++i) {
            for (int j = 0; j < ksize; ++j) {
                int h = h_in + i;
                int w = w_in + j;
                // Zero-pad taps that fall outside the input image.
                *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
                    data_im_ptr[i * width + j] : 0;
                //float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0;
                //unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
                //if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask;
                //data_col_ptr_32 += bit_align / 32;
                //data_col_ptr += height_col * width_col;
                // Advance one aligned output row per kernel tap.
                data_col_ptr += bit_align;
            }
        }
    }
}
// Host launcher: one thread per (channel, output pixel).
void im2col_align_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align)
{
    // We are going to launch channels * height_col * width_col kernels, each
    // kernel responsible for copying a single-channel grid.
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * height_col * width_col;
    im2col_align_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK,
        BLOCK, 0, 0>> >(
            num_kernels, im, height, width, ksize, pad,
            stride, height_col,
            width_col, data_col, bit_align);
}
// --------------------------------
// binary im2col - stride=1
// Binary im2col (stride must be 1): builds the bit-packed im2col matrix
// directly — each output bit is 1 when the source pixel is > 0. One thread
// per (channel, kernel-tap) pair; each thread walks all output pixels in
// chunks of 32. Warp shuffles broadcast each lane's base indices so that the
// 32 lanes cooperatively read 32 consecutive pixels and pack them into one
// 32-bit word via ballot.
// NOTE(review): uses the legacy mask-less __shfl/__ballot, removed for
// Volta+ (sm_70) — the *_sync variants with an explicit mask would be needed
// there. Also assumes each 32-lane group takes the same branch so the
// shuffles see all participants — do not restructure the control flow.
__global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im,
    const int height, const int width, const int ksize, const int channels,
    const int pad,
    const int stride,
    const int height_col, const int width_col,
    float *data_col, const int bit_align)
{
    // Unused scratch (left over from earlier experiments).
    __shared__ float tmp_s[1];
    __shared__ ulonglong4 tmp256_s[1];
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    //for (; index < n; index += blockDim.x*gridDim.x)
    {
        // Decompose the thread index into (channel_in, tap j, tap i).
        int c_index = index;
        int channel_in = c_index % channels;
        int channel_out = channel_in * ksize * ksize;
        int j_index = c_index / channels;
        int j = j_index % ksize;
        int i = j_index / ksize;
        // Bit offset of this (channel, tap) row in the aligned output.
        int pre_out_index = (channel_out + i*ksize + j) * bit_align;
        int j_pad = (j - pad);
        int i_pad = (i - pad);
        // Walk all output pixels, 32 at a time (one packed word per step).
        for (int wh_index = 0; wh_index < (height_col*width_col); wh_index += 32)
        {
            {
                const int w_out = wh_index % width_col;
                const int h_out = wh_index / width_col;
                const int w = w_out + j_pad;
                const int h = h_out + i_pad;
                int pre_in_index = channel_in * height * width;
                int pre_in_wh_index = h * width + w;
                int send_wh_index = wh_index;
                // Threads past the last tap publish an out-of-range index so
                // their shuffled work is skipped by every lane.
                if (i >= ksize) send_wh_index = height_col*width_col;
#pragma unroll
                for (int t = 0; t < WARP_SIZE; ++t)
                {
                    const int lane_id = threadIdx.x % WARP_SIZE;
                    // Broadcast lane t's indices; each lane handles one of the
                    // 32 consecutive pixels of lane t's chunk.
                    const int cur_wh_index = __shfl(send_wh_index, t) + lane_id;
                    if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize)
                    {
                        const int cur_pre_out_index = __shfl(pre_out_index, t);
                        const int cur_pre_in_index = __shfl(pre_in_index, t);
                        const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id;
                        int w = cur_pre_in_wh_index % width;
                        int h = cur_pre_in_wh_index / width;
                        int in_index = cur_pre_in_index + cur_pre_in_wh_index;
                        int out_index = cur_pre_out_index + cur_wh_index;
                        // Zero-padding outside the image.
                        float val = (w >= 0 && w < width && h >= 0 && h < height) ?
                            data_im[in_index] : float();
                        // Pack the 32 lanes' sign bits into one word; lane 0
                        // writes it (out_index is 32-bit aligned by bit_align).
                        uint32_t bit_mask = __ballot(val > 0);
                        if (lane_id == 0) {
                            uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]);
                            uint32_t *bit32_ptr = (uint32_t *)bit8_ptr;
                            *bit32_ptr = bit_mask;
                        }
                    }
                }
            }// w_out
        }
    }
}
// Host launcher: one thread per (channel, kernel-tap) pair.
void im2col_align_bin_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align) {
    // We are going to launch channels * height_col * width_col kernels, each
    // kernel responsible for copying a single-channel grid.
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * ksize * ksize;
    int num_blocks = num_kernels / BLOCK + 1;
    im2col_align_bin_gpu_kernel << <num_blocks,
        BLOCK, 0, 0 >> >(
            num_kernels, im, height, width, ksize, channels, pad,
            stride, height_col,
            width_col, data_col, bit_align);
}
// --------------------------------
// Packs a float array into a bit array: bit i is 1 iff src[i] > 0.
// One thread per source float; each warp's 32 predicates are gathered with
// ballot, and the warp's lane 0 writes the packed 32-bit word.
// NOTE(review): uses the legacy mask-less __ballot, removed for Volta+
// (sm_70) — would need __ballot_sync(0xffffffff, ...) there.
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    float src_val;
    {
        // Tail threads beyond `size` contribute a 0 bit.
        if (index < size) src_val = src[index];
        else src_val = 0;
        unsigned int bit_mask = __ballot(src_val > 0);
        if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask;
    }
}
// Host launcher; dst must hold at least ceil(size/32) 32-bit words.
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size);
}
// --------------------------------
// Bit-array helpers. Bit `index` lives in byte index/8 at position index%8,
// LSB-first within each byte (matches little-endian packing used elsewhere).
// Clears bit `index` in dst.
__device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) {
    dst[index / 8] &= (unsigned char)~(1u << (index % 8));
}
// Sets bit `index` in dst.
__device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) {
    dst[index / 8] |= (unsigned char)(1u << (index % 8));
}
// Returns bit `index` of src as 0 or 1.
__device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
    return (unsigned char)((src[index / 8] >> (index % 8)) & 1u);
}
// Intel CPUs and nVidia CUDA GPU are little endian
__device__ __host__ unsigned char reverse_byte(unsigned char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
__device__ unsigned char reverse_byte_CUDA(unsigned char a)
{
uint32_t tmp = __brev(a);
return tmp >> 24;
}
__device__ __host__ unsigned char reverse_byte_2(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
__device__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
{
unsigned x, y, t;
// Load the array and pack it into x and y.
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[7 * n] = reverse_byte_CUDA(x >> 24); B[6 * n] = reverse_byte_CUDA(x >> 16); B[5 * n] = reverse_byte_CUDA(x >> 8); B[4 * n] = reverse_byte_CUDA(x);
B[3 * n] = reverse_byte_CUDA(y >> 24); B[2 * n] = reverse_byte_CUDA(y >> 16); B[1 * n] = reverse_byte_CUDA(y >> 8); B[0 * n] = reverse_byte_CUDA(y);
}
// Transposes an n x m bit matrix (row strides lda/ldb in BITS) one 8x8 bit
// tile per thread. Full tiles use the fast 8x8 transpose; a ragged tail in
// the m direction falls back to per-bit copies.
// NOTE(review): the per-bit tail path reads and writes unaligned bits with
// non-atomic RMW on bytes of B — concurrent threads sharing a byte could
// race; confirm tail sizes make that impossible at the call sites.
__global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    //for (i = 0; i < n; i += 8)
    {
        // Tile origin: i runs over rows (bits), j over columns, both in
        // steps of 8 bits.
        i = (index * 8) % n;
        int j;
        //for (j = 0; j < m - 8; j += 8)
        {
            j = ((index * 8) / n) * 8;
            if (j < m - 8) {
                int a_index = i*lda + j;
                int b_index = j*ldb + i;
                // Fast path: whole 8x8 tile via byte transpose.
                transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]);
            }
            else if (j < m) {
                // Ragged tail: copy the remaining bits one by one.
                for (; j < m; ++j) {
                    if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
                    else remove_bit(B, j*ldb + i);
                }
            }
        }
    }
}
// Bit-reverses a byte via the branch-free 64-bit multiply/mask trick
// (same as reverse_byte_2 above).
__device__ __host__ uint8_t reverse_8_bit(uint8_t a) {
    return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
// Bit-reverses a 32-bit word using the hardware intrinsic.
__device__ uint32_t reverse_32_bit(uint32_t a)
{
    // __device__ unsigned int __brev(unsigned int x) // CUDA
    // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
    return __brev(a);
    // Portable fallback kept for reference:
    //return (reverse_8_bit(a >> 24) << 0) |
    //    (reverse_8_bit(a >> 16) << 8) |
    //    (reverse_8_bit(a >> 8) << 16) |
    //    (reverse_8_bit(a >> 0) << 24);
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
// In-place transpose of a 32x32 bit matrix held as 32 uint32 rows
// (Hacker's Delight recursive block-swap network, manually unrolled), then a
// final pass that reverses row order and bit order to produce the
// reversed-diagonal layout expected by the XNOR GEMM.
// Uses the swap(a0, a1, j, m) macro defined above.
__device__ void transpose32_optimized(uint32_t A[32]) {
    int j, k;
    unsigned m, t;
    // Unrolled form of:
    //   for (j = 16; j != 0; j >>= 1, m ^= m << j)
    //     for (k = 0; k < 32; k = (k + j + 1) & ~j) swap 2^j x 2^j sub-blocks
    j = 16;
    m = 0x0000FFFF;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    j = 8;
    m = 0x00ff00ff;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    j = 4;
    m = 0x0f0f0f0f;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    j = 2;
    m = 0x33333333;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    j = 1;
    m = 0x55555555;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // reverse Y: swap rows end-to-end while bit-reversing each row.
    for (j = 0; j < 16; ++j) {
        uint32_t tmp = A[j];
        A[j] = reverse_32_bit(A[31 - j]);
        A[31 - j] = reverse_32_bit(tmp);
    }
}
#define BLOCK_TRANSPOSE32 256
// Transposes one 32x32 bit tile: gathers 32 words from A (row stride m words),
// transposes them in shared memory, and scatters to B (row stride n words).
// Each thread stages its tile in a private 32-word slice of shared memory,
// so the launching block MUST have at most BLOCK_TRANSPOSE32 threads
// (shared array is sized 32 * BLOCK_TRANSPOSE32).
__device__ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
    // Register-array variant kept for reference:
    //unsigned A_tmp[32];
    //int i;
    //#pragma unroll
    //for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
    //transpose32_optimized(A_tmp);
    //#pragma unroll
    //for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
    __shared__ uint32_t A_shared[32 * BLOCK_TRANSPOSE32];
    uint32_t *A_tmp = &A_shared[32 * threadIdx.x];
    int i;
    #pragma unroll 32
    for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
    transpose32_optimized(A_tmp);
    #pragma unroll 32
    for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
// transpose 32x32 bit
__global__ void transpose_bin_gpu_kernel_32(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
int index = (blockIdx.x*blockDim.x + threadIdx.x) * 32;
//for (i = 0; i < n; i += 8)
{
i = index % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = (index / n) * 32;
if (j < m) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
}
}
}
}
// Host launcher: transposes the n x m bit matrix A into B (strides lda/ldb in
// bits) using 32x32 bit-tile transposes with the reversed-diagonal layout.
// One thread per 32x32 tile; dimensions are expected to be padded to 32-bit
// multiples (the tile kernel has no ragged-tail path).
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    size_t size32 = n*m / (32 * 32) + 1;
    const int num_blocks32 = size32 / BLOCK_TRANSPOSE32 + 1;
    transpose_bin_gpu_kernel_32 << <num_blocks32, BLOCK_TRANSPOSE32, 0, 0 >> >((uint32_t *)A, (uint32_t *)B, n, m, lda, ldb, block_size);
    // The 8x8-tile kernel (transpose_bin_gpu_kernel) is disabled; its unused
    // size/num_blocks computation was removed.
    //transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(A, B, n, m, lda, ldb, block_size);
}
// --------------------------------
// Fills size bytes of src with the byte `val` on the GPU.
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) {
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: the original ignored `val` and always wrote 0, so any caller
    // requesting a non-zero fill silently got a zero fill instead.
    if (index < size) src[index] = val;
}
// Host launcher (default stream; no launch-error check).
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, val, size);
}
// --------------------------------
//typedef unsigned long long int uint64_t;
//typedef unsigned int uint32_t;
//typedef unsigned char uint8_t;
//typedef char int8_t;
// Replicates a single truth bit across all 64 bits: nonzero -> all ones.
__device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) {
    return src ? 0xFFFFFFFFFFFFFFFF : 0;
}
// XNOR of the lowest bit of a and b (1 when the bits agree).
__device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) {
    return (uint8_t)(~(a ^ b) & 1u);
}
// Bitwise XNOR of two 32-bit words: ~(a ^ b) == ~a ^ b.
__device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    return ~a ^ b;
}
// Bitwise XNOR of two 64-bit words.
__device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    return ~a ^ b;
}
// Component-wise XNOR of two 128-bit values (four 32-bit lanes).
__device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) {
    uint4 res;
    res.x = ~a.x ^ b.x;
    res.y = ~a.y ^ b.y;
    res.z = ~a.z ^ b.z;
    res.w = ~a.w ^ b.w;
    return res;
}
// Component-wise XNOR of two 256-bit values (four 64-bit lanes).
__device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) {
    ulonglong4 res;
    res.x = ~a.x ^ b.x;
    res.y = ~a.y ^ b.y;
    res.z = ~a.z ^ b.z;
    res.w = ~a.w ^ b.w;
    return res;
}
/*
// A (weights) in the shared_memory
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i;
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
//if (i_cur < M && (index % N == 0 || threadIdx.x == 0)) {
//for (int k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//*((uint64_t *)(A_s + (local_i*lda + k) / 8)) = *((uint64_t *)(A + (i_cur*lda + k) / 8)); // weights
// }
//}
__syncthreads();
int i, j, k, h;
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
if (i < M) // l.n - filters [16 - 55 - 1024]
{
float mean_val = mean_arr[i];
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
#include <cstdio>
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
size_t size = M*N;
const int num_blocks = size / BLOCK + 1;
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr);
}
*/
// --------------------------------
// Warp-wide sum: every lane returns the sum of `val` over all 32 lanes
// (butterfly reduction via XOR shuffles).
// NOTE(review): uses the legacy mask-less __shfl_xor, removed for Volta+
// (sm_70) — would need __shfl_xor_sync(0xffffffff, ...) there.
__inline__ __device__
int warpAllReduceSum(int val) {
    for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
        val += __shfl_xor(val, mask);
    return val;
}
// Coalesced memory access
// A (weights) in the shared_memory - GOOD
// Binary (XNOR-net) GEMM with bit-transposed B:
//   C[i][j] = (2*popcount(XNOR(A_row_i, B_row_j)) - K) * mean_arr[i] + bias_arr[i]
// A = bit-packed weights (lda bits per row), B = bit-packed transposed input
// (ldb bits per row), K = dot length in bits. One thread per C element
// (i = index/N filter, j = index%N output pixel). The block's slice of A is
// staged in shared memory; the bulk of K is processed in warp-cooperative
// chunks (2048 then 1024 bits) where shuffles broadcast each lane's row
// offsets, and the tail uses per-thread 256-bit XNOR+popcount.
// NOTE(review): relies on legacy mask-less __shfl and implicit warp
// lockstep — pre-Volta only as written; the *_sync intrinsics are needed on
// sm_70+. Do not reorder the k-loops: `count` accumulation depends on it.
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr, float *bias_arr)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ uint8_t A_s[6144 * 8 / 4];
    //__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
    //__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
    // Range of A rows this block touches; stage them cooperatively.
    int start_i = blockIdx.x*blockDim.x / N;
    int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
    size_t shared_size = lda * (end_i - start_i);
    int i_cur = index / N;
    int local_i = i_cur - start_i;
    // 64-bit cooperative copy of the block's weight rows into shared memory.
    for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
        int x = start_i*lda + k;
        if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
    }
    __syncthreads();
    int i, j, k, h;
    j = index % N;
    { // out_h*out_w - one channel output size [169 - 173056]
        i = index / N;
        //if (i < M) // l.n - filters [16 - 55 - 1024]
        {
            int count = 0;
            k = 0;
            //#ifdef NOT_USED
            // 32 thread X 64 bit = 2048 bit
            // Warp-cooperative phase: all 32 lanes process lane t's row chunk
            // together, so even i >= M threads must participate (with a safe
            // A index) to keep the shuffles uniform.
            for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t c_bit64;
                //int64_t A_cur_index = (i*lda + k) / 8;
                int64_t A_cur_index = (local_i*lda + k) / 8;
                int64_t B_cur_index = (j*ldb + k) / 8;
                if (i >= M) A_cur_index = 0;
#pragma unroll
                for (int t = 0; t < WARP_SIZE; ++t) {
                    const int lane_id = threadIdx.x % WARP_SIZE;
                    const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
                    const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
                    {
                        //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
                        uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights
                        uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
                        c_bit64 = xnor_int64(a_bit64, b_bit64);
                        int tmp_count = __popcll(c_bit64);
                        int sum_count = warpAllReduceSum(tmp_count);
                        if (lane_id == t) count += sum_count;
                    }
                }
            }
            //#endif
            //#ifdef NOT_USED
            // 32 thread X 32 bit = 1024 bit
            for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
                //int64_t A_cur_index = (i*lda + k) / 8;
                int64_t A_cur_index = (local_i*lda + k) / 8;
                int64_t B_cur_index = (j*ldb + k) / 8;
                if (i >= M) A_cur_index = 0;
#pragma unroll
                for (int t = 0; t < WARP_SIZE; ++t) {
                    const int lane_id = threadIdx.x % WARP_SIZE;
                    const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
                    const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
                    {
                        //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
                        uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights
                        uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
                        uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
                        int tmp_count = __popc(c_bit32);
                        int sum_count = warpAllReduceSum(tmp_count);
                        if (lane_id == t) count += sum_count;
                    }
                }
            }
            //#endif
            // Per-thread tail + final scaling: only valid rows write C.
            if (i < M)
            {
                float mean_val = mean_arr[i];
                float bias_val = bias_arr[i];
                //#ifdef NOT_USED
                for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
                    //ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
                    ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights
                    ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
                    ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
                    count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
                        __popcll(c_bit256.y) + __popcll(c_bit256.z);
                }
                //#endif
#ifdef NOT_USED
                for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
                    //uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
                    uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
                    uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
                    uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
                    count += __popcll(c_bit64);
                }
#endif
                const int bit_step = 256;
                int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                count = count - f1; // remove extra bits (from empty space for align only)
                C[i*ldc + j] = (2 * count - K) *mean_val + bias_val;
            }
        }
    }
}
/*
// Coalescing
// B (input) in the shared_memory - GOOD
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ uint8_t B_s[4096*8]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint64_t B_s[4096]; // 32 KB // [ldb x N`] // max = 262 144 bits
int start_j = blockIdx.x*blockDim.x / M;
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / M + 1;
size_t shared_size = ldb * (end_j - start_j);
int j_cur = index / M;
int local_j = j_cur - start_j;
for (int k = threadIdx.x * 256; k < shared_size; k += blockDim.x * 256) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((ulonglong4 *)(B_s + k / 8)) = *((ulonglong4 *)(B + x / 8));
}
__syncthreads();
int i, j, k;
i = index % M; // l.n - filters [16 - 55 - 1024]
{
j = index / M; // out_h*out_w - one channel output size [169 - 173056]
if (j < N)
{
int count = 0;
k = 0;
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id;
{
uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + B_i)); // input
c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t B_cur_index = (j*ldb + k) / 8;
int64_t B_cur_index = (local_j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id;
{
uint32_t a_bit32 = *((uint32_t *)(A + A_i)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + B_i)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
//ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 b_bit256 = *((ulonglong4 *)(B_s + (local_j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
//uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t b_bit64 = *((uint64_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) * mean_val + bias_val;
}
}
}
}
*/
// Coalesced memory access - GOOD
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias)
{
size_t size = M*N;
const int num_blocks = size / BLOCK + 1;
/*
printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n",
size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024);
printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512);
*/
//printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda);
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr, bias);
}
// --------------------------------
// --------------------------------
// --------------------------------
// sequentially - B (input) in the shared_memory - BAD
// --------------------------------
// "Sequential" XNOR-GEMM kernel: a whole warp (K_items = WARP_SIZE lanes)
// cooperates on ONE output element C[i][j].  Each lane XNORs and popcounts
// 32-bit slices of the K dimension, and a warp shuffle reduction combines the
// partial counts.  A and B are bit-packed (lda/ldb measured in bits, hence the
// "/ 8" byte addressing); B holds the transposed input and is staged through
// shared memory (B_s) once per block.
__global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    //__shared__ float mean_shared[32];
    //__shared__ uint32_t B_s[8192]; // 32 KB // [ldb x N`] // max = 262 144 bits
    //__shared__ uint32_t B_s[4096]; // 16 KB // [ldb x N`] // max = 131 072 bits
    __shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits

    const int K_items = WARP_SIZE;
    // First output column (j) whose bits this block stages into shared memory.
    int start_j = blockIdx.x*blockDim.x / (K_items * M);
    {
        int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1;
        if (end_j > N) end_j = N;
        size_t shared_size = ldb * (end_j - start_j);

        if (shared_size != 0) {
            //if(threadIdx.x == 0) printf(" start_j = %d, end_j = %d, shared_size = %d \n", start_j, end_j, shared_size);

            // NOTE(review): this 'k' is unused — it is shadowed by the
            // loop-local 'k' declared just below.
            int k;
            // Cooperative copy of columns [start_j, end_j) of B into B_s,
            // 32 bits per thread per iteration.
            for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) {
                int x = start_j*ldb + k;
                if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8));
            }
        }
    }
    __syncthreads();

    int index = blockIdx.x*blockDim.x + threadIdx.x;
    {
        int i; // l.n
        int j; // out_h*out_w
        int k; // l.size * l.size * l.c

        // K_items consecutive threads (one warp) map to the same (i, j).
        const int index2 = index / K_items;
        i = index2 % M; // max M
        j = index2 / M; // max N

        // Column index relative to the slice staged in shared memory.
        int local_j = j - start_j;

        //if (i <= 1 && j <= 1 ) printf(" k = %d, K = %d, K_items = %d, i = %d, j = %d, lda = %d, ldb = %d, ldc = %d \n",
        //    k, K, K_items, i, j, lda, ldb, ldc);
        { // l.n - filters [16 - 55 - 1024]
            // further improvements: for (l.n == 1024) iterate several (j)

            if (j < N)
            { // out_h*out_w - one channel output size [169 - 173056]
                int count = 0;

                const int bit_step = 32;
                // Lane t handles chunks t, t+32, t+64, ... of the K dimension.
                for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE)
                { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
                    uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights
                    //uint32_t b_bit32 = *((uint32_t *)(B + (j*ldb + k) / 8)); // input
                    uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input
                    uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);

                    count += __popc(c_bit32);
                }

                // Warp-wide sum of the per-lane popcounts.
                // NOTE(review): __shfl_down is the legacy mask-less intrinsic,
                // removed for Volta+; __shfl_down_sync with an explicit mask is
                // required on newer architectures.
                for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2)
                    count += __shfl_down(count, offset);

                if (threadIdx.x % WARP_SIZE == 0) {
                    // Remove the bits contributed by the zero padding that
                    // aligns K up to a multiple of bit_step.
                    int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                    count = count - f1;
                    float mean_val = mean_arr[i];
                    C[i*ldc + j] = (2 * count - K) * mean_val;
                    //B_s[threadIdx.x / WARP_SIZE] = (2 * count - K) * mean_val;
                }
            }
        }
    }
}
// sequentially - BAD
// Host launcher for the "sequential" XNOR-GEMM kernel above.  One warp
// (32 threads) cooperates on each of the M*N output elements, hence the
// x32 factor in the total thread count.
void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    // Widen BEFORE multiplying: the original computed M*N*32 in plain int,
    // which overflows for large layers even though the result was size_t.
    size_t size = (size_t)M * (size_t)N * 32;
    // "+ 1" preserves the historical launch count (out-of-range threads are
    // guarded by the kernel's j < N check).
    const int num_blocks = size / BLOCK + 1;

    gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
        M, N, K,
        A, lda,
        B, ldb,
        C, ldc,
        mean_arr);
}
// --------------------------------
|
5200ef6e966a802e6ec37d777e54ac2d4dd70e77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_BILINEAR_RESIZE_LAYER_INSTANTIATE
#include "lbann/layers/image/bilinear_resize.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
// Bilinear-resize forward kernel.
// Logical layout: input is (num_samples, num_channels, input_height,
// input_width), one sample per column with leading dimension input_ldim;
// output likewise with the output_* dimensions.  Each loop iteration produces
// one output pixel; the grid-stride loop lets any grid size cover all
// num_samples*num_channels*output_height*output_width elements (the launcher
// may clamp the grid to 2^32-1 blocks).
// NOTE(review): the block_size template parameter is not used in the body.
template <int block_size, typename TensorDataType>
__global__ void fp_kernel(El::Int num_samples,
                          El::Int num_channels,
                          El::Int input_height,
                          El::Int input_width,
                          const TensorDataType* __restrict__ input,
                          El::Int input_ldim,
                          El::Int output_height,
                          El::Int output_width,
                          TensorDataType* __restrict__ output,
                          El::Int output_ldim) {

  // Useful constants
  const TensorDataType half = 0.5;
  const TensorDataType one = 1.;
  const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
  const El::Int num_threads = blockDim.x * gridDim.x;

  // Stride between interpolation points
  const auto& x_stride = TensorDataType(input_width) / TensorDataType(output_width);
  const auto& y_stride = TensorDataType(input_height) / TensorDataType(output_height);

  const auto& size = (num_samples * num_channels
                      * output_height * output_width);
  for (El::Int pos = gid; pos < size; pos += num_threads) {

    // Decompose the flat position into (sample, channel, row, col).
    const auto& sample = pos / (num_channels * output_height * output_width);
    const auto& channel = (pos / (output_height * output_width)) % num_channels;
    const auto& output_row = (pos / output_width) % output_height;
    const auto& output_col = pos % output_width;

    // Interpolation point (half-pixel-centre convention).
    const auto& x = (TensorDataType(output_col) + half) * x_stride;
    const auto& y = (TensorDataType(output_row) + half) * y_stride;

    // 2x2 neighbourhood of input pixels, clamped to the image edges.
    const auto input_col = static_cast<El::Int>(gpu_lib::floor(x - half));
    const auto& input_col0 = gpu_lib::max(input_col, El::Int(0));
    const auto& input_col1 = gpu_lib::min(input_col+1, input_width-1);
    const auto input_row = static_cast<El::Int>(gpu_lib::floor(y - half));
    const auto& input_row0 = gpu_lib::max(input_row, El::Int(0));
    const auto& input_row1 = gpu_lib::min(input_row+1, input_height-1);

    // Interpolation point relative to input pixel centers
    const auto& unit_x = x - (TensorDataType(input_col) + half);
    const auto& unit_y = y - (TensorDataType(input_row) + half);

    // Input and output pixels
    const auto& pixel00 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row0 * input_width
                                + input_col0];
    const auto& pixel01 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row0 * input_width
                                + input_col1];
    const auto& pixel10 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row1 * input_width
                                + input_col0];
    const auto& pixel11 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row1 * input_width
                                + input_col1];
    auto& result = output[sample * output_ldim
                          + channel * output_height * output_width
                          + output_row * output_width
                          + output_col];

    // Bilinear interpolation: weights come from the fractional offsets.
    result = (pixel00 * (one - unit_x) * (one - unit_y)
              + pixel01 * unit_x * (one - unit_y)
              + pixel10 * (one - unit_x) * unit_y
              + pixel11 * unit_x * unit_y);

  }

}
}
// Forward prop: launches the bilinear-resize kernel on the local input and
// output matrices (one column per local sample).  All tensor dimensions
// except the last two collapse into a single "channels" count; the last two
// are treated as height x width.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void bilinear_resize_layer<TensorDataType, Layout, Device>::fp_compute() {

  // Matrices
  const auto& local_input = this->get_local_prev_activations();
  auto& local_output = this->get_local_activations();

  // Dimensions
  const auto& input_dims = this->get_input_dims();
  const auto& num_dims = input_dims.size();
  const auto& num_samples = local_input.Width();
  // Product of all leading dims (everything except height and width).
  const El::Int num_channels = std::accumulate(input_dims.begin(),
                                               input_dims.end()-2,
                                               1,
                                               std::multiplies<int>());
  const El::Int input_height = input_dims[num_dims-2];
  const El::Int input_width = input_dims[num_dims-1];

  // Get GPU grid dimensions
  // Note: Maximum CUDA grid dimension is 2^32-1
  // (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
  // TODO: HIP/ROCM notes
  const El::Int size = local_output.Height() * local_output.Width();
  constexpr El::Int block_dim = 256;
  El::Int grid_dim = (size + block_dim - 1) / block_dim;
  // Clamping the grid is safe because fp_kernel uses a grid-stride loop.
  if (sizeof(El::Int) > sizeof(uint32_t)
      && grid_dim > std::numeric_limits<uint32_t>::max()) {
    grid_dim = std::numeric_limits<uint32_t>::max();
  }

  // Launch GPU kernel on a stream synchronized against both matrices.
  if (grid_dim > 0) {
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
                                       gpu::get_sync_info(local_output));
    hydrogen::gpu::LaunchKernel(
      fp_kernel<block_dim, TensorDataType>,
      grid_dim, block_dim, 0, multisync,
      num_samples, num_channels,
      input_height, input_width,
      local_input.LockedBuffer(), local_input.LDim(),
      this->m_height, this->m_width,
      local_output.Buffer(), local_output.LDim());
  }

}
#define PROTO(T) \
template class bilinear_resize_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| 5200ef6e966a802e6ec37d777e54ac2d4dd70e77.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_BILINEAR_RESIZE_LAYER_INSTANTIATE
#include "lbann/layers/image/bilinear_resize.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
// Bilinear-resize forward kernel.
// Logical layout: input is (num_samples, num_channels, input_height,
// input_width), one sample per column with leading dimension input_ldim;
// output likewise with the output_* dimensions.  Each loop iteration produces
// one output pixel; the grid-stride loop lets any grid size cover all
// num_samples*num_channels*output_height*output_width elements (the launcher
// may clamp the grid to 2^32-1 blocks).
// NOTE(review): the block_size template parameter is not used in the body.
template <int block_size, typename TensorDataType>
__global__ void fp_kernel(El::Int num_samples,
                          El::Int num_channels,
                          El::Int input_height,
                          El::Int input_width,
                          const TensorDataType* __restrict__ input,
                          El::Int input_ldim,
                          El::Int output_height,
                          El::Int output_width,
                          TensorDataType* __restrict__ output,
                          El::Int output_ldim) {

  // Useful constants
  const TensorDataType half = 0.5;
  const TensorDataType one = 1.;
  const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
  const El::Int num_threads = blockDim.x * gridDim.x;

  // Stride between interpolation points
  const auto& x_stride = TensorDataType(input_width) / TensorDataType(output_width);
  const auto& y_stride = TensorDataType(input_height) / TensorDataType(output_height);

  const auto& size = (num_samples * num_channels
                      * output_height * output_width);
  for (El::Int pos = gid; pos < size; pos += num_threads) {

    // Decompose the flat position into (sample, channel, row, col).
    const auto& sample = pos / (num_channels * output_height * output_width);
    const auto& channel = (pos / (output_height * output_width)) % num_channels;
    const auto& output_row = (pos / output_width) % output_height;
    const auto& output_col = pos % output_width;

    // Interpolation point (half-pixel-centre convention).
    const auto& x = (TensorDataType(output_col) + half) * x_stride;
    const auto& y = (TensorDataType(output_row) + half) * y_stride;

    // 2x2 neighbourhood of input pixels, clamped to the image edges.
    const auto input_col = static_cast<El::Int>(gpu_lib::floor(x - half));
    const auto& input_col0 = gpu_lib::max(input_col, El::Int(0));
    const auto& input_col1 = gpu_lib::min(input_col+1, input_width-1);
    const auto input_row = static_cast<El::Int>(gpu_lib::floor(y - half));
    const auto& input_row0 = gpu_lib::max(input_row, El::Int(0));
    const auto& input_row1 = gpu_lib::min(input_row+1, input_height-1);

    // Interpolation point relative to input pixel centers
    const auto& unit_x = x - (TensorDataType(input_col) + half);
    const auto& unit_y = y - (TensorDataType(input_row) + half);

    // Input and output pixels
    const auto& pixel00 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row0 * input_width
                                + input_col0];
    const auto& pixel01 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row0 * input_width
                                + input_col1];
    const auto& pixel10 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row1 * input_width
                                + input_col0];
    const auto& pixel11 = input[sample * input_ldim
                                + channel * input_height * input_width
                                + input_row1 * input_width
                                + input_col1];
    auto& result = output[sample * output_ldim
                          + channel * output_height * output_width
                          + output_row * output_width
                          + output_col];

    // Bilinear interpolation: weights come from the fractional offsets.
    result = (pixel00 * (one - unit_x) * (one - unit_y)
              + pixel01 * unit_x * (one - unit_y)
              + pixel10 * (one - unit_x) * unit_y
              + pixel11 * unit_x * unit_y);

  }

}
}
// Forward prop: launches the bilinear-resize kernel on the local input and
// output matrices (one column per local sample).  All tensor dimensions
// except the last two collapse into a single "channels" count; the last two
// are treated as height x width.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void bilinear_resize_layer<TensorDataType, Layout, Device>::fp_compute() {

  // Matrices
  const auto& local_input = this->get_local_prev_activations();
  auto& local_output = this->get_local_activations();

  // Dimensions
  const auto& input_dims = this->get_input_dims();
  const auto& num_dims = input_dims.size();
  const auto& num_samples = local_input.Width();
  // Product of all leading dims (everything except height and width).
  const El::Int num_channels = std::accumulate(input_dims.begin(),
                                               input_dims.end()-2,
                                               1,
                                               std::multiplies<int>());
  const El::Int input_height = input_dims[num_dims-2];
  const El::Int input_width = input_dims[num_dims-1];

  // Get GPU grid dimensions
  // Note: Maximum CUDA grid dimension is 2^32-1
  // (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
  // TODO: HIP/ROCM notes
  const El::Int size = local_output.Height() * local_output.Width();
  constexpr El::Int block_dim = 256;
  El::Int grid_dim = (size + block_dim - 1) / block_dim;
  // Clamping the grid is safe because fp_kernel uses a grid-stride loop.
  if (sizeof(El::Int) > sizeof(uint32_t)
      && grid_dim > std::numeric_limits<uint32_t>::max()) {
    grid_dim = std::numeric_limits<uint32_t>::max();
  }

  // Launch GPU kernel on a stream synchronized against both matrices.
  if (grid_dim > 0) {
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
                                       gpu::get_sync_info(local_output));
    hydrogen::gpu::LaunchKernel(
      fp_kernel<block_dim, TensorDataType>,
      grid_dim, block_dim, 0, multisync,
      num_samples, num_channels,
      input_height, input_width,
      local_input.LockedBuffer(), local_input.LDim(),
      this->m_height, this->m_width,
      local_output.Buffer(), local_output.LDim());
  }

}
#define PROTO(T) \
template class bilinear_resize_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
3fa006aed45637c16c5e13f2bd076e4cb92a4b9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "convolutionFFT2D_common.h"
#include "convolutionFFT2D.cuh"
////////////////////////////////////////////////////////////////////////////////
/// Position convolution kernel center at (0, 0) in the image
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: copies the (kernelH x kernelW) convolution kernel into the
// padded (fftH x fftW) buffer so that its centre, originally at
// (kernelY, kernelX), lands at element (0, 0) — the cyclic layout required by
// FFT-based convolution.  Source and destination must not alias.
extern "C" void padKernel(
    float *d_Dst,
    float *d_Src,
    int fftH,
    int fftW,
    int kernelH,
    int kernelW,
    int kernelY,
    int kernelX
)
{
    assert(d_Src != d_Dst);
    // One thread per kernel element.
    dim3 threads(32, 8);
    dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y));

    SET_FLOAT_BASE;
    hipLaunchKernelGGL(( padKernel_kernel), dim3(grid), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        fftH,
        fftW,
        kernelH,
        kernelW,
        kernelY,
        kernelX
    );
    getLastCudaError("padKernel_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Prepare data for "pad to border" addressing mode
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: expands the (dataH x dataW) image into the padded
// (fftH x fftW) buffer using "clamp to border" addressing (border pixels are
// replicated into the padding).  Source and destination must not alias.
// NOTE(review): the signature declares kernelW before kernelH, but the kernel
// below is invoked with (kernelH, kernelW).  This matches the original NVIDIA
// sample, but verify call sites pass arguments in the declared order.
extern "C" void padDataClampToBorder(
    float *d_Dst,
    float *d_Src,
    int fftH,
    int fftW,
    int dataH,
    int dataW,
    int kernelW,
    int kernelH,
    int kernelY,
    int kernelX
)
{
    assert(d_Src != d_Dst);
    // One thread per padded-buffer element.
    //dim3 threads(32, 8);
    //dim3 threads(16, 8);
    //dim3 threads(8, 8);
    dim3 threads(64, 8);
    dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y));

    SET_FLOAT_BASE;
    hipLaunchKernelGGL(( padDataClampToBorder_kernel), dim3(grid), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        fftH,
        fftW,
        dataH,
        dataW,
        kernelH,
        kernelW,
        kernelY,
        kernelX
    );
    getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: pointwise complex modulation of the Fourier-domain buffers
// (presumably d_Dst *= d_Src — kernel body not shown here), scaled by
// 1/(fftW*fftH) so the subsequent inverse FFT is normalized.  'padding' extra
// complex columns per row are included in the element count.
extern "C" void modulateAndNormalize(
    fComplex *d_Dst,
    fComplex *d_Src,
    int fftH,
    int fftW,
    int padding
)
{
    assert(fftW % 2 == 0);
    // R2C layout stores fftW/2 (+ padding) complex values per row.
    const int dataSize = fftH * (fftW / 2 + padding);

    hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(iDivUp(dataSize, 256)), dim3(256), 0, 0,
        d_Dst,
        d_Src,
        dataSize,
        1.0f / (float)(fftW *fftH)
    );
    getLastCudaError("modulateAndNormalize() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// 2D R2C / C2R post/preprocessing kernels
////////////////////////////////////////////////////////////////////////////////
static const double PI = 3.1415926535897932384626433832795;
static const uint BLOCKDIM = 1024;
//static const uint BLOCKDIM = 512;
//static const uint BLOCKDIM = 256;
//static const uint BLOCKDIM = 128;
//static const uint BLOCKDIM = 64;
//static const uint BLOCKDIM = 32;
//static const uint BLOCKDIM = 16;
// Host wrapper for the 2D R2C FFT post-processing step: launches one thread
// per complex pair (DY * DX/2 elements) with phase twiddle dir * pi / DX.
// Requires even DX; POWER_OF_TWO builds additionally require DX and DY to be
// powers of two.  In-place operation is not supported.
extern "C" void spPostprocess2D(
    void *d_Dst,
    void *d_Src,
    uint DY,
    uint DX,
    uint padding,
    int dir
)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    hipLaunchKernelGGL(( spPostprocess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
        (fComplex *)d_Dst,
        (fComplex *)d_Src,
        DY, DX, threadCount, padding,
        (float)phaseBase
    );
    getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n");
}
// Host wrapper for the 2D C2R FFT pre-processing step: identical launch shape
// to spPostprocess2D but with the negated phase twiddle (-dir * pi / DX).
// Requires even DX; POWER_OF_TWO builds additionally require DX and DY to be
// powers of two.  In-place operation is not supported.
extern "C" void spPreprocess2D(
    void *d_Dst,
    void *d_Src,
    uint DY,
    uint DX,
    uint padding,
    int dir
)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = -dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    hipLaunchKernelGGL(( spPreprocess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
        (fComplex *)d_Dst,
        (fComplex *)d_Src,
        DY, DX, threadCount, padding,
        (float)phaseBase
    );
    getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D
////////////////////////////////////////////////////////////////////////////////
// Host wrapper for the fused spPostprocess2D + modulateAndNormalize +
// spPreprocess2D step: combines the spectra in d_SrcA and d_SrcB into d_Dst
// with normalization factor 0.5/(DY*DX).
// NOTE(review): this function asserts DY % 2 == 0 whereas the separate
// pre/post steps assert DX % 2 == 0 — verify the asymmetry is intentional.
extern "C" void spProcess2D(
    void *d_Dst,
    void *d_SrcA,
    void *d_SrcB,
    uint DY,
    uint DX,
    int dir
)
{
    assert(DY % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = (DY / 2) * DX;
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE_A;
    SET_FCOMPLEX_BASE_B;
    hipLaunchKernelGGL(( spProcess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
        (fComplex *)d_Dst,
        (fComplex *)d_SrcA,
        (fComplex *)d_SrcB,
        DY, DX, threadCount,
        (float)phaseBase,
        0.5f / (float)(DY *DX)
    );
    getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n");
}
| 3fa006aed45637c16c5e13f2bd076e4cb92a4b9d.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "convolutionFFT2D_common.h"
#include "convolutionFFT2D.cuh"
////////////////////////////////////////////////////////////////////////////////
/// Position convolution kernel center at (0, 0) in the image
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: copies the (kernelH x kernelW) convolution kernel into the
// padded (fftH x fftW) buffer so that its centre, originally at
// (kernelY, kernelX), lands at element (0, 0) — the cyclic layout required by
// FFT-based convolution.  Source and destination must not alias.
extern "C" void padKernel(
    float *d_Dst,
    float *d_Src,
    int fftH,
    int fftW,
    int kernelH,
    int kernelW,
    int kernelY,
    int kernelX
)
{
    assert(d_Src != d_Dst);
    // One thread per kernel element.
    dim3 threads(32, 8);
    dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y));

    SET_FLOAT_BASE;
    padKernel_kernel<<<grid, threads>>>(
        d_Dst,
        d_Src,
        fftH,
        fftW,
        kernelH,
        kernelW,
        kernelY,
        kernelX
    );
    getLastCudaError("padKernel_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Prepare data for "pad to border" addressing mode
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: expands the (dataH x dataW) image into the padded
// (fftH x fftW) buffer using "clamp to border" addressing (border pixels are
// replicated into the padding).  Source and destination must not alias.
// NOTE(review): the signature declares kernelW before kernelH, but the kernel
// below is invoked with (kernelH, kernelW).  This matches the original NVIDIA
// sample, but verify call sites pass arguments in the declared order.
extern "C" void padDataClampToBorder(
    float *d_Dst,
    float *d_Src,
    int fftH,
    int fftW,
    int dataH,
    int dataW,
    int kernelW,
    int kernelH,
    int kernelY,
    int kernelX
)
{
    assert(d_Src != d_Dst);
    // One thread per padded-buffer element.
    //dim3 threads(32, 8);
    //dim3 threads(16, 8);
    //dim3 threads(8, 8);
    dim3 threads(64, 8);
    dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y));

    SET_FLOAT_BASE;
    padDataClampToBorder_kernel<<<grid, threads>>>(
        d_Dst,
        d_Src,
        fftH,
        fftW,
        dataH,
        dataW,
        kernelH,
        kernelW,
        kernelY,
        kernelX
    );
    getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: pointwise complex modulation of the Fourier-domain buffers
// (presumably d_Dst *= d_Src — kernel body not shown here), scaled by
// 1/(fftW*fftH) so the subsequent inverse FFT is normalized.  'padding' extra
// complex columns per row are included in the element count.
extern "C" void modulateAndNormalize(
    fComplex *d_Dst,
    fComplex *d_Src,
    int fftH,
    int fftW,
    int padding
)
{
    assert(fftW % 2 == 0);
    // R2C layout stores fftW/2 (+ padding) complex values per row.
    const int dataSize = fftH * (fftW / 2 + padding);

    modulateAndNormalize_kernel<<<iDivUp(dataSize, 256), 256>>>(
        d_Dst,
        d_Src,
        dataSize,
        1.0f / (float)(fftW *fftH)
    );
    getLastCudaError("modulateAndNormalize() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// 2D R2C / C2R post/preprocessing kernels
////////////////////////////////////////////////////////////////////////////////
static const double PI = 3.1415926535897932384626433832795;
static const uint BLOCKDIM = 1024;
//static const uint BLOCKDIM = 512;
//static const uint BLOCKDIM = 256;
//static const uint BLOCKDIM = 128;
//static const uint BLOCKDIM = 64;
//static const uint BLOCKDIM = 32;
//static const uint BLOCKDIM = 16;
// Host wrapper for the 2D R2C FFT post-processing step: launches one thread
// per complex pair (DY * DX/2 elements) with phase twiddle dir * pi / DX.
// Requires even DX; POWER_OF_TWO builds additionally require DX and DY to be
// powers of two.  In-place operation is not supported.
extern "C" void spPostprocess2D(
    void *d_Dst,
    void *d_Src,
    uint DY,
    uint DX,
    uint padding,
    int dir
)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    spPostprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
        (fComplex *)d_Dst,
        (fComplex *)d_Src,
        DY, DX, threadCount, padding,
        (float)phaseBase
    );
    getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n");
}
// Host wrapper for the 2D C2R FFT pre-processing step: identical launch shape
// to spPostprocess2D but with the negated phase twiddle (-dir * pi / DX).
// Requires even DX; POWER_OF_TWO builds additionally require DX and DY to be
// powers of two.  In-place operation is not supported.
extern "C" void spPreprocess2D(
    void *d_Dst,
    void *d_Src,
    uint DY,
    uint DX,
    uint padding,
    int dir
)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = -dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    spPreprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
        (fComplex *)d_Dst,
        (fComplex *)d_Src,
        DY, DX, threadCount, padding,
        (float)phaseBase
    );
    getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D
////////////////////////////////////////////////////////////////////////////////
// Host wrapper for the fused spPostprocess2D + modulateAndNormalize +
// spPreprocess2D step: combines the spectra in d_SrcA and d_SrcB into d_Dst
// with normalization factor 0.5/(DY*DX).
// NOTE(review): this function asserts DY % 2 == 0 whereas the separate
// pre/post steps assert DX % 2 == 0 — verify the asymmetry is intentional.
extern "C" void spProcess2D(
    void *d_Dst,
    void *d_SrcA,
    void *d_SrcB,
    uint DY,
    uint DX,
    int dir
)
{
    assert(DY % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = (DY / 2) * DX;
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE_A;
    SET_FCOMPLEX_BASE_B;
    spProcess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
        (fComplex *)d_Dst,
        (fComplex *)d_SrcA,
        (fComplex *)d_SrcB,
        DY, DX, threadCount,
        (float)phaseBase,
        0.5f / (float)(DY *DX)
    );
    getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n");
}
|
263615147941ed8be69403050c024a239ff8bc8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/*
* This example implements a 2D stencil computation, spreading the computation
* across multiple GPUs. This requires communicating halo regions between GPUs
* on every iteration of the stencil as well as managing multiple GPUs from a
* single host application. Here, kernels and transfers are issued in
* breadth-first order to each CUDA stream. Each CUDA stream is associated with
* a single CUDA device.
*/
#define a0 -3.0124472f
#define a1 1.7383092f
#define a2 -0.2796695f
#define a3 0.0547837f
#define a4 -0.0073118f
// cnst for gpu
#define BDIMX 32
#define NPAD 4
#define NPAD2 8
// constant memories for 8 order FD coefficients
__device__ __constant__ float coef[5];
// set up fd coefficients
// Upload the five 8th-order finite-difference coefficients (a0..a4) into the
// device constant-memory array 'coef'.
void setup_coef (void)
{
    static const float host_coef[5] = {a0, a1, a2, a3, a4};
    CHECK(hipMemcpyToSymbol(coef, host_coef, sizeof(host_coef)));
}
// Copy the current wavefield (g_u2) from one or two GPUs back to the host and
// dump it to a raw float file named "snap_at_step_<istep>".
// For ngpus == 2 each device holds half of the ny rows plus a 4-row halo; the
// halo (skiptop) on the second device is skipped when reassembling the field.
void saveSnapshotIstep(
    int istep,
    int nx,
    int ny,
    int ngpus,
    float **g_u2)
{
    float *iwave = (float *)malloc(nx * ny * sizeof(float));
    if (iwave == NULL)
    {
        fprintf(stderr, "saveSnapshotIstep: host allocation failed\n");
        return;
    }

    if (ngpus > 1)
    {
        unsigned int skiptop = nx * 4;     // 4 halo rows on the second device
        unsigned int gsize = nx * ny / 2;  // points owned by each GPU

        for (int i = 0; i < ngpus; i++)
        {
            CHECK(hipSetDevice(i));
            int iskip = (i == 0 ? 0 : skiptop);
            int ioff = (i == 0 ? 0 : gsize);
            CHECK(hipMemcpy(iwave + ioff, g_u2[i] + iskip,
                            gsize * sizeof(float), hipMemcpyDeviceToHost));
        }
    }
    else
    {
        unsigned int isize = nx * ny;
        CHECK(hipMemcpy (iwave, g_u2[0], isize * sizeof(float),
                         hipMemcpyDeviceToHost));
    }

    // snprintf + a 32-byte buffer: the old 20-byte sprintf buffer could
    // overflow for large istep values ("snap_at_step_" is already 13 chars).
    char fname[32];
    snprintf(fname, sizeof(fname), "snap_at_step_%d", istep);

    FILE *fp_snap = fopen(fname, "w");
    if (fp_snap == NULL)
    {
        // Previously a NULL here was passed straight to fwrite (crash).
        fprintf(stderr, "saveSnapshotIstep: cannot open %s\n", fname);
        free(iwave);
        return;
    }

    fwrite(iwave, sizeof(float), nx * ny, fp_snap);
    printf("%s: nx = %d ny = %d istep = %d\n", fname, nx, ny, istep);
    fflush(stdout);
    fclose(fp_snap);

    free(iwave);
    return;
}
// Query every device and report whether all ngpus devices are compute
// capability >= 2.0 (Fermi or later), the requirement for peer-to-peer
// access.  Returns true only if every device qualifies.
inline bool isCapableP2P(int ngpus)
{
    // NOTE(review): runtime-sized array is a compiler extension (VLA) in C++.
    hipDeviceProp_t prop[ngpus];
    int iCount = 0;

    for (int i = 0; i < ngpus; i++)
    {
        CHECK(hipGetDeviceProperties(&prop[i], i));

        if (prop[i].major >= 2) iCount++;

        printf("> GPU%d: %s %s capable of Peer-to-Peer access\n", i,
               prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
        fflush(stdout);
    }

    if(iCount != ngpus)
    {
        printf("> no enough device to run this application\n");
        fflush(stdout);
    }

    return (iCount == ngpus);
}
/*
* enable P2P memcopies between GPUs (all GPUs must be compute capability 2.0 or
* later (Fermi or later))
*/
// Enable peer access between every ordered pair of devices that supports it:
// for each device i, grant it direct access to every other device j.
inline void enableP2P (int ngpus)
{
    for (int i = 0; i < ngpus; i++)
    {
        CHECK(hipSetDevice(i));

        for (int j = 0; j < ngpus; j++)
        {
            if (i == j) continue;

            int peer_access_available = 0;
            CHECK(hipDeviceCanAccessPeer(&peer_access_available, i, j));

            // Only enable when the hardware/topology actually supports it.
            if (peer_access_available) CHECK(hipDeviceEnablePeerAccess(j, 0));
        }
    }
}
// Report whether ALL visible devices support unified virtual addressing.
// The original hard-coded prop[0] and prop[1], reading past the end of the
// properties array whenever ngpus == 1; this version checks each device that
// actually exists (and prints the same lines for the two-GPU case).
inline bool isUnifiedAddressing (int ngpus)
{
    hipDeviceProp_t prop[ngpus];
    bool iuva = true;

    for (int i = 0; i < ngpus; i++)
    {
        CHECK(hipGetDeviceProperties(&prop[i], i));
        iuva = iuva && prop[i].unifiedAddressing;
        printf("> GPU%d: %s %s unified addressing\n", i, prop[i].name,
               (prop[i].unifiedAddressing ? "support" : "not support"));
    }
    fflush(stdout);

    return iuva;
}
// Fill per-GPU row ranges (within each local slab of height iny) for the
// halo-exchange region and for the interior body.  When two GPUs are used,
// GPU0's exchange rows sit near the top of its slab (iny - NPAD2 .. iny - NPAD)
// while GPU1 — and the single-GPU case — uses the rows just inside the top
// padding (NPAD .. NPAD2).  Body ranges cover the remaining interior rows.
inline void calcIndex(int *haloStart, int *haloEnd, int *bodyStart,
                      int *bodyEnd, const int ngpus, const int iny)
{
    for (int dev = 0; dev < ngpus; dev++)
    {
        const bool upperEdge = (dev == 0 && ngpus == 2);

        // halo rows
        haloStart[dev] = upperEdge ? iny - NPAD2 : NPAD;
        haloEnd[dev]   = upperEdge ? iny - NPAD  : NPAD2;

        // interior (body) rows
        bodyStart[dev] = upperEdge ? NPAD        : NPAD + NPAD;
        bodyEnd[dev]   = upperEdge ? iny - NPAD2 : iny - NPAD;
    }
}
// Element offsets used for the two-GPU halo exchange: src_skip[i] is where
// device i's outgoing halo rows begin, dst_skip[i] is where they are written
// on the receiving side (offsets in floats; nx elements per row, slab height
// iny).  Presumably consumed by the inter-device memcpy calls — verify there.
inline void calcSkips(int *src_skip, int *dst_skip, const int nx,
                      const int iny)
{
    // Device 0: rows just below its top padding -> start of the peer slab.
    src_skip[0] = (iny - NPAD2) * nx;
    dst_skip[0] = 0;

    // Device 1: rows just inside its top padding -> top of the peer slab.
    src_skip[1] = NPAD * nx;
    dst_skip[1] = (iny - NPAD) * nx;
}
// wavelet
// Inject the source wavelet: add 'wavelets' to the single grid point at
// x = nx/2 on row ipos.  With two GPUs the source row is ny - 10 of the local
// slab; with one GPU it is ny/2 - 10 of the full domain.  Launch with enough
// x-threads to cover nx; only the thread at ix == nx/2 writes.
__global__ void kernel_add_wavelet ( float *g_u2, float wavelets, const int nx,
    const int ny, const int ngpus)
{
    // global grid idx for (x,y) plane
    int ipos = (ngpus == 2 ? ny - 10 : ny / 2 - 10);
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = ipos * nx + ix;

    if(ix == nx / 2) g_u2[idx] += wavelets;
}
// fd kernel function
// 8th-order 2D finite-difference time step over rows [iStart, iEnd).
// Each thread owns one x column: the x stencil comes from a shared-memory
// line (tile, BDIMX interior points + NPAD halo on each side) and the y
// stencil from a 9-deep register pipeline (yval) that slides down one row
// per loop iteration.  g_u2 is the current wavefield; g_u1 holds the
// previous step on entry and receives the next step
// (u_next = 2*u - u_prev + alpha * laplacian).
// NOTE(review): assumes blockDim.x == BDIMX and at least NPAD rows of
// padding above iStart and below iEnd — confirm at the launch site.
__global__ void kernel_2dfd_last(float *g_u1, float *g_u2, const int nx,
    const int iStart, const int iEnd)
{
    // global to slice : global grid idx for (x,y) plane
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    // smem idx for current point
    unsigned int stx = threadIdx.x + NPAD;
    unsigned int idx = ix + iStart * nx;

    // shared-memory line of u2 for the x stencil: BDIMX points + NPAD2 halo
    __shared__ float tile[BDIMX + NPAD2];

    // physical/medium coefficient applied to the Laplacian
    const float alpha = 0.12f;

    // register pipeline holding 9 consecutive y values of u2
    float yval[9];

    for (int i = 0; i < 8; i++) yval[i] = g_u2[idx + (i - 4) * nx];

    // offset to the row 4 below the pipeline front
    int iskip = NPAD * nx;

#pragma unroll 9
    for (int iy = iStart; iy < iEnd; iy++)
    {
        // load the newest row into the bottom of the pipeline
        yval[8] = g_u2[idx + iskip];

        // first NPAD threads also fill the left/right shared-memory halos
        if(threadIdx.x < NPAD)
        {
            tile[threadIdx.x] = g_u2[idx - NPAD];
            tile[stx + BDIMX] = g_u2[idx + BDIMX];
        }

        tile[stx] = yval[4];
        __syncthreads();

        if ( (ix >= NPAD) && (ix < nx - NPAD) )
        {
            // 8th-order FD operator: centre point counted once per axis
            float tmp = coef[0] * tile[stx] * 2.0f;

#pragma unroll
            for(int d = 1; d <= 4; d++)
            {
                // x-direction taps from shared memory
                tmp += coef[d] * (tile[stx - d] + tile[stx + d]);
            }

#pragma unroll
            for(int d = 1; d <= 4; d++)
            {
                // y-direction taps from the register pipeline
                tmp += coef[d] * (yval[4 - d] + yval[4 + d]);
            }

            // time update: u_next = 2*u - u_prev + alpha * stencil
            g_u1[idx] = yval[4] + yval[4] - g_u1[idx] + alpha * tmp;
        }

        // slide the register pipeline down one row
#pragma unroll 8
        for (int i = 0; i < 8 ; i++)
        {
            yval[i] = yval[i + 1];
        }

        // advance to the next row; sync before tile is overwritten
        idx += nx;
        __syncthreads();
    }
}
__global__ void kernel_2dfd(float *g_u1, float *g_u2, const int nx,
const int iStart, const int iEnd)
{
// global to line index
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
// smem idx for current point
unsigned int stx = threadIdx.x + NPAD;
unsigned int idx = ix + iStart * nx;
// shared memory for x dimension
__shared__ float line[BDIMX + NPAD2];
// a coefficient related to physical properties
const float alpha = 0.12f;
// register for y value
float yval[9];
for (int i = 0; i < 8; i++) yval[i] = g_u2[idx + (i - 4) * nx];
// skip for the bottom most y value
int iskip = NPAD * nx;
#pragma unroll 9
for (int iy = iStart; iy < iEnd; iy++)
{
// get yval[8] here
yval[8] = g_u2[idx + iskip];
// read halo part
if(threadIdx.x < NPAD)
{
line[threadIdx.x] = g_u2[idx - NPAD];
line[stx + BDIMX] = g_u2[idx + BDIMX];
}
line[stx] = yval[4];
__syncthreads();
// 8rd fd operator
if ( (ix >= NPAD) && (ix < nx - NPAD) )
{
// center point
float tmp = coef[0] * line[stx] * 2.0f;
#pragma unroll
for(int d = 1; d <= 4; d++)
{
tmp += coef[d] * ( line[stx - d] + line[stx + d]);
}
#pragma unroll
for(int d = 1; d <= 4; d++)
{
tmp += coef[d] * (yval[4 - d] + yval[4 + d]);
}
// time dimension
g_u1[idx] = yval[4] + yval[4] - g_u1[idx] + alpha * tmp;
}
#pragma unroll 8
for (int i = 0; i < 8 ; i++)
{
yval[i] = yval[i + 1];
}
// advancd on global idx
idx += nx;
__syncthreads();
}
}
int main( int argc, char *argv[] )
{
int ngpus;
// check device count
CHECK(hipGetDeviceCount(&ngpus));
printf("> CUDA-capable device count: %i\n", ngpus);
// check p2p capability
isCapableP2P(ngpus);
isUnifiedAddressing(ngpus);
// get it from command line
if (argc > 1)
{
if (atoi(argv[1]) > ngpus)
{
fprintf(stderr, "Invalid number of GPUs specified: %d is greater "
"than the total number of GPUs in this platform (%d)\n",
atoi(argv[1]), ngpus);
exit(1);
}
ngpus = atoi(argv[1]);
}
int iMovie = 10000;
if(argc >= 3) iMovie = atoi(argv[2]);
printf("> run with device: %i\n", ngpus);
// size
const int nsteps = 600;
const int nx = 512;
const int ny = 512;
const int iny = ny / ngpus + NPAD * (ngpus - 1);
size_t isize = nx * iny;
size_t ibyte = isize * sizeof(float);
size_t iexchange = NPAD * nx * sizeof(float);
// set up gpu card
float *d_u2[ngpus], *d_u1[ngpus];
for(int i = 0; i < ngpus; i++)
{
// set device
CHECK(hipSetDevice(i));
// allocate device memories
CHECK(hipMalloc ((void **) &d_u1[i], ibyte));
CHECK(hipMalloc ((void **) &d_u2[i], ibyte));
CHECK(hipMemset (d_u1[i], 0, ibyte));
CHECK(hipMemset (d_u2[i], 0, ibyte));
printf("GPU %i: allocated %.2f MB gmem\n", i,
(4.f * ibyte) / (1024.f * 1024.f) );
setup_coef ();
}
// stream definition
hipStream_t stream_halo[ngpus], stream_body[ngpus];
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
CHECK(hipStreamCreate( &stream_halo[i] ));
CHECK(hipStreamCreate( &stream_body[i] ));
}
// calculate index for computation
int haloStart[ngpus], bodyStart[ngpus], haloEnd[ngpus], bodyEnd[ngpus];
calcIndex(haloStart, haloEnd, bodyStart, bodyEnd, ngpus, iny);
int src_skip[ngpus], dst_skip[ngpus];
if(ngpus > 1) calcSkips(src_skip, dst_skip, nx, iny);
// kernel launch configuration
dim3 block(BDIMX);
dim3 grid(nx / block.x);
// set up event for timing
CHECK(hipSetDevice(0));
hipEvent_t start, stop;
CHECK (hipEventCreate(&start));
CHECK (hipEventCreate(&stop ));
CHECK(hipEventRecord( start, 0 ));
// main loop for wave propagation
for(int istep = 0; istep < nsteps; istep++)
{
// save snap image
if(iMovie == istep) saveSnapshotIstep(istep, nx, ny, ngpus, d_u2);
// add wavelet only onto gpu0
if (istep == 0)
{
CHECK(hipSetDevice(0));
hipLaunchKernelGGL(( kernel_add_wavelet), dim3(grid), dim3(block), 0, 0, d_u2[0], 20.0, nx, iny, ngpus);
}
// halo part
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
// compute halo
hipLaunchKernelGGL(( kernel_2dfd), dim3(grid), dim3(block), 0, stream_halo[i], d_u1[i], d_u2[i],
nx, haloStart[i], haloEnd[i]);
// compute internal
hipLaunchKernelGGL(( kernel_2dfd), dim3(grid), dim3(block), 0, stream_body[i], d_u1[i], d_u2[i],
nx, bodyStart[i], bodyEnd[i]);
}
// exchange halo
if (ngpus > 1)
{
CHECK(hipMemcpyAsync(d_u1[1] + dst_skip[0], d_u1[0] + src_skip[0],
iexchange, hipMemcpyDefault, stream_halo[0]));
CHECK(hipMemcpyAsync(d_u1[0] + dst_skip[1], d_u1[1] + src_skip[1],
iexchange, hipMemcpyDefault, stream_halo[1]));
}
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
CHECK(hipDeviceSynchronize());
float *tmpu0 = d_u1[i];
d_u1[i] = d_u2[i];
d_u2[i] = tmpu0;
}
}
CHECK(hipSetDevice( 0 ));
CHECK(hipEventRecord( stop, 0 ));
CHECK(hipDeviceSynchronize());
CHECK (hipGetLastError());
float elapsed_time_ms = 0.0f;
CHECK (hipEventElapsedTime( &elapsed_time_ms, start, stop ));
elapsed_time_ms /= nsteps;
printf("gputime: %8.2fms ", elapsed_time_ms);
printf("performance: %8.2f MCells/s\n",
(double) nx * ny / (elapsed_time_ms * 1e3f) );
fflush(stdout);
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
// clear
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
CHECK (hipStreamDestroy( stream_halo[i] ));
CHECK (hipStreamDestroy( stream_body[i] ));
CHECK (hipFree (d_u1[i]));
CHECK (hipFree (d_u2[i]));
CHECK(hipDeviceReset());
}
exit(EXIT_SUCCESS);
}
| 263615147941ed8be69403050c024a239ff8bc8e.cu | #include "../common/common.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
/*
* This example implements a 2D stencil computation, spreading the computation
* across multiple GPUs. This requires communicating halo regions between GPUs
* on every iteration of the stencil as well as managing multiple GPUs from a
* single host application. Here, kernels and transfers are issued in
* breadth-first order to each CUDA stream. Each CUDA stream is associated with
* a single CUDA device.
*/
#define a0 -3.0124472f
#define a1 1.7383092f
#define a2 -0.2796695f
#define a3 0.0547837f
#define a4 -0.0073118f
// cnst for gpu
#define BDIMX 32
#define NPAD 4
#define NPAD2 8
// constant memories for 8 order FD coefficients
__device__ __constant__ float coef[5];
// Copy the five 8th-order finite-difference coefficients (a0..a4) from the
// host into the device __constant__ array `coef`, which every FD kernel reads.
// Must be called once per device (after cudaSetDevice) before launching the
// stencil kernels.
void setup_coef (void)
{
    const float h_coef[] = {a0, a1, a2, a3, a4};
    CHECK( cudaMemcpyToSymbol( coef, h_coef, 5 * sizeof(float) ));
}
// Dump the current wavefield to a raw float32 file "snap_at_step_<istep>".
// With two GPUs the two half-domains are copied back and concatenated,
// skipping the NPAD (= 4) halo rows at the top of the second GPU's slice;
// with one GPU the whole nx*ny field is copied.
// Fixes vs. original: malloc/fopen results are now checked (previously a
// failure dereferenced NULL), and the filename is built with snprintf into a
// buffer large enough for any int step (20 bytes overflowed at 7 digits).
void saveSnapshotIstep(
    int istep,
    int nx,
    int ny,
    int ngpus,
    float **g_u2)
{
    float *iwave = (float *)malloc(nx * ny * sizeof(float));
    if (iwave == NULL)
    {
        fprintf(stderr, "saveSnapshotIstep: host allocation failed\n");
        return;
    }
    if (ngpus > 1)
    {
        unsigned int skiptop = nx * 4;       // skip NPAD halo rows on GPU1
        unsigned int gsize = nx * ny / 2;    // elements owned by each GPU
        for (int i = 0; i < ngpus; i++)
        {
            CHECK(cudaSetDevice(i));
            int iskip = (i == 0 ? 0 : skiptop);
            int ioff = (i == 0 ? 0 : gsize);
            CHECK(cudaMemcpy(iwave + ioff, g_u2[i] + iskip,
                             gsize * sizeof(float), cudaMemcpyDeviceToHost));
        }
    }
    else
    {
        unsigned int isize = nx * ny;
        CHECK(cudaMemcpy (iwave, g_u2[0], isize * sizeof(float),
                          cudaMemcpyDeviceToHost));
    }
    // snprintf + 32-byte buffer cannot overflow for any 32-bit istep
    char fname[32];
    snprintf(fname, sizeof(fname), "snap_at_step_%d", istep);
    FILE *fp_snap = fopen(fname, "w");
    if (fp_snap == NULL)
    {
        fprintf(stderr, "saveSnapshotIstep: cannot open %s\n", fname);
        free(iwave);
        return;
    }
    fwrite(iwave, sizeof(float), nx * ny, fp_snap);
    printf("%s: nx = %d ny = %d istep = %d\n", fname, nx, ny, istep);
    fflush(stdout);
    fclose(fp_snap);
    free(iwave);
    return;
}
// Return true iff every one of the first `ngpus` devices is compute
// capability 2.0+ (Fermi or later), used here as the precondition for
// peer-to-peer access.  Prints one line per device.
// NOTE(review): `cudaDeviceProp prop[ngpus]` is a variable-length array —
// a GNU extension, not standard C++; fine under nvcc/gcc.
inline bool isCapableP2P(int ngpus)
{
    cudaDeviceProp prop[ngpus];
    int iCount = 0;   // number of devices that pass the capability check
    for (int i = 0; i < ngpus; i++)
    {
        CHECK(cudaGetDeviceProperties(&prop[i], i));
        if (prop[i].major >= 2) iCount++;
        printf("> GPU%d: %s %s capable of Peer-to-Peer access\n", i,
               prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
        fflush(stdout);
    }
    if(iCount != ngpus)
    {
        printf("> no enough device to run this application\n");
        fflush(stdout);
    }
    return (iCount == ngpus);
}
/*
* enable P2P memcopies between GPUs (all GPUs must be compute capability 2.0 or
* later (Fermi or later))
*/
// Enable direct peer-to-peer access between every ordered pair (i, j) of the
// first `ngpus` devices for which the hardware reports it is available.
// Pairs without P2P support are silently skipped (copies then fall back to
// staging through the host).
inline void enableP2P (int ngpus)
{
    for (int i = 0; i < ngpus; i++)
    {
        CHECK(cudaSetDevice(i));
        for (int j = 0; j < ngpus; j++)
        {
            if (i == j) continue;
            int peer_access_available = 0;
            CHECK(cudaDeviceCanAccessPeer(&peer_access_available, i, j));
            if (peer_access_available) CHECK(cudaDeviceEnablePeerAccess(j, 0));
        }
    }
}
// Return true iff all of the first `ngpus` devices support unified virtual
// addressing; prints one status line per device.
// Fix vs. original: the old code unconditionally read prop[1], which is an
// uninitialized (out-of-bounds for the loop) element when ngpus == 1 —
// undefined behavior.  Both the conjunction and the printing now run over
// exactly the devices that were queried.
inline bool isUnifiedAddressing (int ngpus)
{
    cudaDeviceProp prop[ngpus];
    bool iuva = true;
    for (int i = 0; i < ngpus; i++)
    {
        CHECK(cudaGetDeviceProperties(&prop[i], i));
        iuva = iuva && prop[i].unifiedAddressing;
        printf("> GPU%d: %s %s unified addressing\n", i, prop[i].name,
               (prop[i].unifiedAddressing ? "support" : "not support"));
    }
    fflush(stdout);
    return iuva;
}
// Compute, per GPU, the y-row ranges [start, end) of the halo region (rows
// that the peer GPU needs) and of the interior body region.  Layout: with
// two GPUs, GPU0 owns the top slice so its halo sits at the bottom of its
// iny rows; GPU1 (or a single GPU) uses the ranges of the "else" branch.
inline void calcIndex(int *haloStart, int *haloEnd, int *bodyStart,
                      int *bodyEnd, const int ngpus, const int iny)
{
    for (int g = 0; g < ngpus; g++)
    {
        const bool topOfPair = (g == 0 && ngpus == 2);
        // halo rows: bottom edge of GPU0's slice, top edge otherwise
        haloStart[g] = topOfPair ? iny - NPAD2 : NPAD;
        haloEnd[g]   = topOfPair ? iny - NPAD  : NPAD2;
        // body rows: everything between the padding and the halo
        bodyStart[g] = topOfPair ? NPAD        : NPAD + NPAD;
        bodyEnd[g]   = topOfPair ? iny - NPAD2 : iny - NPAD;
    }
}
// Element offsets (in floats) used by the halo exchange between the two
// GPUs.  Entry 0 describes the GPU0 -> GPU1 copy (bottom of GPU0's interior
// into the top of GPU1's slice); entry 1 the GPU1 -> GPU0 copy.  Only
// meaningful for the two-GPU configuration (both entries are always filled).
inline void calcSkips(int *src_skip, int *dst_skip, const int nx,
                      const int iny)
{
    dst_skip[0] = 0;
    src_skip[0] = (iny - NPAD2) * nx;
    dst_skip[1] = (iny - NPAD) * nx;
    src_skip[1] = nx * NPAD;
}
// wavelet
// Inject the source wavelet at a single grid point: column nx/2, ten rows
// above the (logical) bottom of this device's slice.  Launched with a 1-D
// grid covering nx threads; only the single thread with ix == nx/2 writes.
__global__ void kernel_add_wavelet ( float *g_u2, float wavelets, const int nx,
                                     const int ny, const int ngpus)
{
    // row index of the injection point within this slice
    int ipos = (ngpus == 2 ? ny - 10 : ny / 2 - 10);
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = ipos * nx + ix;
    if(ix == nx / 2) g_u2[idx] += wavelets;
}
// fd kernel function
// One time step of the 8th-order-in-space / 2nd-order-in-time 2-D FD stencil
// over rows [iStart, iEnd).  x-neighbours are staged through a shared-memory
// line (BDIMX interior points plus an NPAD halo on each side); y-neighbours
// live in a 9-deep register pipeline (yval) that is shifted down as the row
// loop advances, so each global value is loaded only once per thread.
// NOTE(review): this "last" variant is not referenced by main() in this
// file — kernel_2dfd is used for both halo and body regions.
__global__ void kernel_2dfd_last(float *g_u1, float *g_u2, const int nx,
                                 const int iStart, const int iEnd)
{
    // global to slice : global grid idx for (x,y) plane
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    // smem idx for current point
    unsigned int stx = threadIdx.x + NPAD;
    unsigned int idx = ix + iStart * nx;
    // shared-memory line for one row of u2: BDIMX + 2*NPAD floats
    __shared__ float tile[BDIMX + NPAD2];
    const float alpha = 0.12f;
    // register pipeline over y: yval[4] is the current row, +/-4 rows around
    float yval[9];
    for (int i = 0; i < 8; i++) yval[i] = g_u2[idx + (i - 4) * nx];
    // offset from the current row to the row entering the pipeline (+NPAD)
    int iskip = NPAD * nx;
    #pragma unroll 9
    for (int iy = iStart; iy < iEnd; iy++)
    {
        // fetch the incoming bottom row of the pipeline
        yval[8] = g_u2[idx + iskip];
        // first NPAD threads also load the left and right x halos
        if(threadIdx.x < NPAD)
        {
            tile[threadIdx.x] = g_u2[idx - NPAD];
            tile[stx + BDIMX] = g_u2[idx + BDIMX];
        }
        tile[stx] = yval[4];
        __syncthreads();
        if ( (ix >= NPAD) && (ix < nx - NPAD) )
        {
            // 8th-order fd operator: center point counted for both axes
            float tmp = coef[0] * tile[stx] * 2.0f;
            // x-direction neighbours from shared memory
            #pragma unroll
            for(int d = 1; d <= 4; d++)
            {
                tmp += coef[d] * (tile[stx - d] + tile[stx + d]);
            }
            // y-direction neighbours from the register pipeline
            #pragma unroll
            for(int d = 1; d <= 4; d++)
            {
                tmp += coef[d] * (yval[4 - d] + yval[4 + d]);
            }
            // time update: u_new = 2*u - u_old + alpha * laplacian
            g_u1[idx] = yval[4] + yval[4] - g_u1[idx] + alpha * tmp;
        }
        // shift the y pipeline down by one row
        #pragma unroll 8
        for (int i = 0; i < 8 ; i++)
        {
            yval[i] = yval[i + 1];
        }
        // advance the global index to the next row
        idx += nx;
        __syncthreads();
    }
}
// One time step of the 8th-order-in-space / 2nd-order-in-time 2-D FD stencil
// over rows [iStart, iEnd).  Identical scheme to kernel_2dfd_last: a shared
// line caches the x-neighbours, a 9-deep register pipeline (yval) caches the
// y-neighbours, and u1 is updated in place from (u2, u1_old).
// Launch config: 1-D grid of nx/BDIMX blocks of BDIMX threads.
__global__ void kernel_2dfd(float *g_u1, float *g_u2, const int nx,
                            const int iStart, const int iEnd)
{
    // global to line index
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    // smem idx for current point
    unsigned int stx = threadIdx.x + NPAD;
    unsigned int idx = ix + iStart * nx;
    // shared memory line for the x dimension: BDIMX + 2*NPAD floats
    __shared__ float line[BDIMX + NPAD2];
    // a coefficient related to physical properties
    const float alpha = 0.12f;
    // register pipeline over y: yval[4] is the current row
    float yval[9];
    for (int i = 0; i < 8; i++) yval[i] = g_u2[idx + (i - 4) * nx];
    // offset to the bottom-most y value entering the pipeline
    int iskip = NPAD * nx;
    #pragma unroll 9
    for (int iy = iStart; iy < iEnd; iy++)
    {
        // get yval[8] here
        yval[8] = g_u2[idx + iskip];
        // first NPAD threads also load the left/right x halos
        if(threadIdx.x < NPAD)
        {
            line[threadIdx.x] = g_u2[idx - NPAD];
            line[stx + BDIMX] = g_u2[idx + BDIMX];
        }
        line[stx] = yval[4];
        __syncthreads();
        // 8th-order fd operator
        if ( (ix >= NPAD) && (ix < nx - NPAD) )
        {
            // center point, counted once per axis
            float tmp = coef[0] * line[stx] * 2.0f;
            // x-direction neighbours from shared memory
            #pragma unroll
            for(int d = 1; d <= 4; d++)
            {
                tmp += coef[d] * ( line[stx - d] + line[stx + d]);
            }
            // y-direction neighbours from the register pipeline
            #pragma unroll
            for(int d = 1; d <= 4; d++)
            {
                tmp += coef[d] * (yval[4 - d] + yval[4 + d]);
            }
            // time update: u_new = 2*u - u_old + alpha * laplacian
            g_u1[idx] = yval[4] + yval[4] - g_u1[idx] + alpha * tmp;
        }
        // shift the y pipeline down by one row
        #pragma unroll 8
        for (int i = 0; i < 8 ; i++)
        {
            yval[i] = yval[i + 1];
        }
        // advance the global index to the next row
        idx += nx;
        __syncthreads();
    }
}
// Driver: 2-D acoustic wave propagation split across up to two GPUs.
// Each GPU owns a slice of iny rows (iny includes NPAD overlap rows when
// ngpus == 2).  Per device, a "halo" stream updates the exchanged rows first
// so the asynchronous halo exchange can overlap with the "body" stream's
// interior update.  usage: prog [ngpus] [snapshot_step]
int main( int argc, char *argv[] )
{
    int ngpus;
    // check device count
    CHECK(cudaGetDeviceCount(&ngpus));
    printf("> CUDA-capable device count: %i\n", ngpus);
    // check p2p capability
    isCapableP2P(ngpus);
    isUnifiedAddressing(ngpus);
    // optionally restrict the GPU count from the command line
    // NOTE(review): argv[1] <= 0 is not rejected here — confirm intended
    if (argc > 1)
    {
        if (atoi(argv[1]) > ngpus)
        {
            fprintf(stderr, "Invalid number of GPUs specified: %d is greater "
                    "than the total number of GPUs in this platform (%d)\n",
                    atoi(argv[1]), ngpus);
            exit(1);
        }
        ngpus = atoi(argv[1]);
    }
    // step at which a snapshot file is written (default: never, 10000 > nsteps)
    int iMovie = 10000;
    if(argc >= 3) iMovie = atoi(argv[2]);
    printf("> run with device: %i\n", ngpus);
    // problem size; iny = rows per GPU incl. NPAD overlap for the 2-GPU case
    const int nsteps = 600;
    const int nx = 512;
    const int ny = 512;
    const int iny = ny / ngpus + NPAD * (ngpus - 1);
    size_t isize = nx * iny;
    size_t ibyte = isize * sizeof(float);
    size_t iexchange = NPAD * nx * sizeof(float);
    // per-GPU ping-pong wavefield buffers (u1 = previous, u2 = current)
    float *d_u2[ngpus], *d_u1[ngpus];
    for(int i = 0; i < ngpus; i++)
    {
        // set device
        CHECK(cudaSetDevice(i));
        // allocate device memories
        CHECK(cudaMalloc ((void **) &d_u1[i], ibyte));
        CHECK(cudaMalloc ((void **) &d_u2[i], ibyte));
        CHECK(cudaMemset (d_u1[i], 0, ibyte));
        CHECK(cudaMemset (d_u2[i], 0, ibyte));
        // NOTE(review): only two buffers of ibyte are allocated per GPU, so
        // the 4.f factor appears to overstate the figure by 2x — confirm
        printf("GPU %i: allocated %.2f MB gmem\n", i,
               (4.f * ibyte) / (1024.f * 1024.f) );
        setup_coef ();
    }
    // two streams per device: halo rows vs. interior body
    cudaStream_t stream_halo[ngpus], stream_body[ngpus];
    for (int i = 0; i < ngpus; i++)
    {
        CHECK(cudaSetDevice(i));
        CHECK(cudaStreamCreate( &stream_halo[i] ));
        CHECK(cudaStreamCreate( &stream_body[i] ));
    }
    // calculate index for computation
    int haloStart[ngpus], bodyStart[ngpus], haloEnd[ngpus], bodyEnd[ngpus];
    calcIndex(haloStart, haloEnd, bodyStart, bodyEnd, ngpus, iny);
    int src_skip[ngpus], dst_skip[ngpus];
    if(ngpus > 1) calcSkips(src_skip, dst_skip, nx, iny);
    // kernel launch configuration: one thread per x column
    dim3 block(BDIMX);
    dim3 grid(nx / block.x);
    // set up event for timing (recorded on device 0's default stream)
    CHECK(cudaSetDevice(0));
    cudaEvent_t start, stop;
    CHECK (cudaEventCreate(&start));
    CHECK (cudaEventCreate(&stop ));
    CHECK(cudaEventRecord( start, 0 ));
    // main loop for wave propagation
    for(int istep = 0; istep < nsteps; istep++)
    {
        // save snap image
        if(iMovie == istep) saveSnapshotIstep(istep, nx, ny, ngpus, d_u2);
        // add wavelet only onto gpu0 (legacy default stream orders this
        // before the step's kernels)
        if (istep == 0)
        {
            CHECK(cudaSetDevice(0));
            kernel_add_wavelet<<<grid, block>>>(d_u2[0], 20.0, nx, iny, ngpus);
        }
        // launch halo then body update on every device (breadth-first)
        for (int i = 0; i < ngpus; i++)
        {
            CHECK(cudaSetDevice(i));
            // compute halo
            kernel_2dfd<<<grid, block, 0, stream_halo[i]>>>(d_u1[i], d_u2[i],
                    nx, haloStart[i], haloEnd[i]);
            // compute internal
            kernel_2dfd<<<grid, block, 0, stream_body[i]>>>(d_u1[i], d_u2[i],
                    nx, bodyStart[i], bodyEnd[i]);
        }
        // exchange the freshly computed u1 halo rows between the GPUs;
        // cudaMemcpyDefault lets UVA infer the direction
        if (ngpus > 1)
        {
            CHECK(cudaMemcpyAsync(d_u1[1] + dst_skip[0], d_u1[0] + src_skip[0],
                                  iexchange, cudaMemcpyDefault, stream_halo[0]));
            CHECK(cudaMemcpyAsync(d_u1[0] + dst_skip[1], d_u1[1] + src_skip[1],
                                  iexchange, cudaMemcpyDefault, stream_halo[1]));
        }
        // synchronize all devices, then swap the ping-pong buffers
        for (int i = 0; i < ngpus; i++)
        {
            CHECK(cudaSetDevice(i));
            CHECK(cudaDeviceSynchronize());
            float *tmpu0 = d_u1[i];
            d_u1[i] = d_u2[i];
            d_u2[i] = tmpu0;
        }
    }
    CHECK(cudaSetDevice( 0 ));
    CHECK(cudaEventRecord( stop, 0 ));
    CHECK(cudaDeviceSynchronize());
    CHECK (cudaGetLastError());
    float elapsed_time_ms = 0.0f;
    CHECK (cudaEventElapsedTime( &elapsed_time_ms, start, stop ));
    // report the average per-step time and throughput in MCells/s
    elapsed_time_ms /= nsteps;
    printf("gputime: %8.2fms ", elapsed_time_ms);
    printf("performance: %8.2f MCells/s\n",
           (double) nx * ny / (elapsed_time_ms * 1e3f) );
    fflush(stdout);
    CHECK(cudaEventDestroy(start));
    CHECK(cudaEventDestroy(stop));
    // clear
    for (int i = 0; i < ngpus; i++)
    {
        CHECK(cudaSetDevice(i));
        CHECK (cudaStreamDestroy( stream_halo[i] ));
        CHECK (cudaStreamDestroy( stream_body[i] ));
        CHECK (cudaFree (d_u1[i]));
        CHECK (cudaFree (d_u2[i]));
        CHECK(cudaDeviceReset());
    }
    exit(EXIT_SUCCESS);
}
|
1481f891ea479c4e03461dc832ffbecf8d88403b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <utility>
#include <vector>
#include "caffe/layers/batch_reindex_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
__global__ void BRForward(const int count, const int inner_dim, const Dtype* in,
const Dtype* permut, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
int in_n = static_cast<int>(permut[n]);
out[index] = in[in_n * (inner_dim) + index % (inner_dim)];
}
}
template<typename Dtype>
void BatchReindexLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
check_batch_reindex(bottom[0]->shape(0), bottom[1]->count(),
bottom[1]->cpu_data());
if (top[0]->count() == 0) {
return;
}
int threads = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BRForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
bottom[0]->gpu_data(), bottom[1]->gpu_data(), top[0]->mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
}
template<typename Dtype>
__global__ void BRBackward(const int count, const int inner_dim,
const Dtype* in, const Dtype* top_indexes,
const Dtype* begins, const Dtype* counts,
Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
out[index] = 0;
int lower = static_cast<int>(begins[n]);
int upper = lower + static_cast<int>(counts[n]);
for (int i = lower; i < upper; ++i) {
int in_n = static_cast<int>(top_indexes[i]);
out[index] += in[in_n * (inner_dim) + index % (inner_dim)];
}
}
}
template<typename Dtype>
void BatchReindexLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
CHECK(!propagate_down[1]) << "Cannot backprop to index.";
if (!propagate_down[0]) {
return;
}
vector<std::pair<int, int> > mapping;
const Dtype* perm = bottom[1]->cpu_data();
for (int i = 0; i < bottom[1]->count(); ++i) {
mapping.push_back(pair<int, int>(static_cast<int>(perm[i]), i));
}
std::sort(mapping.begin(), mapping.end(), pair_sort_first());
// Each element of the bottom diff is potentially the sum of many top diffs.
// However, we'd like each CUDA thread to handle exactly one output. Hence,
// we first pre-compute a list of lists of indices that need to be summed for
// each output. `top_indexes` holds the data of this list of lists. The
// k'th element of `begins` points to the location in `top_indexes` where the
// list for the k'th example begin, and the k'th element of `counts` is the
// length of that list.
vector<int> shape;
shape.push_back(bottom[1]->count());
Blob<Dtype> top_indexes(shape);
shape[0] = bottom[0]->shape(0);
Blob<Dtype> counts(shape);
Blob<Dtype> begins(shape);
Dtype* t_i_data = top_indexes.mutable_cpu_data();
Dtype* c_data = counts.mutable_cpu_data();
Dtype* b_data = begins.mutable_cpu_data();
caffe_set(begins.count(), Dtype(-1), b_data);
caffe_set(counts.count(), Dtype(0), c_data);
for (int i = 0; i < mapping.size(); ++i) {
t_i_data[i] = mapping[i].second;
if (b_data[mapping[i].first] == -1) {
b_data[mapping[i].first] = i;
}
c_data[mapping[i].first] += 1;
}
int threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BRBackward<Dtype>) , dim3(CAFFE_GET_BLOCKS(threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
top[0]->gpu_diff(), top_indexes.gpu_data(), begins.gpu_data(),
counts.gpu_data(), bottom[0]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchReindexLayer);
} // namespace caffe
| 1481f891ea479c4e03461dc832ffbecf8d88403b.cu | /*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <utility>
#include <vector>
#include "caffe/layers/batch_reindex_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Gather along the batch axis: out[n, :] = in[permut[n], :] where each
// "row" is inner_dim contiguous elements.  One thread per output element;
// `permut` holds the source batch indices encoded as Dtype.
template<typename Dtype>
__global__ void BRForward(const int count, const int inner_dim, const Dtype* in,
                          const Dtype* permut, Dtype* out) {
  CUDA_KERNEL_LOOP(index, count) {
    int n = index / (inner_dim);                 // destination batch index
    int in_n = static_cast<int>(permut[n]);      // source batch index
    out[index] = in[in_n * (inner_dim) + index % (inner_dim)];
  }
}
// Forward pass: top[0][i] = bottom[0][bottom[1][i]] along the batch axis.
// bottom[1] holds the (Dtype-encoded) source indices, validated on the host
// first; launches one CUDA thread per output element.  No-op when the index
// blob is empty.
template<typename Dtype>
void BatchReindexLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                           const vector<Blob<Dtype>*>& top) {
  check_batch_reindex(bottom[0]->shape(0), bottom[1]->count(),
                      bottom[1]->cpu_data());
  if (top[0]->count() == 0) {
    return;
  }
  int threads = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  BRForward<Dtype> <<<CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS>>>(
      top[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
      bottom[0]->gpu_data(), bottom[1]->gpu_data(), top[0]->mutable_gpu_data());
  CUDA_POST_KERNEL_CHECK;
}
// Scatter-sum for the backward pass: each bottom-diff element accumulates
// the top-diff of every output row that was gathered from its batch entry.
// For batch entry n, top_indexes[begins[n] .. begins[n]+counts[n]-1] lists
// the contributing top rows (precomputed on the host), so each thread owns
// exactly one output element and no atomics are needed.
template<typename Dtype>
__global__ void BRBackward(const int count, const int inner_dim,
                           const Dtype* in, const Dtype* top_indexes,
                           const Dtype* begins, const Dtype* counts,
                           Dtype* out) {
  CUDA_KERNEL_LOOP(index, count) {
    int n = index / (inner_dim);
    out[index] = 0;
    int lower = static_cast<int>(begins[n]);
    int upper = lower + static_cast<int>(counts[n]);
    // sum the diffs of every top row that copied from this bottom row
    for (int i = lower; i < upper; ++i) {
      int in_n = static_cast<int>(top_indexes[i]);
      out[index] += in[in_n * (inner_dim) + index % (inner_dim)];
    }
  }
}
// Backward pass: bottom diff is the sum of the top diffs of all outputs that
// were gathered from each bottom row.  The (begins, counts, top_indexes)
// lists are built on the host by sorting the permutation, then BRBackward
// gives each bottom-diff element to exactly one CUDA thread.
// Gradients cannot flow to the index input (bottom[1]).
template<typename Dtype>
void BatchReindexLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  CHECK(!propagate_down[1]) << "Cannot backprop to index.";
  if (!propagate_down[0]) {
    return;
  }
  // pair each source batch index with its position in the permutation
  vector<std::pair<int, int> > mapping;
  const Dtype* perm = bottom[1]->cpu_data();
  for (int i = 0; i < bottom[1]->count(); ++i) {
    mapping.push_back(pair<int, int>(static_cast<int>(perm[i]), i));
  }
  std::sort(mapping.begin(), mapping.end(), pair_sort_first());
  // Each element of the bottom diff is potentially the sum of many top diffs.
  // However, we'd like each CUDA thread to handle exactly one output. Hence,
  // we first pre-compute a list of lists of indices that need to be summed for
  // each output. `top_indexes` holds the data of this list of lists. The
  // k'th element of `begins` points to the location in `top_indexes` where the
  // list for the k'th example begin, and the k'th element of `counts` is the
  // length of that list.
  vector<int> shape;
  shape.push_back(bottom[1]->count());
  Blob<Dtype> top_indexes(shape);
  shape[0] = bottom[0]->shape(0);
  Blob<Dtype> counts(shape);
  Blob<Dtype> begins(shape);
  Dtype* t_i_data = top_indexes.mutable_cpu_data();
  Dtype* c_data = counts.mutable_cpu_data();
  Dtype* b_data = begins.mutable_cpu_data();
  // begins == -1 marks batch entries that were never gathered (count 0)
  caffe_set(begins.count(), Dtype(-1), b_data);
  caffe_set(counts.count(), Dtype(0), c_data);
  for (int i = 0; i < mapping.size(); ++i) {
    t_i_data[i] = mapping[i].second;
    if (b_data[mapping[i].first] == -1) {
      b_data[mapping[i].first] = i;
    }
    c_data[mapping[i].first] += 1;
  }
  int threads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  BRBackward<Dtype> <<<CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS>>>(
      bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
      top[0]->gpu_diff(), top_indexes.gpu_data(), begins.gpu_data(),
      counts.gpu_data(), bottom[0]->mutable_gpu_diff());
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchReindexLayer);
} // namespace caffe
|
1c1152d8ae2b502d674eb851cbd0905221cb16ce.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <assert.h>
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include <functional>
#include <stdint.h>
#include "NvInfer.h"
#include "plugin.h"
// CUB's bug workaround:
// To work properly for large batch size CUB segmented sort needs ridiculous
// workspace alignment.
const uintptr_t ALIGNMENT = 1 << 20;
template <typename TFloat>
struct Bbox
{
TFloat x1, y1, x2, y2;
};
typedef nvinfer1::DataType DType_t;
typedef enum
{
NCHW = 0,
NC4HW = 1
} DLayout_t;
typedef pluginStatus_t frcnnStatus_t;
#define DEBUG_RPN_ENABLE 0
#define FRCNN_ASSERT_PARAM(exp) \
do \
{ \
if (!(exp)) \
{ \
DEBUG_FPRINTF(stderr, "Bad param - " #exp ", %s:%d\n", __FILE__, __LINE__); \
return STATUS_BAD_PARAM; \
} \
} while (0)
#define DEBUG_FPRINTF(...) \
do \
{ \
if (DEBUG_RPN_ENABLE) \
{ \
fprintf(__VA_ARGS__); \
} \
} while (0)
#define CUDA_MEM_ALIGN 256
unsigned int hash(const void* array_, size_t size);
int8_t* alignPtr(int8_t* ptr, uintptr_t to);
__global__ void setOffset(int stride, int size, int* output);
frcnnStatus_t nms(hipStream_t stream,
const int N,
const int R,
const int preNmsTop,
const int nmsMaxOut,
const float iouThreshold,
const DType_t t_fgScores,
const DLayout_t l_fgScores,
void* fgScores,
const DType_t t_proposals,
const DLayout_t l_proposals,
const void* proposals,
void* workspace,
const DType_t t_rois,
void* rois);
int8_t* nextWorkspacePtr(int8_t* ptr, uintptr_t previousWorkspaceSize);
// Intersection-over-union of two corner-encoded boxes (x1,y1,x2,y2).
// Uses the legacy "+1" pixel-count convention, i.e. boxes are inclusive
// pixel ranges, so width = x2 - x1 + 1.  Callable from host and device.
template <typename TFloat>
__device__ __host__ inline float IoU(const Bbox<TFloat>& a, const Bbox<TFloat>& b)
{
    // overlap rectangle, clamped to zero extent when the boxes are disjoint
    TFloat left = max(a.x1, b.x1), right = min(a.x2, b.x2);
    TFloat top = max(a.y1, b.y1), bottom = min(a.y2, b.y2);
    TFloat width = max((TFloat)(right - left + (TFloat) 1.0), (TFloat) 0.0);
    TFloat height = max((TFloat)(bottom - top + (TFloat) 1.0), (TFloat) 0.0);
    TFloat interS = width * height;
    TFloat Sa = (a.x2 - a.x1 + (TFloat) 1) * (a.y2 - a.y1 + (TFloat) 1);
    TFloat Sb = (b.x2 - b.x1 + (TFloat) 1) * (b.y2 - b.y1 + (TFloat) 1);
    return (float) interS / (float) (Sa + Sb - interS);
}
// NMS KERNEL FOR SMALL BATCH SIZE {{{
// Greedy NMS over score-sorted proposals, one block per batch image.
// Each thread owns TSIZE boxes (strided by DIM) cached in registers; a
// shared "kept" flag array coordinates suppression.  The reference box
// walks forward over surviving boxes; the thread owning the reference box
// writes it to the output while all threads clear the flags of later boxes
// whose IoU with it exceeds nmsThres.
__global__ __launch_bounds__(DIM) void nmsKernel1(const int propSize,
        Bbox<T_PROPOSALS> const* __restrict__ preNmsProposals,
        T_ROIS* __restrict__ afterNmsProposals,
        const int preNmsTopN,
        const float nmsThres,
        const int afterNmsTopN)
{
    __shared__ bool kept_boxes[TSIZE * DIM];
    int kept = 0;                                // boxes emitted so far
    int batch_offset = blockIdx.x * propSize;    // start of this image's boxes
    int max_box_idx = batch_offset + preNmsTopN;
    int batch_offset_out = blockIdx.x * afterNmsTopN;
    int flag_idx[TSIZE];
    int boxes_idx[TSIZE];
    Bbox<T_PROPOSALS> cur_boxes[TSIZE];
    // initialize kept_boxes; out-of-range slots get sentinel indices
    // NOTE(review): -1.0f assigned to int slots converts to -1; harmless but
    // an integer literal would be clearer
#pragma unroll
    for (int i = 0; i < TSIZE; i++)
    {
        boxes_idx[i] = threadIdx.x + batch_offset + DIM * i;
        flag_idx[i] = threadIdx.x + DIM * i;
        if (boxes_idx[i] < max_box_idx)
        {
            cur_boxes[i] = preNmsProposals[boxes_idx[i]];
            kept_boxes[flag_idx[i]] = true;
        }
        else
        {
            kept_boxes[flag_idx[i]] = false;
            boxes_idx[i] = -1.0f;
            flag_idx[i] = -1.0f;
        }
    }
    int ref_box_idx = 0 + batch_offset;
    // remove the overlapped boxes
    while ((kept < afterNmsTopN) && (ref_box_idx < max_box_idx))
    {
        Bbox<T_PROPOSALS> ref_box;
        ref_box = preNmsProposals[ref_box_idx];
#pragma unroll
        for (int i = 0; i < TSIZE; i++)
        {
            if (boxes_idx[i] > ref_box_idx)
            {
                // suppress later boxes overlapping the reference too much
                if (IoU(ref_box, cur_boxes[i]) > nmsThres)
                {
                    kept_boxes[flag_idx[i]] = false;
                }
            }
            else if (boxes_idx[i] == ref_box_idx)
            {
                // the owner of the reference box emits it
                afterNmsProposals[(batch_offset_out + kept) * 4 + 0] = ref_box.x1;
                afterNmsProposals[(batch_offset_out + kept) * 4 + 1] = ref_box.y1;
                afterNmsProposals[(batch_offset_out + kept) * 4 + 2] = ref_box.x2;
                afterNmsProposals[(batch_offset_out + kept) * 4 + 3] = ref_box.y2;
            }
        }
        __syncthreads();
        // advance to the next surviving box
        // NOTE(review): the flag is read before the bound check, so the scan
        // may read kept_boxes[preNmsTopN]; in-bounds only while
        // preNmsTopN < TSIZE * DIM — confirm the launch guarantees this
        do
        {
            ref_box_idx++;
        }
        while (!kept_boxes[ref_box_idx - batch_offset] && ref_box_idx < max_box_idx);
        kept++;
    }
}
// }}}
// NMS KERNEL FOR LARGE BATCH SIZE {{{
// NMS kernel for large pre-NMS box counts (TSIZE 13..64). One block per
// image; each thread caches TSIZE boxes in registers and folds suppression
// decisions into a 64-bit mask 'del' (bit i == tile i's box was deleted).
// Inputs are expected pre-sorted by descending score (see nmsGpu).
template <typename T_PROPOSALS, typename T_ROIS, int DIM, int TSIZE>
__global__ __launch_bounds__(DIM) void nmsKernel2(const int propSize,
Bbox<T_PROPOSALS> const* __restrict__ proposals,
T_ROIS* __restrict__ filtered,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
// Boxes belonging to this block's image.
Bbox<T_PROPOSALS> const* cProposals = proposals + blockIdx.x * propSize;
Bbox<T_PROPOSALS> t[TSIZE];
uint64_t del = 0;
for (int i = 0; i < TSIZE; i++)
{
// NOTE(review): non-final tiles load unconditionally; this assumes
// propSize >= (TSIZE - 1) * DIM + threadIdx.x + 1 so reads stay in
// bounds — confirm against the launcher's tile selection.
if (i < TSIZE - 1 || i * DIM + threadIdx.x < preNmsTopN)
{
t[i] = cProposals[i * DIM + threadIdx.x];
}
}
__shared__ Bbox<T_PROPOSALS> last; // current reference box, broadcast to the block
__shared__ bool kept;              // whether the reference box survived
__shared__ int foundBatch;         // boxes emitted so far for this image
if (threadIdx.x == 0)
{
foundBatch = 0;
}
// Walk candidates in score order (tile i, lane j => global index i*DIM+j).
// The owning thread publishes each candidate via shared memory; then every
// thread marks its own strictly-later boxes that overlap too much.
for (int i = 0; i < TSIZE; i++)
{
for (int j = 0; j < DIM; j++)
{
int offset = i * DIM;
int index = offset + j;
if (index >= preNmsTopN)
{
break;
}
// Barrier before the owner writes 'last'/'kept' so prior readers are done.
__syncthreads();
if (threadIdx.x == j)
{
// Candidate survives iff no earlier kept box suppressed it.
kept = 0 == (del & ((uint64_t) 1 << i));
last = t[i];
if (kept)
{
int cnt = blockIdx.x * afterNmsTopN + foundBatch;
filtered[cnt * 4 + 0] = t[i].x1;
filtered[cnt * 4 + 1] = t[i].y1;
filtered[cnt * 4 + 2] = t[i].x2;
filtered[cnt * 4 + 3] = t[i].y2;
foundBatch++;
}
}
// Publish 'last'/'kept'/'foundBatch' to all threads.
__syncthreads();
// Uniform exit: foundBatch is shared and read after the barrier.
if (foundBatch == afterNmsTopN)
{
return;
}
if (kept)
{
Bbox<T_PROPOSALS> test = last;
for (int k = 0; k < TSIZE; k++)
{
// Only boxes strictly after the reference (lower score) are suppressed.
if (index < k * DIM + threadIdx.x
&& IoU<T_PROPOSALS>(test, t[k]) > nmsThres)
{
del |= (uint64_t) 1 << k;
}
}
}
}
}
}
// }}}
// NMS LAUNCH {{{
// Dispatches the NMS kernel for the given pre-NMS box count.
// Picks the template instantiation whose TSIZE (boxes handled per thread)
// covers preNmsTopN: instantiations 1..12 use the small-count kernel
// (nmsKernel1), 13..64 the large-count kernel (nmsKernel2).
// Requires preNmsTopN < 64 * 1024. Output buffer is zeroed first so unused
// ROI slots are well-defined.
template <typename T_PROPOSALS, DLayout_t L_PROPOSALS, typename T_ROIS>
frcnnStatus_t nmsLaunch(hipStream_t stream,
    const int batch,
    const int propSize,
    void* proposals,
    void* filtered,
    const int preNmsTopN,
    const float nmsThres,
    const int afterNmsTopN)
{
    const int blockSize = 1024;
#define P1(tsize) nmsKernel1<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
#define P2(tsize) nmsKernel2<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
    void (*kernel[64])(int, Bbox<T_PROPOSALS> const*, T_ROIS*, int, float, int) =
    {
        P1(1), P1(2), P1(3), P1(4), P1(5), P1(6), P1(7), P1(8), P1(9), P1(10), P1(11), P1(12), P2(13), P2(14), P2(15), P2(16),
        P2(17), P2(18), P2(19), P2(20), P2(21), P2(22), P2(23), P2(24), P2(25), P2(26), P2(27), P2(28), P2(29), P2(30), P2(31), P2(32),
        P2(33), P2(34), P2(35), P2(36), P2(37), P2(38), P2(39), P2(40), P2(41), P2(42), P2(43), P2(44), P2(45), P2(46), P2(47), P2(48),
        P2(49), P2(50), P2(51), P2(52), P2(53), P2(54), P2(55), P2(56), P2(57), P2(58), P2(59), P2(60), P2(61), P2(62), P2(63), P2(64)
    };
    FRCNN_ASSERT_PARAM(preNmsTopN < 64 * blockSize);
    CSC(hipMemsetAsync(filtered, 0, batch * afterNmsTopN * 4 * sizeof(T_ROIS), stream),
        STATUS_FAILURE);
    // Fix for a mangled hipify conversion: the original text had
    // "hipLaunchKernelGGL" spliced into the middle of the index expression,
    // which does not compile. Launch the selected kernel, one block per image.
    hipLaunchKernelGGL((kernel[(preNmsTopN + blockSize - 1) / blockSize - 1]),
        dim3(batch), dim3(blockSize), 0, stream,
        propSize,
        (Bbox<T_PROPOSALS>*) proposals,
        (T_ROIS*) filtered,
        preNmsTopN,
        nmsThres,
        afterNmsTopN);
    CSC(hipGetLastError(), STATUS_FAILURE);
    return STATUS_SUCCESS;
}
// }}}
// NMS GPU {{{
// Batched NMS front-end: builds per-image segment offsets, segment-sorts
// proposals by descending score with hipCUB, then launches the NMS kernel to
// emit the top nmsMaxOut boxes per image into 'rois'.
// 'workspace' must satisfy _proposalsForwardNMSWorkspaceSize; CUB's segmented
// sort needs the heavy ALIGNMENT (1 MiB) applied to each sub-buffer.
template <typename T_SCORES, typename T_ROIS>
frcnnStatus_t nmsGpu(hipStream_t stream,
    const int N,
    const int R,
    const int preNmsTop,
    const int nmsMaxOut,
    const float iouThreshold,
    void* fgScores,
    const void* proposals,
    void* workspace,
    void* rois)
{
    int8_t* vworkspace = alignPtr((int8_t*) workspace, ALIGNMENT);
    DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
    frcnnStatus_t error;
    DEBUG_PRINTF("&&&& [NMS] DISCARD\n");
    DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
    // Generate per-image segment offsets [0, R, 2R, ..., N*R] for the sort.
    int* offsets = (int*) vworkspace;
    hipLaunchKernelGGL(setOffset, dim3(1), dim3(1024), 0, stream, R, N + 1, offsets);
    CSC(hipGetLastError(), STATUS_FAILURE);
    // Advance past the offsets array. Fix: the original advanced by only
    // N + 1 BYTES over an array of N + 1 ints; the subsequent 1 MiB
    // alignment happened to mask the under-advance.
    vworkspace = vworkspace + (N + 1) * sizeof(int);
    vworkspace = alignPtr(vworkspace, ALIGNMENT);
    // First call only sizes CUB's temp storage; the second performs the sort.
    std::size_t tempStorageBytes = 0;
    hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
        NULL, tempStorageBytes,
        (T_SCORES*) fgScores, (T_SCORES*) fgScores,
        (Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposals,
        N * R, N,
        offsets, offsets + 1, 0, 8 * sizeof(T_SCORES), stream);
    CSC(hipGetLastError(), STATUS_FAILURE);
    T_SCORES* scoresOut = (T_SCORES*) vworkspace;
    vworkspace = (int8_t*) (scoresOut + N * R);
    vworkspace = alignPtr(vworkspace, ALIGNMENT);
    Bbox<T_ROIS>* proposalsOut = (Bbox<T_ROIS>*) vworkspace;
    vworkspace = (int8_t*) (proposalsOut + N * R);
    vworkspace = alignPtr(vworkspace, ALIGNMENT);
    // Sort scores (keys) and boxes (values) per image, out-of-place; the
    // remaining vworkspace serves as CUB temp storage.
    hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
        vworkspace, tempStorageBytes,
        (T_SCORES*) fgScores, (T_SCORES*) scoresOut,
        (Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposalsOut,
        N * R, N,
        offsets, offsets + 1,
        0, 8 * sizeof(T_SCORES), stream);
    CSC(hipGetLastError(), STATUS_FAILURE);
    DEBUG_PRINTF("&&&& [NMS] POST CUB\n");
    DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposalsOut, N * R * 4 * sizeof(float)));
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(scoresOut, N * R * sizeof(float)));
    error = nmsLaunch<T_ROIS, NC4HW, T_ROIS>(stream,
        N,
        R,
        proposalsOut,
        rois,
        preNmsTop,
        iouThreshold,
        nmsMaxOut);
    DEBUG_PRINTF("&&&& [NMS] POST LAUNCH\n");
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(rois, N * nmsMaxOut * 4 * sizeof(float)));
    if (error != STATUS_SUCCESS)
    {
        return error;
    }
    return STATUS_SUCCESS;
}
// }}}
// NMS LAUNCH CONFIG {{{
// Signature of a type/layout-specialized NMS entry point (an instantiation
// of nmsGpu), stored in nmsLaunchConfig and dispatched at runtime.
typedef frcnnStatus_t (*nmsFun)(hipStream_t,
const int, // N
const int, // R
const int, // preNmsTop
const int, // nmsMaxOut
const float, // iouThreshold
void*, // fgScores
const void*, // proposals,
void*, // workspace,
void*); // rois
// Describes one supported combination of data types and layouts for NMS,
// together with the nmsFun wrapper that implements it. Instances are
// compared against a query config built from runtime parameters.
struct nmsLaunchConfig
{
    DType_t t_fgScores;
    DLayout_t l_fgScores;
    DType_t t_proposals;
    DLayout_t l_proposals;
    DType_t t_rois;
    nmsFun function;
    // Registration constructor: binds types/layouts to an implementation.
    nmsLaunchConfig(DType_t t_fgScores,
        DLayout_t l_fgScores,
        DType_t t_proposals,
        DLayout_t l_proposals,
        DType_t t_rois,
        nmsFun function)
        : t_fgScores(t_fgScores)
        , l_fgScores(l_fgScores)
        , t_proposals(t_proposals)
        , l_proposals(l_proposals)
        , t_rois(t_rois)
        , function(function)
    {
    }
    // Query constructor (no implementation). Fix: initialize 'function' so
    // it is never read indeterminate.
    nmsLaunchConfig(DType_t t_fgScores,
        DLayout_t l_fgScores,
        DType_t t_proposals,
        DLayout_t l_proposals,
        DType_t t_rois)
        : t_fgScores(t_fgScores)
        , l_fgScores(l_fgScores)
        , t_proposals(t_proposals)
        , l_proposals(l_proposals)
        , t_rois(t_rois)
        , function(nullptr)
    {
    }
    // Configs match when all types and layouts agree; 'function' is ignored.
    // Marked const so comparison works on const objects/iterators.
    bool operator==(const nmsLaunchConfig& other) const
    {
        return (t_fgScores == other.t_fgScores) && (l_fgScores == other.l_fgScores)
            && (t_proposals == other.t_proposals) && (l_proposals == other.l_proposals)
            && (t_rois == other.t_rois);
    }
};
static std::vector<nmsLaunchConfig> nmsLCVec;
#define FLOAT32 nvinfer1::DataType::kFLOAT
// Decodes RPN regression deltas into absolute proposal boxes.
// Thread layout: grid-stride loop over (N, A, H, W) anchor positions, where
// A = anc_size_num * anc_ratio_num. RPN_regr is laid out (N, A*4, H, W);
// proposal_out is written as (N, A, H, W, 4) in (x1, y1, x2, y2) order.
// Boxes are clipped to the input image; boxes smaller than bbox_min_size in
// either dimension get their score forced to -inf so sorting discards them.
// RPN_prob is unused here (scores are handled by extractFgScores_gpu).
__global__ void _inverse_transform_gpu(const float* RPN_prob, const float* RPN_regr, int N,
    int INPUT_H, int INPUT_W, int RPN_H, int RPN_W, float RPN_STD_SCALING, int RPN_STRIDE,
    float* ANCHOR_SIZES, int anc_size_num, float* ANCHOR_RATIOS, int anc_ratio_num, float bbox_min_size,
    float* fg_scores, float* proposal_out)
{
    int nthreads = N * RPN_H * RPN_W * anc_size_num * anc_ratio_num;
    int num_ancs = anc_size_num * anc_ratio_num;
    for (int out_idx = threadIdx.x + blockDim.x * blockIdx.x; out_idx < nthreads;
         out_idx += blockDim.x * gridDim.x)
    {
        // Decompose flat index: input RPN_regr is (N, A4, H, W); threads map (N, A, H, W).
        int idx = out_idx;
        int w = idx % RPN_W;
        idx /= RPN_W;
        int h = idx % RPN_H;
        idx /= RPN_H;
        int a = idx % num_ancs;
        int n = idx / num_ancs;
        // Undo the RPN_STD_SCALING applied during training.
        int ptr_1 = ((((n * num_ancs) + a) * 4) * RPN_H + h) * RPN_W + w;
        int ptr_2 = ((((n * num_ancs) + a) * 4 + 1) * RPN_H + h) * RPN_W + w;
        int ptr_3 = ((((n * num_ancs) + a) * 4 + 2) * RPN_H + h) * RPN_W + w;
        int ptr_4 = ((((n * num_ancs) + a) * 4 + 3) * RPN_H + h) * RPN_W + w;
        float tx = RPN_regr[ptr_1] / RPN_STD_SCALING;
        float ty = RPN_regr[ptr_2] / RPN_STD_SCALING;
        float tw = RPN_regr[ptr_3] / RPN_STD_SCALING;
        float th = RPN_regr[ptr_4] / RPN_STD_SCALING;
        // Inverse transform: anchor center/size plus predicted deltas.
        int ar = a % anc_ratio_num;
        int as = a / anc_ratio_num;
        float anchor_w = ANCHOR_SIZES[as] * ANCHOR_RATIOS[ar];
        float anchor_h = ANCHOR_SIZES[as] / ANCHOR_RATIOS[ar];
        float anchor_cx = (w + 0.5f) * RPN_STRIDE;
        float anchor_cy = (h + 0.5f) * RPN_STRIDE;
        float cx1 = anchor_cx + anchor_w * tx;
        float cy1 = anchor_cy + anchor_h * ty;
        float w1 = __expf(tw) * anchor_w;
        float h1 = __expf(th) * anchor_h;
        // Convert (cx, cy, w, h) to corners: (tx, ty) = top-left, (tw, th) = bottom-right.
        tx = cx1 - w1 / 2.0f;
        ty = cy1 - h1 / 2.0f;
        tw = w1;
        th = h1;
        tw += tx;
        th += ty;
        // Clip to image bounds: min 0 ...
        tx = (tx >= 0.0f) ? tx : 0.0f;
        ty = (ty >= 0.0f) ? ty : 0.0f;
        tw = (tw >= 0.0f) ? tw : 0.0f;
        th = (th >= 0.0f) ? th : 0.0f;
        // ... and max (INPUT_W-1, INPUT_H-1).
        tx = (tx <= INPUT_W - 1.0f) ? tx : (INPUT_W - 1.0f);
        ty = (ty <= INPUT_H - 1.0f) ? ty : (INPUT_H - 1.0f);
        tw = (tw <= INPUT_W - 1.0f) ? tw : (INPUT_W - 1.0f);
        th = (th <= INPUT_H - 1.0f) ? th : (INPUT_H - 1.0f);
        // Filter small boxes by forcing their score to -inf. Fix: use the
        // __int_as_float intrinsic instead of pointer type punning
        // (*(float*)&int), which is undefined behavior under strict aliasing.
        float ninf = __int_as_float(0xff800000);
        if (tw - tx <= bbox_min_size || th - ty <= bbox_min_size)
        {
            fg_scores[out_idx] = ninf;
        }
        // Write decoded box; output shape (N, A, H, W, 4).
        proposal_out[out_idx * 4] = tx;
        proposal_out[out_idx * 4 + 1] = ty;
        proposal_out[out_idx * 4 + 2] = tw;
        proposal_out[out_idx * 4 + 3] = th;
    }
}
// Host wrapper: launches _inverse_transform_gpu with one thread per
// (image, anchor, y, x) position; the kernel's grid-stride loop absorbs any
// remainder.
void _inverse_transform_wrapper(const float* RPN_prob, const float* RPN_regr, int N, int INPUT_H,
    int INPUT_W, int RPN_H, int RPN_W, float RPN_STD_SCALING, int RPN_STRIDE, float* ANCHOR_SIZES,
    int anc_size_num, float* ANCHOR_RATIOS, int anc_ratio_num, float bbox_min_size, float* fg_scores,
    float* proposal_out, hipStream_t stream)
{
    const int threadsPerBlock = 1024;
    const int totalThreads = N * anc_size_num * anc_ratio_num * RPN_H * RPN_W;
    const int numBlocks = (totalThreads + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(_inverse_transform_gpu, dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
        RPN_prob, RPN_regr, N, INPUT_H, INPUT_W, RPN_H, RPN_W, RPN_STD_SCALING, RPN_STRIDE,
        ANCHOR_SIZES, anc_size_num, ANCHOR_RATIOS, anc_ratio_num, bbox_min_size, fg_scores,
        proposal_out);
}
// Scratch bytes needed by the NMS stage for an N x A x H x W score map.
// The 5*5 factor plus the 4 MiB pad (1 << 22) leaves slack for CUB sort
// temp storage and the 1 MiB sub-buffer alignment; nmsMaxOut is currently
// unused but kept for interface stability.
size_t _proposalsForwardNMSWorkspaceSize(int N,
    int A,
    int H,
    int W,
    int nmsMaxOut)
{
    // Fix: promote to size_t before multiplying so the product cannot
    // overflow 32-bit int for large batch/feature-map sizes.
    return static_cast<size_t>(N) * A * H * W * 5 * 5 * sizeof(float) + (1 << 22);
}
// Bytes needed to hold N x A x H x W decoded boxes (4 floats per box).
size_t _proposalsForwardBboxWorkspaceSize(int N, int A, int H, int W)
{
    // Fix: promote to size_t before multiplying to avoid 32-bit overflow.
    return static_cast<size_t>(N) * A * H * W * 4 * sizeof(float);
}
// Bytes needed to hold N x A x H x W foreground scores (one float each).
size_t _proposalForwardFgScoresWorkspaceSize(int N, int A, int H, int W)
{
    // Fix: promote to size_t before multiplying to avoid 32-bit overflow.
    return static_cast<size_t>(N) * A * H * W * sizeof(float);
}
// Device-buffer bytes for the concatenated anchor tables: all size entries
// followed by all ratio entries, each stored as a float.
size_t anchors_buf_size(int anc_size_num, int anc_ratio_num)
{
    const int total_anchor_params = anc_size_num + anc_ratio_num;
    return sizeof(float) * total_anchor_params;
}
size_t calculateTotalWorkspaceSize(size_t* workspaces, int count);
// Total scratch bytes for proposal inference: the NMS workspace, the decoded
// bbox buffer, the foreground-score buffer, and the anchor parameter table,
// combined by calculateTotalWorkspaceSize.
size_t _get_workspace_size(int N,
    int anc_size_num,
    int anc_ratio_num,
    int H,
    int W,
    int nmsMaxOut)
{
    const int A = anc_size_num * anc_ratio_num; // anchors per spatial position
    size_t workspaces[4] = {
        _proposalsForwardNMSWorkspaceSize(N, A, H, W, nmsMaxOut),
        _proposalsForwardBboxWorkspaceSize(N, A, H, W),
        _proposalForwardFgScoresWorkspaceSize(N, A, H, W),
        anchors_buf_size(anc_size_num, anc_ratio_num)};
    return calculateTotalWorkspaceSize(workspaces, 4);
}
// Copies the foreground scores (layout (N, A, H, W)) into fgScores.
// The original looped over the batch issuing N device-to-device copies with
// identical source/destination offsets; since the regions are contiguous,
// that collapses into a single async copy (batching small copies is much
// cheaper than N separate ones).
template <typename T>
frcnnStatus_t extractFgScores_gpu(hipStream_t stream,
    int N,
    int A,
    int H,
    int W,
    const void* scores,
    void* fgScores)
{
    // Promote to size_t before multiplying to avoid 32-bit overflow.
    size_t totalBytes = static_cast<size_t>(N) * A * H * W * sizeof(T);
    CSC(hipMemcpyAsync(fgScores, scores, totalBytes, hipMemcpyDeviceToDevice, stream),
        STATUS_FAILURE);
    return STATUS_SUCCESS;
}
// Uploads the host-side anchor tables into one contiguous device buffer:
// anc_size_num sizes first, then anc_ratio_num ratios immediately after.
// NOTE(review): the hipMemcpyAsync return codes are not checked here, and
// the host arrays must outlive the async copies (until the stream drains).
void _copy_anchors_to_gpu(hipStream_t stream, float* ANCHOR_SIZES, int anc_size_num,
    float* ANCHOR_RATIOS, int anc_ratio_num, void* anchor_size_buf)
{
    float* dev_sizes = static_cast<float*>(anchor_size_buf);
    float* dev_ratios = dev_sizes + anc_size_num;
    hipMemcpyAsync(dev_sizes, ANCHOR_SIZES, sizeof(float) * anc_size_num,
        hipMemcpyHostToDevice, stream);
    hipMemcpyAsync(dev_ratios, ANCHOR_RATIOS, sizeof(float) * anc_ratio_num,
        hipMemcpyHostToDevice, stream);
}
// Converts ROIs in place from absolute pixel corners (x1, y1, x2, y2) to
// normalized (y1, x1, y2, x2) in [0, 1] — note the deliberate x/y swap.
// Grid-stride loop: one iteration per box.
__global__ void _normalize_rois_kernel(float* roi_after_nms, int nthreads, int width, int height)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < nthreads; idx += stride)
    {
        float* box = roi_after_nms + idx * 4;
        const float x1 = box[0];
        const float y1 = box[1];
        const float x2 = box[2];
        const float y2 = box[3];
        box[0] = y1 / (height - 1.0f);
        box[1] = x1 / (width - 1.0f);
        box[2] = y2 / (height - 1.0f);
        box[3] = x2 / (width - 1.0f);
    }
}
// Host wrapper: normalizes all n * max_box_num ROIs in place on 'stream'.
void _normalize_rois(float* roi_after_nms, int n, int max_box_num, int input_width,
    int input_height, hipStream_t stream)
{
    const int threadsPerBlock = 1024;
    const int totalBoxes = n * max_box_num;
    const int numBlocks = (totalBoxes + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(_normalize_rois_kernel, dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
        roi_after_nms, totalBoxes, input_width, input_height);
}
// End-to-end RPN proposal generation on the GPU:
//   1. carve sub-buffers out of 'workspace' (NMS scratch, decoded boxes,
//      fg scores, anchor tables),
//   2. upload anchor sizes/ratios,
//   3. copy foreground scores out of rpn_prob,
//   4. decode rpn_regr deltas into absolute, clipped boxes,
//   5. run batched NMS keeping MAX_BOX_NUM boxes per image,
//   6. normalize the surviving ROIs to [0, 1] (y, x order) in 'output'.
// Returns 0 on success; asserts on any stage failure.
int proposalInference_gpu(
hipStream_t stream,
const void* rpn_prob,
const void* rpn_regr,
int batch_size,
int input_height,
int input_width,
int rpn_height,
int rpn_width,
int MAX_BOX_NUM,
int RPN_PRE_NMS_TOP_N,
float* ANCHOR_SIZES,
int anc_size_num,
float* ANCHOR_RATIOS,
int anc_ratio_num,
float rpn_std_scaling,
int rpn_stride,
float bbox_min_size,
float nms_iou_threshold,
void * workspace,
void* output)
{
// Partition the caller-provided workspace; sub-buffer sizes must agree
// with _get_workspace_size.
size_t nmsWorkspaceSize = _proposalsForwardNMSWorkspaceSize(batch_size, anc_size_num * anc_ratio_num,
rpn_height, rpn_width, MAX_BOX_NUM);
void* nmsWorkspace = workspace;
size_t proposalsSize = _proposalsForwardBboxWorkspaceSize(batch_size, anc_size_num * anc_ratio_num,
rpn_height, rpn_width);
const DType_t t_proposals = nvinfer1::DataType::kFLOAT;
const DLayout_t l_proposals = NC4HW;
void* proposals = nextWorkspacePtr((int8_t*) nmsWorkspace, nmsWorkspaceSize);
void* fg_scores = nextWorkspacePtr((int8_t*) proposals, proposalsSize);
size_t fg_scores_size = _proposalForwardFgScoresWorkspaceSize(batch_size,
anc_size_num * anc_ratio_num, rpn_height, rpn_width);
void* anchor_size_buf = nextWorkspacePtr((int8_t*) fg_scores, fg_scores_size);
// Ratios live immediately after the sizes in the same device buffer.
void* anchor_ratio_buf = static_cast<void*>(static_cast<float*>(anchor_size_buf) + anc_size_num);
frcnnStatus_t status;
_copy_anchors_to_gpu(stream, ANCHOR_SIZES, anc_size_num, ANCHOR_RATIOS, anc_ratio_num,
anchor_size_buf);
// Pull foreground scores into a mutable buffer (NMS writes -inf markers
// and sorts them in place).
status = extractFgScores_gpu<float>(stream,
batch_size,
anc_size_num * anc_ratio_num,
rpn_height,
rpn_width,
rpn_prob,
fg_scores);
ASSERT(status == 0);
// Decode deltas -> absolute clipped boxes; small boxes get score -inf.
_inverse_transform_wrapper(static_cast<const float*>(rpn_prob), static_cast<const float*>(rpn_regr),
batch_size, input_height, input_width, rpn_height, rpn_width, rpn_std_scaling, rpn_stride,
static_cast<float*>(anchor_size_buf), anc_size_num, static_cast<float*>(anchor_ratio_buf),
anc_ratio_num, bbox_min_size, static_cast<float*>(fg_scores), static_cast<float*>(proposals),
stream);
// Batched sort + NMS; keeps MAX_BOX_NUM boxes per image in 'output'.
status = nms(stream,
batch_size,
anc_size_num * anc_ratio_num * rpn_height * rpn_width,
RPN_PRE_NMS_TOP_N,
MAX_BOX_NUM,
nms_iou_threshold,
nvinfer1::DataType::kFLOAT,
NCHW,
fg_scores,
t_proposals,
l_proposals,
proposals,
workspace,
nvinfer1::DataType::kFLOAT,
output);
ASSERT(status == 0);
// Final ROIs are normalized to [0, 1] with (y, x) ordering.
_normalize_rois(static_cast<float*>(output), batch_size, MAX_BOX_NUM, input_width, input_height,
stream);
return 0;
}
| 1c1152d8ae2b502d674eb851cbd0905221cb16ce.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <assert.h>
#include <stdio.h>
#include <cub/cub.cuh>
#include <functional>
#include <stdint.h>
#include "NvInfer.h"
#include "plugin.h"
// CUB's bug workaround:
// To work properly for large batch size CUB segmented sort needs ridiculous
// workspace alignment.
const uintptr_t ALIGNMENT = 1 << 20;
template <typename TFloat>
struct Bbox
{
TFloat x1, y1, x2, y2;
};
typedef nvinfer1::DataType DType_t;
typedef enum
{
NCHW = 0,
NC4HW = 1
} DLayout_t;
typedef pluginStatus_t frcnnStatus_t;
#define DEBUG_RPN_ENABLE 0
#define FRCNN_ASSERT_PARAM(exp) \
do \
{ \
if (!(exp)) \
{ \
DEBUG_FPRINTF(stderr, "Bad param - " #exp ", %s:%d\n", __FILE__, __LINE__); \
return STATUS_BAD_PARAM; \
} \
} while (0)
#define DEBUG_FPRINTF(...) \
do \
{ \
if (DEBUG_RPN_ENABLE) \
{ \
fprintf(__VA_ARGS__); \
} \
} while (0)
#define CUDA_MEM_ALIGN 256
unsigned int hash(const void* array_, size_t size);
int8_t* alignPtr(int8_t* ptr, uintptr_t to);
__global__ void setOffset(int stride, int size, int* output);
frcnnStatus_t nms(cudaStream_t stream,
const int N,
const int R,
const int preNmsTop,
const int nmsMaxOut,
const float iouThreshold,
const DType_t t_fgScores,
const DLayout_t l_fgScores,
void* fgScores,
const DType_t t_proposals,
const DLayout_t l_proposals,
const void* proposals,
void* workspace,
const DType_t t_rois,
void* rois);
int8_t* nextWorkspacePtr(int8_t* ptr, uintptr_t previousWorkspaceSize);
template <typename TFloat>
__device__ __host__ inline float IoU(const Bbox<TFloat>& a, const Bbox<TFloat>& b)
{
TFloat left = max(a.x1, b.x1), right = min(a.x2, b.x2);
TFloat top = max(a.y1, b.y1), bottom = min(a.y2, b.y2);
TFloat width = max((TFloat)(right - left + (TFloat) 1.0), (TFloat) 0.0);
TFloat height = max((TFloat)(bottom - top + (TFloat) 1.0), (TFloat) 0.0);
TFloat interS = width * height;
TFloat Sa = (a.x2 - a.x1 + (TFloat) 1) * (a.y2 - a.y1 + (TFloat) 1);
TFloat Sb = (b.x2 - b.x1 + (TFloat) 1) * (b.y2 - b.y1 + (TFloat) 1);
return (float) interS / (float) (Sa + Sb - interS);
}
// NMS KERNEL FOR SMALL BATCH SIZE {{{
template <typename T_PROPOSALS, typename T_ROIS, int DIM, int TSIZE>
__global__ __launch_bounds__(DIM) void nmsKernel1(const int propSize,
Bbox<T_PROPOSALS> const* __restrict__ preNmsProposals,
T_ROIS* __restrict__ afterNmsProposals,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
__shared__ bool kept_boxes[TSIZE * DIM];
int kept = 0;
int batch_offset = blockIdx.x * propSize;
int max_box_idx = batch_offset + preNmsTopN;
int batch_offset_out = blockIdx.x * afterNmsTopN;
int flag_idx[TSIZE];
int boxes_idx[TSIZE];
Bbox<T_PROPOSALS> cur_boxes[TSIZE];
// initialize kept_boxes
#pragma unroll
for (int i = 0; i < TSIZE; i++)
{
boxes_idx[i] = threadIdx.x + batch_offset + DIM * i;
flag_idx[i] = threadIdx.x + DIM * i;
if (boxes_idx[i] < max_box_idx)
{
cur_boxes[i] = preNmsProposals[boxes_idx[i]];
kept_boxes[flag_idx[i]] = true;
}
else
{
kept_boxes[flag_idx[i]] = false;
boxes_idx[i] = -1.0f;
flag_idx[i] = -1.0f;
}
}
int ref_box_idx = 0 + batch_offset;
// remove the overlapped boxes
while ((kept < afterNmsTopN) && (ref_box_idx < max_box_idx))
{
Bbox<T_PROPOSALS> ref_box;
ref_box = preNmsProposals[ref_box_idx];
#pragma unroll
for (int i = 0; i < TSIZE; i++)
{
if (boxes_idx[i] > ref_box_idx)
{
if (IoU(ref_box, cur_boxes[i]) > nmsThres)
{
kept_boxes[flag_idx[i]] = false;
}
}
else if (boxes_idx[i] == ref_box_idx)
{
afterNmsProposals[(batch_offset_out + kept) * 4 + 0] = ref_box.x1;
afterNmsProposals[(batch_offset_out + kept) * 4 + 1] = ref_box.y1;
afterNmsProposals[(batch_offset_out + kept) * 4 + 2] = ref_box.x2;
afterNmsProposals[(batch_offset_out + kept) * 4 + 3] = ref_box.y2;
}
}
__syncthreads();
do
{
ref_box_idx++;
}
while (!kept_boxes[ref_box_idx - batch_offset] && ref_box_idx < max_box_idx);
kept++;
}
}
// }}}
// NMS KERNEL FOR LARGE BATCH SIZE {{{
template <typename T_PROPOSALS, typename T_ROIS, int DIM, int TSIZE>
__global__ __launch_bounds__(DIM) void nmsKernel2(const int propSize,
Bbox<T_PROPOSALS> const* __restrict__ proposals,
T_ROIS* __restrict__ filtered,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
Bbox<T_PROPOSALS> const* cProposals = proposals + blockIdx.x * propSize;
Bbox<T_PROPOSALS> t[TSIZE];
uint64_t del = 0;
for (int i = 0; i < TSIZE; i++)
{
if (i < TSIZE - 1 || i * DIM + threadIdx.x < preNmsTopN)
{
t[i] = cProposals[i * DIM + threadIdx.x];
}
}
__shared__ Bbox<T_PROPOSALS> last;
__shared__ bool kept;
__shared__ int foundBatch;
if (threadIdx.x == 0)
{
foundBatch = 0;
}
for (int i = 0; i < TSIZE; i++)
{
for (int j = 0; j < DIM; j++)
{
int offset = i * DIM;
int index = offset + j;
if (index >= preNmsTopN)
{
break;
}
__syncthreads();
if (threadIdx.x == j)
{
kept = 0 == (del & ((uint64_t) 1 << i));
last = t[i];
if (kept)
{
int cnt = blockIdx.x * afterNmsTopN + foundBatch;
filtered[cnt * 4 + 0] = t[i].x1;
filtered[cnt * 4 + 1] = t[i].y1;
filtered[cnt * 4 + 2] = t[i].x2;
filtered[cnt * 4 + 3] = t[i].y2;
foundBatch++;
}
}
__syncthreads();
if (foundBatch == afterNmsTopN)
{
return;
}
if (kept)
{
Bbox<T_PROPOSALS> test = last;
for (int k = 0; k < TSIZE; k++)
{
if (index < k * DIM + threadIdx.x
&& IoU<T_PROPOSALS>(test, t[k]) > nmsThres)
{
del |= (uint64_t) 1 << k;
}
}
}
}
}
}
// }}}
// NMS LAUNCH {{{
template <typename T_PROPOSALS, DLayout_t L_PROPOSALS, typename T_ROIS>
frcnnStatus_t nmsLaunch(cudaStream_t stream,
const int batch,
const int propSize,
void* proposals,
void* filtered,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
const int blockSize = 1024;
#define P1(tsize) nmsKernel1<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
#define P2(tsize) nmsKernel2<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
void (*kernel[64])(int, Bbox<T_PROPOSALS> const*, T_ROIS*, int, float, int) =
{
P1(1), P1(2), P1(3), P1(4), P1(5), P1(6), P1(7), P1(8), P1(9), P1(10), P1(11), P1(12), P2(13), P2(14), P2(15), P2(16),
P2(17), P2(18), P2(19), P2(20), P2(21), P2(22), P2(23), P2(24), P2(25), P2(26), P2(27), P2(28), P2(29), P2(30), P2(31), P2(32),
P2(33), P2(34), P2(35), P2(36), P2(37), P2(38), P2(39), P2(40), P2(41), P2(42), P2(43), P2(44), P2(45), P2(46), P2(47), P2(48),
P2(49), P2(50), P2(51), P2(52), P2(53), P2(54), P2(55), P2(56), P2(57), P2(58), P2(59), P2(60), P2(61), P2(62), P2(63), P2(64)
};
FRCNN_ASSERT_PARAM(preNmsTopN < 64 * blockSize);
CSC(cudaMemsetAsync(filtered, 0, batch * afterNmsTopN * 4 * sizeof(T_ROIS), stream),
STATUS_FAILURE);
kernel[(preNmsTopN + blockSize - 1) / blockSize - 1] <<< batch, blockSize, 0, stream>>>(propSize,
(Bbox<T_PROPOSALS>*) proposals,
(T_ROIS*) filtered,
preNmsTopN,
nmsThres,
afterNmsTopN);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// }}}
// NMS GPU {{{
template <typename T_SCORES, typename T_ROIS>
frcnnStatus_t nmsGpu(cudaStream_t stream,
const int N,
const int R,
const int preNmsTop,
const int nmsMaxOut,
const float iouThreshold,
void* fgScores,
const void* proposals,
void* workspace,
void* rois)
{
int8_t* vworkspace = alignPtr((int8_t*) workspace, ALIGNMENT);
DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
frcnnStatus_t error;
DEBUG_PRINTF("&&&& [NMS] DISCARD\n");
DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
// Generate offsets
int* offsets = (int*) vworkspace;
setOffset <<< 1, 1024, 0, stream>>>(R, N + 1, offsets);
CSC(cudaGetLastError(), STATUS_FAILURE);
vworkspace = vworkspace + N + 1;
vworkspace = alignPtr(vworkspace, ALIGNMENT);
// Sort (batched)
std::size_t tempStorageBytes = 0;
cub::DeviceSegmentedRadixSort::SortPairsDescending(
NULL, tempStorageBytes,
(T_SCORES*) fgScores, (T_SCORES*) fgScores,
(Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposals,
N * R, N,
offsets, offsets + 1, 0, 8 * sizeof(T_SCORES), stream);
CSC(cudaGetLastError(), STATUS_FAILURE);
T_SCORES* scoresOut = (T_SCORES*) vworkspace;
vworkspace = (int8_t*) (scoresOut + N * R);
vworkspace = alignPtr(vworkspace, ALIGNMENT);
Bbox<T_ROIS>* proposalsOut = (Bbox<T_ROIS>*) vworkspace;
vworkspace = (int8_t*) (proposalsOut + N * R);
vworkspace = alignPtr(vworkspace, ALIGNMENT);
cub::DeviceSegmentedRadixSort::SortPairsDescending(
vworkspace, tempStorageBytes,
(T_SCORES*) fgScores, (T_SCORES*) scoresOut,
(Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposalsOut,
N * R, N,
offsets, offsets + 1,
0, 8 * sizeof(T_SCORES), stream);
CSC(cudaGetLastError(), STATUS_FAILURE);
DEBUG_PRINTF("&&&& [NMS] POST CUB\n");
DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposalsOut, N * R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(scoresOut, N * R * sizeof(float)));
error = nmsLaunch<T_ROIS, NC4HW, T_ROIS>(stream,
N,
R,
proposalsOut,
rois,
preNmsTop,
iouThreshold,
nmsMaxOut);
DEBUG_PRINTF("&&&& [NMS] POST LAUNCH\n");
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(rois, N * nmsMaxOut * 4 * sizeof(float)));
if (error != STATUS_SUCCESS)
{
return error;
}
return STATUS_SUCCESS;
}
// }}}
// NMS LAUNCH CONFIG {{{
typedef frcnnStatus_t (*nmsFun)(cudaStream_t,
const int, // N
const int, // R
const int, // preNmsTop
const int, // nmsMaxOut
const float, // iouThreshold
void*, // fgScores
const void*, // proposals,
void*, // workspace,
void*); // rois
struct nmsLaunchConfig
{
DType_t t_fgScores;
DLayout_t l_fgScores;
DType_t t_proposals;
DLayout_t l_proposals;
DType_t t_rois;
nmsFun function;
nmsLaunchConfig(DType_t t_fgScores,
DLayout_t l_fgScores,
DType_t t_proposals,
DLayout_t l_proposals,
DType_t t_rois,
nmsFun function)
: t_fgScores(t_fgScores)
, l_fgScores(l_fgScores)
, t_proposals(t_proposals)
, l_proposals(l_proposals)
, t_rois(t_rois)
, function(function)
{
}
nmsLaunchConfig(DType_t t_fgScores,
DLayout_t l_fgScores,
DType_t t_proposals,
DLayout_t l_proposals,
DType_t t_rois)
: t_fgScores(t_fgScores)
, l_fgScores(l_fgScores)
, t_proposals(t_proposals)
, l_proposals(l_proposals)
, t_rois(t_rois)
{
}
bool operator==(const nmsLaunchConfig& other)
{
return (t_fgScores == other.t_fgScores) && (l_fgScores == other.l_fgScores)
&& (t_proposals == other.t_proposals) && (l_proposals == other.l_proposals)
&& (t_rois == other.t_rois);
}
};
static std::vector<nmsLaunchConfig> nmsLCVec;
#define FLOAT32 nvinfer1::DataType::kFLOAT
__global__ void _inverse_transform_gpu(const float* RPN_prob, const float* RPN_regr, int N,
int INPUT_H, int INPUT_W, int RPN_H, int RPN_W, float RPN_STD_SCALING, int RPN_STRIDE,
float* ANCHOR_SIZES, int anc_size_num, float* ANCHOR_RATIOS, int anc_ratio_num, float bbox_min_size,
float* fg_scores, float* proposal_out)
{
int nthreads = N * RPN_H * RPN_W * anc_size_num * anc_ratio_num;
int num_ancs = anc_size_num * anc_ratio_num;
for (int out_idx = threadIdx.x + blockDim.x * blockIdx.x; out_idx < nthreads;
out_idx += blockDim.x * gridDim.x)
{
//input RPN_regr: (N, A4, H, W), thread: (N, A, H, W)
int idx = out_idx;
int w = idx % RPN_W;
idx /= RPN_W;
int h = idx % RPN_H;
idx /= RPN_H;
int a = idx % num_ancs;
int n = idx / num_ancs;
// normalize by RPN_STD_SCALING
int ptr_1 = ((((n * num_ancs) + a) * 4) * RPN_H + h) * RPN_W + w;
int ptr_2 = ((((n * num_ancs) + a) * 4 + 1) * RPN_H + h) * RPN_W + w;
int ptr_3 = ((((n * num_ancs) + a) * 4 + 2) * RPN_H + h) * RPN_W + w;
int ptr_4 = ((((n * num_ancs) + a) * 4 + 3) * RPN_H + h) * RPN_W + w;
float tx = RPN_regr[ptr_1] / RPN_STD_SCALING;
float ty = RPN_regr[ptr_2] / RPN_STD_SCALING;
float tw = RPN_regr[ptr_3] / RPN_STD_SCALING;
float th = RPN_regr[ptr_4] / RPN_STD_SCALING;
// do inverse transform
int ar = a % anc_ratio_num;
int as = a / anc_ratio_num;
float anchor_w = ANCHOR_SIZES[as] * ANCHOR_RATIOS[ar];
float anchor_h = ANCHOR_SIZES[as] / ANCHOR_RATIOS[ar];
float anchor_cx = (w + 0.5f) * RPN_STRIDE;
float anchor_cy = (h + 0.5f) * RPN_STRIDE;
float cx1 = anchor_cx + anchor_w * tx;
float cy1 = anchor_cy + anchor_h * ty;
float w1 = __expf(tw) * anchor_w;
float h1 = __expf(th) * anchor_h;
tx = cx1 - w1 / 2.0f;
ty = cy1 - h1 / 2.0f;
tw = w1;
th = h1;
tw += tx;
th += ty;
// clip to min
tx = (tx >= 0.0f) ? tx : 0.0f;
ty = (ty >= 0.0f) ? ty : 0.0f;
tw = (tw >= 0.0f) ? tw : 0.0f;
th = (th >= 0.0f) ? th : 0.0f;
//clip to max
tx = (tx <= INPUT_W - 1.0f) ? tx : (INPUT_W - 1.0f);
ty = (ty <= INPUT_H - 1.0f) ? ty : (INPUT_H - 1.0f);
tw = (tw <= INPUT_W - 1.0f) ? tw : (INPUT_W - 1.0f);
th = (th <= INPUT_H - 1.0f) ? th : (INPUT_H - 1.0f);
// filter out small boxes by setting the confidence to -inf
int ininf = 0xff800000;
float ninf = *(float*) &ininf;
if (tw - tx <= bbox_min_size || th - ty <= bbox_min_size)
{
fg_scores[out_idx] = ninf;
}
// copy to proposal_out, output shape: (N, A, H, W, 4)
proposal_out[out_idx * 4] = tx;
proposal_out[out_idx * 4 + 1] = ty;
proposal_out[out_idx * 4 + 2] = tw;
proposal_out[out_idx * 4 + 3] = th;
}
}
void _inverse_transform_wrapper(const float* RPN_prob, const float* RPN_regr, int N, int INPUT_H,
int INPUT_W, int RPN_H, int RPN_W, float RPN_STD_SCALING, int RPN_STRIDE, float* ANCHOR_SIZES,
int anc_size_num, float* ANCHOR_RATIOS, int anc_ratio_num, float bbox_min_size, float* fg_scores,
float* proposal_out, cudaStream_t stream)
{
const int block_size = 1024;
const int grid_size = (N * anc_size_num * anc_ratio_num * RPN_H * RPN_W + block_size - 1) /
(block_size);
_inverse_transform_gpu <<< grid_size, block_size, 0, stream>>> (RPN_prob, RPN_regr, N, INPUT_H,
INPUT_W, RPN_H, RPN_W, RPN_STD_SCALING, RPN_STRIDE, ANCHOR_SIZES, anc_size_num, ANCHOR_RATIOS,
anc_ratio_num, bbox_min_size, fg_scores, proposal_out);
}
size_t _proposalsForwardNMSWorkspaceSize(int N,
int A,
int H,
int W,
int nmsMaxOut)
{
return N * A * H * W * 5 * 5 * sizeof(float) + (1 << 22);
}
size_t _proposalsForwardBboxWorkspaceSize(int N, int A, int H, int W)
{
return N * A * H * W * 4 * sizeof(float);
}
size_t _proposalForwardFgScoresWorkspaceSize(int N, int A, int H, int W)
{
return N * A * H * W * sizeof(float);
}
size_t anchors_buf_size(int anc_size_num, int anc_ratio_num)
{
return (anc_size_num + anc_ratio_num) * sizeof(float);
}
size_t calculateTotalWorkspaceSize(size_t* workspaces, int count);
size_t _get_workspace_size(int N,
int anc_size_num,
int anc_ratio_num,
int H,
int W,
int nmsMaxOut)
{
size_t wss[4];
int A = anc_size_num * anc_ratio_num;
wss[0] = _proposalsForwardNMSWorkspaceSize(N, A, H, W, nmsMaxOut);
wss[1] = _proposalsForwardBboxWorkspaceSize(N, A, H, W);
wss[2] = _proposalForwardFgScoresWorkspaceSize(N, A, H, W);
wss[3] = anchors_buf_size(anc_size_num, anc_ratio_num);
return calculateTotalWorkspaceSize(wss, 4);
}
template <typename T>
frcnnStatus_t extractFgScores_gpu(cudaStream_t stream,
int N,
int A,
int H,
int W,
const void* scores,
void* fgScores)
{
//TODO custom kernel for this
size_t size = A * H * W * sizeof(T);
for (int n = 0; n < N; n++)
{
size_t offset_ld = n * A * H * W;
size_t offset_st = n * A * H * W;
CSC(cudaMemcpyAsync(((T*) fgScores) + offset_st, ((T*) scores) + offset_ld, size,
cudaMemcpyDeviceToDevice, stream), STATUS_FAILURE);
}
return STATUS_SUCCESS;
}
void _copy_anchors_to_gpu(cudaStream_t stream, float* ANCHOR_SIZES, int anc_size_num,
float* ANCHOR_RATIOS, int anc_ratio_num, void* anchor_size_buf)
{
cudaMemcpyAsync(anchor_size_buf, static_cast<void*>(ANCHOR_SIZES), sizeof(float) * anc_size_num,
cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(static_cast<void*>(static_cast<float*>(anchor_size_buf) + anc_size_num), static_cast<void*>(ANCHOR_RATIOS), sizeof(float) * anc_ratio_num,
cudaMemcpyHostToDevice, stream);
}
__global__ void _normalize_rois_kernel(float* roi_after_nms, int nthreads, int width, int height)
{
for(int i = threadIdx.x + blockDim.x * blockIdx.x; i < nthreads; i += blockDim.x * gridDim.x)
{
float x1 = roi_after_nms[i * 4];
float y1 = roi_after_nms[i * 4 + 1];
float x2 = roi_after_nms[i * 4 + 2];
float y2 = roi_after_nms[i * 4 + 3];
roi_after_nms[i * 4] = y1 / (height - 1.0f);
roi_after_nms[i * 4 + 1] = x1 / (width - 1.0f);
roi_after_nms[i * 4 + 2] = y2 / (height - 1.0f);
roi_after_nms[i * 4 + 3] = x2 / (width - 1.0f);
}
}
// Host-side launcher for _normalize_rois_kernel: normalizes all
// n * max_box_num boxes kept by NMS. One thread per box; the kernel's
// grid-stride loop makes any rounding in the grid size harmless.
void _normalize_rois(float* roi_after_nms, int n, int max_box_num, int input_width,
                     int input_height, cudaStream_t stream)
{
    const int total_boxes = n * max_box_num;
    const int threads_per_block = 1024;
    const int num_blocks = (total_boxes + threads_per_block - 1) / threads_per_block;
    _normalize_rois_kernel <<< num_blocks, threads_per_block, 0, stream>>>(roi_after_nms,
            total_boxes, input_width, input_height);
}
// End-to-end device-side proposal generation for one RPN batch:
//   1. carve the caller-provided workspace into sub-buffers,
//   2. upload the anchor size/ratio tables,
//   3. extract foreground scores from rpn_prob,
//   4. decode rpn_regr deltas into boxes (_inverse_transform_wrapper),
//   5. run NMS keeping MAX_BOX_NUM boxes per image,
//   6. normalize the surviving ROIs into [0,1] with (y1,x1,y2,x2) order.
// All work is enqueued on `stream`; this call does not synchronize.
// Returns 0 on success; ASSERT aborts if a sub-step reports failure.
int proposalInference_gpu(
    cudaStream_t stream,
    const void* rpn_prob,
    const void* rpn_regr,
    int batch_size,
    int input_height,
    int input_width,
    int rpn_height,
    int rpn_width,
    int MAX_BOX_NUM,
    int RPN_PRE_NMS_TOP_N,
    float* ANCHOR_SIZES,
    int anc_size_num,
    float* ANCHOR_RATIOS,
    int anc_ratio_num,
    float rpn_std_scaling,
    int rpn_stride,
    float bbox_min_size,
    float nms_iou_threshold,
    void * workspace,
    void* output)
{
    // Workspace layout (order must match the companion workspace-size
    // function): [NMS scratch | decoded proposals | fg scores | anchors].
    size_t nmsWorkspaceSize = _proposalsForwardNMSWorkspaceSize(batch_size, anc_size_num * anc_ratio_num,
                              rpn_height, rpn_width, MAX_BOX_NUM);
    void* nmsWorkspace = workspace;
    size_t proposalsSize = _proposalsForwardBboxWorkspaceSize(batch_size, anc_size_num * anc_ratio_num,
                           rpn_height, rpn_width);
    const DType_t t_proposals = nvinfer1::DataType::kFLOAT;
    const DLayout_t l_proposals = NC4HW;
    // nextWorkspacePtr advances past each sub-buffer (alignment handled there).
    void* proposals = nextWorkspacePtr((int8_t*) nmsWorkspace, nmsWorkspaceSize);
    void* fg_scores = nextWorkspacePtr((int8_t*) proposals, proposalsSize);
    size_t fg_scores_size = _proposalForwardFgScoresWorkspaceSize(batch_size,
                            anc_size_num * anc_ratio_num, rpn_height, rpn_width);
    void* anchor_size_buf = nextWorkspacePtr((int8_t*) fg_scores, fg_scores_size);
    // Ratios are packed immediately after the sizes in the same buffer
    // (see _copy_anchors_to_gpu).
    void* anchor_ratio_buf = static_cast<void*>(static_cast<float*>(anchor_size_buf) + anc_size_num);
    frcnnStatus_t status;
    _copy_anchors_to_gpu(stream, ANCHOR_SIZES, anc_size_num, ANCHOR_RATIOS, anc_ratio_num,
                         anchor_size_buf);
    status = extractFgScores_gpu<float>(stream,
                                        batch_size,
                                        anc_size_num * anc_ratio_num,
                                        rpn_height,
                                        rpn_width,
                                        rpn_prob,
                                        fg_scores);
    ASSERT(status == 0);
    // Decode deltas -> absolute boxes; also clips and filters by bbox_min_size
    // (presumably — behavior lives in the wrapper; confirm there).
    _inverse_transform_wrapper(static_cast<const float*>(rpn_prob), static_cast<const float*>(rpn_regr),
                               batch_size, input_height, input_width, rpn_height, rpn_width, rpn_std_scaling, rpn_stride,
                               static_cast<float*>(anchor_size_buf), anc_size_num, static_cast<float*>(anchor_ratio_buf),
                               anc_ratio_num, bbox_min_size, static_cast<float*>(fg_scores), static_cast<float*>(proposals),
                               stream);
    status = nms(stream,
                 batch_size,
                 anc_size_num * anc_ratio_num * rpn_height * rpn_width,
                 RPN_PRE_NMS_TOP_N,
                 MAX_BOX_NUM,
                 nms_iou_threshold,
                 nvinfer1::DataType::kFLOAT,
                 NCHW,
                 fg_scores,
                 t_proposals,
                 l_proposals,
                 proposals,
                 workspace,
                 nvinfer1::DataType::kFLOAT,
                 output);
    ASSERT(status == 0);
    _normalize_rois(static_cast<float*>(output), batch_size, MAX_BOX_NUM, input_width, input_height,
                    stream);
    return 0;
}
|
387850dc8f3eca4fe7b55799a6c343bddf654436.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <benchmark/benchmark.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <chrono>
#include <functional>
#include <helper_cuda.h>
#include <cassert>
#include <iostream>
using namespace std;
// TODO: enable this in order to try mapped memory (vs streaming)
// hipSetDeviceFlags(hipDeviceMapHost);
// Print the vector length to be used, and compute its size
constexpr int G = 30;
constexpr int M = 20;
constexpr int K = 10;
constexpr int kWarpSize = 32;
constexpr int kNumSM = 24; // gpu specific.
// Integer scrambler usable on both host and device: three xorshift-style
// mix steps (shift constants 12/25/27) followed by a multiplicative hash.
// Used to derive pseudo-random, reproducible gather indices from loop
// counters without an RNG state.
// NOTE(review): x is signed, so the right shifts on negative values are
// implementation-defined and the left shift can overflow — fine on the
// compilers this targets, but confirm before reusing elsewhere.
__host__ __device__ inline int xorshift_hash(int x) {
    x ^= x >> 12; // a
    x ^= x << 25; // b
    x ^= x >> 27; // c
    return ((unsigned int)x) * 213338717U;
}
// aka.1024. worked slightly better.
// it means each of the 4 exec units has 8 threads it can try to schedule
// and can hide latency up to 8x of
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
// __global__ void
// vectorAdd(const float *A, const float *B, float *C, int numElements)
// {
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < numElements)
// {
// C[i] = A[i] + B[i];
// }
// }
// Which memory-access experiment the benchmark kernel runs.
enum struct Variant {
    Mat,          // full gather: output[i] = dimension_col[index_col[i]]
    NoMat,        // index computed on the fly via xorshift_hash (no index-column load)
    OnlyMat,      // recompute 5*index+1 directly, skipping the dimension gather
    NaiveMemcpy,  // plain copy of the index column through the kernel
    StreamMemcpy,
    RandCheck, // if it has different performance to Mat, then the RNG is not good.
    CudaMemcpy,   // runtime cudaMemcpy baseline (no kernel launched)
    CudaMemset,   // runtime cudaMemset baseline (no kernel launched)
    MAXVARIANT // do not use.
};
// how many outstanding (concurrent) loads each thread keeps in flight.
enum ILP {
    ilp1 = 1,
    ilp2 = 2,
    ilp4 = 4,
    ilp8 = 8,
    ilp16 = 16,
    ilp32 = 32,
    ilp64 = 64,
    ilp128 = 128,
};
// how many lanes of each warp actually do work (the rest stay idle).
enum ActiveLanes {
    al1 = 1,
    al2 = 2,
    al4 = 4,
    al8 = 8,
    al16 = 16,
    al32 = 32
};
// how many tasks of size ILP will be executed in series.
enum GrainSize {
    gs1 = 1,
    gs2 = 2,
    gs3 = 3,
    gs4 = 4,
    gs8 = 8
};
// Returns this thread's lane index within its warp (0..31).
// (The previous comment said "1-hot", but the function returns an index;
// the commented-out PTX variant read %lanemask_eq — a one-hot mask — and
// converted it back to an index with __ffs.)
__device__ inline int get_lane(){
    return threadIdx.x % 32;
    // int lanebit = 0;
    // asm("mov.u32 %0, %lanemask_eq;" : "=r"(lanebit));
    // const auto lane = __ffs(lanebit) - 1;
    // return lane;
}
// Gather micro-benchmark kernel. Template knobs:
//   variant       - which access experiment to run (see enum Variant)
//   ilp           - loads buffered per thread before they are consumed
//   ActiveThreads - lanes per warp that do work (must be a power of 2, <= 32)
//   gs            - how many ilp-sized rounds each thread runs in series
// Fast path: blocks whose whole assigned range fits inside idx_len run
// unguarded load / gather / store phases; only the final partial block takes
// the bounds-checked tail path.
// NOTE(review): the NaiveMemcpy fast path loads index_col[item + 2*delta],
// which for the last full blocks reads past idx_len — presumably tolerated
// for benchmarking; confirm the buffer is over-allocated or the read is
// harmless.
// NOTE(review): the inline PTX (ld.global.cs) is NVIDIA-only even though
// this file was hipified — it still targets nvcc.
template <Variant variant, ILP ilp, ActiveLanes ActiveThreads, GrainSize gs>
__global__ void
templateKernel(const int * __restrict__ index_col,
               const int *__restrict__ dimension_col,
               int * __restrict__ output,
               int idx_len,
               int idx_domain)
{
    static_assert(ActiveThreads <= kWarpSize, "limit");
    static_assert(ActiveThreads > 0, "limit");
    static_assert((ActiveThreads - 1 & ActiveThreads) == 0, "power of 2"); // power of 2
    // mapping block to data
    constexpr int64_t warpFraction = kWarpSize / ActiveThreads;
    int64_t blockSize = (gs * ilp * blockDim.x)/warpFraction;
    int64_t blockStart = blockSize * blockIdx.x;
    int64_t blockEnd = blockStart + blockSize;
    // mapping warp to data
    constexpr int64_t dataPerWarp = gs * ilp * ActiveThreads;
    int64_t warpNo = threadIdx.x / kWarpSize;
    int64_t warpOffset = blockStart + warpNo * dataPerWarp;
    const auto unknown_variant = 0;
    // idx_domain is a power of two (checked host-side), so & mask == % idx_domain.
    const auto mask = idx_domain - 1;
    auto lane = get_lane();
    if (lane < ActiveThreads) {
        const auto taskoffset = warpOffset + lane;
        // // most blocks.
        if (blockEnd <= idx_len) {
            // if (variant == Variant::NaiveMemcpy){
            //   // init tmp.
            //   for (int g = 0; g < ilp; ++g){
            //     tmp[g][0] = index_col[taskoffset + g*ActiveThreads];
            //     tmp[g][1] = index_col[taskoffset + g*ActiveThreads + delta];
            //   }
            // }
            int tmp[ilp]; // wait until next one.
            constexpr auto delta = ilp*ActiveThreads;
            for (int iter = 0; iter < gs; ++iter){
                auto offset = taskoffset + iter*delta;
                //auto this_slot = (iter % 3);
                //auto next_slot = (iter + 2) % 3;
                // load phase: issue ilp independent loads into tmp[].
                for (int g = 0; g < ilp; ++g) {
                    auto item = offset + g*ActiveThreads;
                    switch(variant) {
                    case Variant::NaiveMemcpy:{
                        // aka index_col[item + delta];
                        int ldd;
                        auto nextaddr = &index_col[item + 2*delta];
                        asm("ld.global.cs.u32 %0, [%1];" : "=r"(ldd) : "l"(nextaddr));
                        tmp[g] = ldd;
                        //auto nextaddr = &index_col[item + delta];
                        // prefetch next round
                        //asm("prefetch.global.L2 [%0];" :: "l"(nextaddr));
                        break;
                    }
                    case Variant::RandCheck:
                    case Variant::Mat:{
                        auto idx = index_col[item];
                        tmp[g] = idx;
                        //auto theaddr = &dimension_col[idx];
                        //asm("prefetch.local.L1 [%0];" :: "l"(theaddr));
                        break;
                    }
                    case Variant::OnlyMat:
                        tmp[g] = 5*index_col[item] + 1;
                        break;
                    case Variant::NoMat:
                    {
                        auto num = xorshift_hash(item);
                        auto theidx = num & mask;
                        tmp[g] = theidx;
                        //asm("prefetchu.L1 [%0];" :: "l"(theaddr));
                        //assert(index_col[item] == idx);
                        break;
                    }
                    default:
                        assert(unknown_variant);
                    }
                }
                // gather phase: dereference the buffered indices (streaming load).
                for (int g = 0; g < ilp; ++g){
                    switch(variant){
                    case Variant::NoMat:
                    case Variant::RandCheck:
                    case Variant::Mat:
                        int val;
                        auto addr = &dimension_col[tmp[g]];
                        asm("ld.global.cs.s32 %0, [%1];" : "=r"(val) : "l"(addr));
                        tmp[g] = val;
                        break;
                    }
                }
                // use phase: write results to the output column.
                for (int g = 0; g < ilp; ++g) {
                    auto item = offset + g*ActiveThreads;
                    switch(variant){
                    case Variant::NoMat:
                    case Variant::RandCheck:
                    case Variant::Mat:
                        output[item] = tmp[g];
                        //auto outaddr = &output[item];
                        //asm("st.global.cs.s32 [%0], %1;": "=l"(outaddr) , "r"(tmp[g][1]));
                        break;
                    default:
                        output[item] = tmp[g];
                    }
                }
            }
        } else { // used only for the last thread block.
            //assert(0);
            // Tail path: same work, but every access is bounds-checked.
            for (int iter = 0; iter < gs; ++iter){
                auto offset = taskoffset + iter*ilp*ActiveThreads;
                for (int g = 0; g < ilp; ++g) {
                    auto item = offset + g*ActiveThreads;
                    if (item < idx_len)
                    {
                        switch(variant){
                        case Variant::NaiveMemcpy:
                            output[item] = index_col[item];
                            break;
                        case Variant::RandCheck:
                        case Variant::Mat:
                            output[item] = dimension_col[index_col[item]];
                            break;
                        case Variant::OnlyMat:
                            output[item] = 5*index_col[item] + 1;
                            break;
                        case Variant::NoMat:
                        {
                            auto num = xorshift_hash(item);
                            auto idx = num & mask;
                            output[item] = dimension_col[idx];
                            break;
                        }
                        default:
                            assert(unknown_variant);
                        }
                    }
                }
            }
        }
    }
}
// Checked-call wrapper: first warns if an error was already pending before
// $call (sticky errors would otherwise be mis-attributed), then executes
// $call and aborts the process if it left an error set.
// NOTE(review): '$' in the macro parameter name is a non-standard identifier
// extension (accepted by gcc/clang/nvcc, not guaranteed by ISO C++).
#define cudaCheckErrors($call) \
    do { \
        hipError_t err = hipGetLastError(); \
        if (err != hipSuccess){\
            fprintf(stderr, "WARNING: Error was already set before call: (%s at %s:%d)\n", \
                    hipGetErrorString(err), \
                    __FILE__, __LINE__); \
        }\
        $call; \
        err = hipGetLastError(); \
        if (err != hipSuccess) { \
            fprintf(stderr, "Fatal error: (%s at %s:%d)\n", \
                    hipGetErrorString(err), \
                    __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
// Signature shared by every templateKernel instantiation:
// (index_col, dimension_col, output, idx_len, idx_domain).
using KernelT = void(const int *, const int *, int *, int, int);
// Benchmark driver (HIP build). Builds a random index column h_A (masked
// into [0, dim_num)), a lookup table h_B[i] = 5*i + 1, uploads both, then
// times either a runtime memcpy/memset baseline or one templateKernel
// instantiation, and finally verifies the device output on the host.
// state.range(0) = index/output size in BYTES, state.range(1) = table size
// in BYTES; the table element count must be a power of two so `& mask`
// substitutes for `% dim_num`.
// Fixes vs. the original:
//  * RandCheck declared a second `auto rando` that shadowed the outer one,
//    so every RandCheck index was 0 — now assigns the outer variable.
//  * blocksPerGrid divided the BYTE count idx_size by the per-block ITEM
//    count, launching 4x too many (empty, bounds-checked) blocks — now
//    uses idx_num, matching the stats printed below.
template <Variant variant, ILP ilp, int ThreadsPerBlock, ActiveLanes ActiveThreads, GrainSize gs>
void GPU_BM(benchmark::State& state)
{
    static_assert(int32_t(variant) < int32_t(Variant::MAXVARIANT), "invalid variant");
    int64_t idx_size = state.range(0);   // bytes in index / output columns
    int64_t dim_size = state.range(1);   // bytes in the lookup table
    int64_t idx_num = idx_size / sizeof(int);
    int64_t dim_num = dim_size / sizeof(int);
    // Pinned host buffers: required for fast transfers and async copies.
    int *h_A = nullptr;
    cudaCheckErrors(hipHostMalloc(&h_A, idx_size));
    int *h_B = nullptr;
    cudaCheckErrors(hipHostMalloc(&h_B, dim_size));
    // Table size must be a power of two so the mask trick below is valid.
    int sm = __builtin_popcountl (dim_num);
    assert(sm == 1); // popcount of 1.
    const int mask = dim_num - 1;
    // Initialize the host input vectors.
    for (int i = 0; i < idx_num; ++i)
    {
        int rando = 0;
        if (variant == Variant::RandCheck) {
            rando = rand();  // fixed: was `auto rando = rand();`, which shadowed
        } else {
            rando = xorshift_hash(i);
        }
        h_A[i] = rando & mask;
        assert(h_A[i] < dim_num);
    }
    for (int i = 0; i < dim_num; ++i){
        h_B[i] = 5*i + 1;
    }
    // Device buffers: A = index column, B = lookup table, C = output.
    int *d_A = NULL;
    cudaCheckErrors(hipMalloc(&d_A, idx_size));
    int *d_B = NULL;
    cudaCheckErrors(hipMalloc((void **)&d_B, dim_size));
    int *d_C = NULL;
    cudaCheckErrors(hipMalloc((void **)&d_C, idx_size));
    // Poison the output so stale data cannot pass verification.
    cudaCheckErrors(hipMemset(d_C, 0xff, idx_size));
    int itemsPerBlock = -1;
    int blocksPerGrid = -1;
    cudaCheckErrors(hipMemcpy(d_B, h_B, dim_size, hipMemcpyHostToDevice));
    cudaCheckErrors(hipMemcpy(d_A, h_A, idx_size, hipMemcpyHostToDevice));
    KernelT* kernel = nullptr;
    switch (variant){
    case Variant::CudaMemcpy:
    case Variant::CudaMemset:
        break;  // runtime-API baselines launch no kernel
    default:{
        kernel = templateKernel<variant, ilp, ActiveThreads, gs>;
        auto threadMultiplier = kWarpSize/ActiveThreads;
        itemsPerBlock = (ilp * gs * ThreadsPerBlock)/threadMultiplier;
        // Grid covers idx_num ITEMS (fixed: previously divided idx_size bytes).
        blocksPerGrid = (idx_num + itemsPerBlock - 1) / itemsPerBlock;
        fprintf(stderr,
                "Variant: %d\n"
                "ILP: %d\n"
                "Items per thread: %d\n"
                "Items per block: %d\n"
                "Active threads per warp: %d\n"
                "Threads per block: %d\n"
                "Blocks per SM: %d\n"
                "Remainder blocks: %d\n"
                "Remainder threads: %d\n",
                int(variant),
                ilp,
                gs * ilp,
                itemsPerBlock,
                ActiveThreads,
                ThreadsPerBlock,
                blocksPerGrid / kNumSM,
                blocksPerGrid % kNumSM,
                2048 % ThreadsPerBlock);
    }
    }
    // Timed loop: one operation per iteration, synchronized so wall time
    // includes the device work.
    while (state.KeepRunning()){
        switch (variant) {
        case Variant::CudaMemcpy:
            hipMemcpy(d_C, d_A, idx_size, hipMemcpyDeviceToDevice);
            break;
        case Variant::CudaMemset:
            hipMemset(d_C, 0xf, idx_size);
            break;
        default:
            hipLaunchKernelGGL(( kernel), dim3(blocksPerGrid), dim3(ThreadsPerBlock), 0, 0, d_A, d_B, d_C, idx_num, dim_num);
            break;
        }
        hipDeviceSynchronize();
    }
    state.SetItemsProcessed(int64_t(state.iterations())*int64_t(idx_num));
    switch(variant){
    case Variant::CudaMemcpy:
    case Variant::NaiveMemcpy:
        state.SetBytesProcessed(int64_t(state.iterations())*
                                int64_t(idx_size * 2)); // read + write
        break;
    case Variant::CudaMemset:
        state.SetBytesProcessed(int64_t(state.iterations())*
                                int64_t(idx_size)); // write only
        break;
    default:
        break;
    }
    // Copy back and verify against a host-side recomputation.
    int *h_C = nullptr;
    cudaCheckErrors(hipHostMalloc(&h_C, idx_size));
    cudaCheckErrors(hipMemcpy(h_C, d_C, idx_size, hipMemcpyDeviceToHost));
    switch (variant){
    case Variant::CudaMemcpy:
    case Variant::NaiveMemcpy:
    {
        for (int i = 0; i < idx_num; ++i) {
            if (h_C[i] != h_A[i]) {
                fprintf(stdout, "\033[1;31mERROR:\033[0m memcpy verification failed at position %d: h_C=%d but h_A=%d\n", i, h_C[i], h_A[i]);
                break; // free memory
            }
        }
        break;
    }
    case Variant::CudaMemset:
    {
        for (int i = 0; i < idx_num; ++i){
            if (h_C[i] != 0x0f0f0f0f){
                fprintf(stdout, "ERROR. memset verification failed\n");
                break; // free memory
            }
        }
        break;
    }
    default:
    {
        // Gather output must equal the table transform of the index column.
        for (int i = 0; i < idx_num; ++i) {
            if (h_C[i] != h_A[i]*5+1) {
                fprintf(stdout, "\033[1;31mERROR:\033[0m gather verification failed at position %d: h_C=%d but h_A=%d and hA*5 + 1 = %d\n", i, h_C[i], h_A[i], h_A[i]*5+ 1);
                break; // free memory
            }
        }
        break;
    }
    }
    cudaCheckErrors(hipHostFree(h_A));
    cudaCheckErrors(hipHostFree(h_B));
    cudaCheckErrors(hipHostFree(h_C));
    cudaCheckErrors(hipFree(d_A));
    cudaCheckErrors(hipFree(d_B));
    cudaCheckErrors(hipFree(d_C));
}
#define TPB(n) n
#define ATh(n) ActiveLanes::al##n
#define ILP(n) (ILP::ilp##n)
#define GS(n) GrainSize::gs##n
BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(2), TPB(256), ATh(32), GS(8)) // actually does write output for now..
->RangeMultiplier(8)
->Ranges({{1<<G, 1<<G}, {64 << M, 64 << M}})
->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(4), TPB(256), ATh(32), GS(4)) // actually does write output for now..
->RangeMultiplier(2)
->Ranges({{1<<G, 1<<G}, {64 << M, 64 << M}})
->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(8), TPB(256), ATh(16)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(16), TPB(256), ATh(8)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(16), TPB(256), ATh(4)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(16), TPB(256), ATh(2)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::Mat, ILP(2), TPB(256), ATh(32), GS(2)) // actually does write output for now..
->RangeMultiplier(2)
->Ranges({{1<<G, 1<<G}, {1 << K, 512 << M}})
->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(1), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(2), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(4), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(8), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(16), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(8), TPB(512), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(16), TPB(512), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(16), TPB(512), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::CudaMemcpy, ILP(1), TPB(1024), ATh(32) ,GS(1)) // dim should be irrelevant
->Args({1 << G, 1 << K})
->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::CudaMemset, ILP(1), TPB(1024), ATh(32), GS(1)) // dim should be irrelevant
->Args({1 << G, 1 << K})
->Unit(benchmark::kMillisecond);
BENCHMARK_MAIN(); | 387850dc8f3eca4fe7b55799a6c343bddf654436.cu | #include <stdio.h>
#include <benchmark/benchmark.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <chrono>
#include <functional>
#include <helper_cuda.h>
#include <cassert>
#include <iostream>
using namespace std;
// TODO: enable this in order to try mapped memory (vs streaming)
// cudaSetDeviceFlags(cudaDeviceMapHost);
// Print the vector length to be used, and compute its size
constexpr int G = 30;
constexpr int M = 20;
constexpr int K = 10;
constexpr int kWarpSize = 32;
constexpr int kNumSM = 24; // gpu specific.
// Integer scrambler usable on both host and device: three xorshift-style
// mix steps (shift constants 12/25/27) followed by a multiplicative hash.
// Used to derive pseudo-random, reproducible gather indices from loop
// counters without an RNG state.
// NOTE(review): x is signed, so the right shifts on negative values are
// implementation-defined and the left shift can overflow — fine on the
// compilers this targets, but confirm before reusing elsewhere.
__host__ __device__ inline int xorshift_hash(int x) {
    x ^= x >> 12; // a
    x ^= x << 25; // b
    x ^= x >> 27; // c
    return ((unsigned int)x) * 213338717U;
}
// aka.1024. worked slightly better.
// it means each of the 4 exec units has 8 threads it can try to schedule
// and can hide latency up to 8x of
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
// __global__ void
// vectorAdd(const float *A, const float *B, float *C, int numElements)
// {
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < numElements)
// {
// C[i] = A[i] + B[i];
// }
// }
// Which memory-access experiment the benchmark kernel runs.
enum struct Variant {
    Mat,          // full gather: output[i] = dimension_col[index_col[i]]
    NoMat,        // index computed on the fly via xorshift_hash (no index-column load)
    OnlyMat,      // recompute 5*index+1 directly, skipping the dimension gather
    NaiveMemcpy,  // plain copy of the index column through the kernel
    StreamMemcpy,
    RandCheck, // if it has different performance to Mat, then the RNG is not good.
    CudaMemcpy,   // runtime cudaMemcpy baseline (no kernel launched)
    CudaMemset,   // runtime cudaMemset baseline (no kernel launched)
    MAXVARIANT // do not use.
};
// how many outstanding (concurrent) loads each thread keeps in flight.
enum ILP {
    ilp1 = 1,
    ilp2 = 2,
    ilp4 = 4,
    ilp8 = 8,
    ilp16 = 16,
    ilp32 = 32,
    ilp64 = 64,
    ilp128 = 128,
};
// how many lanes of each warp actually do work (the rest stay idle).
enum ActiveLanes {
    al1 = 1,
    al2 = 2,
    al4 = 4,
    al8 = 8,
    al16 = 16,
    al32 = 32
};
// how many tasks of size ILP will be executed in series.
enum GrainSize {
    gs1 = 1,
    gs2 = 2,
    gs3 = 3,
    gs4 = 4,
    gs8 = 8
};
// Returns this thread's lane index within its warp (0..31).
// (The previous comment said "1-hot", but the function returns an index;
// the commented-out PTX variant read %lanemask_eq — a one-hot mask — and
// converted it back to an index with __ffs.)
__device__ inline int get_lane(){
    return threadIdx.x % 32;
    // int lanebit = 0;
    // asm("mov.u32 %0, %lanemask_eq;" : "=r"(lanebit));
    // const auto lane = __ffs(lanebit) - 1;
    // return lane;
}
// Gather micro-benchmark kernel. Template knobs:
//   variant       - which access experiment to run (see enum Variant)
//   ilp           - loads buffered per thread before they are consumed
//   ActiveThreads - lanes per warp that do work (must be a power of 2, <= 32)
//   gs            - how many ilp-sized rounds each thread runs in series
// Fast path: blocks whose whole assigned range fits inside idx_len run
// unguarded load / gather / store phases; only the final partial block takes
// the bounds-checked tail path.
// NOTE(review): the NaiveMemcpy fast path loads index_col[item + 2*delta],
// which for the last full blocks reads past idx_len — presumably tolerated
// for benchmarking; confirm the buffer is over-allocated or the read is
// harmless.
template <Variant variant, ILP ilp, ActiveLanes ActiveThreads, GrainSize gs>
__global__ void
templateKernel(const int * __restrict__ index_col,
               const int *__restrict__ dimension_col,
               int * __restrict__ output,
               int idx_len,
               int idx_domain)
{
    static_assert(ActiveThreads <= kWarpSize, "limit");
    static_assert(ActiveThreads > 0, "limit");
    static_assert((ActiveThreads - 1 & ActiveThreads) == 0, "power of 2"); // power of 2
    // mapping block to data
    constexpr int64_t warpFraction = kWarpSize / ActiveThreads;
    int64_t blockSize = (gs * ilp * blockDim.x)/warpFraction;
    int64_t blockStart = blockSize * blockIdx.x;
    int64_t blockEnd = blockStart + blockSize;
    // mapping warp to data
    constexpr int64_t dataPerWarp = gs * ilp * ActiveThreads;
    int64_t warpNo = threadIdx.x / kWarpSize;
    int64_t warpOffset = blockStart + warpNo * dataPerWarp;
    const auto unknown_variant = 0;
    // idx_domain is a power of two (checked host-side), so & mask == % idx_domain.
    const auto mask = idx_domain - 1;
    auto lane = get_lane();
    if (lane < ActiveThreads) {
        const auto taskoffset = warpOffset + lane;
        // // most blocks.
        if (blockEnd <= idx_len) {
            // if (variant == Variant::NaiveMemcpy){
            //   // init tmp.
            //   for (int g = 0; g < ilp; ++g){
            //     tmp[g][0] = index_col[taskoffset + g*ActiveThreads];
            //     tmp[g][1] = index_col[taskoffset + g*ActiveThreads + delta];
            //   }
            // }
            int tmp[ilp]; // wait until next one.
            constexpr auto delta = ilp*ActiveThreads;
            for (int iter = 0; iter < gs; ++iter){
                auto offset = taskoffset + iter*delta;
                //auto this_slot = (iter % 3);
                //auto next_slot = (iter + 2) % 3;
                // load phase: issue ilp independent loads into tmp[].
                for (int g = 0; g < ilp; ++g) {
                    auto item = offset + g*ActiveThreads;
                    switch(variant) {
                    case Variant::NaiveMemcpy:{
                        // aka index_col[item + delta];
                        int ldd;
                        auto nextaddr = &index_col[item + 2*delta];
                        asm("ld.global.cs.u32 %0, [%1];" : "=r"(ldd) : "l"(nextaddr));
                        tmp[g] = ldd;
                        //auto nextaddr = &index_col[item + delta];
                        // prefetch next round
                        //asm("prefetch.global.L2 [%0];" :: "l"(nextaddr));
                        break;
                    }
                    case Variant::RandCheck:
                    case Variant::Mat:{
                        auto idx = index_col[item];
                        tmp[g] = idx;
                        //auto theaddr = &dimension_col[idx];
                        //asm("prefetch.local.L1 [%0];" :: "l"(theaddr));
                        break;
                    }
                    case Variant::OnlyMat:
                        tmp[g] = 5*index_col[item] + 1;
                        break;
                    case Variant::NoMat:
                    {
                        auto num = xorshift_hash(item);
                        auto theidx = num & mask;
                        tmp[g] = theidx;
                        //asm("prefetchu.L1 [%0];" :: "l"(theaddr));
                        //assert(index_col[item] == idx);
                        break;
                    }
                    default:
                        assert(unknown_variant);
                    }
                }
                // gather phase: dereference the buffered indices (streaming load).
                for (int g = 0; g < ilp; ++g){
                    switch(variant){
                    case Variant::NoMat:
                    case Variant::RandCheck:
                    case Variant::Mat:
                        int val;
                        auto addr = &dimension_col[tmp[g]];
                        asm("ld.global.cs.s32 %0, [%1];" : "=r"(val) : "l"(addr));
                        tmp[g] = val;
                        break;
                    }
                }
                // use phase: write results to the output column.
                for (int g = 0; g < ilp; ++g) {
                    auto item = offset + g*ActiveThreads;
                    switch(variant){
                    case Variant::NoMat:
                    case Variant::RandCheck:
                    case Variant::Mat:
                        output[item] = tmp[g];
                        //auto outaddr = &output[item];
                        //asm("st.global.cs.s32 [%0], %1;": "=l"(outaddr) , "r"(tmp[g][1]));
                        break;
                    default:
                        output[item] = tmp[g];
                    }
                }
            }
        } else { // used only for the last thread block.
            //assert(0);
            // Tail path: same work, but every access is bounds-checked.
            for (int iter = 0; iter < gs; ++iter){
                auto offset = taskoffset + iter*ilp*ActiveThreads;
                for (int g = 0; g < ilp; ++g) {
                    auto item = offset + g*ActiveThreads;
                    if (item < idx_len)
                    {
                        switch(variant){
                        case Variant::NaiveMemcpy:
                            output[item] = index_col[item];
                            break;
                        case Variant::RandCheck:
                        case Variant::Mat:
                            output[item] = dimension_col[index_col[item]];
                            break;
                        case Variant::OnlyMat:
                            output[item] = 5*index_col[item] + 1;
                            break;
                        case Variant::NoMat:
                        {
                            auto num = xorshift_hash(item);
                            auto idx = num & mask;
                            output[item] = dimension_col[idx];
                            break;
                        }
                        default:
                            assert(unknown_variant);
                        }
                    }
                }
            }
        }
    }
}
// Checked-call wrapper: first warns if an error was already pending before
// $call (sticky errors would otherwise be mis-attributed), then executes
// $call and aborts the process if it left an error set.
// NOTE(review): '$' in the macro parameter name is a non-standard identifier
// extension (accepted by gcc/clang/nvcc, not guaranteed by ISO C++).
#define cudaCheckErrors($call) \
    do { \
        cudaError_t err = cudaGetLastError(); \
        if (err != cudaSuccess){\
            fprintf(stderr, "WARNING: Error was already set before call: (%s at %s:%d)\n", \
                    cudaGetErrorString(err), \
                    __FILE__, __LINE__); \
        }\
        $call; \
        err = cudaGetLastError(); \
        if (err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: (%s at %s:%d)\n", \
                    cudaGetErrorString(err), \
                    __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
// Signature shared by every templateKernel instantiation:
// (index_col, dimension_col, output, idx_len, idx_domain).
using KernelT = void(const int *, const int *, int *, int, int);
// Benchmark driver (CUDA build). Builds a random index column h_A (masked
// into [0, dim_num)), a lookup table h_B[i] = 5*i + 1, uploads both, then
// times either a runtime memcpy/memset baseline or one templateKernel
// instantiation, and finally verifies the device output on the host.
// state.range(0) = index/output size in BYTES, state.range(1) = table size
// in BYTES; the table element count must be a power of two so `& mask`
// substitutes for `% dim_num`.
// Fixes vs. the original:
//  * RandCheck declared a second `auto rando` that shadowed the outer one,
//    so every RandCheck index was 0 — now assigns the outer variable.
//  * blocksPerGrid divided the BYTE count idx_size by the per-block ITEM
//    count, launching 4x too many (empty, bounds-checked) blocks — now
//    uses idx_num, matching the stats printed below.
template <Variant variant, ILP ilp, int ThreadsPerBlock, ActiveLanes ActiveThreads, GrainSize gs>
void GPU_BM(benchmark::State& state)
{
    static_assert(int32_t(variant) < int32_t(Variant::MAXVARIANT), "invalid variant");
    int64_t idx_size = state.range(0);   // bytes in index / output columns
    int64_t dim_size = state.range(1);   // bytes in the lookup table
    int64_t idx_num = idx_size / sizeof(int);
    int64_t dim_num = dim_size / sizeof(int);
    // Pinned host buffers: required for fast transfers and async copies.
    int *h_A = nullptr;
    cudaCheckErrors(cudaMallocHost(&h_A, idx_size));
    int *h_B = nullptr;
    cudaCheckErrors(cudaMallocHost(&h_B, dim_size));
    // Table size must be a power of two so the mask trick below is valid.
    int sm = __builtin_popcountl (dim_num);
    assert(sm == 1); // popcount of 1.
    const int mask = dim_num - 1;
    // Initialize the host input vectors.
    for (int i = 0; i < idx_num; ++i)
    {
        int rando = 0;
        if (variant == Variant::RandCheck) {
            rando = rand();  // fixed: was `auto rando = rand();`, which shadowed
        } else {
            rando = xorshift_hash(i);
        }
        h_A[i] = rando & mask;
        assert(h_A[i] < dim_num);
    }
    for (int i = 0; i < dim_num; ++i){
        h_B[i] = 5*i + 1;
    }
    // Device buffers: A = index column, B = lookup table, C = output.
    int *d_A = NULL;
    cudaCheckErrors(cudaMalloc(&d_A, idx_size));
    int *d_B = NULL;
    cudaCheckErrors(cudaMalloc((void **)&d_B, dim_size));
    int *d_C = NULL;
    cudaCheckErrors(cudaMalloc((void **)&d_C, idx_size));
    // Poison the output so stale data cannot pass verification.
    cudaCheckErrors(cudaMemset(d_C, 0xff, idx_size));
    int itemsPerBlock = -1;
    int blocksPerGrid = -1;
    cudaCheckErrors(cudaMemcpy(d_B, h_B, dim_size, cudaMemcpyHostToDevice));
    cudaCheckErrors(cudaMemcpy(d_A, h_A, idx_size, cudaMemcpyHostToDevice));
    KernelT* kernel = nullptr;
    switch (variant){
    case Variant::CudaMemcpy:
    case Variant::CudaMemset:
        break;  // runtime-API baselines launch no kernel
    default:{
        kernel = templateKernel<variant, ilp, ActiveThreads, gs>;
        auto threadMultiplier = kWarpSize/ActiveThreads;
        itemsPerBlock = (ilp * gs * ThreadsPerBlock)/threadMultiplier;
        // Grid covers idx_num ITEMS (fixed: previously divided idx_size bytes).
        blocksPerGrid = (idx_num + itemsPerBlock - 1) / itemsPerBlock;
        fprintf(stderr,
                "Variant: %d\n"
                "ILP: %d\n"
                "Items per thread: %d\n"
                "Items per block: %d\n"
                "Active threads per warp: %d\n"
                "Threads per block: %d\n"
                "Blocks per SM: %d\n"
                "Remainder blocks: %d\n"
                "Remainder threads: %d\n",
                int(variant),
                ilp,
                gs * ilp,
                itemsPerBlock,
                ActiveThreads,
                ThreadsPerBlock,
                blocksPerGrid / kNumSM,
                blocksPerGrid % kNumSM,
                2048 % ThreadsPerBlock);
    }
    }
    // Timed loop: one operation per iteration, synchronized so wall time
    // includes the device work.
    while (state.KeepRunning()){
        switch (variant) {
        case Variant::CudaMemcpy:
            cudaMemcpy(d_C, d_A, idx_size, cudaMemcpyDeviceToDevice);
            break;
        case Variant::CudaMemset:
            cudaMemset(d_C, 0xf, idx_size);
            break;
        default:
            kernel<<<blocksPerGrid, ThreadsPerBlock>>>(d_A, d_B, d_C, idx_num, dim_num);
            break;
        }
        cudaDeviceSynchronize();
    }
    state.SetItemsProcessed(int64_t(state.iterations())*int64_t(idx_num));
    switch(variant){
    case Variant::CudaMemcpy:
    case Variant::NaiveMemcpy:
        state.SetBytesProcessed(int64_t(state.iterations())*
                                int64_t(idx_size * 2)); // read + write
        break;
    case Variant::CudaMemset:
        state.SetBytesProcessed(int64_t(state.iterations())*
                                int64_t(idx_size)); // write only
        break;
    default:
        break;
    }
    // Copy back and verify against a host-side recomputation.
    int *h_C = nullptr;
    cudaCheckErrors(cudaMallocHost(&h_C, idx_size));
    cudaCheckErrors(cudaMemcpy(h_C, d_C, idx_size, cudaMemcpyDeviceToHost));
    switch (variant){
    case Variant::CudaMemcpy:
    case Variant::NaiveMemcpy:
    {
        for (int i = 0; i < idx_num; ++i) {
            if (h_C[i] != h_A[i]) {
                fprintf(stdout, "\033[1;31mERROR:\033[0m memcpy verification failed at position %d: h_C=%d but h_A=%d\n", i, h_C[i], h_A[i]);
                break; // free memory
            }
        }
        break;
    }
    case Variant::CudaMemset:
    {
        for (int i = 0; i < idx_num; ++i){
            if (h_C[i] != 0x0f0f0f0f){
                fprintf(stdout, "ERROR. memset verification failed\n");
                break; // free memory
            }
        }
        break;
    }
    default:
    {
        // Gather output must equal the table transform of the index column.
        for (int i = 0; i < idx_num; ++i) {
            if (h_C[i] != h_A[i]*5+1) {
                fprintf(stdout, "\033[1;31mERROR:\033[0m gather verification failed at position %d: h_C=%d but h_A=%d and hA*5 + 1 = %d\n", i, h_C[i], h_A[i], h_A[i]*5+ 1);
                break; // free memory
            }
        }
        break;
    }
    }
    cudaCheckErrors(cudaFreeHost(h_A));
    cudaCheckErrors(cudaFreeHost(h_B));
    cudaCheckErrors(cudaFreeHost(h_C));
    cudaCheckErrors(cudaFree(d_A));
    cudaCheckErrors(cudaFree(d_B));
    cudaCheckErrors(cudaFree(d_C));
}
#define TPB(n) n
#define ATh(n) ActiveLanes::al##n
#define ILP(n) (ILP::ilp##n)
#define GS(n) GrainSize::gs##n
BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(2), TPB(256), ATh(32), GS(8)) // actually does write output for now..
->RangeMultiplier(8)
->Ranges({{1<<G, 1<<G}, {64 << M, 64 << M}})
->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(4), TPB(256), ATh(32), GS(4)) // actually does write output for now..
->RangeMultiplier(2)
->Ranges({{1<<G, 1<<G}, {64 << M, 64 << M}})
->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(8), TPB(256), ATh(16)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(16), TPB(256), ATh(8)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(16), TPB(256), ATh(4)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NoMat, ILP(16), TPB(256), ATh(2)) // actually does write output for now..
// ->RangeMultiplier(2)
// ->Ranges({{1<<G, 1<<G}, {256 << M, 256 << M}})
// ->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::Mat, ILP(2), TPB(256), ATh(32), GS(2)) // actually does write output for now..
->RangeMultiplier(2)
->Ranges({{1<<G, 1<<G}, {1 << K, 512 << M}})
->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(1), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(2), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(4), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(8), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(16), TPB(1024), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(8), TPB(512), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(16), TPB(512), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
// BENCHMARK_TEMPLATE(GPU_BM, Variant::NaiveMemcpy, ILP(16), TPB(512), ATh(32)) // dim should be irrelevant
// ->Args({1 << G, 1 << K})
// ->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::CudaMemcpy, ILP(1), TPB(1024), ATh(32) ,GS(1)) // dim should be irrelevant
->Args({1 << G, 1 << K})
->Unit(benchmark::kMillisecond);
BENCHMARK_TEMPLATE(GPU_BM, Variant::CudaMemset, ILP(1), TPB(1024), ATh(32), GS(1)) // dim should be irrelevant
->Args({1 << G, 1 << K})
->Unit(benchmark::kMillisecond);
BENCHMARK_MAIN(); |
37a8a65b21cd53cfbd98ab3e27c6763da2754b47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "itf/engine/layer.hpp"
#include "itf/engine/util/math_functions.hpp"
#include "itf/engine/vision_layers.hpp"
namespace itf {
// Element-wise running-max forward kernel with argmax tracking.
// Called once per extra bottom blob: on the first call (blob_idx == 0),
// a/b are the first two bottoms; on later calls a is the running max
// (top_data itself) and b is the next bottom. mask[index] records which
// bottom supplied the winning value, for use in the backward pass.
// Note the asymmetry: when a > b and blob_idx != 0, nothing is written —
// top_data already holds the running max and mask the earlier winner.
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
    const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
    int* mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype maxval = -FLT_MAX;
    int maxidx = -1;
    if (bottom_data_a[index] > bottom_data_b[index]) {
      // only update for very first bottom_data blob (blob_idx == 0)
      if (blob_idx == 0) {
        maxval = bottom_data_a[index];
        top_data[index] = maxval;
        maxidx = blob_idx;
        mask[index] = maxidx;
      }
    } else {
      maxval = bottom_data_b[index];
      top_data[index] = maxval;
      maxidx = blob_idx + 1;
      mask[index] = maxidx;
    }
  }
}
// GPU forward pass for the elementwise layer: combines all bottom blobs into
// top[0] using the configured operation -- elementwise product (PROD),
// coefficient-weighted sum (SUM), or maximum (MAX). For MAX, max_idx_ records
// per element which bottom blob won, for use by the backward pass.
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int* mask = NULL;
  const int count = top[0]->count();
  Dtype* top_data = top[0]->mutable_gpu_data();
  switch (op_) {
  case EltwiseParameter_EltwiseOp_PROD:
    // Seed with bottom[0]*bottom[1], then fold in the remaining bottoms.
    caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
        top_data);
    for (int i = 2; i < bottom.size(); ++i) {
      caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
    }
    break;
  case EltwiseParameter_EltwiseOp_SUM:
    caffe_gpu_set(count, Dtype(0.), top_data);
    // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
    for (int i = 0; i < bottom.size(); ++i) {
      caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
    }
    break;
  case EltwiseParameter_EltwiseOp_MAX:
    mask = max_idx_.mutable_gpu_data();
    // First launch seeds top_data/mask from bottoms 0 and 1; each later
    // launch folds bottom[i] into the running maximum held in top_data.
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
    for (int i = 2; i < bottom.size(); ++i) {
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
    }
    break;
  default:
    LOG(FATAL) << "Unknown elementwise operation.";
  }
}
// Backward routing for the elementwise MAX operation: a bottom element
// receives the top gradient only where the forward-pass mask recorded that
// this blob (blob_idx) supplied the maximum; everywhere else it gets zero.
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
    const int blob_idx, const int* mask, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const bool won = (mask[index] == blob_idx);
    bottom_diff[index] = won ? top_diff[index] : Dtype(0);
  }
}
// GPU backward pass for the elementwise layer. For each bottom blob i with
// propagate_down[i]: PROD multiplies the top gradient by the product of all
// other bottoms (either recomputed stably or derived as top/bottom), SUM
// scales the top gradient by coeffs_[i], and MAX routes the gradient to the
// winning bottom via the mask saved in the forward pass.
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int* mask = NULL;
  const int count = top[0]->count();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      const Dtype* bottom_data = bottom[i]->gpu_data();
      Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
      switch (op_) {
      case EltwiseParameter_EltwiseOp_PROD:
        if (stable_prod_grad_) {
          // Stable form: explicitly multiply together all bottoms j != i,
          // avoiding the division below (which breaks when bottom_data has
          // zeros).
          bool initialized = false;
          for (int j = 0; j < bottom.size(); ++j) {
            if (i == j) { continue; }
            if (!initialized) {
              caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
              initialized = true;
            } else {
              caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
                            bottom_diff);
            }
          }
        } else {
          // Cheap form: product of the other bottoms == top / bottom[i].
          caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
        }
        caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
        break;
      case EltwiseParameter_EltwiseOp_SUM:
        if (coeffs_[i] == Dtype(1.)) {
          caffe_copy(count, top_diff, bottom_diff);
        } else {
          caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
        }
        break;
      case EltwiseParameter_EltwiseOp_MAX:
        mask = max_idx_.gpu_data();
        // FIX: hipify had mangled this launch -- "hipLaunchKernelGGL(("
        // was spliced into the NOLINT comment, leaving the kernel name
        // dangling before a bare argument list (a compile error). Restored
        // to the proper hipLaunchKernelGGL form used in Forward_gpu.
        // NOLINT_NEXT_LINE(whitespace/operators)
        hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
            count, top_diff, i, mask, bottom_diff);
        break;
      default:
        LOG(FATAL) << "Unknown elementwise operation.";
      }
    }
  }
}
// Instantiate Forward_gpu/Backward_gpu for the supported Dtype set.
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace itf
| 37a8a65b21cd53cfbd98ab3e27c6763da2754b47.cu | #include <cfloat>
#include <vector>
#include "itf/engine/layer.hpp"
#include "itf/engine/util/math_functions.hpp"
#include "itf/engine/vision_layers.hpp"
namespace itf {
// Elementwise running maximum over two inputs: writes the larger of
// bottom_data_a/bottom_data_b into top_data and records in mask which
// bottom blob supplied it. blob_idx is the bottom-list index of
// bottom_data_a; on later passes top_data itself is passed as "a", so the
// "a wins" branch only needs to store on the very first pair (blob_idx==0) --
// otherwise top_data/mask already hold the correct running result.
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
    const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
    int* mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype maxval = -FLT_MAX;
    int maxidx = -1;
    if (bottom_data_a[index] > bottom_data_b[index]) {
      // only update for very first bottom_data blob (blob_idx == 0)
      if (blob_idx == 0) {
        maxval = bottom_data_a[index];
        top_data[index] = maxval;
        maxidx = blob_idx;
        mask[index] = maxidx;
      }
    } else {
      // "b" wins (or ties): its index in the overall bottom list is blob_idx+1.
      maxval = bottom_data_b[index];
      top_data[index] = maxval;
      maxidx = blob_idx + 1;
      mask[index] = maxidx;
    }
  }
}
// GPU forward pass for the elementwise layer: combines all bottom blobs into
// top[0] using the configured operation -- elementwise product (PROD),
// coefficient-weighted sum (SUM), or maximum (MAX). For MAX, max_idx_ records
// per element which bottom blob won, for use by the backward pass.
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int* mask = NULL;
  const int count = top[0]->count();
  Dtype* top_data = top[0]->mutable_gpu_data();
  switch (op_) {
  case EltwiseParameter_EltwiseOp_PROD:
    // Seed with bottom[0]*bottom[1], then fold in the remaining bottoms.
    caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
        top_data);
    for (int i = 2; i < bottom.size(); ++i) {
      caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
    }
    break;
  case EltwiseParameter_EltwiseOp_SUM:
    caffe_gpu_set(count, Dtype(0.), top_data);
    // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
    for (int i = 0; i < bottom.size(); ++i) {
      caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
    }
    break;
  case EltwiseParameter_EltwiseOp_MAX:
    mask = max_idx_.mutable_gpu_data();
    // First launch seeds top_data/mask from bottoms 0 and 1; each later
    // launch folds bottom[i] into the running maximum held in top_data.
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
    for (int i = 2; i < bottom.size(); ++i) {
      // NOLINT_NEXT_LINE(whitespace/operators)
      MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
    }
    break;
  default:
    LOG(FATAL) << "Unknown elementwise operation.";
  }
}
// Backward routing for the elementwise MAX operation: a bottom element
// receives the top gradient only where the forward-pass mask recorded that
// this blob (blob_idx) supplied the maximum; everywhere else it gets zero.
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
    const int blob_idx, const int* mask, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const bool won = (mask[index] == blob_idx);
    bottom_diff[index] = won ? top_diff[index] : Dtype(0);
  }
}
// GPU backward pass for the elementwise layer. For each bottom blob i with
// propagate_down[i]: PROD multiplies the top gradient by the product of all
// other bottoms (either recomputed stably or derived as top/bottom), SUM
// scales the top gradient by coeffs_[i], and MAX routes the gradient to the
// winning bottom via the mask saved in the forward pass.
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int* mask = NULL;
  const int count = top[0]->count();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      const Dtype* bottom_data = bottom[i]->gpu_data();
      Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
      switch (op_) {
      case EltwiseParameter_EltwiseOp_PROD:
        if (stable_prod_grad_) {
          // Stable form: explicitly multiply together all bottoms j != i,
          // avoiding the division below (which breaks when bottom_data has
          // zeros).
          bool initialized = false;
          for (int j = 0; j < bottom.size(); ++j) {
            if (i == j) { continue; }
            if (!initialized) {
              caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
              initialized = true;
            } else {
              caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
                            bottom_diff);
            }
          }
        } else {
          // Cheap form: product of the other bottoms == top / bottom[i].
          caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
        }
        caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
        break;
      case EltwiseParameter_EltwiseOp_SUM:
        if (coeffs_[i] == Dtype(1.)) {
          caffe_copy(count, top_diff, bottom_diff);
        } else {
          caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
        }
        break;
      case EltwiseParameter_EltwiseOp_MAX:
        mask = max_idx_.gpu_data();
        MaxBackward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
            <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
            count, top_diff, i, mask, bottom_diff);
        break;
      default:
        LOG(FATAL) << "Unknown elementwise operation.";
      }
    }
  }
}
// Instantiate Forward_gpu/Backward_gpu for the supported Dtype set.
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace itf
|
66d1064c01b9ca526d26ea7d2d74c73c2d77143c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
// Grid-stride kernel: for every element position i >= idx with i % incy == 0,
// write dy[i] / dx into result[i]; all other positions of result are left
// untouched.
__global__ void div_scalar_double(int n, int idx,double dx,double *dy,int incy,double * result) {
   const int stride = blockDim.x * gridDim.x;
   for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += stride) {
      const bool selected = (j >= idx) && (j % incy == 0);
      if (selected)
         result[j] = dy[j] / dx;
   }
}
| 66d1064c01b9ca526d26ea7d2d74c73c2d77143c.cu | extern "C"
// Grid-stride kernel: for every element position i >= idx with i % incy == 0,
// write dy[i] / dx into result[i]; all other positions of result are left
// untouched.
__global__ void div_scalar_double(int n, int idx,double dx,double *dy,int incy,double * result) {
   const int stride = blockDim.x * gridDim.x;
   for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += stride) {
      const bool selected = (j >= idx) && (j % incy == 0);
      if (selected)
         result[j] = dy[j] / dx;
   }
}
|
6bd1d733c94df906fbc955b4514670b47f6ffdc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// //#include "/home/jbedorf/papers/GBPZ2010/codes/jb/build_tree/CUDA/support_kernels.cu"
#include "support_kernels.cu"
#include <stdio.h>
//////////////////////////////
//////////////////////////////
//////////////////////////////
#define LEVEL_MIN 3
// Per-block bounding-box reduction over particle positions: each block
// min/max-reduces a strided slice of `positions` and writes one partial
// min and max to output_min/output_max[bid]; a later pass (or the host)
// combines the per-block results.
// NOTE(review): shmem holds 2 x 256 float3, so this assumes
// blockDim.x <= 256 -- confirm the launch configuration.
extern "C" __global__ void boundaryReduction(const int n_particles,
                                             real4 *positions,
                                             float3 *output_min,
                                             float3 *output_max)
{
  const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  const uint tid = threadIdx.x;
  //const uint idx = bid * blockDim.x + tid;
  // Shared scratch: first 256 entries hold per-thread minima, next 256 maxima.
  volatile __shared__ float3 shmem[512];
  float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
  float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
  volatile float3 *sh_rmin = (float3*)&shmem [ 0];
  volatile float3 *sh_rmax = (float3*)&shmem[256];
  sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
  sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  const int blockSize = blockDim.x;
  // unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
  unsigned int gridSize = blockSize*2*gridDim.x;
  real4 pos;
  // we reduce multiple elements per thread. The number is determined by the
  // number of active thread blocks (via gridSize). More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  //based on reduce6 example
  while (i < n_particles) {
    if (i < n_particles)
    {
      pos = positions[i];
      r_min.x = fminf(pos.x, r_min.x);
      r_min.y = fminf(pos.y, r_min.y);
      r_min.z = fminf(pos.z, r_min.z);
      r_max.x = fmaxf(pos.x, r_max.x);
      r_max.y = fmaxf(pos.y, r_max.y);
      r_max.z = fmaxf(pos.z, r_max.z);
    }
    // Second element of the pair this iteration covers (reduce6 pattern).
    if (i + blockSize < n_particles)
    {
      pos = positions[i + blockSize];
      r_min.x = fminf(pos.x, r_min.x);
      r_min.y = fminf(pos.y, r_min.y);
      r_min.z = fminf(pos.z, r_min.z);
      r_max.x = fmaxf(pos.x, r_max.x);
      r_max.y = fmaxf(pos.y, r_max.y);
      r_max.z = fmaxf(pos.z, r_max.z);
    }
    i += gridSize;
  }
  sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
  sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
  __syncthreads();
  // do reduction in shared mem
  if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  // Final warp-level steps: no __syncthreads() here -- relies on the
  // volatile shared-memory declarations and pre-Volta implicit warp
  // synchrony (legacy pattern; would need __syncwarp on newer GPUs).
  if (tid < 32)
  {
    sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax);
  }
  // write result for this block to global mem
  if (tid == 0)
  {
    //Compiler doesnt allow: volatile float3 = float3
    output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z;
    output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z;
  }
}
//Get the domain size, by taking into account the group size
//Get the domain size, by taking into account the group size
// Same reduction scheme as boundaryReduction, but over group centres with
// their half-sizes: each group contributes [pos - size, pos + size] per axis,
// so the result is a bounding box that fully encloses every group.
// NOTE(review): shmem holds 2 x 256 float3, so this assumes
// blockDim.x <= 256 -- confirm the launch configuration.
extern "C" __global__ void boundaryReductionGroups(const int n_groups,
                                                   real4 *positions,
                                                   real4 *sizes,
                                                   float3 *output_min,
                                                   float3 *output_max)
{
  const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  const uint tid = threadIdx.x;
  //const uint idx = bid * blockDim.x + tid;
  volatile __shared__ float3 shmem[512];
  float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
  float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
  volatile float3 *sh_rmin = (float3*)&shmem [ 0];
  volatile float3 *sh_rmax = (float3*)&shmem[256];
  sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
  sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  const int blockSize = blockDim.x;
  // unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
  unsigned int gridSize = blockSize*2*gridDim.x;
  real4 pos;
  real4 size;
  // we reduce multiple elements per thread. The number is determined by the
  // number of active thread blocks (via gridSize). More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  //based on reduce6 example
  while (i < n_groups) {
    if (i < n_groups)
    {
      pos = positions[i];
      size = sizes[i];
      r_min.x = fminf(pos.x-size.x, r_min.x);
      r_min.y = fminf(pos.y-size.y, r_min.y);
      r_min.z = fminf(pos.z-size.z, r_min.z);
      r_max.x = fmaxf(pos.x+size.x, r_max.x);
      r_max.y = fmaxf(pos.y+size.y, r_max.y);
      r_max.z = fmaxf(pos.z+size.z, r_max.z);
    }
    // Second element of the pair this iteration covers (reduce6 pattern).
    if (i + blockSize < n_groups)
    {
      pos = positions[i + blockSize];
      size = sizes[i + blockSize];
      r_min.x = fminf(pos.x-size.x, r_min.x);
      r_min.y = fminf(pos.y-size.y, r_min.y);
      r_min.z = fminf(pos.z-size.z, r_min.z);
      r_max.x = fmaxf(pos.x+size.x, r_max.x);
      r_max.y = fmaxf(pos.y+size.y, r_max.y);
      r_max.z = fmaxf(pos.z+size.z, r_max.z);
    }
    i += gridSize;
  }
  sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
  sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
  __syncthreads();
  // do reduction in shared mem
  if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  // Final warp-level steps rely on volatile shared memory (legacy pattern).
  if (tid < 32)
  {
    sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax);
  }
  // write result for this block to global mem
  if (tid == 0)
  {
    //Compiler doesnt allow: volatile float3 = float3
    output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z;
    output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z;
  }
}
//#define EXACT_KEY
// Computes the space-filling-curve key of every particle: each position is
// mapped onto the integer grid defined by `corner` (xyz = domain origin,
// w = cell size) and the resulting key is stored in body_key. With
// EXACT_KEY defined the coordinate is truncated instead of rounded.
extern "C" __global__ void cl_build_key_list(uint4 *body_key,
                                             real4 *body_pos,
                                             int n_bodies,
                                             real4 corner) {
  const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
  const uint particle = blockId * blockDim.x + threadIdx.x;
  if (particle >= n_bodies) return;

  const real4 p = body_pos[particle];
  const real cellSize = corner.w;

  // Integer grid coordinate of this particle relative to the domain corner.
  int4 gridCrd;
#ifndef EXACT_KEY
  gridCrd.x = (int)roundf(__fdividef(p.x - corner.x, cellSize));
  gridCrd.y = (int)roundf(__fdividef(p.y - corner.y, cellSize));
  gridCrd.z = (int)roundf(__fdividef(p.z - corner.z, cellSize));
#else
  gridCrd.x = (int)((p.x - corner.x) / cellSize);
  gridCrd.y = (int)((p.y - corner.y) / cellSize);
  gridCrd.z = (int)((p.z - corner.z) / cellSize);
#endif

  body_key[particle] = get_key(gridCrd);
}
#if 0
This might be useful to speed up the group creating by
not building a full key but only the first 10 bits
extern "C" __global__ void build_phkey_list(uint2 *body_key,
real4 *body_pos,
int n_bodies,
real4 corner) {
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id > n_bodies) return;
real4 pos = body_pos[id];
int4 crd;
real domain_fac = corner.w;
//Get the integer position, will be used for the key calculation
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
const int bits = 18;
int i,xi, yi, zi;
int mask;
long key;
//0= 000, 1=001, 2=011, 3=010, 4=110, 5=111, 6=101, 7=100
//000=0=0, 001=1=1, 011=3=2, 010=2=3, 110=6=4, 111=7=5, 101=5=6, 100=4=7
const int C[8] = {0, 1, 7, 6, 3, 2, 4, 5};
int temp;
mask = 1 << (bits - 1);
key = 0;
for(i = 0; i < bits; i++, mask >>= 1)
{
xi = (crd.x & mask) ? 1 : 0;
yi = (crd.y & mask) ? 1 : 0;
zi = (crd.z & mask) ? 1 : 0;
if(xi == 0 && yi == 0 && zi == 0)
{
temp = crd.z; crd.z = crd.y; crd.y = temp;
}
else if(xi == 0 && yi == 0 && zi == 1)
{
temp = crd.x; crd.x = crd.y; crd.y = temp;
}
else if(xi == 1 && yi == 0 && zi == 1)
{
temp = crd.x; crd.x = crd.y; crd.y = temp;
}
else if(xi == 1 && yi == 0 && zi == 0)
{
crd.x = (crd.x) ^ (-1);
crd.z = (crd.z) ^ (-1);
}
else if(xi == 1 && yi == 1 && zi == 0)
{
crd.x = (crd.x) ^ (-1);
crd.z = (crd.z) ^ (-1);
}
else if(xi == 1 && yi == 1 && zi == 1)
{
temp = (crd.x) ^ (-1);
crd.x = (crd.y) ^ (-1);
crd.y = temp;
}
else if(xi == 0 && yi == 1 && zi == 1)
{
temp = (crd.x) ^ (-1);
crd.x = (crd.y) ^ (-1);
crd.y = temp;
}
else
{
temp = (crd.z) ^ (-1);
crd.z = (crd.y) ^ (-1);
crd.y = temp;
}
int index = (xi << 2) + (yi << 1) + zi;
key = (key << 3) + C[index];
}
uint2 key_new;
key_new.x = key & 0xFFFFFFFF;
key_new.y = (key >> 32) & 0xFFFFFFFF;
if (id == n_bodies) key_new = (uint2){0xFFFFFFFF, 0xFFFFFFFF};
body_key[id] = key_new;
}
#endif
// For every body key at tree level `level`, decide whether it starts
// (valid0) and/or ends (valid1) a run of identical masked keys; the runs
// become the nodes of this level after stream compaction. Special sentinel
// keys in .x force behaviour: 0xFFFFFFF1 = border/start, 0xFFFFFFF2 =
// ignore, 0xFFFFFFF3 = end, 0xFFFFFFF4 = start+end, and all-0xFFFFFFFF
// marks an already-consumed body. Each flag is packed as (id | flag << 31)
// into valid_list[2*id] / valid_list[2*id+1].
extern "C" __global__ void cl_build_valid_list(int n_bodies,
                                               int level,
                                               uint4 *body_key,
                                               uint *valid_list){
//                                                uint2 *test_key_data) {
  const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  const uint tid = threadIdx.x;
  const uint id = bid * blockDim.x + tid;
  const uint4 key_F = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
  const uint4 key_B = {0xFFFFFFF1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //A border, valid0 will become 1
  const uint4 key_I = {0xFFFFFFF2, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Ignore
  const uint4 key_E = {0xFFFFFFF3, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //End
  const uint4 key_A = {0xFFFFFFF4, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Start and End
//   const uint2 key_TEST = {0x0, 0x0}; //Start and End
  //TODO clean this if we dont use it
  if (id >= n_bodies) return; // >= since the last particle is extra boudnary particle
  // Mask keeps only the key bits relevant at this level; the two top bits of
  // .x are forced on so the sentinel values above survive the masking.
  uint4 mask = get_mask(level);
  mask.x = mask.x | ((uint)1 << 30) | ((uint)1 << 31);
  uint4 key_m;
  uint4 key_c = body_key[id];
  uint4 key_p;
  // Previous key: the first body compares against the all-F sentinel.
  if (id == 0)
  {
    key_m = key_F;
  }
  else
  {
    key_m = body_key[id-1];
  }
  if((id+1) < n_bodies) //The last particle gets a different key to compare with
  {
    key_p = body_key[id+1];
  }
  else
    key_p = (uint4){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
  int valid0 = 0;
  int valid1 = 0;
  if (cmp_uint4(key_c, key_A) == 0) {
    valid0 = 1; //Set a border
    valid1 = 1; //Set a border
  }
  else if (cmp_uint4(key_c, key_B) == 0) {
    valid0 = 1; //Set a border
  }
  else if (cmp_uint4(key_c, key_E) == 0) {
    valid1 = 1; //Set a border
  }
  else if (cmp_uint4(key_c, key_I) == 0) {
    //Do nothing
  }
  else if (cmp_uint4(key_c, key_F) != 0) {
    // Ordinary key: a run boundary exists wherever the masked key differs
    // from its masked neighbour.
    key_c.x = key_c.x & mask.x;
    key_c.y = key_c.y & mask.y;
    key_c.z = key_c.z & mask.z;
    key_p.x = key_p.x & mask.x;
    key_p.y = key_p.y & mask.y;
    key_p.z = key_p.z & mask.z;
    key_m.x = key_m.x & mask.x;
    key_m.y = key_m.y & mask.y;
    key_m.z = key_m.z & mask.z;
    valid0 = abs(cmp_uint4(key_c, key_m));
    valid1 = abs(cmp_uint4(key_c, key_p));
  }
  valid_list[id*2] = id | ((valid0) << 31);
  valid_list[id*2+1] = id | ((valid1) << 31);
}
//////////////////////////////
//////////////////////////////
//////////////////////////////
// Creates the tree nodes of `level` from the compacted list of key runs:
// run id covers bodies [bi, bj) and becomes node (offset+id), storing its
// level-masked key, its body range (with the level packed into the high
// bits of the start index), and a zeroed child counter. Bodies of runs
// small enough to be leaves (<= NLEAF, once past the LEVEL_MIN guard) get
// their keys invalidated so deeper levels skip them.
extern "C" __global__ void cl_build_nodes(uint level,
                                          uint compact_list_len,
                                          uint offset,
                                          uint *compact_list,
//                                           uint *compact_list_end,
                                          uint4 *bodies_key,
                                          uint4 *node_key,
                                          uint *n_children,
                                          uint2 *node_bodies){
//                                           uint *testValidList) {
  uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  uint tid = threadIdx.x;
  uint id = bid * blockDim.x + tid;
  if (id >= compact_list_len) return;
  // Start/end body of this run; end index is exclusive, hence the +1.
  uint bi = compact_list[id*2];
  uint bj = compact_list[id*2+1] + 1;
  uint4 key = bodies_key[bi];
  uint4 mask = get_mask(level);
  key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
  node_bodies[offset+id] = (uint2){bi | (level << BITLEVELS), bj};
  node_key [offset+id] = key;
  n_children [offset+id] = 0;
  if ((int)level > (int)(LEVEL_MIN - 1))
    if (bj - bi <= NLEAF) //Leaf can only have NLEAF particles, if its more there will be a split
      for (int i = bi; i < bj; i++)
        bodies_key[i] = (uint4){0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; //sets the key to FF to indicate the body is used
}
//////////////////////////////
//////////////////////////////
//////////////////////////////
// Links the flat node array into a tree: every node recomputes the key of
// its first body, finds its parent on level-1 (bumping the parent's packed
// child counter via atomicAdd of 1<<28) and its first child on level+1
// (OR-ed into its own n_children word), and flags itself in valid_list
// (bit 31) when it qualifies as a leaf (<= NLEAF bodies past LEVEL_MIN).
extern "C" __global__ void cl_link_tree(int n_nodes,
                                        uint *n_children,
                                        uint2 *node_bodies,
                                        real4 *bodies_pos,
                                        real4 corner,
                                        uint2 *level_list, //TODO could make this constant if it proves usefull
//                                         uint* parent_id_list,
                                        uint* valid_list,
                                        uint4 *node_keys,
                                        uint4 *bodies_key,
                                        int maxLevel) {
  const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  const uint tid = threadIdx.x;
  uint id = bid * blockDim.x + tid;
  if (id >= n_nodes) return;
  // Unpack this node's level and body range.
  uint2 bij = node_bodies[id];
  uint level = (bij.x & LEVELMASK) >> BITLEVELS;
  uint bi = bij.x & ILEVELMASK;
  uint bj = bij.y;
  // Rebuild the key from the node's first body (same mapping as
  // cl_build_key_list).
  real4 pos = bodies_pos[bi];
  int4 crd;
  real domain_fac = corner.w;
#ifndef EXACT_KEY
  crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
  crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
  crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
  crd.x = (int)((pos.x - corner.x) / domain_fac);
  crd.y = (int)((pos.y - corner.y) / domain_fac);
  crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
  uint4 key = get_key(crd);
  /********* accumulate children *****/
  // Mask the key down to the parent's level and locate the parent node.
  uint4 mask = get_mask(level - 1);
  key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
  uint2 cij;
  if(id > 0)
    cij = level_list[level-1];
  int ci;
  //Jeroen, modified this since we dont use textures in find_key,
  //the function will fail because out of bound memory access when id==0
  if(id > 0)
    ci = find_key(key, cij, node_keys);
  else
    ci = 0;
  //ci now points to the node that is the parent, was used in previous group method
//   parent_id_list[id] = ci;
  mask = get_imask(mask);
  key = (uint4) {key.x | mask.x, key.y | mask.y, key.z | mask.z, 0 };
  // Increment the parent's child count, which lives in the top 4 bits of
  // its n_children word (hence the 1 << 28 increment). The root (id 0) has
  // no parent.
  if (id > 0)
    atomicAdd(&n_children[ci], (1 << 28));
  key = get_key(crd);
  mask = get_mask(level);
  key = (uint4) {key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
  /********* store the 1st child *****/
  // Index of this node's first child on the next level, OR-ed into the low
  // bits of its own n_children word.
  cij = level_list[level+1];
  int cj = -1;
  cj = find_key(key, cij, node_keys);
  atomicOr(&n_children[id], cj); //Atomic since multiple threads can work on this
  uint valid = id | (uint)(0 << 31);
  if ((int)level > (int)(LEVEL_MIN - 1))
    if ((bj - bi) <= NLEAF)
      valid = id | (uint)(1 << 31); //Distinguish leaves and nodes
  valid_list[id] = valid;
}
//Determines which level of node starts at which offset
extern "C" __global__ void build_level_list(const int n_nodes,
const int n_leafs,
uint *leafsIdxs,
uint2 *node_bodies,
uint* valid_list){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint id = bid * blockDim.x + tid;
if (id >= n_nodes-n_leafs) return;
const int nodeID = leafsIdxs[id+n_leafs]; //Get the idx into the node_bodies array
int level_c, level_m, level_p;
uint2 bij = node_bodies[leafsIdxs[id+n_leafs]]; //current non-leaf
level_c = (bij.x & LEVELMASK) >> BITLEVELS;
if((id+1) < (n_nodes-n_leafs)){
//The last node gets a default lvl
bij = node_bodies[leafsIdxs[id+1+n_leafs]]; //next non-leaf
level_p = (bij.x & LEVELMASK) >> BITLEVELS;
}
else{
//Last is always an end
level_p = MAXLEVELS+5;
}
//Compare level with the node before and node after
if(nodeID == 0)
{
level_m = -1;
}
else
{
bij = node_bodies[ leafsIdxs[id-1+n_leafs]]; //Get info of previous non-leaf node
level_m = (bij.x & LEVELMASK) >> BITLEVELS;
}
int valid0 = 0;
int valid1 = 0;
valid0 = (level_c != level_m) << 31 | (id+n_leafs);
valid1 = (level_c != level_p) << 31 | (id+n_leafs);
valid_list[id*2] = valid0;
valid_list[id*2+1] = valid1;
} //end build_level_list
//Finds nodes/leafs that will become groups
//After executions valid_list contains the
//valid nodes/leafs that form groups
// Marks which tree nodes become interaction groups: a node is a group when
// it holds at most NCRIT bodies while its parent still holds more than
// NCRIT. The result goes into valid_list as (node id | flag << 31).
extern "C" __global__ void build_group_list(int n_nodes,
                                            uint* parent_id_list,
                                            uint2 *node_bodies,
                                            uint* valid_list)
{
  const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
  const uint nodeIdx = blockId * blockDim.x + threadIdx.x;
  if (nodeIdx >= n_nodes) return;

  // Body count of this node: end index minus the (level-masked) start index.
  uint2 range = node_bodies[nodeIdx];
  const int nOwn = range.y - (range.x & ILEVELMASK);

  // Body count of the parent node.
  range = node_bodies[parent_id_list[nodeIdx]];
  const int nParent = range.y - (range.x & ILEVELMASK);

  // Group iff small enough itself while the parent is still too large.
  const bool isGroup = (nOwn <= NCRIT) && (nParent > NCRIT);
  valid_list[nodeIdx] = nodeIdx | ((uint)isGroup << 31);
}
//Finds nodes/leafs that will become groups
//After executions valid_list contains the
//valid nodes/leafs that form groups
// Splits the (curve-ordered) particle array into groups: boundaries fall at
// every NCRIT-th particle, with extra splits wherever the squared distance
// between curve-adjacent particles exceeds DIST. Each particle writes a
// start flag to validList[2*idx] and an end flag to validList[2*idx+1],
// both packed as (index | flag << 31); stream compaction turns these into
// the group ranges.
extern "C" __global__ void build_group_list2(int n_particles,
                                             uint *validList,
                                             real4 *bodies_pos,
                                             const float DIST)
{
  uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  uint tid = threadIdx.x;
  uint idx = bid * blockDim.x + tid;
  //TODO use shared mem ffor the positions
  //since we use them multiple times?
  //Note that we do not include the final particle
  //Since there is no reason to check it
  if (idx >= n_particles) return;
  //Get the current
  float4 curPos, nexPos, prevPos;
  curPos = bodies_pos[idx];
  //Have to check the first and last to prevent out of bound access
  if(idx+1 == n_particles)
    nexPos = curPos;
  else
    nexPos = bodies_pos[idx+1];
  if(idx == 0)
    prevPos = curPos;
  else
    prevPos = bodies_pos[idx-1];
  //Compute geometrical distance
  // Squared distances to the next/previous particle (no sqrt: DIST is
  // compared against the squared value).
  float dsPlus = ((curPos.x-nexPos.x)*(curPos.x-nexPos.x)) +
                 ((curPos.y-nexPos.y)*(curPos.y-nexPos.y)) +
                 ((curPos.z-nexPos.z)*(curPos.z-nexPos.z));
  float dsMin = ((curPos.x-prevPos.x)*(curPos.x-prevPos.x)) +
                ((curPos.y-prevPos.y)*(curPos.y-prevPos.y)) +
                ((curPos.z-prevPos.z)*(curPos.z-prevPos.z));
  //Multiples of the preferred group size are _always_ valid
  int validStart = ((idx % NCRIT) == 0);
  int validEnd = (((idx+1) % NCRIT) == 0);
//  const int DIST = 1;
// const float DIST = 44;
  //The extra possible split(s) if the distance between two particles is too large
  if(dsPlus > DIST) validEnd = 1;
  if(dsMin > DIST) validStart = 1;
  //Last particle is always the end, n_particles dont have to be a multiple of NCRIT
  //so this is required
  if(idx+1 == n_particles) validEnd = 1;
  //Set valid
  validList[2*idx + 0] = (idx) | (uint)(validStart << 31);
  validList[2*idx + 1] = (idx+1) | (uint)(validEnd << 31);
}
// Materialises the groups produced by the boundary passes: one block per
// group. validList holds each group's [start, end] particle indices; every
// thread tags one particle of the group in body2group_list, and thread 0
// records the group's range in group_list.
extern "C" __global__ void store_group_list(int n_groups,
                                            uint *validList,
                                            uint *body2group_list,
                                            uint2 *group_list)
{
  const uint groupId = blockIdx.y * gridDim.x + blockIdx.x;
  if (groupId >= n_groups) return;

  const int first = validList[2*groupId];
  const int last = validList[2*groupId+1];

  // Each thread maps one body of the group back to its group id.
  if ((first + threadIdx.x) <= last)
  {
    body2group_list[first + threadIdx.x] = groupId;
  }

  // A single thread stores the group's particle range.
  if (threadIdx.x == 0)
  {
    group_list[groupId] = (uint2){first, last};
  }
}
// Expands the leaf list to particle granularity: one block per leaf; each
// thread covering a body of the leaf's [bi, bj) range writes an entry into
// leafPart2Body.
// NOTE(review): the value written is the flat thread index `idx`, not
// `bi + tid` -- so the output is an identity mapping over the covered
// slots rather than a map to body indices. Looks intentional only if the
// leaf order matches the body order one-to-one; confirm against callers.
extern "C" __global__ void expandLeafList(int n_leafs,
                                          uint *leaf2NodeIdx,
                                          uint2 *node_bodies,
                                          uint *leafPart2Body)
{
  uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  uint tid = threadIdx.x;
  uint idx = bid * blockDim.x + tid;
  if(bid >= n_leafs) return;
  // Body range of the node backing this leaf (start carries level bits).
  uint2 bij = node_bodies[leaf2NodeIdx[bid]];
  uint bi = bij.x & ILEVELMASK;
  uint bj = bij.y;
  //Write the particle id at the correct location, only if we are
  //below the end particle id
  if(bi+tid < bj)
  {
    leafPart2Body[idx] = idx;
  }
}
//Assign a grp id to each particle of that grp to
//create particle -> group relation using the
//group -> particle relation
// Inverts the group -> particles relation: one block per group writes the
// group id into body2group_list for every body owned by that group's node.
extern "C" __global__ void build_body2group_list(const int n_groups,
                                                 uint *group_list,
                                                 uint2 *node_bodies,
                                                 uint *body2group_list)
{
  const int groupIdx = gridDim.x * blockIdx.y + blockIdx.x;
  if (groupIdx >= n_groups) return;

  // The node backing this group and its body range.
  const int node = group_list[groupIdx];
  const uint2 bij = node_bodies[node];
  const uint first = bij.x & ILEVELMASK; // strip the packed level bits
  const uint count = bij.y - first;

  // One thread per body of the group.
  if (threadIdx.x < count)
    body2group_list[first + threadIdx.x] = groupIdx;
}
#if 1
//Finds nodes/leafs that will become groups
//After executions valid_list contains the
//valid nodes/leafs that form groups
// Seeds the valid-list with the mandatory group boundaries: every NCRIT-th
// particle opens a group, the particle before each multiple of NCRIT (and
// the very last particle) closes one. Only boundary slots are written;
// non-boundary entries are left untouched for later refinement passes.
extern "C" __global__ void build_group_list_new(int n_particles,
                                                uint *validList)
{
  const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
  const uint pIdx = blockId * blockDim.x + threadIdx.x;
  if (pIdx >= n_particles) return;

  // Multiples of the preferred group size always open/close a group.
  const bool starts = (pIdx % NCRIT) == 0;
  bool ends = ((pIdx + 1) % NCRIT) == 0;

  // n_particles need not be a multiple of NCRIT, so force the final close.
  if (pIdx + 1 == n_particles) ends = true;

  if (starts)
    validList[2*pIdx + 0] = pIdx | ((uint)1 << 31);
  if (ends)
    validList[2*pIdx + 1] = pIdx | ((uint)1 << 31);
}
#endif
// valid0 = abs(cmp_uint4(key_c, key_m));
// valid1 = abs(cmp_uint4(key_c, key_p));
// }
//
// valid_list[id*2] = id | ((valid0) << 31);
// valid_list[id*2+1] = id | ((valid1) << 31);
| 6bd1d733c94df906fbc955b4514670b47f6ffdc7.cu | // //#include "/home/jbedorf/papers/GBPZ2010/codes/jb/build_tree/CUDA/support_kernels.cu"
#include "support_kernels.cu"
#include <stdio.h>
//////////////////////////////
//////////////////////////////
//////////////////////////////
#define LEVEL_MIN 3
// Per-block bounding-box reduction over particle positions: each block
// min/max-reduces a strided slice of `positions` and writes one partial
// min and max to output_min/output_max[bid]; a later pass (or the host)
// combines the per-block results.
// NOTE(review): shmem holds 2 x 256 float3, so this assumes
// blockDim.x <= 256 -- confirm the launch configuration.
extern "C" __global__ void boundaryReduction(const int n_particles,
                                             real4 *positions,
                                             float3 *output_min,
                                             float3 *output_max)
{
  const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  const uint tid = threadIdx.x;
  //const uint idx = bid * blockDim.x + tid;
  // Shared scratch: first 256 entries hold per-thread minima, next 256 maxima.
  volatile __shared__ float3 shmem[512];
  float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
  float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
  volatile float3 *sh_rmin = (float3*)&shmem [ 0];
  volatile float3 *sh_rmax = (float3*)&shmem[256];
  sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
  sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  const int blockSize = blockDim.x;
  // unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
  unsigned int gridSize = blockSize*2*gridDim.x;
  real4 pos;
  // we reduce multiple elements per thread. The number is determined by the
  // number of active thread blocks (via gridSize). More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  //based on reduce6 example
  while (i < n_particles) {
    if (i < n_particles)
    {
      pos = positions[i];
      r_min.x = fminf(pos.x, r_min.x);
      r_min.y = fminf(pos.y, r_min.y);
      r_min.z = fminf(pos.z, r_min.z);
      r_max.x = fmaxf(pos.x, r_max.x);
      r_max.y = fmaxf(pos.y, r_max.y);
      r_max.z = fmaxf(pos.z, r_max.z);
    }
    // Second element of the pair this iteration covers (reduce6 pattern).
    if (i + blockSize < n_particles)
    {
      pos = positions[i + blockSize];
      r_min.x = fminf(pos.x, r_min.x);
      r_min.y = fminf(pos.y, r_min.y);
      r_min.z = fminf(pos.z, r_min.z);
      r_max.x = fmaxf(pos.x, r_max.x);
      r_max.y = fmaxf(pos.y, r_max.y);
      r_max.z = fmaxf(pos.z, r_max.z);
    }
    i += gridSize;
  }
  sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
  sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
  __syncthreads();
  // do reduction in shared mem
  if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
  // Final warp-level steps: no __syncthreads() here -- relies on the
  // volatile shared-memory declarations and pre-Volta implicit warp
  // synchrony (legacy pattern; would need __syncwarp on newer GPUs).
  if (tid < 32)
  {
    sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax);
    sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax);
  }
  // write result for this block to global mem
  if (tid == 0)
  {
    //Compiler doesnt allow: volatile float3 = float3
    output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z;
    output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z;
  }
}
//Get the domain size, by taking into account the group size
//Get the domain size, by taking into account the group size
// Same min/max reduction as boundaryReduction, but over group centres
// extended by their half-sizes: the minimum uses pos - size and the
// maximum pos + size, so the result bounds the whole extent of every
// group, not just the centres. One float3 min and one float3 max are
// written per thread block; a second reduction pass is expected elsewhere.
// NOTE(review): as in boundaryReduction, the shared split at element 256
// assumes blockDim.x <= 256 -- confirm the launch configuration.
extern "C" __global__ void boundaryReductionGroups(const int n_groups,
real4 *positions,
real4 *sizes,
float3 *output_min,
float3 *output_max)
{
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
//const uint idx = bid * blockDim.x + tid;
// volatile: the tid < 32 steps run without __syncthreads() and rely on
// warp-synchronous execution over shared memory.
volatile __shared__ float3 shmem[512];
// Identity elements for min/max.
float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
// First half of shmem holds per-thread minima, second half maxima.
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[256];
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
// unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
real4 pos;
real4 size;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
//based on reduce6 example
while (i < n_groups) {
// (redundant guard: the loop condition already ensures i < n_groups)
if (i < n_groups)
{
pos = positions[i];
size = sizes[i];
r_min.x = fminf(pos.x-size.x, r_min.x);
r_min.y = fminf(pos.y-size.y, r_min.y);
r_min.z = fminf(pos.z-size.z, r_min.z);
r_max.x = fmaxf(pos.x+size.x, r_max.x);
r_max.y = fmaxf(pos.y+size.y, r_max.y);
r_max.z = fmaxf(pos.z+size.z, r_max.z);
}
// Second element handled by the same thread (two loads per iteration).
if (i + blockSize < n_groups)
{
pos = positions[i + blockSize];
size = sizes[i + blockSize];
r_min.x = fminf(pos.x-size.x, r_min.x);
r_min.y = fminf(pos.y-size.y, r_min.y);
r_min.z = fminf(pos.z-size.z, r_min.z);
r_max.x = fmaxf(pos.x+size.x, r_max.x);
r_max.y = fmaxf(pos.y+size.y, r_max.y);
r_max.z = fmaxf(pos.z+size.z, r_max.z);
}
i += gridSize;
}
// Publish the per-thread partial results to shared memory.
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
__syncthreads();
// do reduction in shared mem
// Each __syncthreads() sits outside the tid conditional, so every thread
// in the block reaches it (no divergent barrier).
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
// Final warp: no barriers, relies on the volatile shared arrays.
if (tid < 32)
{
sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax);
}
// write result for this block to global mem
if (tid == 0)
{
//Compiler doesnt allow: volatile float3 = float3
output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z;
output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z;
}
}
//#define EXACT_KEY
//#define EXACT_KEY
// Builds the space-filling-curve key for every body, one thread per body.
// corner.xyz is the domain origin and corner.w the cell size used to
// quantise positions onto the integer grid consumed by get_key().
extern "C" __global__ void cl_build_key_list(uint4 *body_key,
real4 *body_pos,
int n_bodies,
real4 corner) {
const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
const uint id = blockId * blockDim.x + threadIdx.x;
if (id >= n_bodies) return;
const real4 pos = body_pos[id];
const real cellSize = corner.w;
int4 crd;
#ifndef EXACT_KEY
// Fast (approximate) hardware division, rounded to the nearest cell.
crd.x = (int)roundf(__fdividef((pos.x - corner.x), cellSize));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , cellSize));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , cellSize));
#else
// Exact division, truncated toward zero.
crd.x = (int)((pos.x - corner.x) / cellSize);
crd.y = (int)((pos.y - corner.y) / cellSize);
crd.z = (int)((pos.z - corner.z) / cellSize);
#endif
body_key[id] = get_key(crd);
}
#if 0
// Disabled experiment: builds only the leading bits of a Peano-Hilbert key
// directly (18 levels, 3 bits per level) instead of calling get_key(),
// intended to speed up group creation. Kept for reference; never compiled.
This might be useful to speed up the group creating by
not building a full key but only the first 10 bits
extern "C" __global__ void build_phkey_list(uint2 *body_key,
real4 *body_pos,
int n_bodies,
real4 corner) {
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id > n_bodies) return;
real4 pos = body_pos[id];
int4 crd;
real domain_fac = corner.w;
//Get the integer position, will be used for the key calculation
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
const int bits = 18;
int i,xi, yi, zi;
int mask;
long key;
//0= 000, 1=001, 2=011, 3=010, 4=110, 5=111, 6=101, 7=100
//000=0=0, 001=1=1, 011=3=2, 010=2=3, 110=6=4, 111=7=5, 101=5=6, 100=4=7
// Gray-code style mapping from octant bits to Hilbert-curve order.
const int C[8] = {0, 1, 7, 6, 3, 2, 4, 5};
int temp;
mask = 1 << (bits - 1);
key = 0;
// One iteration per level, from the most significant coordinate bit down;
// each octant rotates/reflects the coordinate frame for the next level.
for(i = 0; i < bits; i++, mask >>= 1)
{
xi = (crd.x & mask) ? 1 : 0;
yi = (crd.y & mask) ? 1 : 0;
zi = (crd.z & mask) ? 1 : 0;
if(xi == 0 && yi == 0 && zi == 0)
{
temp = crd.z; crd.z = crd.y; crd.y = temp;
}
else if(xi == 0 && yi == 0 && zi == 1)
{
temp = crd.x; crd.x = crd.y; crd.y = temp;
}
else if(xi == 1 && yi == 0 && zi == 1)
{
temp = crd.x; crd.x = crd.y; crd.y = temp;
}
else if(xi == 1 && yi == 0 && zi == 0)
{
crd.x = (crd.x) ^ (-1);
crd.z = (crd.z) ^ (-1);
}
else if(xi == 1 && yi == 1 && zi == 0)
{
crd.x = (crd.x) ^ (-1);
crd.z = (crd.z) ^ (-1);
}
else if(xi == 1 && yi == 1 && zi == 1)
{
temp = (crd.x) ^ (-1);
crd.x = (crd.y) ^ (-1);
crd.y = temp;
}
else if(xi == 0 && yi == 1 && zi == 1)
{
temp = (crd.x) ^ (-1);
crd.x = (crd.y) ^ (-1);
crd.y = temp;
}
else
{
temp = (crd.z) ^ (-1);
crd.z = (crd.y) ^ (-1);
crd.y = temp;
}
// Append the 3-bit Hilbert digit for this level.
int index = (xi << 2) + (yi << 1) + zi;
key = (key << 3) + C[index];
}
// Split the 54-bit key over the two 32-bit words of a uint2.
uint2 key_new;
key_new.x = key & 0xFFFFFFFF;
key_new.y = (key >> 32) & 0xFFFFFFFF;
// Sentinel for the extra boundary entry one past the last body.
if (id == n_bodies) key_new = (uint2){0xFFFFFFFF, 0xFFFFFFFF};
body_key[id] = key_new;
}
#endif
// Marks, for every body, whether it starts (valid0) and/or ends (valid1) a
// run of bodies sharing the same key at the given tree level; after stream
// compaction the runs become the nodes of that level. Keys whose x word is
// one of the 0xFFFFFFF1..4 sentinels encode domain-decomposition borders:
// key_B starts a run, key_E ends one, key_A does both, key_I is ignored,
// and key_F (all ones) marks an already-consumed body. Each body writes
// two words to valid_list with the flag in bit 31 and the body id below.
extern "C" __global__ void cl_build_valid_list(int n_bodies,
int level,
uint4 *body_key,
uint *valid_list){
// uint2 *test_key_data) {
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint id = bid * blockDim.x + tid;
const uint4 key_F = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
const uint4 key_B = {0xFFFFFFF1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //A border, valid0 will become 1
const uint4 key_I = {0xFFFFFFF2, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Ignore
const uint4 key_E = {0xFFFFFFF3, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //End
const uint4 key_A = {0xFFFFFFF4, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Start and End
// const uint2 key_TEST = {0x0, 0x0}; //Start and End
//TODO clean this if we dont use it
if (id >= n_bodies) return; // >= since the last particle is extra boudnary particle
// Mask for the key bits significant at this level; the two top bits of x
// are forced on so the sentinel patterns above survive the masking.
uint4 mask = get_mask(level);
mask.x = mask.x | ((uint)1 << 30) | ((uint)1 << 31);
uint4 key_m;
uint4 key_c = body_key[id];
uint4 key_p;
// Predecessor key: the first body compares against all-ones so it always
// starts a run.
if (id == 0)
{
key_m = key_F;
}
else
{
key_m = body_key[id-1];
}
if((id+1) < n_bodies) //The last particle gets a different key to compare with
{
key_p = body_key[id+1];
}
else
key_p = (uint4){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
int valid0 = 0;
int valid1 = 0;
// Sentinel handling first; only ordinary (non key_F) keys fall through to
// the masked neighbour comparison.
if (cmp_uint4(key_c, key_A) == 0) {
valid0 = 1; //Set a border
valid1 = 1; //Set a border
}
else if (cmp_uint4(key_c, key_B) == 0) {
valid0 = 1; //Set a border
}
else if (cmp_uint4(key_c, key_E) == 0) {
valid1 = 1; //Set a border
}
else if (cmp_uint4(key_c, key_I) == 0) {
//Do nothing
}
else if (cmp_uint4(key_c, key_F) != 0) {
// Run boundaries: a body starts/ends a node wherever its masked key
// differs from its neighbour's masked key.
key_c.x = key_c.x & mask.x;
key_c.y = key_c.y & mask.y;
key_c.z = key_c.z & mask.z;
key_p.x = key_p.x & mask.x;
key_p.y = key_p.y & mask.y;
key_p.z = key_p.z & mask.z;
key_m.x = key_m.x & mask.x;
key_m.y = key_m.y & mask.y;
key_m.z = key_m.z & mask.z;
valid0 = abs(cmp_uint4(key_c, key_m));
valid1 = abs(cmp_uint4(key_c, key_p));
}
// Pack: bit 31 = boundary flag, low bits = body id.
valid_list[id*2] = id | ((valid0) << 31);
valid_list[id*2+1] = id | ((valid1) << 31);
}
//////////////////////////////
//////////////////////////////
//////////////////////////////
// Creates the tree nodes of one level from the compacted begin/end pairs.
// Each thread builds node (offset + id): it records the node's body range
// and level-masked key, clears its child counter, and -- once deeper than
// LEVEL_MIN -- marks the bodies of leaf-sized nodes as consumed by
// overwriting their keys with all ones.
extern "C" __global__ void cl_build_nodes(uint level,
uint compact_list_len,
uint offset,
uint *compact_list,
uint4 *bodies_key,
uint4 *node_key,
uint *n_children,
uint2 *node_bodies){
const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
const uint id = blockId * blockDim.x + threadIdx.x;
if (id >= compact_list_len) return;
// Body range of this node: [firstBody, lastBody) with an exclusive end.
const uint firstBody = compact_list[id*2];
const uint lastBody = compact_list[id*2+1] + 1;
const uint4 mask = get_mask(level);
uint4 key = bodies_key[firstBody];
key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
// x word packs the first body index with the level in the top bits.
node_bodies[offset+id] = (uint2){firstBody | (level << BITLEVELS), lastBody};
node_key [offset+id] = key;
n_children [offset+id] = 0;
// A node at or below LEVEL_MIN with at most NLEAF bodies becomes a leaf;
// its bodies get the all-ones key so later levels skip them. Larger
// ranges are split further on the next level.
if ((int)level > (int)(LEVEL_MIN - 1)) {
if (lastBody - firstBody <= NLEAF) {
for (int i = firstBody; i < lastBody; i++) {
bodies_key[i] = (uint4){0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
}
}
}
}
//////////////////////////////
//////////////////////////////
//////////////////////////////
// Links the tree: for each node it re-derives the node's key from the
// position of its first body, locates the parent one level up via
// find_key() and bumps the parent's child counter (stored in the high bits
// of n_children via atomicAdd of 1 << 28), locates the first matching node
// one level down and ORs its index into n_children[id] as the first-child
// pointer, and finally flags leaf nodes (<= NLEAF bodies, below LEVEL_MIN)
// in bit 31 of valid_list.
// NOTE(review): level_list[level+1] is read unguarded; for nodes on the
// deepest level this indexes one past the last level. maxLevel is passed
// but unused -- presumably the caller sizes level_list accordingly; verify.
extern "C" __global__ void cl_link_tree(int n_nodes,
uint *n_children,
uint2 *node_bodies,
real4 *bodies_pos,
real4 corner,
uint2 *level_list, //TODO could make this constant if it proves usefull
// uint* parent_id_list,
uint* valid_list,
uint4 *node_keys,
uint4 *bodies_key,
int maxLevel) {
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id >= n_nodes) return;
// Unpack this node's level and body range (x packs first-body | level).
uint2 bij = node_bodies[id];
uint level = (bij.x & LEVELMASK) >> BITLEVELS;
uint bi = bij.x & ILEVELMASK;
uint bj = bij.y;
// Re-derive the node key from its first body's quantised position.
real4 pos = bodies_pos[bi];
int4 crd;
real domain_fac = corner.w;
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
uint4 key = get_key(crd);
/********* accumulate children *****/
// Mask the key down to the parent's level (level - 1).
uint4 mask = get_mask(level - 1);
key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
uint2 cij;
if(id > 0)
cij = level_list[level-1];
int ci;
//Jeroen, modified this since we dont use textures in find_key,
//the function will fail because out of bound memory access when id==0
if(id > 0)
ci = find_key(key, cij, node_keys);
else
ci = 0;
//ci now points to the node that is the parent, was used in previous group method
// parent_id_list[id] = ci;
mask = get_imask(mask);
key = (uint4) {key.x | mask.x, key.y | mask.y, key.z | mask.z, 0 };
// Child count lives in the top bits of the parent's n_children word;
// atomic because every sibling increments the same parent concurrently.
if (id > 0)
atomicAdd(&n_children[ci], (1 << 28));
// Re-mask at this node's own level to search for its first child below.
key = get_key(crd);
mask = get_mask(level);
key = (uint4) {key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
/********* store the 1st child *****/
cij = level_list[level+1];
int cj = -1;
cj = find_key(key, cij, node_keys);
atomicOr(&n_children[id], cj); //Atomic since multiple threads can work on this
// Bit 31 distinguishes leaves (set) from internal nodes (clear).
uint valid = id | (uint)(0 << 31);
if ((int)level > (int)(LEVEL_MIN - 1))
if ((bj - bi) <= NLEAF)
valid = id | (uint)(1 << 31); //Distinguish leaves and nodes
valid_list[id] = valid;
}
//Determines which level of node starts at which offset
extern "C" __global__ void build_level_list(const int n_nodes,
const int n_leafs,
uint *leafsIdxs,
uint2 *node_bodies,
uint* valid_list){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint id = bid * blockDim.x + tid;
if (id >= n_nodes-n_leafs) return;
const int nodeID = leafsIdxs[id+n_leafs]; //Get the idx into the node_bodies array
int level_c, level_m, level_p;
uint2 bij = node_bodies[leafsIdxs[id+n_leafs]]; //current non-leaf
level_c = (bij.x & LEVELMASK) >> BITLEVELS;
if((id+1) < (n_nodes-n_leafs)){
//The last node gets a default lvl
bij = node_bodies[leafsIdxs[id+1+n_leafs]]; //next non-leaf
level_p = (bij.x & LEVELMASK) >> BITLEVELS;
}
else{
//Last is always an end
level_p = MAXLEVELS+5;
}
//Compare level with the node before and node after
if(nodeID == 0)
{
level_m = -1;
}
else
{
bij = node_bodies[ leafsIdxs[id-1+n_leafs]]; //Get info of previous non-leaf node
level_m = (bij.x & LEVELMASK) >> BITLEVELS;
}
int valid0 = 0;
int valid1 = 0;
valid0 = (level_c != level_m) << 31 | (id+n_leafs);
valid1 = (level_c != level_p) << 31 | (id+n_leafs);
valid_list[id*2] = valid0;
valid_list[id*2+1] = valid1;
} //end build_level_list
//Finds nodes/leafs that will become groups
//After executions valid_list contains the
//valid nodes/leafs that form groups
//Finds nodes/leafs that will become groups
//A node becomes a group when it holds at most NCRIT bodies while its
//parent holds more than NCRIT. Bit 31 of valid_list flags group nodes;
//the low bits always carry the node id for the later compaction.
extern "C" __global__ void build_group_list(int n_nodes,
uint* parent_id_list,
uint2 *node_bodies,
uint* valid_list)
{
const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
const uint id = blockId * blockDim.x + threadIdx.x;
if (id >= n_nodes) return;
//Body counts: node_bodies packs (firstBody | level bits, lastBody+1).
uint2 range = node_bodies[id];
const int ownChildren = range.y - (range.x & ILEVELMASK);
range = node_bodies[parent_id_list[id]];
const int parentChildren = range.y - (range.x & ILEVELMASK);
//group if nchild <= NCRIT AND parent_nchild > NCRIT
const bool isGroup = (ownChildren <= NCRIT) && (parentChildren > NCRIT);
valid_list[id] = isGroup ? (id | (uint)(1 << 31)) : id;
}
//Finds nodes/leafs that will become groups
//After executions valid_list contains the
//valid nodes/leafs that form groups
//Finds the particle-range boundaries that will become groups.
//A boundary is forced every NCRIT particles, and additionally wherever
//the squared distance between neighbouring particles exceeds DIST.
//NOTE(review): DIST is compared against a *squared* distance, so callers
//presumably pass the squared cut-off -- confirm.
extern "C" __global__ void build_group_list2(int n_particles,
uint *validList,
real4 *bodies_pos,
const float DIST)
{
const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
const uint idx = blockId * blockDim.x + threadIdx.x;
if (idx >= n_particles) return;
//Fetch this particle and its neighbours, clamping at both array ends so
//the edge particles compare against themselves (distance 0).
float4 cur = bodies_pos[idx];
float4 next;
float4 prev;
if (idx + 1 == n_particles) next = cur;
else next = bodies_pos[idx + 1];
if (idx == 0) prev = cur;
else prev = bodies_pos[idx - 1];
//Squared geometric distances to the next and previous particle.
const float dxn = cur.x - next.x, dyn_ = cur.y - next.y, dzn = cur.z - next.z;
const float distNextSq = dxn*dxn + dyn_*dyn_ + dzn*dzn;
const float dxp = cur.x - prev.x, dyp = cur.y - prev.y, dzp = cur.z - prev.z;
const float distPrevSq = dxp*dxp + dyp*dyp + dzp*dzp;
//Multiples of the preferred group size are _always_ boundaries.
int validStart = ((idx % NCRIT) == 0);
int validEnd = (((idx+1) % NCRIT) == 0);
//Extra split wherever the gap to a neighbour is too large.
if (distNextSq > DIST) validEnd = 1;
if (distPrevSq > DIST) validStart = 1;
//n_particles need not be a multiple of NCRIT, so the final particle
//always terminates its group.
if (idx + 1 == n_particles) validEnd = 1;
//Pack: bit 31 = boundary flag, low bits = slot index.
validList[2*idx + 0] = (idx) | (uint)(validStart << 31);
validList[2*idx + 1] = (idx+1) | (uint)(validEnd << 31);
}
//Writes the final group tables: group_list[g] = (first, last) particle of
//group g, and body2group_list[p] = g for every particle p in the group.
//One thread block per group; blockDim.x must cover the largest group.
extern "C" __global__ void store_group_list(int n_groups,
uint *validList,
uint *body2group_list,
uint2 *group_list)
{
const uint groupId = blockIdx.y * gridDim.x + blockIdx.x;
if (groupId >= n_groups) return;
//Compacted (start, end) pair produced by the boundary kernels.
const int start = validList[2*groupId];
const int end = validList[2*groupId+1];
//Each thread stamps the group id onto one particle of the range.
if ((start + threadIdx.x) <= end)
{
body2group_list[start + threadIdx.x] = groupId;
}
if (threadIdx.x == 0)
{
group_list[groupId] = (uint2){start, end};
}
}
// Expands the leaf list: one thread block per leaf, one thread per
// potential particle slot, writing into leafPart2Body.
extern "C" __global__ void expandLeafList(int n_leafs,
uint *leaf2NodeIdx,
uint2 *node_bodies,
uint *leafPart2Body)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint idx = bid * blockDim.x + tid;
if(bid >= n_leafs) return;
// node_bodies packs (firstBody | level bits, lastBody+1).
uint2 bij = node_bodies[leaf2NodeIdx[bid]];
uint bi = bij.x & ILEVELMASK;
uint bj = bij.y;
//Write the particle id at the correct location, only if we are
//below the end particle id
// NOTE(review): this stores the thread's own global index at its own
// global index; presumably a later compaction keeps only the written
// slots, otherwise `leafPart2Body[idx] = bi + tid` may have been
// intended -- verify against the caller.
if(bi+tid < bj)
{
leafPart2Body[idx] = idx;
}
}
//Assign a grp id to each particle of that grp to
//create particle -> group relation using the
//group -> particle relation
//Assign a grp id to each particle of that grp: inverts the
//group -> particle relation into a particle -> group lookup table.
//One thread block per group; blockDim.x must be at least the largest
//child count.
extern "C" __global__ void build_body2group_list(const int n_groups,
uint *group_list,
uint2 *node_bodies,
uint *body2group_list)
{
const int groupId = gridDim.x * blockIdx.y + blockIdx.x;
if (groupId >= n_groups) return;
//node_bodies packs (firstBody | level bits, lastBody+1).
const uint2 range = node_bodies[group_list[groupId]];
const uint firstChild = range.x & ILEVELMASK;
const uint nChildren = range.y - firstChild;
//One thread per child stamps the group id onto its particle.
if (threadIdx.x < nChildren)
body2group_list[firstChild + threadIdx.x] = groupId;
}
#if 1
//Finds nodes/leafs that will become groups
//After executions valid_list contains the
//valid nodes/leafs that form groups
//Variant of build_group_list2 without the distance criterion: group
//boundaries fall only on multiples of NCRIT (plus the final particle).
//Only flagged slots are written, so validList must be pre-initialised.
extern "C" __global__ void build_group_list_new(int n_particles,
uint *validList)
{
const uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
const uint idx = blockId * blockDim.x + threadIdx.x;
if (idx >= n_particles) return;
//Multiples of the preferred group size are _always_ boundaries.
const int isStart = ((idx % NCRIT) == 0);
int isEnd = (((idx + 1) % NCRIT) == 0);
//n_particles need not be a multiple of NCRIT, so the final particle
//always terminates its group.
if (idx + 1 == n_particles) isEnd = 1;
//Pack: bit 31 = boundary flag, low bits = slot index.
//NOTE(review): unlike build_group_list2 the end slot stores idx, not
//idx+1 -- presumably intentional for this variant; verify downstream.
if (isStart)
validList[2*idx + 0] = (idx) | (uint)(isStart << 31);
if (isEnd)
validList[2*idx + 1] = (idx) | (uint)(isEnd << 31);
}
#endif
// valid0 = abs(cmp_uint4(key_c, key_m));
// valid1 = abs(cmp_uint4(key_c, key_p));
// }
//
// valid_list[id*2] = id | ((valid0) << 31);
// valid_list[id*2+1] = id | ((valid1) << 31);
|
fa734adf0cc4ba0d6c13b9bfc74a09ccea97f800.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "hook_even.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated (hipified) benchmark driver for the hook_even kernel.
// For each matrix size x block configuration it allocates device buffers,
// warms up the kernel, times 1000 launches with std::chrono, and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// NOTE(review): the hipMalloc byte counts omit the element size
// (XSIZE*YSIZE, not XSIZE*YSIZE*sizeof(T)) and the buffers are never
// freed, so every loop iteration leaks device memory; the timed loop also
// never synchronizes, so it measures launch overhead only. Harness code.
int main(int argc, char **argv) {
hipSetDevice(0);
// argv[1]: number of entries of matrices_ to sweep.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *parent = NULL;
hipMalloc(&parent, XSIZE*YSIZE);
Edge *edge_list = NULL;
hipMalloc(&edge_list, XSIZE*YSIZE);
int e = 1;
bool *flag = NULL;
hipMalloc(&flag, XSIZE*YSIZE);
bool *active_edges = NULL;
hipMalloc(&active_edges, XSIZE*YSIZE);
// Round the launch grid up so it covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context creation before the timed region.
hipFree(0);hipLaunchKernelGGL((
hook_even), dim3(gridBlock),dim3(threadBlock), 0, 0, parent,edge_list,e,flag,active_edges);
hipDeviceSynchronize();
// Warm-up launches (excluded from the timed region).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
hook_even), dim3(gridBlock),dim3(threadBlock), 0, 0, parent,edge_list,e,flag,active_edges);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
hook_even), dim3(gridBlock),dim3(threadBlock), 0, 0, parent,edge_list,e,flag,active_edges);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fa734adf0cc4ba0d6c13b9bfc74a09ccea97f800.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "hook_even.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver for the hook_even kernel (auto-generated harness).
// For each matrix size x block configuration it allocates device buffers,
// warms up the kernel, times 1000 launches with std::chrono, and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
// argv[1]: number of entries of matrices_ to sweep.
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
// Allocate one element per matrix cell. The original passed a raw
// XSIZE*YSIZE byte count, under-allocating every typed buffer.
int *parent = NULL;
cudaMalloc(&parent, (size_t)XSIZE * YSIZE * sizeof(int));
Edge *edge_list = NULL;
cudaMalloc(&edge_list, (size_t)XSIZE * YSIZE * sizeof(Edge));
int e = 1;
bool *flag = NULL;
cudaMalloc(&flag, (size_t)XSIZE * YSIZE * sizeof(bool));
bool *active_edges = NULL;
cudaMalloc(&active_edges, (size_t)XSIZE * YSIZE * sizeof(bool));
// Round the launch grid up so it covers the whole matrix.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0) iXSIZE++;
while (iYSIZE % BLOCKY != 0) iYSIZE++;
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);  // force context creation before the timed region
hook_even<<<gridBlock, threadBlock>>>(parent, edge_list, e, flag, active_edges);
cudaDeviceSynchronize();
// Warm-up launches (excluded from the timed region).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hook_even<<<gridBlock, threadBlock>>>(parent, edge_list, e, flag, active_edges);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hook_even<<<gridBlock, threadBlock>>>(parent, edge_list, e, flag, active_edges);
}
// Kernel launches are asynchronous: without this sync the timer only
// measured enqueue overhead, not execution time.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Release the buffers; the original leaked them on every iteration.
cudaFree(parent);
cudaFree(edge_list);
cudaFree(flag);
cudaFree(active_edges);
}
}
}
7b0e101c81f042baa5d815d87bc8669bb1417c43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "bbox_overlaps_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
// Host launcher for bbox_overlaps_cuda_kernel (HIP/ROCm build): fills
// `ious` with the pairwise overlap of bboxes1 (num_bbox1 boxes) against
// bboxes2 (num_bbox2 boxes), dispatching over float/double/half and
// launching one thread per output element on the current stream of
// bboxes1's device. `mode`, `aligned` and `offset` are forwarded to the
// kernel unchanged -- see bbox_overlaps_cuda_kernel.cuh for their
// semantics.
void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2,
Tensor ious, const int mode,
const bool aligned, const int offset) {
int output_size = ious.numel();
int num_bbox1 = bboxes1.size(0);
int num_bbox2 = bboxes2.size(0);
// Make bboxes1's device current for the allocation-free kernel launch.
at::hip::HIPGuardMasqueradingAsCUDA device_guard(bboxes1.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bboxes1.scalar_type(), "bbox_overlaps_cuda_kernel", ([&] {
hipLaunchKernelGGL(( bbox_overlaps_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
bboxes1.data_ptr<scalar_t>(), bboxes2.data_ptr<scalar_t>(),
ious.data_ptr<scalar_t>(), num_bbox1, num_bbox2, mode, aligned,
offset);
}));
// Surface any launch error immediately.
AT_CUDA_CHECK(hipGetLastError());
}
| 7b0e101c81f042baa5d815d87bc8669bb1417c43.cu | // Copyright (c) OpenMMLab. All rights reserved
#include "bbox_overlaps_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2,
Tensor ious, const int mode,
const bool aligned, const int offset) {
int output_size = ious.numel();
int num_bbox1 = bboxes1.size(0);
int num_bbox2 = bboxes2.size(0);
at::cuda::CUDAGuard device_guard(bboxes1.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bboxes1.scalar_type(), "bbox_overlaps_cuda_kernel", ([&] {
bbox_overlaps_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
bboxes1.data_ptr<scalar_t>(), bboxes2.data_ptr<scalar_t>(),
ious.data_ptr<scalar_t>(), num_bbox1, num_bbox2, mode, aligned,
offset);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
|
99607edd1ad2755efb8bb543a95905d9b0f9973e.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA List Scan/Prefix Sum
// Tim Demetriades
// CPE 810 - GPU & Multicore Programming
// Professor Feng
// Stevens Institute of Technology
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
//for __syncthreads()
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/device_functions.h>
#include <stdio.h>
#include <cstdlib>
#include <time.h> // for CPU timer
#include <math.h> // for power function
// Thread block size
#define BLOCK_SIZE 2048 / 2 // block size is half section size
// Section size
#define SECTION_SIZE 2048
// Number of iterations for GPU and CPU List Scans
#define NITER 100
// Kernel function for List Scan (part 1)
// Kernel function for List Scan (part 1)
// Brent-Kung inclusive scan of one SECTION_SIZE-element section per block.
// Each thread loads two elements; slots past the end of the input are
// zero-filled (the additive identity) so the tree reduction never reads
// uninitialised shared memory -- the original skipped those writes, which
// corrupted the scan of the final, partially-filled section. The section
// total is written to S[blockIdx.x] for the second-level scan.
__global__ void ListScan_GPU_1(unsigned int* device_input, unsigned int* device_output, unsigned int input_size, unsigned int* S) {
	// Shared scratch for one section. Function-scope __shared__ variables
	// must not also carry __device__ (removed from the original).
	__shared__ unsigned int device_output_shared[SECTION_SIZE];
	int tid = 2 * blockIdx.x * blockDim.x + threadIdx.x;	// first of this thread's two elements
	// Load two elements per thread, padding with 0 past the input.
	if (tid < input_size) {
		device_output_shared[threadIdx.x] = device_input[tid];
	}
	else {
		device_output_shared[threadIdx.x] = 0;
	}
	if (tid + blockDim.x < input_size) {
		device_output_shared[threadIdx.x + blockDim.x] = device_input[tid + blockDim.x];
	}
	else {
		device_output_shared[threadIdx.x + blockDim.x] = 0;
	}
	// Reduction (up-sweep) phase
	for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
		__syncthreads();
		int index = (threadIdx.x + 1) * stride * 2 - 1;
		if (index < SECTION_SIZE) {
			device_output_shared[index] += device_output_shared[index - stride];
		}
	}
	// Post-reduction reverse (down-sweep) phase
	for (unsigned int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) {
		__syncthreads();
		int index = (threadIdx.x + 1) * stride * 2 - 1;
		if (index + stride < SECTION_SIZE) {
			device_output_shared[index + stride] += device_output_shared[index];
		}
	}
	// Publish the scanned section to global memory.
	__syncthreads();
	if (tid < input_size) {
		device_output[tid] = device_output_shared[threadIdx.x];
	}
	if (tid + blockDim.x < input_size) {
		device_output[tid + blockDim.x] = device_output_shared[threadIdx.x + blockDim.x];
	}
	// Last thread records the section total for the second-level scan.
	__syncthreads();
	if (threadIdx.x == blockDim.x - 1) {
		S[blockIdx.x] = device_output_shared[SECTION_SIZE - 1];
	}
}
// Kernel function for List Scan (part 2)
__global__ void ListScan_GPU_2(unsigned int* device_S, unsigned int input_size) {
// Shared memory
__device__ __shared__ int device_S_shared[SECTION_SIZE];
int tid = 2 * blockIdx.x * blockDim.x + threadIdx.x; // Set thread index
if (tid < input_size) {
device_S_shared[threadIdx.x] = device_S[tid]; // Move values from global to shared memory
}
if (tid + blockDim.x < input_size) {
device_S_shared[threadIdx.x + blockDim.x] = device_S[tid + blockDim.x];
}
// Reduction phase
for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
__syncthreads();
int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index < SECTION_SIZE) {
device_S_shared[index] += device_S_shared[index - stride];
}
}
// Post reduction reverse phase
for (unsigned int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) {
__syncthreads();
int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index + stride < SECTION_SIZE) {
device_S_shared[index + stride] += device_S_shared[index];
}
}
// Move output S values from shared memory to global memory
__syncthreads();
if (tid < input_size) {
device_S[tid] = device_S_shared[threadIdx.x];
}
if (tid + blockDim.x < input_size) {
device_S[tid + blockDim.x] = device_S_shared[threadIdx.x + blockDim.x];
}
}
// Kernel function for List Scan (part 3)
__global__ void ListScan_GPU_3(unsigned int* device_output, unsigned int* device_S, unsigned int input_size) {
int tid = 2 * blockIdx.x * blockDim.x + threadIdx.x; // Set thread index
// Add output value with corresponding value in S array (making sure to skip first section)
if (tid < input_size && blockIdx.x != 0) {
device_output[tid] += device_S[blockIdx.x - 1];
}
if (tid + blockDim.x < input_size && blockIdx.x != 0) {
device_output[tid + blockDim.x] += device_S[blockIdx.x - 1];
}
}
// CPU Sequential List Scan (for comparing with GPU List Scan and veryfing results)
void ListScan_CPU(unsigned int* host_input, unsigned int* host_output_cpu, unsigned int input_size) {
int accumulator = host_input[0]; // Set accumulator to first value of input
host_output_cpu[0] = accumulator; // Set first value of output to accumulator
for (int i = 1; i < input_size; i++) {
accumulator += host_input[i]; // Accumulator = accumulator + current input(next value)
host_output_cpu[i] = accumulator; // Current output = accumulator
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Host driver: parses the input length, runs the three-kernel hierarchical
 * list scan on the GPU NITER times for timing, runs the sequential CPU scan
 * as a reference, and verifies the two results agree.
 * Usage: <program> -i <dim>, where <dim> is the number of input integers.
 */
int main(int argc, char* argv[]) {
	unsigned int input_size = 0;
	if (argc == 3) { // 3 arguments expected (program name, -i, <dim>)
		if (atoi(argv[2]) <= 0 || atoi(argv[2]) > 2048 * 65535) {
			printf("\nPlease make sure <dim> is between 1 and 2048 * 65535.\n");
			exit(EXIT_FAILURE);
		}
		// Set input size
		input_size = atoi(argv[2]);
		printf("\nThe input is %u integers long\n", input_size);
	}
	else if (argc > 3) {
		printf("\nToo many arguments provided.\nEnter arguments like this: \n");
		printf("-i <dim> \n");
		exit(EXIT_FAILURE);
	}
	else {
		printf("\n2 arguments expected.\nEnter arguments like this: \n");
		printf("-i <dim> \n");
		exit(EXIT_FAILURE);
	}
	// Number of scan sections (one block per SECTION_SIZE elements), rounded
	// UP so a partial final section still gets a block and a slot in S.
	// BUG FIX: the original sized S as input_size / SECTION_SIZE elements
	// (possibly zero) and launched ceil(input_size / SECTION_SIZE) + 1 blocks
	// (integer division made ceil() a no-op), so the extra block wrote past
	// the end of S.
	unsigned int num_sections = (input_size + SECTION_SIZE - 1) / SECTION_SIZE;
	// Sizes in bytes
	size_t input_bytes = input_size * sizeof(unsigned int);
	size_t S_bytes = num_sections * sizeof(unsigned int); // auxiliary per-section sums
	// Allocate host memory for input and outputs (checked: malloc can fail
	// for the large sizes this program permits)
	unsigned int* host_input = (unsigned int*)malloc(input_bytes);
	unsigned int* host_output = (unsigned int*)malloc(input_bytes);
	unsigned int* host_S = (unsigned int*)malloc(S_bytes);
	unsigned int* host_output_cpu = (unsigned int*)malloc(input_bytes);
	if (host_input == NULL || host_output == NULL || host_S == NULL || host_output_cpu == NULL) {
		printf("\nHost memory allocation failed.\n");
		exit(EXIT_FAILURE);
	}
	// Allocate device memory for input and output
	unsigned int* device_input;
	unsigned int* device_output;
	unsigned int* device_S;
	checkCudaErrors(hipMalloc((void**)&device_input, input_bytes));
	checkCudaErrors(hipMalloc((void**)&device_output, input_bytes));
	checkCudaErrors(hipMalloc((void**)&device_S, S_bytes));
	// Initialize input with random ints in [0, 1024)
	srand((unsigned int)time(NULL)); // seed so random numbers change per run
	for (unsigned int i = 0; i < input_size; i++) {
		host_input[i] = rand() % 1024; // not including 1024
	}
	// Copy input values from host to device
	checkCudaErrors(hipMemcpy(device_input, host_input, input_bytes, hipMemcpyHostToDevice)); //dest, source, size in bytes, direction of transfer
	// Launch configuration: BLOCK_SIZE threads per block, one block per section
	int block_size = BLOCK_SIZE; // Threads per block
	int grid_size = (int)num_sections; // Blocks per grid
	dim3 dim_block(block_size);
	dim3 dim_grid(grid_size);
	// Timing events are recorded on a dedicated stream; the kernels below are
	// launched on the same stream so the events actually bracket the work.
	hipStream_t stream;
	hipEvent_t start, stop;
	checkCudaErrors(hipEventCreate(&start));
	checkCudaErrors(hipEventCreate(&stop));
	checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
	checkCudaErrors(hipStreamSynchronize(stream));
	checkCudaErrors(hipEventRecord(start, stream));
	int nIter = NITER; // How many times to run the scan (for an average time)
	printf("\nStarting List Scan on GPU\n");
	// Launch kernels (repeat nIter times so we can obtain an average run time)
	for (int i = 0; i < nIter; i++) {
		// BUG FIX: launches now target `stream`; the original launched on the
		// default stream while timing a non-blocking stream, so the events did
		// not measure the kernels.
		ListScan_GPU_1<<<dim_grid, dim_block, 0, stream>>>(device_input, device_output, input_size, device_S);
		ListScan_GPU_2<<<dim_grid, dim_block, 0, stream>>>(device_S, input_size);
		ListScan_GPU_3<<<dim_grid, dim_block, 0, stream>>>(device_output, device_S, input_size);
	}
	checkCudaErrors(hipGetLastError()); // catch launch-configuration errors
	printf("\nGPU List Scan Complete\n"); // BUG FIX: "\G" was an invalid escape
	// Record the stop event and wait for it
	checkCudaErrors(hipEventRecord(stop, stream));
	checkCudaErrors(hipEventSynchronize(stop));
	float msecTotal = 0.0f;
	checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
	// Compute and print the performance
	float msecPerScan = msecTotal / nIter;
	printf("\nGPU Histogram Computation took %.3f msec\n", msecPerScan);
	printf("\nThreads per block = %d, Blocks per grid = %d, Total threads = %d\n", block_size, grid_size, block_size * grid_size);
	// Copy results back. Every iteration recomputes the same scan from the
	// same input, so no per-iteration normalization is required. (The
	// original's "divide by nIter" loops were discarded-value no-ops that
	// also read host_S out of bounds.)
	checkCudaErrors(hipMemcpy(host_output, device_output, input_bytes, hipMemcpyDeviceToHost));
	checkCudaErrors(hipMemcpy(host_S, device_S, S_bytes, hipMemcpyDeviceToHost)); // kept for debugging/inspection
	// Start CPU timer
	double time_taken_cpu = 0.0;
	clock_t begin_cpu = clock();
	// Calculate list scan on CPU
	printf("\nStarting List Scan on CPU\n");
	for (int i = 0; i < nIter; i++) { // repeat to match the GPU timing loop
		ListScan_CPU(host_input, host_output_cpu, input_size);
	}
	printf("\nCPU List Scan Complete\n");
	clock_t end_cpu = clock();
	time_taken_cpu += ((double)(end_cpu - begin_cpu) / CLOCKS_PER_SEC * 1000) / nIter; // in milliseconds
	printf("\nCPU List Scan took %.3f msec\n", time_taken_cpu);
	// Check if GPU and CPU results match
	bool mismatch = false;
	for (unsigned int i = 0; i < input_size; i++) {
		if (host_output[i] != host_output_cpu[i]) {
			mismatch = true;
		}
	}
	if (mismatch) {
		printf("\nGPU and CPU results do not match!\n");
	}
	else {
		printf("\nGPU and CPU results match!\n");
	}
	// Release timing resources (the original leaked the stream and events)
	checkCudaErrors(hipEventDestroy(start));
	checkCudaErrors(hipEventDestroy(stop));
	checkCudaErrors(hipStreamDestroy(stream));
	// Free memory in device
	hipFree(device_input);
	hipFree(device_output);
	hipFree(device_S);
	// Free memory in host
	free(host_input);
	free(host_output);
	free(host_S);
	free(host_output_cpu);
	return 0;
} | 99607edd1ad2755efb8bb543a95905d9b0f9973e.cu | // CUDA List Scan/Prefix Sum
// Tim Demetriades
// CPE 810 - GPU & Multicore Programming
// Professor Feng
// Stevens Institute of Technology
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
//for __syncthreads()
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>
#include <stdio.h>
#include <cstdlib>
#include <time.h> // for CPU timer
#include <math.h> // for power function
// Thread block size
#define BLOCK_SIZE 2048 / 2 // block size is half section size
// Section size
#define SECTION_SIZE 2048
// Number of iterations for GPU and CPU List Scans
#define NITER 100
// Kernel function for List Scan (part 1)
/**
 * Kernel 1 of the hierarchical scan: each block computes an inclusive
 * Brent-Kung-style prefix sum of one SECTION_SIZE-element section in shared
 * memory (each of the BLOCK_SIZE threads owns two elements, SECTION_SIZE ==
 * 2 * BLOCK_SIZE), writes the scanned section to device_output, and stores
 * the section total in S[blockIdx.x] for the second-level scan
 * (ListScan_GPU_2).
 *
 * NOTE(review): the shared buffer is `int` while the data is unsigned int,
 * and `__device__` on a function-scope __shared__ variable is nonstandard --
 * confirm the compiler accepts/ignores both.
 * NOTE(review): for a partial final section the tail of the shared buffer is
 * never initialized, so the section total written to S may contain garbage;
 * harmless only if that S entry is never consumed -- verify.
 */
__global__ void ListScan_GPU_1(unsigned int* device_input, unsigned int* device_output, unsigned int input_size, unsigned int* S) {
// Shared memory staging for one section
__device__ __shared__ int device_output_shared[SECTION_SIZE];
int tid = 2 * blockIdx.x * blockDim.x + threadIdx.x; // first of this thread's two elements
if (tid < input_size) {
device_output_shared[threadIdx.x] = device_input[tid]; // Move values from global to shared memory
}
if (tid + blockDim.x < input_size) {
device_output_shared[threadIdx.x + blockDim.x] = device_input[tid + blockDim.x];
}
// Reduction (up-sweep) phase: build partial sums at power-of-two strides
for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
__syncthreads();
int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index < SECTION_SIZE) {
device_output_shared[index] += device_output_shared[index - stride];
}
}
// Post reduction reverse (down-sweep) phase: distribute the partial sums
for (unsigned int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) {
__syncthreads();
int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index + stride < SECTION_SIZE) {
device_output_shared[index + stride] += device_output_shared[index];
}
}
// Move output values from shared memory to global memory
__syncthreads();
if (tid < input_size) {
device_output[tid] = device_output_shared[threadIdx.x];
}
if (tid + blockDim.x < input_size) {
device_output[tid + blockDim.x] = device_output_shared[threadIdx.x + blockDim.x];
}
// Last element of the inclusive scan is the section total; publish it to S
__syncthreads();
if (threadIdx.x == blockDim.x - 1) {
S[blockIdx.x] = device_output_shared[SECTION_SIZE - 1];
}
}
// Kernel function for List Scan (part 2)
/**
 * Kernel 2 of the hierarchical scan: inclusive scan of the per-section
 * totals in S, in place, using the same Brent-Kung up-sweep/down-sweep as
 * kernel 1.
 *
 * NOTE(review): the guards compare tid against input_size (the length of
 * the *input* array), but device_S only has ceil(input_size / SECTION_SIZE)
 * elements, so when this kernel is launched with the same grid as kernel 1
 * every block re-scans -- and reads/writes past the end of -- S. The intent
 * appears to be a single-block launch bounded by the element count of S;
 * confirm against the launch site before relying on results.
 */
__global__ void ListScan_GPU_2(unsigned int* device_S, unsigned int input_size) {
// Shared memory staging for the section-sums array
__device__ __shared__ int device_S_shared[SECTION_SIZE];
int tid = 2 * blockIdx.x * blockDim.x + threadIdx.x; // first of this thread's two elements
if (tid < input_size) {
device_S_shared[threadIdx.x] = device_S[tid]; // Move values from global to shared memory
}
if (tid + blockDim.x < input_size) {
device_S_shared[threadIdx.x + blockDim.x] = device_S[tid + blockDim.x];
}
// Reduction (up-sweep) phase
for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
__syncthreads();
int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index < SECTION_SIZE) {
device_S_shared[index] += device_S_shared[index - stride];
}
}
// Post reduction reverse (down-sweep) phase
for (unsigned int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) {
__syncthreads();
int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index + stride < SECTION_SIZE) {
device_S_shared[index + stride] += device_S_shared[index];
}
}
// Move scanned S values from shared memory back to global memory
__syncthreads();
if (tid < input_size) {
device_S[tid] = device_S_shared[threadIdx.x];
}
if (tid + blockDim.x < input_size) {
device_S[tid + blockDim.x] = device_S_shared[threadIdx.x + blockDim.x];
}
}
// Kernel function for List Scan (part 3)
/**
 * Kernel 3 of the hierarchical scan: every section after the first adds the
 * running total of all preceding sections -- device_S[blockIdx.x - 1], the
 * inclusive scan of per-section sums produced by ListScan_GPU_2 -- to each
 * of the two elements this thread owns.
 */
__global__ void ListScan_GPU_3(unsigned int* device_output, unsigned int* device_S, unsigned int input_size) {
	// Section 0 already holds its final values. The condition is uniform
	// across the block, so the early return causes no intra-warp divergence.
	if (blockIdx.x == 0) {
		return;
	}
	const int tid = 2 * blockIdx.x * blockDim.x + threadIdx.x; // first owned element
	if (tid < input_size) {
		device_output[tid] += device_S[blockIdx.x - 1];
	}
	if (tid + blockDim.x < input_size) {
		device_output[tid + blockDim.x] += device_S[blockIdx.x - 1];
	}
}
// CPU Sequential List Scan (for comparing with GPU List Scan and veryfing results)
/**
 * Sequential inclusive prefix sum on the host, used to verify the GPU scan.
 *
 * host_input      - array of input_size values (read only)
 * host_output_cpu - receives the inclusive scan (output[i] = sum of input[0..i])
 * input_size      - number of elements; 0 is a safe no-op
 *
 * BUG FIX: the original read host_input[0] unconditionally (out of bounds
 * for input_size == 0) and accumulated into a signed int, which overflows
 * (undefined behavior) for large inputs; the accumulator is now unsigned,
 * matching the element type and giving well-defined wraparound.
 */
void ListScan_CPU(unsigned int* host_input, unsigned int* host_output_cpu, unsigned int input_size) {
	if (input_size == 0 || host_input == 0 || host_output_cpu == 0) {
		return; // nothing to scan
	}
	unsigned int accumulator = 0;
	for (unsigned int i = 0; i < input_size; i++) {
		accumulator += host_input[i];   // running total up to and including i
		host_output_cpu[i] = accumulator;
	}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Host driver: parses the input length, runs the three-kernel hierarchical
 * list scan on the GPU NITER times for timing, runs the sequential CPU scan
 * as a reference, and verifies the two results agree.
 * Usage: <program> -i <dim>, where <dim> is the number of input integers.
 */
int main(int argc, char* argv[]) {
	unsigned int input_size = 0;
	if (argc == 3) { // 3 arguments expected (program name, -i, <dim>)
		if (atoi(argv[2]) <= 0 || atoi(argv[2]) > 2048 * 65535) {
			printf("\nPlease make sure <dim> is between 1 and 2048 * 65535.\n");
			exit(EXIT_FAILURE);
		}
		// Set input size
		input_size = atoi(argv[2]);
		printf("\nThe input is %u integers long\n", input_size);
	}
	else if (argc > 3) {
		printf("\nToo many arguments provided.\nEnter arguments like this: \n");
		printf("-i <dim> \n");
		exit(EXIT_FAILURE);
	}
	else {
		printf("\n2 arguments expected.\nEnter arguments like this: \n");
		printf("-i <dim> \n");
		exit(EXIT_FAILURE);
	}
	// Number of scan sections (one block per SECTION_SIZE elements), rounded
	// UP so a partial final section still gets a block and a slot in S.
	// BUG FIX: the original sized S as input_size / SECTION_SIZE elements
	// (possibly zero) and launched ceil(input_size / SECTION_SIZE) + 1 blocks
	// (integer division made ceil() a no-op), so the extra block wrote past
	// the end of S.
	unsigned int num_sections = (input_size + SECTION_SIZE - 1) / SECTION_SIZE;
	// Sizes in bytes
	size_t input_bytes = input_size * sizeof(unsigned int);
	size_t S_bytes = num_sections * sizeof(unsigned int); // auxiliary per-section sums
	// Allocate host memory for input and outputs (checked: malloc can fail
	// for the large sizes this program permits)
	unsigned int* host_input = (unsigned int*)malloc(input_bytes);
	unsigned int* host_output = (unsigned int*)malloc(input_bytes);
	unsigned int* host_S = (unsigned int*)malloc(S_bytes);
	unsigned int* host_output_cpu = (unsigned int*)malloc(input_bytes);
	if (host_input == NULL || host_output == NULL || host_S == NULL || host_output_cpu == NULL) {
		printf("\nHost memory allocation failed.\n");
		exit(EXIT_FAILURE);
	}
	// Allocate device memory for input and output
	unsigned int* device_input;
	unsigned int* device_output;
	unsigned int* device_S;
	checkCudaErrors(cudaMalloc((void**)&device_input, input_bytes));
	checkCudaErrors(cudaMalloc((void**)&device_output, input_bytes));
	checkCudaErrors(cudaMalloc((void**)&device_S, S_bytes));
	// Initialize input with random ints in [0, 1024)
	srand((unsigned int)time(NULL)); // seed so random numbers change per run
	for (unsigned int i = 0; i < input_size; i++) {
		host_input[i] = rand() % 1024; // not including 1024
	}
	// Copy input values from host to device
	checkCudaErrors(cudaMemcpy(device_input, host_input, input_bytes, cudaMemcpyHostToDevice)); //dest, source, size in bytes, direction of transfer
	// Launch configuration: BLOCK_SIZE threads per block, one block per section
	int block_size = BLOCK_SIZE; // Threads per block
	int grid_size = (int)num_sections; // Blocks per grid
	dim3 dim_block(block_size);
	dim3 dim_grid(grid_size);
	// Timing events are recorded on a dedicated stream; the kernels below are
	// launched on the same stream so the events actually bracket the work.
	cudaStream_t stream;
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
	checkCudaErrors(cudaStreamSynchronize(stream));
	checkCudaErrors(cudaEventRecord(start, stream));
	int nIter = NITER; // How many times to run the scan (for an average time)
	printf("\nStarting List Scan on GPU\n");
	// Launch kernels (repeat nIter times so we can obtain an average run time)
	for (int i = 0; i < nIter; i++) {
		// BUG FIX: launches now target `stream`; the original launched on the
		// default stream while timing a non-blocking stream, so the events did
		// not measure the kernels.
		ListScan_GPU_1<<<dim_grid, dim_block, 0, stream>>>(device_input, device_output, input_size, device_S);
		ListScan_GPU_2<<<dim_grid, dim_block, 0, stream>>>(device_S, input_size);
		ListScan_GPU_3<<<dim_grid, dim_block, 0, stream>>>(device_output, device_S, input_size);
	}
	checkCudaErrors(cudaGetLastError()); // catch launch-configuration errors
	printf("\nGPU List Scan Complete\n"); // BUG FIX: "\G" was an invalid escape
	// Record the stop event and wait for it
	checkCudaErrors(cudaEventRecord(stop, stream));
	checkCudaErrors(cudaEventSynchronize(stop));
	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
	// Compute and print the performance
	float msecPerScan = msecTotal / nIter;
	printf("\nGPU Histogram Computation took %.3f msec\n", msecPerScan);
	printf("\nThreads per block = %d, Blocks per grid = %d, Total threads = %d\n", block_size, grid_size, block_size * grid_size);
	// Copy results back. Every iteration recomputes the same scan from the
	// same input, so no per-iteration normalization is required. (The
	// original's "divide by nIter" loops were discarded-value no-ops that
	// also read host_S out of bounds.)
	checkCudaErrors(cudaMemcpy(host_output, device_output, input_bytes, cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaMemcpy(host_S, device_S, S_bytes, cudaMemcpyDeviceToHost)); // kept for debugging/inspection
	// Start CPU timer
	double time_taken_cpu = 0.0;
	clock_t begin_cpu = clock();
	// Calculate list scan on CPU
	printf("\nStarting List Scan on CPU\n");
	for (int i = 0; i < nIter; i++) { // repeat to match the GPU timing loop
		ListScan_CPU(host_input, host_output_cpu, input_size);
	}
	printf("\nCPU List Scan Complete\n");
	clock_t end_cpu = clock();
	time_taken_cpu += ((double)(end_cpu - begin_cpu) / CLOCKS_PER_SEC * 1000) / nIter; // in milliseconds
	printf("\nCPU List Scan took %.3f msec\n", time_taken_cpu);
	// Check if GPU and CPU results match
	bool mismatch = false;
	for (unsigned int i = 0; i < input_size; i++) {
		if (host_output[i] != host_output_cpu[i]) {
			mismatch = true;
		}
	}
	if (mismatch) {
		printf("\nGPU and CPU results do not match!\n");
	}
	else {
		printf("\nGPU and CPU results match!\n");
	}
	// Release timing resources (the original leaked the stream and events)
	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
	checkCudaErrors(cudaStreamDestroy(stream));
	// Free memory in device
	cudaFree(device_input);
	cudaFree(device_output);
	cudaFree(device_S);
	// Free memory in host
	free(host_input);
	free(host_output);
	free(host_S);
	free(host_output_cpu);
	return 0;
} |
93f6ccf748844b2ef3533b23e628ac9ec63e2e6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "diffusion3d/diffusion3d_cuda.h"
#include <assert.h>
#include <stdio.h>
#define CUDA_SAFE_CALL(c) \
do { \
assert(c == hipSuccess); \
} while (0)
namespace diffusion3d {
#if __CUDA_ARCH__ >= 350
#define LDG(x) __ldg(&(x))
#else
#define LDG(x) (x)
#endif
//#define GET(x) LDG(x)
#define GET(x) (x)
#define bdimx (BLOCK_X)
#define bdimy (BLOCK_Y)
#define SHIFT3(x, y, z) x = y; y = z
#define SHIFT4(x, y, z, k) x = y; y = z; z = k
#define diffusion_backward() \
do { \
sb[ps] = s2; \
__syncthreads(); \
f2[p-xy] = cc * s2 \
+ cw * sb[ps+sb_w] + ce * sb[ps+sb_e] \
+ cs * sb[ps+sb_s] + cn * sb[ps+sb_n] + cb*s1 + ct*s3; \
} while (0)
// Temporal blocking
// z blocking
// sperate warp for diagonal points
/**
 * Fused two-time-step 3D 7-point diffusion stencil with temporal blocking
 * along z.
 *
 * threadIdx.x is folded into a 2D lane id (tidx, tidy): tidy in [0, bdimy)
 * are the interior tile rows, tidy == -1 and tidy == bdimy are dedicated
 * south/north halo rows, one extra warp loads the left/right halo columns,
 * and a final warp loads the corner/diagonal columns. Step 1 results flow
 * through a per-thread register pipeline over z (t1/t2/t3 inputs, s1/s2/s3
 * outputs) staged via the shared tile sb (sbx = bdimx + 4 columns, i.e. two
 * halo columns per side); step 2 is applied one z-plane behind by
 * diffusion_backward(), which writes f2[p - xy]. Each z grid block owns
 * nz / gridDim.z planes plus one overlap plane on either side.
 *
 * NOTE(review): the paired __syncthreads() calls keep all five lane groups
 * in lock-step; every group must execute the same number of barriers per z
 * iteration -- do not restructure without re-counting them.
 */
__global__ void diffusion_kernel_shared6(REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
extern __shared__ REAL sb[];
const int sbx = bdimx+4;
const int tidx = threadIdx.x % bdimx;
const int tidy = threadIdx.x / bdimx - 1;
int i = bdimx * blockIdx.x + tidx;
int j = bdimy * blockIdx.y + tidy;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j == ny) ? ny - 1 : j; // min(j, ny-1)
int xy = nx * ny;
const int block_z = nz / gridDim.z;
// First and one-past-last z plane for this grid block (with overlap planes)
int k = (blockIdx.z == 0) ? 0:
block_z * blockIdx.z - 1;
const int k_end = (blockIdx.z == gridDim.z-1) ? nz:
block_z * (blockIdx.z + 1) + 1;
int p = i + j * nx + k *xy;
int ps = tidx+2 + (tidy+1) * sbx;
// --- Lane group 1: south (j-1) halo row of the tile, step-1 only ---
if (tidy == -1) {
int s = (j == 0) ? 0 : -nx;
// Prime the z register pipeline: t1 = plane below, t2 = current, t3 = above
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
// Extra priming step for chunks that start on an overlap plane
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
// Steady-state z sweep (4 barriers per iteration, matching other groups)
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
// Drain the pipeline at the top boundary
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
// --- Lane group 2: north (j+bdimy) halo row, step-1 only ---
} else if (tidy == bdimy) {
int n = (j == ny-1) ? 0 : nx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
// --- Lane group 3: interior tile rows; the only group that also performs
// the second time step (diffusion_backward writes f2 one plane behind) ---
} else if (tidy >= 0 && tidy < bdimy) {
// Neighbor offsets in the shared tile; 0 clamps at domain boundaries
int sb_s = (j == 0) ? 0: -sbx;
int sb_n = (j == ny-1) ? 0: sbx;
int sb_w = (i == 0) ? 0: -1;
int sb_e = (i == nx-1) ? 0: 1;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s1, s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
__syncthreads();
diffusion_backward();
__syncthreads();
p += xy;
}
if (k == nz) {
SHIFT3(s1, s2, s3);
diffusion_backward();
}
// --- Lane group 4: one warp loads the west/east halo columns (two per
// side) and computes their step-1 values for the backward sweep ---
} else if (tidx < 32 && tidy == bdimy + 1) {
// horizontal halo
int xoffset = (tidx & 1) + ((tidx & 2) >> 1) * (bdimx + 2);
int yoffset = (tidx >> 2) + 1;
yoffset = (yoffset >= (bdimy+1)) ? bdimy : yoffset;
i = bdimx * blockIdx.x - 2 + xoffset;
i = (i < 0) ? 0 : i;
i = (i >= nx) ? nx - 1 : i;
j = bdimy * blockIdx.y -1 + yoffset;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j >= ny) ? ny - 1 : j; // min(j, ny-1)
int s = -sbx;
int n = sbx;
int w = (xoffset == 0) ? 0 : -1;
int e = (xoffset == sbx-1) ? 0 : 1;
p = i + j * nx + k * xy;
ps = xoffset + yoffset * sbx;
// Deeper (4-plane) pipeline: prefetch two planes ahead with __ldg
float t2 = LDG(f1[p]);
float t1 = (k == 0) ? t2 : LDG(f1[p-xy]);
float t3 = (k < nz-1) ? LDG(f1[p+xy]) : t2;
float t4 = (k < nz-2) ? LDG(f1[p+xy*2]) : t3;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
if (k != 1) {
SHIFT4(t1, t2, t3, t4);
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
#pragma unroll
for (; k < k_end-2; ++k) {
SHIFT4(t1, t2, t3, t4);
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
// Last two planes handled outside the loop (no further prefetch)
SHIFT4(t1, t2, t3, t4);
t4 = (k < nz-2) ? LDG(f1[p+xy*2]) : t4;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
++k;
SHIFT4(t1, t2, t3, t4);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
++k;
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
// --- Lane group 5: final warp loads the diagonal/corner columns; it only
// stages loads into sb and otherwise just matches the barrier count ---
} else {
const int tidx2 = tidx & 31;
// 2nd warp
int xoffset = 1 + (tidx & 1) * (bdimx + 1);
int yoffset = ((tidx & 2) >> 1) * (bdimy + 1);
i = bdimx * blockIdx.x - 2 + xoffset;
i = (i < 0) ? 0 : i;
i = (i >= nx) ? nx - 1 : i;
j = bdimy * blockIdx.y -1 + yoffset;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j >= ny) ? ny - 1 : j; // min(j, ny-1)
p = i + j * nx + k * xy;
ps = xoffset + yoffset * sbx;
float t2, t3, t4;
//bool active = tidx2 < 4;
const bool active = 1;
if (active) {
t2 = LDG(f1[p]);
t3 = LDG(f1[p+xy]);
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
p += xy;
++k;
if (k != 1) {
SHIFT3(t2, t3, t4);
if (active) {
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
p += xy;
++k;
}
#pragma unroll
for (; k < k_end-2; ++k) {
SHIFT3(t2, t3, t4);
if (active) {
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
p += xy;
}
SHIFT3(t2, t3, t4);
if (active) {
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
p += xy;
++k;
t2 = t3;
if (active) {
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
p += xy;
if (k == nz) {
__syncthreads();
}
}
return;
}
/**
 * Runs `count` diffusion time steps on the device: uploads the field,
 * launches the fused two-step kernel count/2 times while ping-ponging the
 * two device buffers, then downloads the result. Events ev1_/ev2_ bracket
 * the launch loop for elapsed-time measurement. `count` must be even
 * because each launch advances two time steps.
 */
void Diffusion3DCUDAShared6::RunKernel(int count) {
	const size_t field_bytes = sizeof(REAL) * nx_ * ny_ * nz_;
	CUDA_SAFE_CALL(hipMemcpy(f1_d_, f1_, field_bytes, hipMemcpyHostToDevice));
	assert(count % 2 == 0);
	// Tile rows plus two halo rows, plus two extra warps for column halos.
	dim3 block_dim(bdimx * (bdimy + 2) + 32 * 2);
	dim3 grid_dim(nx_ / bdimx, ny_ / bdimy, grid_z_);
	// Dynamic shared tile: (bdimx + 4) columns x (bdimy + 2) rows of floats.
	const size_t smem_bytes = (bdimx + 4) * (bdimy + 2) * sizeof(float);
	CUDA_SAFE_CALL(hipEventRecord(ev1_));
	for (int step = 0; step < count; step += 2) {
		hipLaunchKernelGGL(( diffusion_kernel_shared6), dim3(grid_dim), dim3(block_dim),
		smem_bytes, 0,
		f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
		// Swap the field buffers so the next launch reads this launch's output.
		REAL *swap_tmp = f1_d_;
		f1_d_ = f2_d_;
		f2_d_ = swap_tmp;
	}
	CUDA_SAFE_CALL(hipEventRecord(ev2_));
	CUDA_SAFE_CALL(hipMemcpy(f1_, f1_d_, field_bytes, hipMemcpyDeviceToHost));
}
// Benchmark setup hook: runs the base-class initialization, then asks the
// runtime to prefer shared memory over L1 cache for the stencil kernel,
// which stages its whole working tile in dynamic shared memory.
void Diffusion3DCUDAShared6::InitializeBenchmark() {
Diffusion3DCUDA::InitializeBenchmark();
CUDA_SAFE_CALL(hipFuncSetCacheConfig(diffusion_kernel_shared6,
hipFuncCachePreferShared));
}
}
| 93f6ccf748844b2ef3533b23e628ac9ec63e2e6a.cu | #include "diffusion3d/diffusion3d_cuda.h"
#include <assert.h>
#include <stdio.h>
#define CUDA_SAFE_CALL(c) \
do { \
assert(c == cudaSuccess); \
} while (0)
namespace diffusion3d {
#if __CUDA_ARCH__ >= 350
#define LDG(x) __ldg(&(x))
#else
#define LDG(x) (x)
#endif
//#define GET(x) LDG(x)
#define GET(x) (x)
#define bdimx (BLOCK_X)
#define bdimy (BLOCK_Y)
#define SHIFT3(x, y, z) x = y; y = z
#define SHIFT4(x, y, z, k) x = y; y = z; z = k
#define diffusion_backward() \
do { \
sb[ps] = s2; \
__syncthreads(); \
f2[p-xy] = cc * s2 \
+ cw * sb[ps+sb_w] + ce * sb[ps+sb_e] \
+ cs * sb[ps+sb_s] + cn * sb[ps+sb_n] + cb*s1 + ct*s3; \
} while (0)
// Temporal blocking
// z blocking
// sperate warp for diagonal points
/**
 * Fused two-time-step 3D 7-point diffusion stencil with temporal blocking
 * along z.
 *
 * threadIdx.x is folded into a 2D lane id (tidx, tidy): tidy in [0, bdimy)
 * are the interior tile rows, tidy == -1 and tidy == bdimy are dedicated
 * south/north halo rows, one extra warp loads the left/right halo columns,
 * and a final warp loads the corner/diagonal columns. Step 1 results flow
 * through a per-thread register pipeline over z (t1/t2/t3 inputs, s1/s2/s3
 * outputs) staged via the shared tile sb (sbx = bdimx + 4 columns, i.e. two
 * halo columns per side); step 2 is applied one z-plane behind by
 * diffusion_backward(), which writes f2[p - xy]. Each z grid block owns
 * nz / gridDim.z planes plus one overlap plane on either side.
 *
 * NOTE(review): the paired __syncthreads() calls keep all five lane groups
 * in lock-step; every group must execute the same number of barriers per z
 * iteration -- do not restructure without re-counting them.
 */
__global__ void diffusion_kernel_shared6(REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
extern __shared__ REAL sb[];
const int sbx = bdimx+4;
const int tidx = threadIdx.x % bdimx;
const int tidy = threadIdx.x / bdimx - 1;
int i = bdimx * blockIdx.x + tidx;
int j = bdimy * blockIdx.y + tidy;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j == ny) ? ny - 1 : j; // min(j, ny-1)
int xy = nx * ny;
const int block_z = nz / gridDim.z;
// First and one-past-last z plane for this grid block (with overlap planes)
int k = (blockIdx.z == 0) ? 0:
block_z * blockIdx.z - 1;
const int k_end = (blockIdx.z == gridDim.z-1) ? nz:
block_z * (blockIdx.z + 1) + 1;
int p = i + j * nx + k *xy;
int ps = tidx+2 + (tidy+1) * sbx;
// --- Lane group 1: south (j-1) halo row of the tile, step-1 only ---
if (tidy == -1) {
int s = (j == 0) ? 0 : -nx;
// Prime the z register pipeline: t1 = plane below, t2 = current, t3 = above
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
// Extra priming step for chunks that start on an overlap plane
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
// Steady-state z sweep (4 barriers per iteration, matching other groups)
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
// Drain the pipeline at the top boundary
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
// --- Lane group 2: north (j+bdimy) halo row, step-1 only ---
} else if (tidy == bdimy) {
int n = (j == ny-1) ? 0 : nx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
// --- Lane group 3: interior tile rows; the only group that also performs
// the second time step (diffusion_backward writes f2 one plane behind) ---
} else if (tidy >= 0 && tidy < bdimy) {
// Neighbor offsets in the shared tile; 0 clamps at domain boundaries
int sb_s = (j == 0) ? 0: -sbx;
int sb_n = (j == ny-1) ? 0: sbx;
int sb_w = (i == 0) ? 0: -1;
int sb_e = (i == nx-1) ? 0: 1;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s1, s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
__syncthreads();
diffusion_backward();
__syncthreads();
p += xy;
}
if (k == nz) {
SHIFT3(s1, s2, s3);
diffusion_backward();
}
// --- Lane group 4: one warp loads the west/east halo columns (two per
// side) and computes their step-1 values for the backward sweep ---
} else if (tidx < 32 && tidy == bdimy + 1) {
// horizontal halo
int xoffset = (tidx & 1) + ((tidx & 2) >> 1) * (bdimx + 2);
int yoffset = (tidx >> 2) + 1;
yoffset = (yoffset >= (bdimy+1)) ? bdimy : yoffset;
i = bdimx * blockIdx.x - 2 + xoffset;
i = (i < 0) ? 0 : i;
i = (i >= nx) ? nx - 1 : i;
j = bdimy * blockIdx.y -1 + yoffset;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j >= ny) ? ny - 1 : j; // min(j, ny-1)
int s = -sbx;
int n = sbx;
int w = (xoffset == 0) ? 0 : -1;
int e = (xoffset == sbx-1) ? 0 : 1;
p = i + j * nx + k * xy;
ps = xoffset + yoffset * sbx;
// Deeper (4-plane) pipeline: prefetch two planes ahead with __ldg
float t2 = LDG(f1[p]);
float t1 = (k == 0) ? t2 : LDG(f1[p-xy]);
float t3 = (k < nz-1) ? LDG(f1[p+xy]) : t2;
float t4 = (k < nz-2) ? LDG(f1[p+xy*2]) : t3;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
if (k != 1) {
SHIFT4(t1, t2, t3, t4);
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
#pragma unroll
for (; k < k_end-2; ++k) {
SHIFT4(t1, t2, t3, t4);
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
// Last two planes handled outside the loop (no further prefetch)
SHIFT4(t1, t2, t3, t4);
t4 = (k < nz-2) ? LDG(f1[p+xy*2]) : t4;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
++k;
SHIFT4(t1, t2, t3, t4);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
++k;
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
// --- Lane group 5: final warp loads the diagonal/corner columns; it only
// stages loads into sb and otherwise just matches the barrier count ---
} else {
const int tidx2 = tidx & 31;
// 2nd warp
int xoffset = 1 + (tidx & 1) * (bdimx + 1);
int yoffset = ((tidx & 2) >> 1) * (bdimy + 1);
i = bdimx * blockIdx.x - 2 + xoffset;
i = (i < 0) ? 0 : i;
i = (i >= nx) ? nx - 1 : i;
j = bdimy * blockIdx.y -1 + yoffset;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j >= ny) ? ny - 1 : j; // min(j, ny-1)
p = i + j * nx + k * xy;
ps = xoffset + yoffset * sbx;
float t2, t3, t4;
//bool active = tidx2 < 4;
const bool active = 1;
if (active) {
t2 = LDG(f1[p]);
t3 = LDG(f1[p+xy]);
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
p += xy;
++k;
if (k != 1) {
SHIFT3(t2, t3, t4);
if (active) {
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
p += xy;
++k;
}
#pragma unroll
for (; k < k_end-2; ++k) {
SHIFT3(t2, t3, t4);
if (active) {
t4 = LDG(f1[p+xy*2]);
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
p += xy;
}
SHIFT3(t2, t3, t4);
if (active) {
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
p += xy;
++k;
t2 = t3;
if (active) {
sb[ps] = t2;
}
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
p += xy;
if (k == nz) {
__syncthreads();
}
}
return;
}
/**
 * Runs `count` diffusion time steps on the device: uploads the field,
 * launches the fused two-step kernel count/2 times while ping-ponging the
 * two device buffers, then downloads the result. Events ev1_/ev2_ bracket
 * the launch loop for elapsed-time measurement. `count` must be even
 * because each launch advances two time steps.
 */
void Diffusion3DCUDAShared6::RunKernel(int count) {
	const size_t field_bytes = sizeof(REAL) * nx_ * ny_ * nz_;
	CUDA_SAFE_CALL(cudaMemcpy(f1_d_, f1_, field_bytes, cudaMemcpyHostToDevice));
	assert(count % 2 == 0);
	// Tile rows plus two halo rows, plus two extra warps for column halos.
	dim3 block_dim(bdimx * (bdimy + 2) + 32 * 2);
	dim3 grid_dim(nx_ / bdimx, ny_ / bdimy, grid_z_);
	// Dynamic shared tile: (bdimx + 4) columns x (bdimy + 2) rows of floats.
	const size_t smem_bytes = (bdimx + 4) * (bdimy + 2) * sizeof(float);
	CUDA_SAFE_CALL(cudaEventRecord(ev1_));
	for (int step = 0; step < count; step += 2) {
		diffusion_kernel_shared6<<<grid_dim, block_dim, smem_bytes>>>(
		f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
		// Swap the field buffers so the next launch reads this launch's output.
		REAL *swap_tmp = f1_d_;
		f1_d_ = f2_d_;
		f2_d_ = swap_tmp;
	}
	CUDA_SAFE_CALL(cudaEventRecord(ev2_));
	CUDA_SAFE_CALL(cudaMemcpy(f1_, f1_d_, field_bytes, cudaMemcpyDeviceToHost));
}
// Perform common benchmark setup, then bias the on-chip memory split toward
// shared memory: this kernel stages tiles in a shared-memory buffer (sb[]),
// so it benefits from a larger shared partition than L1 cache.
void Diffusion3DCUDAShared6::InitializeBenchmark() {
Diffusion3DCUDA::InitializeBenchmark();
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(diffusion_kernel_shared6,
cudaFuncCachePreferShared));
}
}
|
fc60f12968cd6ef9b7a6a4adae44ab2b0485c6dd.hip | // !!! This is a file automatically generated by hipify!!!
/** \file MovingAverage_CUDA_kernel.cu
* \author Tomasz Jakubczyk
* \brief file with CUDA kernel smoothing with MovingAverage
*
*
*
*/
#define WIN32
#include<stdlib.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <hip/hip_vector_types.h>
#include "helper_math.h"
#include "math_constants.h"
#define STEP 64
extern "C"
{
__global__
/**
 * Scatter pass of the moving average: each thread adds its (permuted) input
 * value into the `step`-wide window of sI centered on its index.
 * I_S presumably holds 1-based indices into I -- TODO confirm with caller.
 * DivD must be run afterwards to normalise the sums.
 */
void MovingAverageD(float* I, unsigned int I_size, int* I_S, float* sI, float step)
{
    // unique block index inside a 3D block grid
    const unsigned int blockId = blockIdx.x //1D
        + blockIdx.y * gridDim.x //2D
        + gridDim.x * gridDim.y * blockIdx.z; //3D
    uint index = __mul24(blockId, blockDim.x) + threadIdx.x;
    if(index >= I_size)
        return;
    int hStep = step / 2;
    // The scattered value is loop-invariant, so read it once.
    const float value = I[(unsigned int)round(I_S[index] - 1.0f)];
    // BUG FIX: use signed arithmetic for the neighbour index. The original
    // unsigned `index + i` wrapped around for negative offsets, which made
    // the loop condition fail immediately whenever index < hStep (left-edge
    // elements contributed nothing) and made the `index+i >= 0` guard
    // always true.
    const int center = (int)index;
    #pragma unroll
    for(int i = -hStep; i < hStep; i++)
    {
        const int target = center + i;
        if(target >= 0 && target < (int)I_size)
        {
            atomicAdd(sI + target, value);
        }
    }
}
__global__
// Normalisation pass of the moving average: divide each accumulated sum by
// the size of the window that actually contributed to it (truncated at the
// array edges).
void DivD(unsigned int I_size, float* sI, float step)
{
    // Flatten the 3D block grid to a linear block id.
    const unsigned int linearBlock = blockIdx.x
        + blockIdx.y * gridDim.x
        + gridDim.x * gridDim.y * blockIdx.z;
    uint idx = __mul24(linearBlock, blockDim.x) + threadIdx.x;
    if(idx >= I_size)
        return;
    int hStep = step / 2;
    float divisor;
    if(idx > hStep && idx + hStep < I_size)
    {
        divisor = (float)step;                      // interior: full window
    }
    else if(idx <= hStep)
    {
        divisor = (float)(idx + hStep);             // left edge: truncated window
    }
    else
    {
        divisor = (float)((I_size - idx) + hStep);  // right edge: truncated window
    }
    sI[idx] /= divisor;
}
}
| fc60f12968cd6ef9b7a6a4adae44ab2b0485c6dd.cu | /** \file MovingAverage_CUDA_kernel.cu
* \author Tomasz Jakubczyk
* \brief file with CUDA kernel smoothing with MovingAverage
*
*
*
*/
#define WIN32
#include<stdlib.h>
#include <math.h>
#include <float.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <vector_types.h>
#include "helper_math.h"
#include "math_constants.h"
#define STEP 64
extern "C"
{
__global__
/**
 * Scatter pass of the moving average: each thread adds its (permuted) input
 * value into the `step`-wide window of sI centered on its index.
 * I_S presumably holds 1-based indices into I -- TODO confirm with caller.
 * DivD must be run afterwards to normalise the sums.
 */
void MovingAverageD(float* I, unsigned int I_size, int* I_S, float* sI, float step)
{
    // unique block index inside a 3D block grid
    const unsigned int blockId = blockIdx.x //1D
        + blockIdx.y * gridDim.x //2D
        + gridDim.x * gridDim.y * blockIdx.z; //3D
    uint index = __mul24(blockId, blockDim.x) + threadIdx.x;
    if(index >= I_size)
        return;
    int hStep = step / 2;
    // The scattered value is loop-invariant, so read it once.
    const float value = I[(unsigned int)round(I_S[index] - 1.0f)];
    // BUG FIX: use signed arithmetic for the neighbour index. The original
    // unsigned `index + i` wrapped around for negative offsets, which made
    // the loop condition fail immediately whenever index < hStep (left-edge
    // elements contributed nothing) and made the `index+i >= 0` guard
    // always true.
    const int center = (int)index;
    #pragma unroll
    for(int i = -hStep; i < hStep; i++)
    {
        const int target = center + i;
        if(target >= 0 && target < (int)I_size)
        {
            atomicAdd(sI + target, value);
        }
    }
}
__global__
// Normalisation pass of the moving average: divide each accumulated sum by
// the size of the window that actually contributed to it (truncated at the
// array edges).
void DivD(unsigned int I_size, float* sI, float step)
{
    // Flatten the 3D block grid to a linear block id.
    const unsigned int linearBlock = blockIdx.x
        + blockIdx.y * gridDim.x
        + gridDim.x * gridDim.y * blockIdx.z;
    uint idx = __mul24(linearBlock, blockDim.x) + threadIdx.x;
    if(idx >= I_size)
        return;
    int hStep = step / 2;
    float divisor;
    if(idx > hStep && idx + hStep < I_size)
    {
        divisor = (float)step;                      // interior: full window
    }
    else if(idx <= hStep)
    {
        divisor = (float)(idx + hStep);             // left edge: truncated window
    }
    else
    {
        divisor = (float)((I_size - idx) + hStep);  // right edge: truncated window
    }
    sI[idx] /= divisor;
}
}
|
d24467822aba5dec25675119807e6414801d1364.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pp_dynamic_access_offchip_memory_char_vecadd_repeat.h"
// Element-wise C = A + B, repeated REPS times (benchmark workload).
// Each block owns a segment of blockDim.x * N elements; each thread walks
// its segment with a stride of blockDim.x so accesses stay coalesced.
__global__ void AddVectors(const char* A, const char* B, char* C, int N)
{
    const int base  = blockIdx.x * blockDim.x * N + threadIdx.x;
    const int limit = base + N * blockDim.x;
    for (int rep = 0; rep < REPS; rep++) {
        for (int idx = base; idx < limit; idx += blockDim.x) {
            C[idx] = A[idx] + B[idx];
        }
    }
}
| d24467822aba5dec25675119807e6414801d1364.cu | #include "pp_dynamic_access_offchip_memory_char_vecadd_repeat.h"
// Element-wise C = A + B, repeated REPS times (benchmark workload).
// Each block owns a segment of blockDim.x * N elements; each thread walks
// its segment with a stride of blockDim.x so accesses stay coalesced.
__global__ void AddVectors(const char* A, const char* B, char* C, int N)
{
    const int base  = blockIdx.x * blockDim.x * N + threadIdx.x;
    const int limit = base + N * blockDim.x;
    for (int rep = 0; rep < REPS; rep++) {
        for (int idx = base; idx < limit; idx += blockDim.x) {
            C[idx] = A[idx] + B[idx];
        }
    }
}
|
15d2f99f3e29b1b2558dce099594e97239269090.hip | // !!! This is a file automatically generated by hipify!!!
/* matrixmul.cu
*
* Jonathan Lehman
* February 22, 2012
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__
void mult( float*, float*, float*, int, int, int, int, int);
void buildArrays( int, int );
void checkArgs(int, char**);
void checkGPUCapabilities(int, int, int, int, int);
int nearestDivInt(int, int);
//set block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
//user input
int GRID_WIDTH;
int GRID_HEIGHT;
int MATRIX_A_HEIGHT;
int MATRIX_A_WIDTH;
int MATRIX_B_HEIGHT;
int MATRIX_B_WIDTH;
int TOTAL_ELEM;
int MAT_A_ELEM;
int MAT_B_ELEM;
int MAT_C_ELEM;
// Keep track of the time.
hipEvent_t start, stop;
float elapsedTime;
//arrays
float* a;
float* b;
float* c;
// Entry point: validate/normalise the user-supplied grid and matrix
// dimensions, generate random inputs, multiply on the GPU, and report timing.
int main( int argc, char *argv[] ){
float *dev_a, *dev_b, *dev_c;
//check validity of arguments
checkArgs(argc, argv);
//assign variables
GRID_WIDTH = atoi(argv[1]);
GRID_HEIGHT = atoi(argv[2]);
MATRIX_A_HEIGHT = atoi(argv[3]);
MATRIX_A_WIDTH = atoi(argv[4]);
MATRIX_B_HEIGHT = atoi(argv[5]);
MATRIX_B_WIDTH = atoi(argv[6]);
//check that multiplication is valid
if(MATRIX_A_WIDTH != MATRIX_B_HEIGHT){
fprintf(stderr, "\nmatrixmul: Matrix A width, %d, must equal Matrix B height, %d, otherwise these matrices cannot be multiplied\n", MATRIX_A_WIDTH, MATRIX_B_HEIGHT );
exit(1);
}
//make sure dimensions of C matrix are divisible by block size
if(nearestDivInt(MATRIX_A_WIDTH, BLOCK_SIZE) != MATRIX_A_WIDTH){
MATRIX_A_WIDTH = nearestDivInt(MATRIX_A_WIDTH, BLOCK_SIZE);
if(MATRIX_A_WIDTH == 0){
MATRIX_A_WIDTH = BLOCK_SIZE;
}
MATRIX_B_HEIGHT = MATRIX_A_WIDTH;
printf("Matrix A width and Matrix B height must be divisible by the block dimension %d\nChanging the dimensions of Matrix A to %d x %d (HxW) and Matrix B to %d x % d (HxW)\n", BLOCK_SIZE, MATRIX_A_HEIGHT, MATRIX_A_WIDTH, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
}
MAT_A_ELEM = MATRIX_A_WIDTH * MATRIX_A_HEIGHT;
MAT_B_ELEM = MATRIX_B_WIDTH * MATRIX_B_HEIGHT;
//check that matrixA is divisible by block size, if not change dimensions
if(nearestDivInt(MAT_A_ELEM, BLOCK_SIZE * BLOCK_SIZE) != MAT_A_ELEM){
MATRIX_A_HEIGHT = nearestDivInt(MATRIX_A_HEIGHT, BLOCK_SIZE * BLOCK_SIZE);
if(MATRIX_A_HEIGHT == 0){
MATRIX_A_HEIGHT = BLOCK_SIZE * BLOCK_SIZE;
}
printf("Matrix A not divisible by the block size, %d\nChanging the dimensions of Matrix A to %d x %d (HxW)\n", BLOCK_SIZE * BLOCK_SIZE, MATRIX_A_HEIGHT, MATRIX_A_WIDTH);
}
//check that matrixB is divisible by block size, if not change dimensions
if(nearestDivInt(MAT_B_ELEM, BLOCK_SIZE * BLOCK_SIZE) != MAT_B_ELEM){
MATRIX_B_WIDTH = nearestDivInt(MATRIX_B_WIDTH, BLOCK_SIZE * BLOCK_SIZE);
if(MATRIX_B_WIDTH == 0){
MATRIX_B_WIDTH = BLOCK_SIZE * BLOCK_SIZE;
}
printf("Matrix B not divisible by the block size, %d\nChanging the dimensions of Matrix B to %d x %d (HxW)\n", BLOCK_SIZE * BLOCK_SIZE, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
}
//need to ensure that the gridwidth is the same as this value, to ensure that the multiplier will work in ALL instances
if(MATRIX_B_WIDTH != GRID_WIDTH * BLOCK_SIZE){
MATRIX_B_WIDTH = GRID_WIDTH * BLOCK_SIZE;
printf("Matrix B width must equal the grid width, %d, times the block size, %d\nChanging the dimensions of Matrix B to %d x %d (HxW)\n", GRID_WIDTH, BLOCK_SIZE, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
}
MAT_A_ELEM = MATRIX_A_WIDTH * MATRIX_A_HEIGHT;
MAT_B_ELEM = MATRIX_B_WIDTH * MATRIX_B_HEIGHT;
MAT_C_ELEM = MATRIX_A_HEIGHT * MATRIX_B_WIDTH;
TOTAL_ELEM = MAT_A_ELEM + MAT_B_ELEM + MAT_C_ELEM;
//check that there are no more elements in the resultant matrix than threads to calculate them
if(GRID_WIDTH * BLOCK_SIZE * GRID_HEIGHT * BLOCK_SIZE < MAT_C_ELEM){
printf("There must be more threads in the grid, %d, than elements in the resulting matrix, %d\n", GRID_WIDTH * BLOCK_SIZE * GRID_HEIGHT * BLOCK_SIZE, MAT_C_ELEM);
exit(1);
}
//check that GPU can handle arguments
checkGPUCapabilities(GRID_WIDTH, GRID_HEIGHT, BLOCK_SIZE, BLOCK_SIZE, TOTAL_ELEM);
/* Initialize the source arrays here. */
a = new float[MAT_A_ELEM];
b = new float[MAT_B_ELEM];
c = new float[MAT_C_ELEM];
//fill array a and b with random doubles
buildArrays(MAT_A_ELEM, MAT_B_ELEM);
/*printf( "The sequence:\n" );
for( int i = 0; i < MAT_A_ELEM; i++ ){
if(i % MATRIX_A_WIDTH == 0){
printf("\n");
}
printf( "%f\t", a[i] );
}
printf( "\n" );
printf( "The sequence:\n" );
for( int i = 0; i < MAT_B_ELEM; i++ ) {
if(i % MATRIX_B_WIDTH == 0){
printf("\n");
}
printf( "%f\t",b[i] );
}
printf( "\n" );*/
//check if there will be enough blocks to handle matrix size (if not some threads will take on more than one addition)
int reps = ceil((double)(MAT_C_ELEM) / (BLOCK_SIZE * BLOCK_SIZE * GRID_WIDTH * GRID_HEIGHT));
/* Allocate global device memory. */
hipMalloc( (void **)&dev_a, sizeof(float) * MAT_A_ELEM );
hipMalloc( (void **)&dev_b, sizeof(float) * MAT_B_ELEM );
hipMalloc( (void **)&dev_c, sizeof(float) * MAT_C_ELEM );
/* Copy the host values to global device memory. */
hipMemcpy( dev_a, a, sizeof(float) * MAT_A_ELEM, hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, sizeof(float) * MAT_B_ELEM, hipMemcpyHostToDevice);
/* Start the timer. */
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
/* Execute the kernel. */
dim3 block(BLOCK_SIZE, BLOCK_SIZE); //threads w x h
dim3 grid(GRID_WIDTH, GRID_HEIGHT); //blocks w x h
hipLaunchKernelGGL(( mult), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, dev_c, MATRIX_A_WIDTH, MATRIX_B_WIDTH, MATRIX_A_HEIGHT, reps, MAT_C_ELEM);
/* Wait for the kernel to complete. Needed for timing. */
hipDeviceSynchronize();
/* Stop the timer and print the resulting time. */
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
printf( "Time: %f secs\n", elapsedTime / 1000 );
/* Get result from device. */
hipMemcpy(c, dev_c, sizeof(float) * MAT_C_ELEM, hipMemcpyDeviceToHost);
/*printf( "The sequence:\n" );
for( int i = 0; i < MAT_C_ELEM; i++ ){
if(i % MATRIX_B_WIDTH == 0){
printf("\n");
}
printf( "%f\t", c[i] );
}
printf( "\n" );*/
//print any cuda error messages
const char* errorString = hipGetErrorString(hipGetLastError());
printf("GPU Error: %s\n", errorString);
//destroy cuda event
hipEventDestroy( start );
hipEventDestroy( stop );
/* Free the allocated device memory. */
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
//free allocated host memory
//BUG FIX: these buffers come from new[], so they must be released with
//delete[]; calling free() on new[]'d memory is undefined behavior
delete[] a;
delete[] b;
delete[] c;
}
__global__
// Tiled matrix multiply C = A * B using shared-memory submatrices.
// When the C matrix is larger than one grid covers, each block processes
// `reps` successive tiles.
void mult( float *a, float *b, float *c, int wA , int wB, int hA, int reps, int size)
{
//grid dimensions (# blocks)
int gridW = gridDim.x;
int gridH = gridDim.y;
//block id
int blockX = blockIdx.x;
int blockY = blockIdx.y;
//thread id
int threadX = threadIdx.x;
int threadY = threadIdx.y;
//values to iterate through submatrix blocks
int aStart;
int aSize;
int aStop;
int bStart;
int bSize;
//shared memory for each block (A and B matrices)
__shared__ float shA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float shB[BLOCK_SIZE][BLOCK_SIZE];
//loop through number of times matrix elements fill more than an entire grid
for(int i = 0; i < reps; i++){
//float to store c subtotal
//BUG FIX: the accumulator must be reset for every pass; it previously
//lived outside this loop and carried the previous tile's sum into each
//later pass whenever reps > 1
float cTot = 0;
//A blocks
// index of first submatrix of A (account for if doesnt fit on initial grid)
if(hA > gridH * BLOCK_SIZE){
aStart = wA * BLOCK_SIZE * (blockY + gridW * i);
}
else{
aStart = wA * BLOCK_SIZE * blockY;
}
// size of each submatrix of A
aSize = BLOCK_SIZE;
// index of last submatrix of A
aStop = aStart + wA - 1;
//B blocks
// index of first submatrix of B (account for if doesnt fit on initial grid)
if(wB > gridW * BLOCK_SIZE){
bStart = BLOCK_SIZE * (blockX + gridH * i);
}
else{
bStart = BLOCK_SIZE * blockX;
}
// size of each submatrix of B
bSize = BLOCK_SIZE * wB;
// loop through submatrices for a and b by specified steps
for (int aVal = aStart, bVal = bStart; aVal <= aStop; aVal += aSize, bVal += bSize){
int aIndex = aVal + wA * threadY + threadX;
int bIndex = bVal + wB * threadY + threadX;
//load memory for matrices a and b into shared memory
shA[threadX][threadY] = a[aIndex];
shB[threadX][threadY] = b[bIndex];
__syncthreads();
//renamed loop variable: the original `i` shadowed the pass index used
//for the output offset below
for (int t = 0; t < BLOCK_SIZE; t++){
cTot += shA[t][threadX] * shB[threadY][t];
}
__syncthreads();
}
//store values to correct index in c
int cVal = wB * BLOCK_SIZE * blockY + BLOCK_SIZE * blockX;
int index = cVal + wB * threadX + threadY + (gridW * gridH * BLOCK_SIZE * BLOCK_SIZE * i);
if(index < size){
c[index] = cTot;
}
}
}
// Fill the global host matrices a and b with reproducible pseudo-random
// floats in [0, 1]; each matrix uses its own fixed seed.
void buildArrays( int mat_a_size, int mat_b_size ){
    srand( 200 );
    for(int idx = 0; idx < mat_a_size; idx++){
        a[idx] = rand() / (float(RAND_MAX));
    }
    srand( 300 );
    for(int idx = 0; idx < mat_b_size; idx++){
        b[idx] = rand() / (float(RAND_MAX));
    }
}
// Validate the command line: exactly six arguments, each a positive,
// non-zero integer that fits in an int. Exits with a diagnostic otherwise.
void checkArgs(int argc, char *argv[]){
    if(argc != 7){
        fprintf(stderr, "\nmatrixmul: Incorrect number of arguments. matrixmul requires 6 arguments not %d\nCorrect usage: \"matrixmul grid-width grid-height matA-height matA-width matB-height matB-width\"\n", argc - 1);
        exit(1);
    }
    for(int argIdx = 1; argIdx < 7; argIdx++){
        char* endPtr;
        long parsed = strtol(argv[argIdx], &endPtr, 10);
        // Reject values too large for a signed int.
        if(parsed >= INT_MAX){
            fprintf(stderr, "\nmatrixmul: Overflow. Invalid argument %d for matrixmul, '%s'.\nThe argument must be a valid, positive, non-zero integer less than %d.\n", argIdx, argv[argIdx], INT_MAX);
            exit(1);
        }
        // Reject zero/negative values and trailing non-numeric characters.
        if(!(parsed > 0) || (*endPtr)){
            fprintf(stderr, "\nmatrixmul: Invalid argument %d for matrixmul, '%s'. The argument must be a valid, positive, non-zero integer.\n", argIdx, argv[argIdx]);
            exit(1);
        }
    }
}
// Verify that the current GPU can hold all three matrices (size = total
// element count across A, B and C) and supports the requested block/grid
// geometry; exits with a diagnostic otherwise.
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
//check what GPU is being used
int devId;
hipGetDevice( &devId );
//get device properties for GPU being used
hipDeviceProp_t gpuProp;
hipGetDeviceProperties( &gpuProp, devId );
//check if GPU has enough memory to handle the 3 arrays
if(gpuProp.totalGlobalMem < (size * sizeof(float))){
//BUG FIX: `size` already counts all three matrices, so the reported data
//size must not be tripled (the old message printed 3x the real value)
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU does not have enough memory to handle the data size: %ld. It can only handle data sizes up to %ld.\n", (size * sizeof(float)), gpuProp.totalGlobalMem);
exit(1);
}
//check if GPU can handle the number of threads per bloc
if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
exit(1);
}
//check that GPU can handle the number of threads in the block width
if(gpuProp.maxThreadsDim[0] < blockW){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
exit(1);
}
//check that GPU can handle the number of threads in the block height
if(gpuProp.maxThreadsDim[1] < blockH){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
exit(1);
}
//check that GPU can handle the number of blocks in the grid width
if(gpuProp.maxGridSize[0] < gridW){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
exit(1);
}
//check that GPU can handle the number of blocks in the grid height
if(gpuProp.maxGridSize[1] < gridH){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
exit(1);
}
}
//returns nearest int to initVal divisible by divBy
int nearestDivInt(int initVal, int divBy){
    // Candidate multiples bracketing initVal (division truncates toward zero).
    int lower = (initVal / divBy) * divBy;
    int upper = lower + divBy;
    // Ties round down, matching the original <= comparison.
    return (abs(initVal - lower) <= abs(initVal - upper)) ? lower : upper;
}
| 15d2f99f3e29b1b2558dce099594e97239269090.cu | /* matrixmul.cu
*
* Jonathan Lehman
* February 22, 2012
*/
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__
void mult( float*, float*, float*, int, int, int, int, int);
void buildArrays( int, int );
void checkArgs(int, char**);
void checkGPUCapabilities(int, int, int, int, int);
int nearestDivInt(int, int);
//set block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
//user input
int GRID_WIDTH;
int GRID_HEIGHT;
int MATRIX_A_HEIGHT;
int MATRIX_A_WIDTH;
int MATRIX_B_HEIGHT;
int MATRIX_B_WIDTH;
int TOTAL_ELEM;
int MAT_A_ELEM;
int MAT_B_ELEM;
int MAT_C_ELEM;
// Keep track of the time.
cudaEvent_t start, stop;
float elapsedTime;
//arrays
float* a;
float* b;
float* c;
// Entry point: validate/normalise the user-supplied grid and matrix
// dimensions, generate random inputs, multiply on the GPU, and report timing.
int main( int argc, char *argv[] ){
float *dev_a, *dev_b, *dev_c;
//check validity of arguments
checkArgs(argc, argv);
//assign variables
GRID_WIDTH = atoi(argv[1]);
GRID_HEIGHT = atoi(argv[2]);
MATRIX_A_HEIGHT = atoi(argv[3]);
MATRIX_A_WIDTH = atoi(argv[4]);
MATRIX_B_HEIGHT = atoi(argv[5]);
MATRIX_B_WIDTH = atoi(argv[6]);
//check that multiplication is valid
if(MATRIX_A_WIDTH != MATRIX_B_HEIGHT){
fprintf(stderr, "\nmatrixmul: Matrix A width, %d, must equal Matrix B height, %d, otherwise these matrices cannot be multiplied\n", MATRIX_A_WIDTH, MATRIX_B_HEIGHT );
exit(1);
}
//make sure dimensions of C matrix are divisible by block size
if(nearestDivInt(MATRIX_A_WIDTH, BLOCK_SIZE) != MATRIX_A_WIDTH){
MATRIX_A_WIDTH = nearestDivInt(MATRIX_A_WIDTH, BLOCK_SIZE);
if(MATRIX_A_WIDTH == 0){
MATRIX_A_WIDTH = BLOCK_SIZE;
}
MATRIX_B_HEIGHT = MATRIX_A_WIDTH;
printf("Matrix A width and Matrix B height must be divisible by the block dimension %d\nChanging the dimensions of Matrix A to %d x %d (HxW) and Matrix B to %d x % d (HxW)\n", BLOCK_SIZE, MATRIX_A_HEIGHT, MATRIX_A_WIDTH, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
}
MAT_A_ELEM = MATRIX_A_WIDTH * MATRIX_A_HEIGHT;
MAT_B_ELEM = MATRIX_B_WIDTH * MATRIX_B_HEIGHT;
//check that matrixA is divisible by block size, if not change dimensions
if(nearestDivInt(MAT_A_ELEM, BLOCK_SIZE * BLOCK_SIZE) != MAT_A_ELEM){
MATRIX_A_HEIGHT = nearestDivInt(MATRIX_A_HEIGHT, BLOCK_SIZE * BLOCK_SIZE);
if(MATRIX_A_HEIGHT == 0){
MATRIX_A_HEIGHT = BLOCK_SIZE * BLOCK_SIZE;
}
printf("Matrix A not divisible by the block size, %d\nChanging the dimensions of Matrix A to %d x %d (HxW)\n", BLOCK_SIZE * BLOCK_SIZE, MATRIX_A_HEIGHT, MATRIX_A_WIDTH);
}
//check that matrixB is divisible by block size, if not change dimensions
if(nearestDivInt(MAT_B_ELEM, BLOCK_SIZE * BLOCK_SIZE) != MAT_B_ELEM){
MATRIX_B_WIDTH = nearestDivInt(MATRIX_B_WIDTH, BLOCK_SIZE * BLOCK_SIZE);
if(MATRIX_B_WIDTH == 0){
MATRIX_B_WIDTH = BLOCK_SIZE * BLOCK_SIZE;
}
printf("Matrix B not divisible by the block size, %d\nChanging the dimensions of Matrix B to %d x %d (HxW)\n", BLOCK_SIZE * BLOCK_SIZE, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
}
//need to ensure that the gridwidth is the same as this value, to ensure that the multiplier will work in ALL instances
if(MATRIX_B_WIDTH != GRID_WIDTH * BLOCK_SIZE){
MATRIX_B_WIDTH = GRID_WIDTH * BLOCK_SIZE;
printf("Matrix B width must equal the grid width, %d, times the block size, %d\nChanging the dimensions of Matrix B to %d x %d (HxW)\n", GRID_WIDTH, BLOCK_SIZE, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
}
MAT_A_ELEM = MATRIX_A_WIDTH * MATRIX_A_HEIGHT;
MAT_B_ELEM = MATRIX_B_WIDTH * MATRIX_B_HEIGHT;
MAT_C_ELEM = MATRIX_A_HEIGHT * MATRIX_B_WIDTH;
TOTAL_ELEM = MAT_A_ELEM + MAT_B_ELEM + MAT_C_ELEM;
//check that there are no more elements in the resultant matrix than threads to calculate them
if(GRID_WIDTH * BLOCK_SIZE * GRID_HEIGHT * BLOCK_SIZE < MAT_C_ELEM){
printf("There must be more threads in the grid, %d, than elements in the resulting matrix, %d\n", GRID_WIDTH * BLOCK_SIZE * GRID_HEIGHT * BLOCK_SIZE, MAT_C_ELEM);
exit(1);
}
//check that GPU can handle arguments
checkGPUCapabilities(GRID_WIDTH, GRID_HEIGHT, BLOCK_SIZE, BLOCK_SIZE, TOTAL_ELEM);
/* Initialize the source arrays here. */
a = new float[MAT_A_ELEM];
b = new float[MAT_B_ELEM];
c = new float[MAT_C_ELEM];
//fill array a and b with random doubles
buildArrays(MAT_A_ELEM, MAT_B_ELEM);
/*printf( "The sequence:\n" );
for( int i = 0; i < MAT_A_ELEM; i++ ){
if(i % MATRIX_A_WIDTH == 0){
printf("\n");
}
printf( "%f\t", a[i] );
}
printf( "\n" );
printf( "The sequence:\n" );
for( int i = 0; i < MAT_B_ELEM; i++ ) {
if(i % MATRIX_B_WIDTH == 0){
printf("\n");
}
printf( "%f\t",b[i] );
}
printf( "\n" );*/
//check if there will be enough blocks to handle matrix size (if not some threads will take on more than one addition)
int reps = ceil((double)(MAT_C_ELEM) / (BLOCK_SIZE * BLOCK_SIZE * GRID_WIDTH * GRID_HEIGHT));
/* Allocate global device memory. */
cudaMalloc( (void **)&dev_a, sizeof(float) * MAT_A_ELEM );
cudaMalloc( (void **)&dev_b, sizeof(float) * MAT_B_ELEM );
cudaMalloc( (void **)&dev_c, sizeof(float) * MAT_C_ELEM );
/* Copy the host values to global device memory. */
cudaMemcpy( dev_a, a, sizeof(float) * MAT_A_ELEM, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, sizeof(float) * MAT_B_ELEM, cudaMemcpyHostToDevice);
/* Start the timer. */
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
/* Execute the kernel. */
dim3 block(BLOCK_SIZE, BLOCK_SIZE); //threads w x h
dim3 grid(GRID_WIDTH, GRID_HEIGHT); //blocks w x h
mult<<<grid, block>>>(dev_a, dev_b, dev_c, MATRIX_A_WIDTH, MATRIX_B_WIDTH, MATRIX_A_HEIGHT, reps, MAT_C_ELEM);
/* Wait for the kernel to complete. Needed for timing. */
//cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
//current equivalent
cudaDeviceSynchronize();
/* Stop the timer and print the resulting time. */
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
printf( "Time: %f secs\n", elapsedTime / 1000 );
/* Get result from device. */
cudaMemcpy(c, dev_c, sizeof(float) * MAT_C_ELEM, cudaMemcpyDeviceToHost);
/*printf( "The sequence:\n" );
for( int i = 0; i < MAT_C_ELEM; i++ ){
if(i % MATRIX_B_WIDTH == 0){
printf("\n");
}
printf( "%f\t", c[i] );
}
printf( "\n" );*/
//print any cuda error messages
const char* errorString = cudaGetErrorString(cudaGetLastError());
printf("GPU Error: %s\n", errorString);
//destroy cuda event
cudaEventDestroy( start );
cudaEventDestroy( stop );
/* Free the allocated device memory. */
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
//free allocated host memory
//BUG FIX: these buffers come from new[], so they must be released with
//delete[]; calling free() on new[]'d memory is undefined behavior
delete[] a;
delete[] b;
delete[] c;
}
__global__
// Tiled matrix multiply C = A * B using shared-memory submatrices.
// When the C matrix is larger than one grid covers, each block processes
// `reps` successive tiles.
void mult( float *a, float *b, float *c, int wA , int wB, int hA, int reps, int size)
{
//grid dimensions (# blocks)
int gridW = gridDim.x;
int gridH = gridDim.y;
//block id
int blockX = blockIdx.x;
int blockY = blockIdx.y;
//thread id
int threadX = threadIdx.x;
int threadY = threadIdx.y;
//values to iterate through submatrix blocks
int aStart;
int aSize;
int aStop;
int bStart;
int bSize;
//shared memory for each block (A and B matrices)
__shared__ float shA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float shB[BLOCK_SIZE][BLOCK_SIZE];
//loop through number of times matrix elements fill more than an entire grid
for(int i = 0; i < reps; i++){
//float to store c subtotal
//BUG FIX: the accumulator must be reset for every pass; it previously
//lived outside this loop and carried the previous tile's sum into each
//later pass whenever reps > 1
float cTot = 0;
//A blocks
// index of first submatrix of A (account for if doesnt fit on initial grid)
if(hA > gridH * BLOCK_SIZE){
aStart = wA * BLOCK_SIZE * (blockY + gridW * i);
}
else{
aStart = wA * BLOCK_SIZE * blockY;
}
// size of each submatrix of A
aSize = BLOCK_SIZE;
// index of last submatrix of A
aStop = aStart + wA - 1;
//B blocks
// index of first submatrix of B (account for if doesnt fit on initial grid)
if(wB > gridW * BLOCK_SIZE){
bStart = BLOCK_SIZE * (blockX + gridH * i);
}
else{
bStart = BLOCK_SIZE * blockX;
}
// size of each submatrix of B
bSize = BLOCK_SIZE * wB;
// loop through submatrices for a and b by specified steps
for (int aVal = aStart, bVal = bStart; aVal <= aStop; aVal += aSize, bVal += bSize){
int aIndex = aVal + wA * threadY + threadX;
int bIndex = bVal + wB * threadY + threadX;
//load memory for matrices a and b into shared memory
shA[threadX][threadY] = a[aIndex];
shB[threadX][threadY] = b[bIndex];
__syncthreads();
//renamed loop variable: the original `i` shadowed the pass index used
//for the output offset below
for (int t = 0; t < BLOCK_SIZE; t++){
cTot += shA[t][threadX] * shB[threadY][t];
}
__syncthreads();
}
//store values to correct index in c
int cVal = wB * BLOCK_SIZE * blockY + BLOCK_SIZE * blockX;
int index = cVal + wB * threadX + threadY + (gridW * gridH * BLOCK_SIZE * BLOCK_SIZE * i);
if(index < size){
c[index] = cTot;
}
}
}
// Fill the global host matrices a and b with reproducible pseudo-random
// floats in [0, 1]; each matrix uses its own fixed seed.
void buildArrays( int mat_a_size, int mat_b_size ){
    srand( 200 );
    for(int idx = 0; idx < mat_a_size; idx++){
        a[idx] = rand() / (float(RAND_MAX));
    }
    srand( 300 );
    for(int idx = 0; idx < mat_b_size; idx++){
        b[idx] = rand() / (float(RAND_MAX));
    }
}
// Validate the command line: exactly six arguments, each a positive,
// non-zero integer that fits in an int. Exits with a diagnostic otherwise.
void checkArgs(int argc, char *argv[]){
    if(argc != 7){
        fprintf(stderr, "\nmatrixmul: Incorrect number of arguments. matrixmul requires 6 arguments not %d\nCorrect usage: \"matrixmul grid-width grid-height matA-height matA-width matB-height matB-width\"\n", argc - 1);
        exit(1);
    }
    for(int argIdx = 1; argIdx < 7; argIdx++){
        char* endPtr;
        long parsed = strtol(argv[argIdx], &endPtr, 10);
        // Reject values too large for a signed int.
        if(parsed >= INT_MAX){
            fprintf(stderr, "\nmatrixmul: Overflow. Invalid argument %d for matrixmul, '%s'.\nThe argument must be a valid, positive, non-zero integer less than %d.\n", argIdx, argv[argIdx], INT_MAX);
            exit(1);
        }
        // Reject zero/negative values and trailing non-numeric characters.
        if(!(parsed > 0) || (*endPtr)){
            fprintf(stderr, "\nmatrixmul: Invalid argument %d for matrixmul, '%s'. The argument must be a valid, positive, non-zero integer.\n", argIdx, argv[argIdx]);
            exit(1);
        }
    }
}
// Verify that the current GPU can hold all three matrices (size = total
// element count across A, B and C) and supports the requested block/grid
// geometry; exits with a diagnostic otherwise.
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
//check what GPU is being used
int devId;
cudaGetDevice( &devId );
//get device properties for GPU being used
cudaDeviceProp gpuProp;
cudaGetDeviceProperties( &gpuProp, devId );
//check if GPU has enough memory to handle the 3 arrays
if(gpuProp.totalGlobalMem < (size * sizeof(float))){
//BUG FIX: `size` already counts all three matrices, so the reported data
//size must not be tripled (the old message printed 3x the real value)
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU does not have enough memory to handle the data size: %ld. It can only handle data sizes up to %ld.\n", (size * sizeof(float)), gpuProp.totalGlobalMem);
exit(1);
}
//check if GPU can handle the number of threads per bloc
if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
exit(1);
}
//check that GPU can handle the number of threads in the block width
if(gpuProp.maxThreadsDim[0] < blockW){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
exit(1);
}
//check that GPU can handle the number of threads in the block height
if(gpuProp.maxThreadsDim[1] < blockH){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
exit(1);
}
//check that GPU can handle the number of blocks in the grid width
if(gpuProp.maxGridSize[0] < gridW){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
exit(1);
}
//check that GPU can handle the number of blocks in the grid height
if(gpuProp.maxGridSize[1] < gridH){
fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
exit(1);
}
}
//returns nearest int to initVal divisible by divBy
int nearestDivInt(int initVal, int divBy){
    // Candidate multiples bracketing initVal (division truncates toward zero).
    int lower = (initVal / divBy) * divBy;
    int upper = lower + divBy;
    // Ties round down, matching the original <= comparison.
    return (abs(initVal - lower) <= abs(initVal - upper)) ? lower : upper;
}
|
a2f1c9c49bdccb8ced5aa0a52597bc19d2d5d2b1.hip | // !!! This is a file automatically generated by hipify!!!
/**
* layer_core_convolution.cu
*/
#include "layer_run.h"
#include "hip/hip_runtime.h"
/**
 * Forward convolution kernel: one thread computes one output element.
 * Launch shape (see layerPredictConvolution): grid = (sample, output
 * channel), block = (outputHeight, outputWidth).
 * Weight layout per output channel: weights[0] is the bias, followed by
 * inputChannels * kernelHeight * kernelWidth coefficients.
 */
__global__ void layerDevPredictConvolution(double* output, double* input, double* weights,
int kernelHeight, int kernelWidth, int inputChannels,
int rowStep, int colStep,
int rowBasis, int colBasis,
int inputSize, int inputChannelSize, int inputHeight, int inputWidth,
int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
int weightsSize
)
{
// blockIdx.x selects the sample, blockIdx.y the output channel.
double* outputBase = output + blockIdx.x * outputSize + blockIdx.y * outputChannelSize;
double* inputBase = input + blockIdx.x * inputSize;
double* weightsBase = weights + blockIdx.y * weightsSize;
// weightsBase[0] holds the bias; the convolution coefficients start at +1.
double* w = weightsBase + 1;
// Top-left corner of this output element's receptive field in the input.
int inputRowBegin = threadIdx.x * rowStep + rowBasis;
int inputColBegin = threadIdx.y * colStep + colBasis;
double z = *weightsBase; // start the accumulator from the bias
for (int d = 0; d < inputChannels; d++) {
for (int kRow = 0; kRow < kernelHeight; kRow++) {
int xRow = inputRowBegin + kRow;
if (xRow < 0 || xRow >= inputHeight) continue; // out-of-range taps contribute 0 (implicit zero padding)
for (int kCol = 0; kCol < kernelWidth; kCol++) {
int xCol = inputColBegin + kCol;
if (xCol < 0 || xCol >= inputWidth) continue; // implicit zero padding
int xIndex = (d * inputHeight + xRow) * inputWidth + xCol;
int wIndex = (d * kernelHeight + kRow) * kernelWidth + kCol;
z += w[wIndex] * inputBase[xIndex];
}
}
}
outputBase[threadIdx.x * blockDim.y + threadIdx.y] = z;
}
// Host wrapper for the forward convolution: computes buffer extents from the
// schema and launches one thread per output element.
int layerPredictConvolution(layer_schema_t* schema, int batchSize) {
    // Element counts per sample and per channel.
    int channelSizeIn = schema->inputHeight * schema->inputWidth;
    int channelSizeOut = schema->outputHeight * schema->outputWidth;
    int sampleSizeIn = schema->inputDepth * channelSizeIn;
    int sampleSizeOut = schema->outputDepth * channelSizeOut;
    // One bias plus one weight per (input channel, kernel row, kernel col).
    int weightsPerKernel = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    // One block per (sample, output channel); one thread per output pixel.
    dim3 gridSize(batchSize, schema->outputDepth);
    dim3 blockSize(schema->outputHeight, schema->outputWidth);
    hipLaunchKernelGGL(( layerDevPredictConvolution), dim3(gridSize), dim3(blockSize), 0, 0, schema->predictOutput, schema->predictInput, schema->weights,
        schema->operationHeight, schema->operationWidth, schema->inputDepth,
        schema->operationRowStep, schema->operationColStep,
        schema->operationRowBasis, schema->operationColBasis,
        sampleSizeIn, channelSizeIn, schema->inputHeight, schema->inputWidth,
        sampleSizeOut, channelSizeOut, schema->outputHeight, schema->outputWidth,
        weightsPerKernel
    );
    return layerIfError(schema->layerIndex);
}
// First output position whose kernel window (starting at pos*step + basis,
// `window` wide) can still cover input position `index`.
__device__ int layerConvolutionBackwardIndexBegin(int index, int basis, int window, int step) {
// NOTE(review): integer division truncates toward zero, so for negative
// numerators this over-estimates the floor -- callers clamp the result to 0.
return (index - basis - window + step) / step;
}
// Last output position whose window start (pos*step + basis) is at or before
// input position `index`; callers must clamp it to the valid output range.
__device__ int layerConvolutionBackwardIndexEnd(int index, int basis, int window, int step) {
return (index - basis - 0) / step;
}
//
// Backward convolution (gradient w.r.t. the layer input): one thread per
// input pixel. Grid = (sample, input channel); block = (inputHeight,
// inputWidth). For each input pixel it accumulates, over all output channels,
// the upstream gradients of every output position whose kernel window covers
// this pixel, weighted by the matching kernel coefficient.
__global__ void layerDevTrainConvolution1(double* trainOutput, double* trainInput, double* weights,
int kernelHeight, int kernelWidth, int outputChannels,
int rowStep, int colStep,
int rowBasis, int colBasis,
int inputSize, int inputChannelSize, int inputHeight, int inputWidth,
int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
int weightsSize
)
{
double* trainInputBaseBase = trainInput + blockIdx.x * outputSize;
int rowBegin = layerConvolutionBackwardIndexBegin(threadIdx.x, rowBasis, kernelHeight, rowStep);
int rowEnd = layerConvolutionBackwardIndexEnd(threadIdx.x, rowBasis, kernelHeight, rowStep);
int colBegin = layerConvolutionBackwardIndexBegin(threadIdx.y, colBasis, kernelWidth, colStep);
int colEnd = layerConvolutionBackwardIndexEnd(threadIdx.y, colBasis, kernelWidth, colStep);
if (rowBegin < 0) rowBegin = 0;
// BUG FIX: clamp the *end* of the output range to the last valid output
// row/column. The original code assigned rowBegin/colBegin here (and
// compared the column range against inputHeight), which both emptied the
// loop for valid pixels and left rowEnd/colEnd unclamped, reading past the
// end of the upstream gradient buffer.
if (rowEnd >= outputHeight) rowEnd = outputHeight - 1;
if (colBegin < 0) colBegin = 0;
if (colEnd >= outputWidth) colEnd = outputWidth - 1;
double v = 0;
for (int k = 0; k < outputChannels; k++) {
double* trainInputBase = trainInputBaseBase + k * outputChannelSize;
// Skip the bias at weights[0]; select this input channel's coefficients.
double* w = weights + k * weightsSize + 1 + blockIdx.y * kernelHeight * kernelWidth;
for (int i = rowBegin; i <= rowEnd; i++) {
for (int j = colBegin; j <= colEnd; j++) {
// Position of this input pixel inside output (i, j)'s kernel window.
int rowOffset = threadIdx.x - (i * rowStep + rowBasis);
int colOffset = threadIdx.y - (j * colStep + colBasis);
double dprev = trainInputBase[i * outputWidth + j];
double dtemp = w[rowOffset * kernelWidth + colOffset];
v += dprev * dtemp;
}
}
}
trainOutput[blockIdx.x * inputSize + blockIdx.y * inputChannelSize + threadIdx.x * inputWidth + threadIdx.y] = v;
}
// Host launcher for the dL/dx backward kernel: one block per
// (sample, input channel), one thread per input pixel.
int layerTrainConvolution1(layer_schema_t* schema, int batchSize) {
    int inputSize = schema->inputDepth * schema->inputHeight * schema->inputWidth;
    int outputSize = schema->outputDepth * schema->outputHeight * schema->outputWidth;
    int inputChannelSize = schema->inputHeight * schema->inputWidth;
    int outputChannelSize = schema->outputHeight * schema->outputWidth;
    // Per-output-channel weight count: one kernel per input channel plus one bias.
    int weightsSize = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    dim3 gridSize(batchSize, schema->inputDepth);
    // NOTE(review): inputHeight * inputWidth must stay within the device's
    // max-threads-per-block limit (typically 1024) — confirm for large layers.
    dim3 blockSize(schema->inputHeight, schema->inputWidth);
    hipLaunchKernelGGL(( layerDevTrainConvolution1), dim3(gridSize), dim3(blockSize), 0, 0, schema->trainOutput, schema->trainInput, schema->weights,
        schema->operationHeight, schema->operationWidth, schema->outputDepth,
        schema->operationRowStep, schema->operationColStep,
        schema->operationRowBasis, schema->operationColBasis,
        inputSize, inputChannelSize, schema->inputHeight, schema->inputWidth,
        outputSize, outputChannelSize, schema->outputHeight, schema->outputWidth,
        weightsSize
    );
    // layerIfError presumably inspects the runtime error state for this
    // layer — confirm against its definition elsewhere in the project.
    return layerIfError(schema->layerIndex);
}
// Backward pass: gradient w.r.t. the convolution weights (dL/dw).
// Gradient of the convolution weights (dL/dw), averaged over the batch.
// Launch layout: grid = (outputDepth, inputDepth),
// block = (operationHeight, operationWidth) — one thread per weight element.
__global__ void layerDevTrainConvolution2(double* dweights, double* trainInput, double* predictInput,
    int rowStep, int colStep,
    int rowBasis, int colBasis,
    int inputSize, int inputChannelSize, int inputHeight, int inputWidth,
    int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
    int weightsSize, int batchSize
)
{
    // +1 skips the bias slot at the head of each output channel's weights.
    double* wBase = dweights + blockIdx.x * weightsSize + 1;
    double v = 0;
    for (int mi = 0; mi < batchSize; mi++) {
        double* trainInputBase = trainInput + mi * outputSize + blockIdx.x * outputChannelSize;
        double* predictInputBase = predictInput + mi * inputSize + blockIdx.y * inputChannelSize;
        for (int i = 0; i < outputHeight; i++) {
            // Input row touched by this kernel tap at output row i.
            int xRow = i * rowStep + rowBasis + threadIdx.x;
            if (xRow < 0 || xRow >= inputHeight) continue; // zero padding
            for (int j = 0; j < outputWidth; j++) {
                int xCol = j * colStep + colBasis + threadIdx.y;
                if (xCol < 0 || xCol >= inputWidth) continue;
                double dprev = trainInputBase[i * outputWidth + j];
                double dtemp = predictInputBase[xRow * inputWidth + xCol];
                v += dprev * dtemp;
            }
        }
    }
    // Average over the mini-batch.
    v /= batchSize;
    wBase[(blockIdx.y * blockDim.x + threadIdx.x) * blockDim.y + threadIdx.y] = v;
}
// Host launcher for the weight-gradient kernel: one block per
// (output channel, input channel) pair, one thread per kernel tap.
int layerTrainConvolution2(layer_schema_t* schema, int batchSize) {
    int inputSize = schema->inputDepth * schema->inputHeight * schema->inputWidth;
    int outputSize = schema->outputDepth * schema->outputHeight * schema->outputWidth;
    int inputChannelSize = schema->inputHeight * schema->inputWidth;
    int outputChannelSize = schema->outputHeight * schema->outputWidth;
    // Per-output-channel weight count: one kernel per input channel plus one bias.
    int weightsSize = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    dim3 gridSize(schema->outputDepth, schema->inputDepth);
    dim3 blockSize(schema->operationHeight, schema->operationWidth);
    hipLaunchKernelGGL(( layerDevTrainConvolution2), dim3(gridSize), dim3(blockSize), 0, 0, schema->dweights, schema->trainInput, schema->predictInput,
        schema->operationRowStep, schema->operationColStep,
        schema->operationRowBasis, schema->operationColBasis,
        inputSize, inputChannelSize, schema->inputHeight, schema->inputWidth,
        outputSize, outputChannelSize, schema->outputHeight, schema->outputWidth,
        weightsSize, batchSize
    );
    // layerIfError presumably inspects the runtime error state for this layer.
    return layerIfError(schema->layerIndex);
}
// Backward pass: gradient w.r.t. the bias (dL/db).
// Bias gradient (dL/db): the bias gradient of an output channel is the
// batch-averaged sum of that channel's output gradients.
// Launch layout: grid = (outputDepth), block = (1).
__global__ void layerDevTrainConvolution3(double* dweights, double* trainInput,
    int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
    int weightsSize, int batchSize
)
{
    // Slot 0 of each output channel's weight record holds the bias.
    double* bBase = dweights + blockIdx.x * weightsSize;
    double acc = 0;
    for (int sample = 0; sample < batchSize; sample++) {
        double* channel = trainInput + sample * outputSize + blockIdx.x * outputChannelSize;
        // Sum every gradient element of this output channel (row-major).
        for (int idx = 0; idx < outputHeight * outputWidth; idx++) {
            acc += channel[idx];
        }
    }
    *bBase = acc / batchSize;
}
// Host launcher for the bias-gradient kernel: one single-thread block per
// output channel.
int layerTrainConvolution3(layer_schema_t* schema, int batchSize) {
    const int outSize = schema->outputDepth * schema->outputHeight * schema->outputWidth;
    const int outChannelSize = schema->outputHeight * schema->outputWidth;
    // Per-output-channel weight count: one kernel per input channel plus one bias.
    const int perChannelWeights = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    dim3 grid(schema->outputDepth);
    dim3 block(1);
    hipLaunchKernelGGL(( layerDevTrainConvolution3), dim3(grid), dim3(block), 0, 0,
        schema->dweights, schema->trainInput,
        outSize, outChannelSize, schema->outputHeight, schema->outputWidth,
        perChannelWeights, batchSize
    );
    return layerIfError(schema->layerIndex);
}
// Full backward pass for a convolution layer: dL/dx, then dL/dw, then dL/db.
// Stops at the first failing stage; returns 0 on success, 1 on failure.
int layerTrainConvolution(layer_schema_t* schema, int batchSize) {
    int failed = (layerTrainConvolution1(schema, batchSize) != 0);
    if (!failed) failed = (layerTrainConvolution2(schema, batchSize) != 0);
    if (!failed) failed = (layerTrainConvolution3(schema, batchSize) != 0);
    return failed;
}
| a2f1c9c49bdccb8ced5aa0a52597bc19d2d5d2b1.cu | /**
* layer_core_convolution.cu
*/
#include "layer_run.h"
#include "cuda.h"
// Forward convolution. Launch layout: grid = (batch, outputDepth),
// block = (outputHeight, outputWidth) — one thread per output element.
// Per-output-channel weight layout: [bias, w(d, kRow, kCol) row-major].
__global__ void layerDevPredictConvolution(double* output, double* input, double* weights,
    int kernelHeight, int kernelWidth, int inputChannels,
    int rowStep, int colStep,
    int rowBasis, int colBasis,
    int inputSize, int inputChannelSize, int inputHeight, int inputWidth,
    int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
    int weightsSize
)
{
    double* outputBase = output + blockIdx.x * outputSize + blockIdx.y * outputChannelSize;
    double* inputBase = input + blockIdx.x * inputSize;
    double* weightsBase = weights + blockIdx.y * weightsSize;
    // double* b = weightsBase;
    double* w = weightsBase + 1; // skip the bias slot
    // Top-left input coordinate of this output cell's receptive field.
    int inputRowBegin = threadIdx.x * rowStep + rowBasis;
    int inputColBegin = threadIdx.y * colStep + colBasis;
    double z = *weightsBase; // start the accumulator at the bias
    for (int d = 0; d < inputChannels; d++) {
        for (int kRow = 0; kRow < kernelHeight; kRow++) {
            int xRow = inputRowBegin + kRow;
            if (xRow < 0 || xRow >= inputHeight) continue; // zero padding
            for (int kCol = 0; kCol < kernelWidth; kCol++) {
                int xCol = inputColBegin + kCol;
                if (xCol < 0 || xCol >= inputWidth) continue;
                int xIndex = (d * inputHeight + xRow) * inputWidth + xCol;
                int wIndex = (d * kernelHeight + kRow) * kernelWidth + kCol;
                z += w[wIndex] * inputBase[xIndex];
            }
        }
    }
    // blockDim.y == outputWidth, so this is row-major within the channel.
    outputBase[threadIdx.x * blockDim.y + threadIdx.y] = z;
}
// Host launcher for the forward convolution: one block per
// (sample, output channel), one thread per output pixel.
int layerPredictConvolution(layer_schema_t* schema, int batchSize) {
    int inputSize = schema->inputDepth * schema->inputHeight * schema->inputWidth;
    int outputSize = schema->outputDepth * schema->outputHeight * schema->outputWidth;
    int inputChannelSize = schema->inputHeight * schema->inputWidth;
    int outputChannelSize = schema->outputHeight * schema->outputWidth;
    // Per-output-channel weight count: one kernel per input channel plus one bias.
    int weightsSize = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    dim3 gridSize(batchSize, schema->outputDepth);
    // NOTE(review): outputHeight * outputWidth must stay within the device's
    // max-threads-per-block limit (typically 1024) — confirm for large layers.
    dim3 blockSize(schema->outputHeight, schema->outputWidth);
    layerDevPredictConvolution<<<gridSize, blockSize>>>(schema->predictOutput, schema->predictInput, schema->weights,
        schema->operationHeight, schema->operationWidth, schema->inputDepth,
        schema->operationRowStep, schema->operationColStep,
        schema->operationRowBasis, schema->operationColBasis,
        inputSize, inputChannelSize, schema->inputHeight, schema->inputWidth,
        outputSize, outputChannelSize, schema->outputHeight, schema->outputWidth,
        weightsSize
    );
    // layerIfError presumably inspects the runtime error state for this layer.
    return layerIfError(schema->layerIndex);
}
// First candidate output index whose receptive field can cover input
// coordinate `index`.
__device__ int layerConvolutionBackwardIndexBegin(int index, int basis, int window, int step) {
    // The quotient is wrong when the numerator is negative (C division
    // truncates toward zero), but since the corresponding out-of-range
    // indices are never accessed, the final result is unaffected.
    return (index - basis - window + step) / step;
}
// Last candidate output index whose receptive field starts at or before the
// input coordinate `index`.
__device__ int layerConvolutionBackwardIndexEnd(int index, int basis, int window, int step) {
    const int offset = index - basis;
    return offset / step;
}
// Backward pass: gradient w.r.t. the previous layer's activations (dL/dx).
// Backward pass w.r.t. the previous layer's activations (dL/dx).
// Launch layout: grid = (batch, inputDepth), block = (inputHeight, inputWidth),
// i.e. one thread per input element (threadIdx.x = input row, .y = input col).
__global__ void layerDevTrainConvolution1(double* trainOutput, double* trainInput, double* weights,
    int kernelHeight, int kernelWidth, int outputChannels,
    int rowStep, int colStep,
    int rowBasis, int colBasis,
    int inputSize, int inputChannelSize, int inputHeight, int inputWidth,
    int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
    int weightsSize
)
{
    double* trainInputBaseBase = trainInput + blockIdx.x * outputSize;
    // Range of output positions whose receptive field covers this input cell.
    int rowBegin = layerConvolutionBackwardIndexBegin(threadIdx.x, rowBasis, kernelHeight, rowStep);
    int rowEnd = layerConvolutionBackwardIndexEnd(threadIdx.x, rowBasis, kernelHeight, rowStep);
    int colBegin = layerConvolutionBackwardIndexBegin(threadIdx.y, colBasis, kernelWidth, colStep);
    int colEnd = layerConvolutionBackwardIndexEnd(threadIdx.y, colBasis, kernelWidth, colStep);
    // Clamp to the valid *output* range [0, outputHeight) x [0, outputWidth).
    // Fix: the previous code assigned rowBegin/colBegin instead of
    // rowEnd/colEnd and compared both ends against inputHeight (even for the
    // column axis), which let the inclusive loops below read past the output
    // gradient buffer.
    if (rowBegin < 0) rowBegin = 0;
    if (rowEnd >= outputHeight) rowEnd = outputHeight - 1;
    if (colBegin < 0) colBegin = 0;
    if (colEnd >= outputWidth) colEnd = outputWidth - 1;
    double v = 0;
    for (int k = 0; k < outputChannels; k++) {
        double* trainInputBase = trainInputBaseBase + k * outputChannelSize;
        // Weights of output channel k; +1 skips the bias slot.
        double* w = weights + k * weightsSize + 1 + blockIdx.y * kernelHeight * kernelWidth;
        for (int i = rowBegin; i <= rowEnd; i++) {
            for (int j = colBegin; j <= colEnd; j++) {
                // Position of this input cell inside the (i, j) window.
                int rowOffset = threadIdx.x - (i * rowStep + rowBasis);
                int colOffset = threadIdx.y - (j * colStep + colBasis);
                double dprev = trainInputBase[i * outputWidth + j];
                double dtemp = w[rowOffset * kernelWidth + colOffset];
                v += dprev * dtemp;
            }
        }
    }
    trainOutput[blockIdx.x * inputSize + blockIdx.y * inputChannelSize + threadIdx.x * inputWidth + threadIdx.y] = v;
}
// Host launcher for the dL/dx backward kernel: one block per
// (sample, input channel), one thread per input pixel.
int layerTrainConvolution1(layer_schema_t* schema, int batchSize) {
    int inputSize = schema->inputDepth * schema->inputHeight * schema->inputWidth;
    int outputSize = schema->outputDepth * schema->outputHeight * schema->outputWidth;
    int inputChannelSize = schema->inputHeight * schema->inputWidth;
    int outputChannelSize = schema->outputHeight * schema->outputWidth;
    // Per-output-channel weight count: one kernel per input channel plus one bias.
    int weightsSize = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    dim3 gridSize(batchSize, schema->inputDepth);
    // NOTE(review): inputHeight * inputWidth must stay within the device's
    // max-threads-per-block limit (typically 1024) — confirm for large layers.
    dim3 blockSize(schema->inputHeight, schema->inputWidth);
    layerDevTrainConvolution1<<<gridSize, blockSize>>>(schema->trainOutput, schema->trainInput, schema->weights,
        schema->operationHeight, schema->operationWidth, schema->outputDepth,
        schema->operationRowStep, schema->operationColStep,
        schema->operationRowBasis, schema->operationColBasis,
        inputSize, inputChannelSize, schema->inputHeight, schema->inputWidth,
        outputSize, outputChannelSize, schema->outputHeight, schema->outputWidth,
        weightsSize
    );
    // layerIfError presumably inspects the runtime error state for this layer.
    return layerIfError(schema->layerIndex);
}
// Backward pass: gradient w.r.t. the convolution weights (dL/dw).
// Gradient of the convolution weights (dL/dw), averaged over the batch.
// Launch layout: grid = (outputDepth, inputDepth),
// block = (operationHeight, operationWidth) — one thread per weight element.
__global__ void layerDevTrainConvolution2(double* dweights, double* trainInput, double* predictInput,
    int rowStep, int colStep,
    int rowBasis, int colBasis,
    int inputSize, int inputChannelSize, int inputHeight, int inputWidth,
    int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
    int weightsSize, int batchSize
)
{
    // +1 skips the bias slot at the head of each output channel's weights.
    double* wBase = dweights + blockIdx.x * weightsSize + 1;
    double v = 0;
    for (int mi = 0; mi < batchSize; mi++) {
        double* trainInputBase = trainInput + mi * outputSize + blockIdx.x * outputChannelSize;
        double* predictInputBase = predictInput + mi * inputSize + blockIdx.y * inputChannelSize;
        for (int i = 0; i < outputHeight; i++) {
            // Input row touched by this kernel tap at output row i.
            int xRow = i * rowStep + rowBasis + threadIdx.x;
            if (xRow < 0 || xRow >= inputHeight) continue; // zero padding
            for (int j = 0; j < outputWidth; j++) {
                int xCol = j * colStep + colBasis + threadIdx.y;
                if (xCol < 0 || xCol >= inputWidth) continue;
                double dprev = trainInputBase[i * outputWidth + j];
                double dtemp = predictInputBase[xRow * inputWidth + xCol];
                v += dprev * dtemp;
            }
        }
    }
    // Average over the mini-batch.
    v /= batchSize;
    wBase[(blockIdx.y * blockDim.x + threadIdx.x) * blockDim.y + threadIdx.y] = v;
}
// Host launcher for the weight-gradient kernel: one block per
// (output channel, input channel) pair, one thread per kernel tap.
int layerTrainConvolution2(layer_schema_t* schema, int batchSize) {
    int inputSize = schema->inputDepth * schema->inputHeight * schema->inputWidth;
    int outputSize = schema->outputDepth * schema->outputHeight * schema->outputWidth;
    int inputChannelSize = schema->inputHeight * schema->inputWidth;
    int outputChannelSize = schema->outputHeight * schema->outputWidth;
    // Per-output-channel weight count: one kernel per input channel plus one bias.
    int weightsSize = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    dim3 gridSize(schema->outputDepth, schema->inputDepth);
    dim3 blockSize(schema->operationHeight, schema->operationWidth);
    layerDevTrainConvolution2<<<gridSize, blockSize>>>(schema->dweights, schema->trainInput, schema->predictInput,
        schema->operationRowStep, schema->operationColStep,
        schema->operationRowBasis, schema->operationColBasis,
        inputSize, inputChannelSize, schema->inputHeight, schema->inputWidth,
        outputSize, outputChannelSize, schema->outputHeight, schema->outputWidth,
        weightsSize, batchSize
    );
    // layerIfError presumably inspects the runtime error state for this layer.
    return layerIfError(schema->layerIndex);
}
// Backward pass: gradient w.r.t. the bias (dL/db).
// Bias gradient (dL/db): the bias gradient of an output channel is the
// batch-averaged sum of that channel's output gradients.
// Launch layout: grid = (outputDepth), block = (1).
__global__ void layerDevTrainConvolution3(double* dweights, double* trainInput,
    int outputSize, int outputChannelSize, int outputHeight, int outputWidth,
    int weightsSize, int batchSize
)
{
    // Slot 0 of each output channel's weight record holds the bias.
    double* bBase = dweights + blockIdx.x * weightsSize;
    double acc = 0;
    for (int sample = 0; sample < batchSize; sample++) {
        double* channel = trainInput + sample * outputSize + blockIdx.x * outputChannelSize;
        // Sum every gradient element of this output channel (row-major).
        for (int idx = 0; idx < outputHeight * outputWidth; idx++) {
            acc += channel[idx];
        }
    }
    *bBase = acc / batchSize;
}
// Host launcher for the bias-gradient kernel: one single-thread block per
// output channel.
int layerTrainConvolution3(layer_schema_t* schema, int batchSize) {
    const int outSize = schema->outputDepth * schema->outputHeight * schema->outputWidth;
    const int outChannelSize = schema->outputHeight * schema->outputWidth;
    // Per-output-channel weight count: one kernel per input channel plus one bias.
    const int perChannelWeights = schema->inputDepth * schema->operationHeight * schema->operationWidth + 1;
    dim3 grid(schema->outputDepth);
    dim3 block(1);
    layerDevTrainConvolution3<<<grid, block>>>(
        schema->dweights, schema->trainInput,
        outSize, outChannelSize, schema->outputHeight, schema->outputWidth,
        perChannelWeights, batchSize
    );
    return layerIfError(schema->layerIndex);
}
// Full backward pass for a convolution layer: dL/dx, then dL/dw, then dL/db.
// Stops at the first failing stage; returns 0 on success, 1 on failure.
int layerTrainConvolution(layer_schema_t* schema, int batchSize) {
    int failed = (layerTrainConvolution1(schema, batchSize) != 0);
    if (!failed) failed = (layerTrainConvolution2(schema, batchSize) != 0);
    if (!failed) failed = (layerTrainConvolution3(schema, batchSize) != 0);
    return failed;
}
|
b201e01d8f5c0cef8e607451774c61176270b952.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <time.h>
#include <stdlib.h>
#include "Functions.h"
__device__ int SymbolArray1[32];
__constant__ int SymbolArray2[32];
// Adds the __constant__ array into the __device__ array, one element per
// thread; guarded for the 32-element symbol size.
__global__ void SymbolKernel() {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < 32) {
        SymbolArray1[idx] += SymbolArray2[idx];
    }
}
// Uploads two random arrays to device symbols, adds them on the GPU, and
// verifies the result element-wise against the host computation.
void SymbolTest() {
    cuda_error( hipSetDevice(0) );
    int harray1[32];
    int harray2[32];
    int result[32];
    for(int i = 0; i < 32; i++) {
        harray1[i] = rand() % 10000;
        harray2[i] = rand() % 10000;
    }
    // Upload both operands by symbol name (no explicit device allocation).
    cuda_error( hipMemcpyToSymbol(SymbolArray1, harray1, 32*sizeof(int)) );
    cuda_error( hipMemcpyToSymbol(SymbolArray2, harray2, 32*sizeof(int)) );
    hipLaunchKernelGGL(( SymbolKernel), dim3(1), dim3(32), 0, 0, );
    // Fix: kernel launches do not return a status; surface launch/config
    // errors explicitly instead of letting them fail silently.
    cuda_error( hipGetLastError() );
    cuda_error( hipMemcpyFromSymbol(result, SymbolArray1, 32*sizeof(int)) );
    for(int i = 0; i < 32; i++) {
        if(result[i] != harray1[i] + harray2[i]) {
            printf("error at: %d\n", i);
            return;
        }
    }
    printf("PASSED\n");
}
| b201e01d8f5c0cef8e607451774c61176270b952.cu |
#include <cuda_runtime_api.h>
#include <time.h>
#include <stdlib.h>
#include "Functions.h"
__device__ int SymbolArray1[32];
__constant__ int SymbolArray2[32];
// Adds the __constant__ array into the __device__ array, one element per
// thread; guarded for the 32-element symbol size.
__global__ void SymbolKernel() {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < 32) {
        SymbolArray1[idx] += SymbolArray2[idx];
    }
}
// Uploads two random arrays to device symbols, adds them on the GPU, and
// verifies the result element-wise against the host computation.
void SymbolTest() {
    cuda_error( cudaSetDevice(0) );
    int harray1[32];
    int harray2[32];
    int result[32];
    for(int i = 0; i < 32; i++) {
        harray1[i] = rand() % 10000;
        harray2[i] = rand() % 10000;
    }
    // Upload both operands by symbol name (no explicit device allocation).
    cuda_error( cudaMemcpyToSymbol(SymbolArray1, harray1, 32*sizeof(int)) );
    cuda_error( cudaMemcpyToSymbol(SymbolArray2, harray2, 32*sizeof(int)) );
    SymbolKernel<<<1, 32>>>();
    // Fix: kernel launches do not return a status; surface launch/config
    // errors explicitly instead of letting them fail silently.
    cuda_error( cudaGetLastError() );
    cuda_error( cudaMemcpyFromSymbol(result, SymbolArray1, 32*sizeof(int)) );
    for(int i = 0; i < 32; i++) {
        if(result[i] != harray1[i] + harray2[i]) {
            printf("error at: %d\n", i);
            return;
        }
    }
    printf("PASSED\n");
}
|
local_variable.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// Demonstration of variable memory spaces and of a shared-memory race:
// thread 0 writes `b` while every thread reads it, with the barrier
// deliberately commented out.
__global__ void kernel() {
    double a = 2.71828; // register variable, automatic
    double c[100]; // local variable, automatic (intentionally unused)
    __shared__ double b; // shared variable, one copy per block
    int tx = threadIdx.x; // register variable
    if (tx == 0) {
        b = 3.1415926f;
    }
    // Deliberately commented out: without this barrier, threads other than 0
    // may read `b` before thread 0 has written it (the demo's point).
    //__syncthreads(); // run with/without this line
    printf("id = %d, a=%7.5f, b=%9.7f\n", tx, a, b);
}
int main() {
    // One block of 8 threads; output order is nondeterministic.
    hipLaunchKernelGGL(( kernel), dim3(1),dim3(8), 0, 0, );
    // Tears down the context; device reset is one of the documented flush
    // points for the device-side printf buffer.
    hipDeviceReset();
    return 0;
}
| local_variable.cu | #include <stdio.h>
#include <cuda_runtime.h>
// Demonstration of variable memory spaces and of a shared-memory race:
// thread 0 writes `b` while every thread reads it, with the barrier
// deliberately commented out.
__global__ void kernel() {
    double a = 2.71828; // register variable, automatic
    double c[100]; // local variable, automatic (intentionally unused)
    __shared__ double b; // shared variable, one copy per block
    int tx = threadIdx.x; // register variable
    if (tx == 0) {
        b = 3.1415926f;
    }
    // Deliberately commented out: without this barrier, threads other than 0
    // may read `b` before thread 0 has written it (the demo's point).
    //__syncthreads(); // run with/without this line
    printf("id = %d, a=%7.5f, b=%9.7f\n", tx, a, b);
}
int main() {
    // One block of 8 threads; output order is nondeterministic.
    kernel<<<1,8>>>();
    // Tears down the context; device reset is one of the documented flush
    // points for the device-side printf buffer.
    cudaDeviceReset();
    return 0;
}
|
fc986fd25f3b82f3febd1b841c53d509aa8c44d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Leaky-ReLU forward pass over a flat array: out = in for non-negative
// values, out = 0.01 * in otherwise. One thread per element; `elements`
// guards the grid tail.
__global__ void calcLeakyReluForwardGPU(float *in, float *out, int elements)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        float v = in[id];
        if ( v < 0 ){
            // Fix: the previous code (and the CPU reference below) assigned
            // the constant 0.01 to every negative input, which is not a
            // leaky ReLU; scale by the leak slope instead.
            v *= 0.01f;
        }
        out[id] = v;
    }
    /* original
    for( unsigned i = 0; i < data_size; ++i ){
        float v = in.data[i];
        if ( v < 0 ){
            v = 0.01;
        }
        out.data[i] = v;
    }
    */
} | fc986fd25f3b82f3febd1b841c53d509aa8c44d5.cu | #include "includes.h"
// Leaky-ReLU forward pass over a flat array: out = in for non-negative
// values, out = 0.01 * in otherwise. One thread per element; `elements`
// guards the grid tail.
__global__ void calcLeakyReluForwardGPU(float *in, float *out, int elements)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        float v = in[id];
        if ( v < 0 ){
            // Fix: the previous code (and the CPU reference below) assigned
            // the constant 0.01 to every negative input, which is not a
            // leaky ReLU; scale by the leak slope instead.
            v *= 0.01f;
        }
        out[id] = v;
    }
    /* original
    for( unsigned i = 0; i < data_size; ++i ){
        float v = in.data[i];
        if ( v < 0 ){
            v = 0.01;
        }
        out.data[i] = v;
    }
    */
} |
135845e575a5aeaf08143b629c0ab85be457be7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/quantized_layer.hpp"
#include "caffe/quantized_layer.cuh"
namespace caffe {
// Entry point: applies the layer's quantization settings (if any) to the
// bottom blobs, the weights, and the top blobs, in that order. A FLOAT
// precision setting disables all trimming.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::Quantize_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  if (this->layer_param_.has_quantization_param()) {
    //LOG(INFO) << "Quantizing layer: " << this->layer_param_.name();
    const vector<shared_ptr<Blob > >& blobs = this->blobs();
    const QuantizationParameter& param = this->layer_param_.quantization_param();
    if (param.precision() != QuantizationParameter_Precision_FLOAT) {
      // Trim layer inputs that have a matching, enabled qparam entry.
      for (int i = 0; i < std::min<int>(param.qparam_in_size(),bottom.size()); ++i) {
        if(param.qparam_in(i).quantize()) {
          this->QuantizeLayerInputs_gpu(bottom[i]->mutable_gpu_data<Ftype>(), i, bottom[i]->count());
        }
      }
      // Trim weights in place — only once, at the start of quantized
      // inference (quantized_infer_count == 0), since it is destructive.
      if(param.qparam_w().quantize() && blobs.size() > 0 && param.quantized_infer_count() == 0) {
        this->QuantizeWeights_gpu(blobs[0]->mutable_gpu_data<Ftype>(), blobs[0]->count(), true);
        //if (blobs.size() > 1) { //(this->bias_term_) {
        //  this->QuantizeWeights_gpu(blobs[1]->mutable_gpu_data<Ftype>(), blobs[1]->count(), false);
        //}
      }
      // Trim layer outputs.
      if(param.qparam_out().quantize()) {
        for (int i = 0; i < top.size(); ++i) {
          this->QuantizeLayerOutputs_gpu(top[i]->mutable_gpu_data<Ftype>(), top[i]->count());
        }
      }
    }
  }
}
// Quantizes the weight blob in place according to the layer's weight
// quantization parameters; `clip` saturates values to the representable range.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::QuantizeWeights_gpu(Ftype* data, const int count, bool clip) {
  const QuantizationParameter& param = this->layer_param_.quantization_param();
  const QuantizationParameter::QParams& qparam_w = param.qparam_w();
  switch (param.precision()) {
  case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT:
    Trim2FixedPoint_gpu(data, count, param.power2_range(), qparam_w.bitwidth(),
        param.rounding_scheme(), qparam_w.fracbits(), qparam_w.scale(),
        qparam_w.offset(), qparam_w.unsigned_quant(), clip);
    break;
  case QuantizationParameter_Precision_FLOAT:
    // FLOAT means leave the data untouched.
    break;
  default:
    LOG(FATAL) << "Unknown trimming mode: " << param.precision() << " for layer:" << this->layer_param_.name();
    break;
  }
}
// Quantizes bottom blob `blob_id` in place using its per-input qparams;
// always clips to the representable range.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::QuantizeLayerInputs_gpu(
    Ftype* data, const int blob_id, const int count) {
  const QuantizationParameter& param = this->layer_param_.quantization_param();
  const QuantizationParameter::QParams& qparam_in = param.qparam_in(blob_id);
  switch (param.precision()) {
  case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT:
    Trim2FixedPoint_gpu(data, count, param.power2_range(), qparam_in.bitwidth(),
        param.rounding_scheme(), qparam_in.fracbits(), qparam_in.scale(),
        qparam_in.offset(), qparam_in.unsigned_quant(), true);
    break;
  case QuantizationParameter_Precision_FLOAT:
    // FLOAT means leave the data untouched.
    break;
  default:
    LOG(FATAL) << "Unknown trimming mode: " << param.precision() << " for layer:" << this->layer_param_.name();
    break;
  }
}
// Quantizes a top blob in place using the layer's output qparams;
// always clips to the representable range.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::QuantizeLayerOutputs_gpu(Ftype* data,
    const int count) {
  const QuantizationParameter& param = this->layer_param_.quantization_param();
  const QuantizationParameter::QParams& qparam_out = param.qparam_out();
  switch (param.precision()) {
  case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT:
    Trim2FixedPoint_gpu(data, count, param.power2_range(), qparam_out.bitwidth(),
        param.rounding_scheme(), qparam_out.fracbits(), qparam_out.scale(),
        qparam_out.offset(), qparam_out.unsigned_quant(), true);
    break;
  case QuantizationParameter_Precision_FLOAT:
    // FLOAT means leave the data untouched.
    break;
  default:
    LOG(FATAL) << "Unknown trimming mode: " << param.precision() << " for layer:" << this->layer_param_.name();
    break;
  }
}
// Element-wise fixed-point emulation: map into the quantized domain
// (x * scale + offset), round, optionally saturate to [min_data, max_data],
// then map back ((q - offset) * inv_scale).
template <typename Dtype>
__global__ void Trim2FixedPoint_kernel(Dtype* data, const int cnt,
    const int bitwidth, const int rounding, float scale, float inv_scale, float offset, float min_data, float max_data, bool clip) {
  CUDA_KERNEL_LOOP(index, cnt) {
    data[index] = (data[index] * scale) + offset;
    // Round data
    switch (rounding) {
    case QuantizationParameter_Rounding_NEAREST:
      data[index] = rint(data[index]);
      break;
    case QuantizationParameter_Rounding_STOCHASTIC:
      // Stochastic rounding: add U[0,1) noise, then round down.
      data[index] = __float2int_rd(data[index] + RandUniform_device(index));
      break;
    default:
      break;
    }
    // Saturate data
    if(clip) {
      data[index] = (data[index]>(Dtype)max_data? (Dtype)max_data:
          (data[index]<(Dtype)min_data?(Dtype)min_data:data[index]));
    }
    data[index] = (data[index] - offset) * inv_scale;
  }
}
// Host-side launcher for the fixed-point trimming kernel over `cnt` elements.
// NOTE(review): `power2_range` and `fracbits` are accepted but unused in this
// body — confirm whether they should feed into the scale/offset computation.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::Trim2FixedPoint_gpu(Ftype* data, const int cnt, bool power2_range,
    const int bitwidth, const int rounding, int fracbits, float scale, float offset, bool unsigned_quant, bool clip) {
  float inv_scale = 1.0f/scale;
  // Usable magnitude bits: one bit is reserved for the sign when signed.
  int qrange = unsigned_quant? bitwidth : (bitwidth - 1);
  float min_data = unsigned_quant? 0 : -(powf(2, qrange));
  float max_data = +(powf(2, qrange) - 1);
  hipLaunchKernelGGL(( Trim2FixedPoint_kernel), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      data, cnt, bitwidth, rounding, scale, inv_scale, offset, min_data, max_data, clip);
}
// Explicit instantiations for every supported (forward, backward) precision pair.
template void QuantizedLayer<double, double>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<double, float>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<double, float16>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float, double>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float, float>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float, float16>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float16, double>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float16, float>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float16, float16>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
} // namespace caffe
| 135845e575a5aeaf08143b629c0ab85be457be7f.cu | #include "caffe/quantized_layer.hpp"
#include "caffe/quantized_layer.cuh"
namespace caffe {
// Entry point: applies the layer's quantization settings (if any) to the
// bottom blobs, the weights, and the top blobs, in that order. A FLOAT
// precision setting disables all trimming.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::Quantize_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  if (this->layer_param_.has_quantization_param()) {
    //LOG(INFO) << "Quantizing layer: " << this->layer_param_.name();
    const vector<shared_ptr<Blob > >& blobs = this->blobs();
    const QuantizationParameter& param = this->layer_param_.quantization_param();
    if (param.precision() != QuantizationParameter_Precision_FLOAT) {
      // Trim layer inputs that have a matching, enabled qparam entry.
      for (int i = 0; i < std::min<int>(param.qparam_in_size(),bottom.size()); ++i) {
        if(param.qparam_in(i).quantize()) {
          this->QuantizeLayerInputs_gpu(bottom[i]->mutable_gpu_data<Ftype>(), i, bottom[i]->count());
        }
      }
      // Trim weights in place — only once, at the start of quantized
      // inference (quantized_infer_count == 0), since it is destructive.
      if(param.qparam_w().quantize() && blobs.size() > 0 && param.quantized_infer_count() == 0) {
        this->QuantizeWeights_gpu(blobs[0]->mutable_gpu_data<Ftype>(), blobs[0]->count(), true);
        //if (blobs.size() > 1) { //(this->bias_term_) {
        //  this->QuantizeWeights_gpu(blobs[1]->mutable_gpu_data<Ftype>(), blobs[1]->count(), false);
        //}
      }
      // Trim layer outputs.
      if(param.qparam_out().quantize()) {
        for (int i = 0; i < top.size(); ++i) {
          this->QuantizeLayerOutputs_gpu(top[i]->mutable_gpu_data<Ftype>(), top[i]->count());
        }
      }
    }
  }
}
// Quantizes the weight blob in place according to the layer's weight
// quantization parameters; `clip` saturates values to the representable range.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::QuantizeWeights_gpu(Ftype* data, const int count, bool clip) {
  const QuantizationParameter& param = this->layer_param_.quantization_param();
  const QuantizationParameter::QParams& qparam_w = param.qparam_w();
  switch (param.precision()) {
  case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT:
    Trim2FixedPoint_gpu(data, count, param.power2_range(), qparam_w.bitwidth(),
        param.rounding_scheme(), qparam_w.fracbits(), qparam_w.scale(),
        qparam_w.offset(), qparam_w.unsigned_quant(), clip);
    break;
  case QuantizationParameter_Precision_FLOAT:
    // FLOAT means leave the data untouched.
    break;
  default:
    LOG(FATAL) << "Unknown trimming mode: " << param.precision() << " for layer:" << this->layer_param_.name();
    break;
  }
}
// Quantizes bottom blob `blob_id` in place using its per-input qparams;
// always clips to the representable range.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::QuantizeLayerInputs_gpu(
    Ftype* data, const int blob_id, const int count) {
  const QuantizationParameter& param = this->layer_param_.quantization_param();
  const QuantizationParameter::QParams& qparam_in = param.qparam_in(blob_id);
  switch (param.precision()) {
  case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT:
    Trim2FixedPoint_gpu(data, count, param.power2_range(), qparam_in.bitwidth(),
        param.rounding_scheme(), qparam_in.fracbits(), qparam_in.scale(),
        qparam_in.offset(), qparam_in.unsigned_quant(), true);
    break;
  case QuantizationParameter_Precision_FLOAT:
    // FLOAT means leave the data untouched.
    break;
  default:
    LOG(FATAL) << "Unknown trimming mode: " << param.precision() << " for layer:" << this->layer_param_.name();
    break;
  }
}
// Quantizes a top blob in place using the layer's output qparams;
// always clips to the representable range.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::QuantizeLayerOutputs_gpu(Ftype* data,
    const int count) {
  const QuantizationParameter& param = this->layer_param_.quantization_param();
  const QuantizationParameter::QParams& qparam_out = param.qparam_out();
  switch (param.precision()) {
  case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT:
    Trim2FixedPoint_gpu(data, count, param.power2_range(), qparam_out.bitwidth(),
        param.rounding_scheme(), qparam_out.fracbits(), qparam_out.scale(),
        qparam_out.offset(), qparam_out.unsigned_quant(), true);
    break;
  case QuantizationParameter_Precision_FLOAT:
    // FLOAT means leave the data untouched.
    break;
  default:
    LOG(FATAL) << "Unknown trimming mode: " << param.precision() << " for layer:" << this->layer_param_.name();
    break;
  }
}
// Element-wise fixed-point emulation: map into the quantized domain
// (x * scale + offset), round, optionally saturate to [min_data, max_data],
// then map back ((q - offset) * inv_scale).
template <typename Dtype>
__global__ void Trim2FixedPoint_kernel(Dtype* data, const int cnt,
    const int bitwidth, const int rounding, float scale, float inv_scale, float offset, float min_data, float max_data, bool clip) {
  CUDA_KERNEL_LOOP(index, cnt) {
    data[index] = (data[index] * scale) + offset;
    // Round data
    switch (rounding) {
    case QuantizationParameter_Rounding_NEAREST:
      data[index] = rint(data[index]);
      break;
    case QuantizationParameter_Rounding_STOCHASTIC:
      // Stochastic rounding: add U[0,1) noise, then round down.
      data[index] = __float2int_rd(data[index] + RandUniform_device(index));
      break;
    default:
      break;
    }
    // Saturate data
    if(clip) {
      data[index] = (data[index]>(Dtype)max_data? (Dtype)max_data:
          (data[index]<(Dtype)min_data?(Dtype)min_data:data[index]));
    }
    data[index] = (data[index] - offset) * inv_scale;
  }
}
// Host-side launcher for the fixed-point trimming kernel over `cnt` elements.
// NOTE(review): `power2_range` and `fracbits` are accepted but unused in this
// body — confirm whether they should feed into the scale/offset computation.
template<typename Ftype, typename Btype>
void QuantizedLayer<Ftype, Btype>::Trim2FixedPoint_gpu(Ftype* data, const int cnt, bool power2_range,
    const int bitwidth, const int rounding, int fracbits, float scale, float offset, bool unsigned_quant, bool clip) {
  float inv_scale = 1.0f/scale;
  // Usable magnitude bits: one bit is reserved for the sign when signed.
  int qrange = unsigned_quant? bitwidth : (bitwidth - 1);
  float min_data = unsigned_quant? 0 : -(powf(2, qrange));
  float max_data = +(powf(2, qrange) - 1);
  Trim2FixedPoint_kernel<<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS>>>(
      data, cnt, bitwidth, rounding, scale, inv_scale, offset, min_data, max_data, clip);
}
// Explicit instantiations for every supported (forward, backward) precision pair.
template void QuantizedLayer<double, double>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<double, float>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<double, float16>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float, double>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float, float>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float, float16>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float16, double>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float16, float>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
template void QuantizedLayer<float16, float16>::Quantize_gpu(const vector<Blob*>& bottom,const vector<Blob*>& top);
} // namespace caffe
|
426aaa199151eaf1f6df4af9b3fcb5ae274a2c4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<time.h>
#define SIZE 50
using namespace std;
// Naive SIZE x SIZE matrix multiply: one thread computes one element of the
// product. Guards handle grids larger than the matrix.
__global__ void mul(int (*mat1)[SIZE][SIZE] , int (*mat2)[SIZE][SIZE] , long (*result)[SIZE][SIZE]){
    const int row = threadIdx.x + blockIdx.x * blockDim.x;
    const int col = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= SIZE || col >= SIZE) return;
    (*result)[row][col] = 0;
    for (int k = 0; k < SIZE; ++k) {
        (*result)[row][col] += (*mat1)[row][k] * (*mat2)[k][col];
    }
}
// Multiplies two SIZE x SIZE matrices on the GPU, repeats the product on the
// CPU, and reports both results and timings.
int main(){
    int mat1[SIZE][SIZE];
    int mat2[SIZE][SIZE];
    long result[SIZE][SIZE];
    // Device-side copies of the matrices.
    int (*d_in_mat1)[SIZE][SIZE], (*d_in_mat2)[SIZE][SIZE];
    long (*d_out_result)[SIZE][SIZE];
    // Initialize: every element of row i holds i+1.
    for(int i = 0 ; i < SIZE ; i++){
        for(int j = 0 ; j < SIZE ; j++){
            mat1[i][j] = i+1;
            mat2[i][j] = i+1;
            result[i][j] = 0;
        }
    }
    // Allocate device memory.
    hipMalloc((void**)&d_in_mat1,SIZE*SIZE*sizeof(int));
    hipMalloc((void**)&d_in_mat2,SIZE*SIZE*sizeof(int));
    hipMalloc((void**)&d_out_result,SIZE*SIZE*sizeof(long));
    // Copy the operands to the device.
    hipMemcpy(d_in_mat1,mat1,SIZE*SIZE*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(d_in_mat2,mat2,SIZE*SIZE*sizeof(int),hipMemcpyHostToDevice);
    // Pick the launch configuration. Fix: the previous code set the block to
    // 512x512 = 262144 threads whenever SIZE*SIZE > 512, which exceeds the
    // per-block hardware limit (typically 1024) and made the launch fail.
    dim3 threadsPerBlock(SIZE, SIZE);
    dim3 blocksPerGrid(1, 1);
    if(SIZE*SIZE > 512){
        threadsPerBlock.x = 16;
        threadsPerBlock.y = 16;
        blocksPerGrid.x = ceil(double(SIZE)/double(threadsPerBlock.x));
        blocksPerGrid.y = ceil(double(SIZE)/double(threadsPerBlock.y));
    }
    clock_t startTime = clock();
    hipLaunchKernelGGL(( mul), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_in_mat1,d_in_mat2,d_out_result);
    // Fix: kernel launches are asynchronous; wait for completion so the
    // elapsed time covers the computation, not just the launch overhead.
    hipDeviceSynchronize();
    clock_t endTime = clock();
    printf("\n\nTime for GPU: %f",(float)(endTime-startTime)/CLOCKS_PER_SEC);
    // Copy the product back (blocking copy, so the data is ready after it).
    hipMemcpy(result,d_out_result,SIZE*SIZE*sizeof(long),hipMemcpyDeviceToHost);
    printf("\nres GPU: %ld", result[0][0]);
    // Fix: release the device buffers (they were never freed before).
    hipFree(d_in_mat1);
    hipFree(d_in_mat2);
    hipFree(d_out_result);
    // CPU reference computation.
    startTime = clock();
    for(int i = 0 ; i < SIZE ; i++){
        for(int j = 0 ; j < SIZE ; j++){
            result[i][j] = 0;
            for(int k = 0 ; k < SIZE ; k++)
                result[i][j] += mat1[i][k]*mat2[k][j];
        }
    }
    // Stop the clock before printing so I/O is not counted in the CPU time.
    endTime = clock();
    printf("\nres seq: %ld", result[0][0]);
    printf("\n\nTime for sequential: %f",(float)(endTime-startTime)/CLOCKS_PER_SEC);
    return 0;
} | 426aaa199151eaf1f6df4af9b3fcb5ae274a2c4c.cu | #include<iostream>
#include<time.h>
#define SIZE 50
using namespace std;
// Matrix-multiply kernel: each thread computes one element (row, col) of the
// SIZE x SIZE product. Expects a 2D launch whose grid covers at least SIZE
// threads along x (rows) and y (columns); threads past the edge exit early.
__global__ void mul(int (*mat1)[SIZE][SIZE] , int (*mat2)[SIZE][SIZE] , long (*result)[SIZE][SIZE]){
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= SIZE || col >= SIZE) return;  // guard the grid tail
    long acc = 0;  // accumulate in a register, store once at the end
    for (int t = 0; t < SIZE; ++t)
        acc += (*mat1)[row][t] * (*mat2)[t][col];
    (*result)[row][col] = acc;
}
// Host driver: multiplies two SIZE x SIZE matrices on the GPU, then repeats
// the computation sequentially on the CPU, timing and printing both results.
// Both inputs are filled so every element of row i equals i+1 (note: mat1 and
// mat2 are identical by construction).
int main(){
    int mat1[SIZE][SIZE];
    int mat2[SIZE][SIZE];
    long result[SIZE][SIZE];
    // pointers to device copies of the whole matrices
    int (*d_in_mat1)[SIZE][SIZE], (*d_in_mat2)[SIZE][SIZE];
    long (*d_out_result)[SIZE][SIZE];
    // initialize host data
    for(int i = 0 ; i < SIZE ; i++){
        for(int j = 0 ; j < SIZE ; j++){
            mat1[i][j] = i+1;
            mat2[i][j] = i+1;
            result[i][j] = 0;
        }
    }
    // allocate device memory
    cudaMalloc((void**)&d_in_mat1,SIZE*SIZE*sizeof(int));
    cudaMalloc((void**)&d_in_mat2,SIZE*SIZE*sizeof(int));
    cudaMalloc((void**)&d_out_result,SIZE*SIZE*sizeof(long));
    // copy the inputs to the device
    cudaMemcpy(d_in_mat1,mat1,SIZE*SIZE*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_in_mat2,mat2,SIZE*SIZE*sizeof(int),cudaMemcpyHostToDevice);
    // Launch configuration: 2D blocks tiling the SIZE x SIZE output.
    // BUG FIX: this previously requested 512x512 threads per block
    // (262144 threads), far above the 1024-threads-per-block hardware
    // limit, so the launch failed silently and the "GPU result" was never
    // computed. 16x16 = 256 threads per block is valid on all devices.
    dim3 threadsPerBlock(SIZE, SIZE);
    dim3 blocksPerGrid(1, 1);
    if(SIZE*SIZE > 512){
        threadsPerBlock.x = 16;
        threadsPerBlock.y = 16;
        blocksPerGrid.x = ceil(double(SIZE)/double(threadsPerBlock.x));
        blocksPerGrid.y = ceil(double(SIZE)/double(threadsPerBlock.y));
    }
    clock_t startTime = clock();
    mul<<<blocksPerGrid,threadsPerBlock>>>(d_in_mat1,d_in_mat2,d_out_result);
    // Kernel launches are asynchronous: block until the kernel finishes so
    // the timing below measures the GPU work, not just the launch call.
    cudaDeviceSynchronize();
    clock_t endTime = clock();
    printf("\n\nTime for GPU: %f",(float)(endTime-startTime)/CLOCKS_PER_SEC);
    // copy the result back (cudaMemcpy is synchronous)
    cudaMemcpy(result,d_out_result,SIZE*SIZE*sizeof(long),cudaMemcpyDeviceToHost);
    printf("\nres GPU: %ld", result[0][0]);
    // sequential CPU reference, reusing (and overwriting) `result`
    startTime = clock();
    for(int i = 0 ; i < SIZE ; i++){
        for(int j = 0 ; j < SIZE ; j++){
            result[i][j] = 0;
            for(int k = 0 ; k < SIZE ; k++)
                result[i][j] += mat1[i][k]*mat2[k][j];
        }
    }
    // stop the clock before printing so I/O is not counted in the timing
    endTime = clock();
    printf("\nres seq: %ld", result[0][0]);
    printf("\n\nTime for sequential: %f",(float)(endTime-startTime)/CLOCKS_PER_SEC);
    // release device memory (was previously leaked)
    cudaFree(d_in_mat1);
    cudaFree(d_in_mat2);
    cudaFree(d_out_result);
    return 0;
} |
a5b0e6b06676a9600863f4e6157a10f4db55a04b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/frequent_embedding.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/infrequent_embedding.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/model.hpp"
#include "HugeCTR/include/utils.cuh"
#include "hybrid_embedding_cpu.hpp"
#include "test_common.cuh"
/**************** Infrequent embedding: forward sent message ****************/
/**
 * Unit test for the infrequent embedding's *forward* all-to-all messages:
 * a CPU reference (HybridEmbeddingCpu) produces the expected message
 * buffers, each GPU instance then runs forward_model (flat IB_NVLink path)
 * or fused_intra_forward_model (hierarchical IB_NVLink_Hier path, which
 * needs per-node message-buffer pointer tables), and the generated buffers
 * are compared element-wise against the reference.
 */
template <typename dtype, typename emtype>
class ForwardSentMessageTest : public HybridEmbeddingUnitTest<dtype, emtype> {
public:
ForwardSentMessageTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size,
size_t seed = 1234ll)
: HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {}
// Runs one forward-message round-trip and asserts GPU == CPU reference.
void run() {
uint32_t local_batch_size = ceildiv<uint32_t>(this->batch_size, this->num_instances);
uint32_t instances_per_node = this->num_instances / this->config.num_nodes;
/* Compute expected results on host */
HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size,
this->category_location,
this->category_frequent_index, this->samples);
cpu_embedding.calculate_infrequent_model_indices();
cpu_embedding.generate_embedding_vectors();
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
cpu_embedding.forward_a2a_messages_hier();
} else {
cpu_embedding.forward_a2a_messages();
}
/* Tensors and vectors for the generated messages */
std::shared_ptr<GeneralBuffer2<CudaAllocator>> buff = GeneralBuffer2<CudaAllocator>::create();
std::vector<Tensor2<emtype>> sent_messages(this->num_instances);
std::vector<Tensor2<emtype*>> message_buffer_pointers(this->num_instances);
std::vector<std::vector<emtype>> h_sent_messages(this->num_instances);
for (size_t i = 0; i < this->num_instances; i++) {
// One full-size message buffer per instance; the pointer table is only
// needed by the hierarchical (fused intra-node) code path.
buff->reserve({this->num_instances * local_batch_size * this->config.num_tables,
this->config.embedding_vec_size},
&sent_messages[i]);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
buff->reserve({instances_per_node, 1}, &message_buffer_pointers[i]);
}
}
buff->allocate();
this->build_infrequent();
std::vector<std::vector<emtype*>> h_message_buffer_pointers(this->config.num_nodes);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
/* Construct the arrays of pointers for each node */
for (size_t i = 0; i < this->config.num_nodes; i++) {
h_message_buffer_pointers[i].resize(instances_per_node);
}
for (size_t i = 0; i < this->num_instances; i++) {
h_message_buffer_pointers[i / instances_per_node][i % instances_per_node] =
sent_messages[i].get_ptr();
}
/* Copy the arrays to device */
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(hipMemcpyAsync(message_buffer_pointers[i].get_ptr(),
h_message_buffer_pointers[i / instances_per_node].data(),
instances_per_node * sizeof(emtype*), hipMemcpyHostToDevice,
this->stream));
}
/* Fill buffers with zeroes */
// The hierarchical comparison below checks the whole fused buffer, so
// slots the kernel does not write must hold a deterministic value (0).
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(
hipMemsetAsync(sent_messages[i].get_ptr(), 0,
this->num_instances * local_batch_size * this->config.num_tables *
this->config.embedding_vec_size * sizeof(emtype),
this->stream));
}
}
/* Infrequent forward_model */
for (size_t i = 0; i < this->num_instances; i++) {
upload_tensor(cpu_embedding.infrequent_embedding_vectors[i],
this->infrequent_embeddings[i].infrequent_embedding_vectors_, this->stream);
this->infrequent_embeddings[i].calculate_model_indices(this->stream);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
this->infrequent_embeddings[i].fused_intra_forward_model(
message_buffer_pointers[i].get_ptr(), this->stream);
} else {
this->infrequent_embeddings[i].forward_model(sent_messages[i].get_ptr(), this->stream);
}
}
for (size_t i = 0; i < this->num_instances; i++) {
download_tensor(h_sent_messages[i], sent_messages[i], this->stream);
}
/* Compare */
// Hier mode compares the entire fused buffer; flat mode only the portion
// actually populated, per the CPU model-indices offsets. The 1e-2
// tolerance is loose enough for __half emtype instantiations.
for (size_t i = 0; i < this->num_instances; i++) {
uint32_t message_size = this->config.comm_type == CommunicationType::IB_NVLink_Hier
? (this->num_instances * local_batch_size *
this->config.num_tables * this->config.embedding_vec_size)
: (this->config.embedding_vec_size *
cpu_embedding.model_indices_offsets[i][this->num_instances]);
ASSERT_TRUE(compare_array(message_size, h_sent_messages[i].data(),
cpu_embedding.forward_sent_messages[i].data(), 1e-2));
}
}
};
/**************** Infrequent embedding: backward sent message ****************/
/**
 * Unit test for the infrequent embedding's *backward* (gradient) all-to-all
 * messages: a CPU reference (HybridEmbeddingCpu) generates gradients and the
 * expected message buffers, each GPU instance then runs update_network (flat
 * IB_NVLink path) or fused_intra_update_network (hierarchical IB_NVLink_Hier
 * path, which needs per-node message-buffer pointer tables), and the
 * generated buffers are compared element-wise against the reference.
 */
template <typename dtype, typename emtype>
class BackwardSentMessageTest : public HybridEmbeddingUnitTest<dtype, emtype> {
public:
BackwardSentMessageTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size,
size_t seed = 1234ll)
: HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {}
// Runs one backward-message round-trip and asserts GPU == CPU reference.
void run() {
uint32_t local_batch_size = ceildiv<uint32_t>(this->batch_size, this->num_instances);
uint32_t instances_per_node = this->num_instances / this->config.num_nodes;
/* Compute expected results on host */
HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size,
this->category_location,
this->category_frequent_index, this->samples);
cpu_embedding.calculate_infrequent_model_indices();
cpu_embedding.calculate_infrequent_network_indices();
cpu_embedding.generate_gradients();
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
cpu_embedding.backward_a2a_messages_hier();
} else {
cpu_embedding.backward_a2a_messages();
}
/* Tensors and vectors for the gradients and generated messages */
std::shared_ptr<GeneralBuffer2<CudaAllocator>> buff = GeneralBuffer2<CudaAllocator>::create();
std::vector<Tensor2<emtype>> sent_messages(this->num_instances);
std::vector<Tensor2<emtype*>> message_buffer_pointers(this->num_instances);
std::vector<std::vector<emtype>> h_sent_messages(this->num_instances);
for (size_t i = 0; i < this->num_instances; i++) {
// One full-size message buffer per instance; the pointer table is only
// needed by the hierarchical (fused intra-node) code path.
buff->reserve({this->num_instances * local_batch_size * this->config.num_tables,
this->config.embedding_vec_size},
&sent_messages[i]);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
buff->reserve({instances_per_node, 1}, &message_buffer_pointers[i]);
}
}
// Per-instance input gradients fed to update_network.
std::vector<Tensor2<emtype>> gradients(this->num_instances);
for (size_t i = 0; i < this->num_instances; i++) {
buff->reserve({local_batch_size * this->config.num_tables, this->config.embedding_vec_size},
&gradients[i]);
}
buff->allocate();
this->build_infrequent();
std::vector<std::vector<emtype*>> h_message_buffer_pointers(this->config.num_nodes);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
/* Construct the arrays of pointers for each node */
for (size_t i = 0; i < this->config.num_nodes; i++) {
h_message_buffer_pointers[i].resize(instances_per_node);
}
for (size_t i = 0; i < this->num_instances; i++) {
h_message_buffer_pointers[i / instances_per_node][i % instances_per_node] =
sent_messages[i].get_ptr();
}
/* Copy the arrays to device */
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(hipMemcpyAsync(message_buffer_pointers[i].get_ptr(),
h_message_buffer_pointers[i / instances_per_node].data(),
instances_per_node * sizeof(emtype*), hipMemcpyHostToDevice,
this->stream));
}
/* Fill buffers with zeroes */
// The hierarchical comparison below checks the whole fused buffer, so
// slots the kernel does not write must hold a deterministic value (0).
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(
hipMemsetAsync(sent_messages[i].get_ptr(), 0,
this->num_instances * local_batch_size * this->config.num_tables *
this->config.embedding_vec_size * sizeof(emtype),
this->stream));
}
}
/* Infrequent update_network */
for (size_t i = 0; i < this->num_instances; i++) {
upload_tensor(cpu_embedding.gradients[i], gradients[i], this->stream);
this->infrequent_embeddings[i].calculate_network_indices(this->stream);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
this->infrequent_embeddings[i].fused_intra_update_network(
gradients[i].get_ptr(), message_buffer_pointers[i].get_ptr(), this->stream);
} else {
this->infrequent_embeddings[i].update_network(gradients[i].get_ptr(),
sent_messages[i].get_ptr(), this->stream);
}
}
for (size_t i = 0; i < this->num_instances; i++) {
download_tensor(h_sent_messages[i], sent_messages[i], this->stream);
}
/* Compare */
// Hier mode compares the entire fused buffer; flat mode only the portion
// actually populated, per the CPU network-indices offsets. The 1e-2
// tolerance is loose enough for __half emtype instantiations.
for (size_t i = 0; i < this->num_instances; i++) {
uint32_t message_size = this->config.comm_type == CommunicationType::IB_NVLink_Hier
? (this->num_instances * local_batch_size *
this->config.num_tables * this->config.embedding_vec_size)
: (this->config.embedding_vec_size *
cpu_embedding.network_indices_offsets[i][this->num_instances]);
ASSERT_TRUE(compare_array(message_size, h_sent_messages[i].data(),
cpu_embedding.backward_sent_messages[i].data(), 1e-2));
}
}
};
/**************************** Test instantiations ****************************/
// Shared test configurations. Positional initializers follow
// HybridEmbeddingConfig's member order; judging from the "no frequent" /
// "all frequent" variants below, the 5th value (1000) is the total category
// count and the 6th the number of frequent categories.
// NOTE(review): confirm the meaning of the remaining fields
// (4, 32, 10, 128, ..., 0.5f) against HybridEmbeddingConfig's declaration.
static const HybridEmbeddingConfig<uint32_t> config_uint32 = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink};
static const HybridEmbeddingConfig<long long> config_int64 = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink};
// Edge cases: no frequent, all frequent
static const HybridEmbeddingConfig<uint32_t> config_no_freq = {
4, 32, 10, 128, 1000, 0, 0.5f, CommunicationType::IB_NVLink};
static const HybridEmbeddingConfig<uint32_t> config_all_freq = {
4, 32, 10, 128, 1000, 1000, 0.5f, CommunicationType::IB_NVLink};
// Hierarchical A2A
static const HybridEmbeddingConfig<uint32_t> config_uint32_hier = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink_Hier};
static const HybridEmbeddingConfig<long long> config_int64_hier = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink_Hier};
static const HybridEmbeddingConfig<uint32_t> config_no_freq_hier = {
4, 32, 10, 128, 1000, 0, 0.5f, CommunicationType::IB_NVLink_Hier};
static const HybridEmbeddingConfig<uint32_t> config_all_freq_hier = {
4, 32, 10, 128, 1000, 1000, 0.5f, CommunicationType::IB_NVLink_Hier};
/* hybrid_embedding_forward_sent_message_test */
// Test-case names encode <index dtype>_<embedding dtype>_<batch size>, with
// an optional suffix for the frequent-category edge cases (no_freq/all_freq).
TEST(hybrid_embedding_forward_sent_message_test, uint32_half_64) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_half_64) {
ForwardSentMessageTest<long long, __half>(config_int64, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_half_2048) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_half_2048) {
ForwardSentMessageTest<long long, __half>(config_int64, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_64) {
ForwardSentMessageTest<uint32_t, float>(config_uint32, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_float_64) {
ForwardSentMessageTest<long long, float>(config_int64, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_2048) {
ForwardSentMessageTest<uint32_t, float>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_float_2048) {
ForwardSentMessageTest<long long, float>(config_int64, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_128_no_freq) {
ForwardSentMessageTest<uint32_t, float>(config_no_freq, 128).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_128_all_freq) {
ForwardSentMessageTest<uint32_t, float>(config_all_freq, 128).run();
}
/* hybrid_embedding_forward_sent_message_hier_test */
// Same matrix of cases, exercising the hierarchical (fused intra-node) path.
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_half_64) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_half_64) {
ForwardSentMessageTest<long long, __half>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_half_2048) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_half_2048) {
ForwardSentMessageTest<long long, __half>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_64) {
ForwardSentMessageTest<uint32_t, float>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_float_64) {
ForwardSentMessageTest<long long, float>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_2048) {
ForwardSentMessageTest<uint32_t, float>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_float_2048) {
ForwardSentMessageTest<long long, float>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_128_no_freq) {
ForwardSentMessageTest<uint32_t, float>(config_no_freq_hier, 128).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_128_all_freq) {
ForwardSentMessageTest<uint32_t, float>(config_all_freq_hier, 128).run();
}
/* hybrid_embedding_backward_sent_message_test */
// Backward (gradient) counterpart of the forward cases above.
TEST(hybrid_embedding_backward_sent_message_test, uint32_half_64) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_half_64) {
BackwardSentMessageTest<long long, __half>(config_int64, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_half_2048) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_half_2048) {
BackwardSentMessageTest<long long, __half>(config_int64, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_64) {
BackwardSentMessageTest<uint32_t, float>(config_uint32, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_float_64) {
BackwardSentMessageTest<long long, float>(config_int64, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_2048) {
BackwardSentMessageTest<uint32_t, float>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_float_2048) {
BackwardSentMessageTest<long long, float>(config_int64, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_128_no_freq) {
BackwardSentMessageTest<uint32_t, float>(config_no_freq, 128).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_128_all_freq) {
BackwardSentMessageTest<uint32_t, float>(config_all_freq, 128).run();
}
/* hybrid_embedding_backward_sent_message_hier_test */
// Backward cases on the hierarchical (fused intra-node) path.
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_half_64) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_half_64) {
BackwardSentMessageTest<long long, __half>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_half_2048) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_half_2048) {
BackwardSentMessageTest<long long, __half>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_64) {
BackwardSentMessageTest<uint32_t, float>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_float_64) {
BackwardSentMessageTest<long long, float>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_2048) {
BackwardSentMessageTest<uint32_t, float>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_float_2048) {
BackwardSentMessageTest<long long, float>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_128_no_freq) {
BackwardSentMessageTest<uint32_t, float>(config_no_freq_hier, 128).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_128_all_freq) {
BackwardSentMessageTest<uint32_t, float>(config_all_freq_hier, 128).run();
}
| a5b0e6b06676a9600863f4e6157a10f4db55a04b.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/frequent_embedding.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/infrequent_embedding.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/model.hpp"
#include "HugeCTR/include/utils.cuh"
#include "hybrid_embedding_cpu.hpp"
#include "test_common.cuh"
/**************** Infrequent embedding: forward sent message ****************/
/**
 * Unit test for the infrequent embedding's *forward* all-to-all messages:
 * a CPU reference (HybridEmbeddingCpu) produces the expected message
 * buffers, each GPU instance then runs forward_model (flat IB_NVLink path)
 * or fused_intra_forward_model (hierarchical IB_NVLink_Hier path, which
 * needs per-node message-buffer pointer tables), and the generated buffers
 * are compared element-wise against the reference.
 */
template <typename dtype, typename emtype>
class ForwardSentMessageTest : public HybridEmbeddingUnitTest<dtype, emtype> {
public:
ForwardSentMessageTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size,
size_t seed = 1234ll)
: HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {}
// Runs one forward-message round-trip and asserts GPU == CPU reference.
void run() {
uint32_t local_batch_size = ceildiv<uint32_t>(this->batch_size, this->num_instances);
uint32_t instances_per_node = this->num_instances / this->config.num_nodes;
/* Compute expected results on host */
HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size,
this->category_location,
this->category_frequent_index, this->samples);
cpu_embedding.calculate_infrequent_model_indices();
cpu_embedding.generate_embedding_vectors();
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
cpu_embedding.forward_a2a_messages_hier();
} else {
cpu_embedding.forward_a2a_messages();
}
/* Tensors and vectors for the generated messages */
std::shared_ptr<GeneralBuffer2<CudaAllocator>> buff = GeneralBuffer2<CudaAllocator>::create();
std::vector<Tensor2<emtype>> sent_messages(this->num_instances);
std::vector<Tensor2<emtype*>> message_buffer_pointers(this->num_instances);
std::vector<std::vector<emtype>> h_sent_messages(this->num_instances);
for (size_t i = 0; i < this->num_instances; i++) {
// One full-size message buffer per instance; the pointer table is only
// needed by the hierarchical (fused intra-node) code path.
buff->reserve({this->num_instances * local_batch_size * this->config.num_tables,
this->config.embedding_vec_size},
&sent_messages[i]);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
buff->reserve({instances_per_node, 1}, &message_buffer_pointers[i]);
}
}
buff->allocate();
this->build_infrequent();
std::vector<std::vector<emtype*>> h_message_buffer_pointers(this->config.num_nodes);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
/* Construct the arrays of pointers for each node */
for (size_t i = 0; i < this->config.num_nodes; i++) {
h_message_buffer_pointers[i].resize(instances_per_node);
}
for (size_t i = 0; i < this->num_instances; i++) {
h_message_buffer_pointers[i / instances_per_node][i % instances_per_node] =
sent_messages[i].get_ptr();
}
/* Copy the arrays to device */
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(cudaMemcpyAsync(message_buffer_pointers[i].get_ptr(),
h_message_buffer_pointers[i / instances_per_node].data(),
instances_per_node * sizeof(emtype*), cudaMemcpyHostToDevice,
this->stream));
}
/* Fill buffers with zeroes */
// The hierarchical comparison below checks the whole fused buffer, so
// slots the kernel does not write must hold a deterministic value (0).
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(
cudaMemsetAsync(sent_messages[i].get_ptr(), 0,
this->num_instances * local_batch_size * this->config.num_tables *
this->config.embedding_vec_size * sizeof(emtype),
this->stream));
}
}
/* Infrequent forward_model */
for (size_t i = 0; i < this->num_instances; i++) {
upload_tensor(cpu_embedding.infrequent_embedding_vectors[i],
this->infrequent_embeddings[i].infrequent_embedding_vectors_, this->stream);
this->infrequent_embeddings[i].calculate_model_indices(this->stream);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
this->infrequent_embeddings[i].fused_intra_forward_model(
message_buffer_pointers[i].get_ptr(), this->stream);
} else {
this->infrequent_embeddings[i].forward_model(sent_messages[i].get_ptr(), this->stream);
}
}
for (size_t i = 0; i < this->num_instances; i++) {
download_tensor(h_sent_messages[i], sent_messages[i], this->stream);
}
/* Compare */
// Hier mode compares the entire fused buffer; flat mode only the portion
// actually populated, per the CPU model-indices offsets. The 1e-2
// tolerance is loose enough for __half emtype instantiations.
for (size_t i = 0; i < this->num_instances; i++) {
uint32_t message_size = this->config.comm_type == CommunicationType::IB_NVLink_Hier
? (this->num_instances * local_batch_size *
this->config.num_tables * this->config.embedding_vec_size)
: (this->config.embedding_vec_size *
cpu_embedding.model_indices_offsets[i][this->num_instances]);
ASSERT_TRUE(compare_array(message_size, h_sent_messages[i].data(),
cpu_embedding.forward_sent_messages[i].data(), 1e-2));
}
}
};
/**************** Infrequent embedding: backward sent message ****************/
/**
 * Unit test for the infrequent embedding's *backward* (gradient) all-to-all
 * messages: a CPU reference (HybridEmbeddingCpu) generates gradients and the
 * expected message buffers, each GPU instance then runs update_network (flat
 * IB_NVLink path) or fused_intra_update_network (hierarchical IB_NVLink_Hier
 * path, which needs per-node message-buffer pointer tables), and the
 * generated buffers are compared element-wise against the reference.
 */
template <typename dtype, typename emtype>
class BackwardSentMessageTest : public HybridEmbeddingUnitTest<dtype, emtype> {
public:
BackwardSentMessageTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size,
size_t seed = 1234ll)
: HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {}
// Runs one backward-message round-trip and asserts GPU == CPU reference.
void run() {
uint32_t local_batch_size = ceildiv<uint32_t>(this->batch_size, this->num_instances);
uint32_t instances_per_node = this->num_instances / this->config.num_nodes;
/* Compute expected results on host */
HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size,
this->category_location,
this->category_frequent_index, this->samples);
cpu_embedding.calculate_infrequent_model_indices();
cpu_embedding.calculate_infrequent_network_indices();
cpu_embedding.generate_gradients();
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
cpu_embedding.backward_a2a_messages_hier();
} else {
cpu_embedding.backward_a2a_messages();
}
/* Tensors and vectors for the gradients and generated messages */
std::shared_ptr<GeneralBuffer2<CudaAllocator>> buff = GeneralBuffer2<CudaAllocator>::create();
std::vector<Tensor2<emtype>> sent_messages(this->num_instances);
std::vector<Tensor2<emtype*>> message_buffer_pointers(this->num_instances);
std::vector<std::vector<emtype>> h_sent_messages(this->num_instances);
for (size_t i = 0; i < this->num_instances; i++) {
// One full-size message buffer per instance; the pointer table is only
// needed by the hierarchical (fused intra-node) code path.
buff->reserve({this->num_instances * local_batch_size * this->config.num_tables,
this->config.embedding_vec_size},
&sent_messages[i]);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
buff->reserve({instances_per_node, 1}, &message_buffer_pointers[i]);
}
}
// Per-instance input gradients fed to update_network.
std::vector<Tensor2<emtype>> gradients(this->num_instances);
for (size_t i = 0; i < this->num_instances; i++) {
buff->reserve({local_batch_size * this->config.num_tables, this->config.embedding_vec_size},
&gradients[i]);
}
buff->allocate();
this->build_infrequent();
std::vector<std::vector<emtype*>> h_message_buffer_pointers(this->config.num_nodes);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
/* Construct the arrays of pointers for each node */
for (size_t i = 0; i < this->config.num_nodes; i++) {
h_message_buffer_pointers[i].resize(instances_per_node);
}
for (size_t i = 0; i < this->num_instances; i++) {
h_message_buffer_pointers[i / instances_per_node][i % instances_per_node] =
sent_messages[i].get_ptr();
}
/* Copy the arrays to device */
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(cudaMemcpyAsync(message_buffer_pointers[i].get_ptr(),
h_message_buffer_pointers[i / instances_per_node].data(),
instances_per_node * sizeof(emtype*), cudaMemcpyHostToDevice,
this->stream));
}
/* Fill buffers with zeroes */
// The hierarchical comparison below checks the whole fused buffer, so
// slots the kernel does not write must hold a deterministic value (0).
for (size_t i = 0; i < this->num_instances; i++) {
CK_CUDA_THROW_(
cudaMemsetAsync(sent_messages[i].get_ptr(), 0,
this->num_instances * local_batch_size * this->config.num_tables *
this->config.embedding_vec_size * sizeof(emtype),
this->stream));
}
}
/* Infrequent update_network */
for (size_t i = 0; i < this->num_instances; i++) {
upload_tensor(cpu_embedding.gradients[i], gradients[i], this->stream);
this->infrequent_embeddings[i].calculate_network_indices(this->stream);
if (this->config.comm_type == CommunicationType::IB_NVLink_Hier) {
this->infrequent_embeddings[i].fused_intra_update_network(
gradients[i].get_ptr(), message_buffer_pointers[i].get_ptr(), this->stream);
} else {
this->infrequent_embeddings[i].update_network(gradients[i].get_ptr(),
sent_messages[i].get_ptr(), this->stream);
}
}
for (size_t i = 0; i < this->num_instances; i++) {
download_tensor(h_sent_messages[i], sent_messages[i], this->stream);
}
/* Compare */
// Hier mode compares the entire fused buffer; flat mode only the portion
// actually populated, per the CPU network-indices offsets. The 1e-2
// tolerance is loose enough for __half emtype instantiations.
for (size_t i = 0; i < this->num_instances; i++) {
uint32_t message_size = this->config.comm_type == CommunicationType::IB_NVLink_Hier
? (this->num_instances * local_batch_size *
this->config.num_tables * this->config.embedding_vec_size)
: (this->config.embedding_vec_size *
cpu_embedding.network_indices_offsets[i][this->num_instances]);
ASSERT_TRUE(compare_array(message_size, h_sent_messages[i].data(),
cpu_embedding.backward_sent_messages[i].data(), 1e-2));
}
}
};
/**************************** Test instantiations ****************************/
// Shared test configurations. Positional initializers follow
// HybridEmbeddingConfig's member order; judging from the "no frequent" /
// "all frequent" variants below, the 5th value (1000) is the total category
// count and the 6th the number of frequent categories.
// NOTE(review): confirm the meaning of the remaining fields
// (4, 32, 10, 128, ..., 0.5f) against HybridEmbeddingConfig's declaration.
static const HybridEmbeddingConfig<uint32_t> config_uint32 = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink};
static const HybridEmbeddingConfig<long long> config_int64 = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink};
// Edge cases: no frequent, all frequent
static const HybridEmbeddingConfig<uint32_t> config_no_freq = {
4, 32, 10, 128, 1000, 0, 0.5f, CommunicationType::IB_NVLink};
static const HybridEmbeddingConfig<uint32_t> config_all_freq = {
4, 32, 10, 128, 1000, 1000, 0.5f, CommunicationType::IB_NVLink};
// Hierarchical A2A
static const HybridEmbeddingConfig<uint32_t> config_uint32_hier = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink_Hier};
static const HybridEmbeddingConfig<long long> config_int64_hier = {
4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink_Hier};
static const HybridEmbeddingConfig<uint32_t> config_no_freq_hier = {
4, 32, 10, 128, 1000, 0, 0.5f, CommunicationType::IB_NVLink_Hier};
static const HybridEmbeddingConfig<uint32_t> config_all_freq_hier = {
4, 32, 10, 128, 1000, 1000, 0.5f, CommunicationType::IB_NVLink_Hier};
/* hybrid_embedding_forward_sent_message_test */
TEST(hybrid_embedding_forward_sent_message_test, uint32_half_64) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_half_64) {
ForwardSentMessageTest<long long, __half>(config_int64, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_half_2048) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_half_2048) {
ForwardSentMessageTest<long long, __half>(config_int64, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_64) {
ForwardSentMessageTest<uint32_t, float>(config_uint32, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_float_64) {
ForwardSentMessageTest<long long, float>(config_int64, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_2048) {
ForwardSentMessageTest<uint32_t, float>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, int64_float_2048) {
ForwardSentMessageTest<long long, float>(config_int64, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_128_no_freq) {
ForwardSentMessageTest<uint32_t, float>(config_no_freq, 128).run();
}
TEST(hybrid_embedding_forward_sent_message_test, uint32_float_128_all_freq) {
ForwardSentMessageTest<uint32_t, float>(config_all_freq, 128).run();
}
/* hybrid_embedding_forward_sent_message_hier_test */
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_half_64) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_half_64) {
ForwardSentMessageTest<long long, __half>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_half_2048) {
ForwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_half_2048) {
ForwardSentMessageTest<long long, __half>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_64) {
ForwardSentMessageTest<uint32_t, float>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_float_64) {
ForwardSentMessageTest<long long, float>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_2048) {
ForwardSentMessageTest<uint32_t, float>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, int64_float_2048) {
ForwardSentMessageTest<long long, float>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_128_no_freq) {
ForwardSentMessageTest<uint32_t, float>(config_no_freq_hier, 128).run();
}
TEST(hybrid_embedding_forward_sent_message_hier_test, uint32_float_128_all_freq) {
ForwardSentMessageTest<uint32_t, float>(config_all_freq_hier, 128).run();
}
/* hybrid_embedding_backward_sent_message_test */
TEST(hybrid_embedding_backward_sent_message_test, uint32_half_64) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_half_64) {
BackwardSentMessageTest<long long, __half>(config_int64, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_half_2048) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_half_2048) {
BackwardSentMessageTest<long long, __half>(config_int64, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_64) {
BackwardSentMessageTest<uint32_t, float>(config_uint32, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_float_64) {
BackwardSentMessageTest<long long, float>(config_int64, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_2048) {
BackwardSentMessageTest<uint32_t, float>(config_uint32, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, int64_float_2048) {
BackwardSentMessageTest<long long, float>(config_int64, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_128_no_freq) {
BackwardSentMessageTest<uint32_t, float>(config_no_freq, 128).run();
}
TEST(hybrid_embedding_backward_sent_message_test, uint32_float_128_all_freq) {
BackwardSentMessageTest<uint32_t, float>(config_all_freq, 128).run();
}
/* hybrid_embedding_backward_sent_message_hier_test */
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_half_64) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_half_64) {
BackwardSentMessageTest<long long, __half>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_half_2048) {
BackwardSentMessageTest<uint32_t, __half>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_half_2048) {
BackwardSentMessageTest<long long, __half>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_64) {
BackwardSentMessageTest<uint32_t, float>(config_uint32_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_float_64) {
BackwardSentMessageTest<long long, float>(config_int64_hier, 64).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_2048) {
BackwardSentMessageTest<uint32_t, float>(config_uint32_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, int64_float_2048) {
BackwardSentMessageTest<long long, float>(config_int64_hier, 2048).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_128_no_freq) {
BackwardSentMessageTest<uint32_t, float>(config_no_freq_hier, 128).run();
}
TEST(hybrid_embedding_backward_sent_message_hier_test, uint32_float_128_all_freq) {
BackwardSentMessageTest<uint32_t, float>(config_all_freq_hier, 128).run();
}
|
3c1498715a6a8bf2e88fa3ab7bbe4b2fa72c9cb6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *g_idata = NULL;
hipMalloc(&g_idata, XSIZE*YSIZE);
double *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3c1498715a6a8bf2e88fa3ab7bbe4b2fa72c9cb6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *g_idata = NULL;
cudaMalloc(&g_idata, XSIZE*YSIZE);
double *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduce<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduce<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduce<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
84975c0fb7649b80377fb11e4ba25ff8bd7a723b.hip | // !!! This is a file automatically generated by hipify!!!
//jacobi7.cu
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include "getopt.h"
#include "jacobi7_cuda_3d.h"
#include "jacobi7.h"
#include <assert.h>
//#ifndef TIME_TILE_SIZE
//#warning TIME_TILE_SIZE is not set, defaulting to 1
//#define TIME_TILE_SIZE 2
//#endif
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
//#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
//#endif
return result;
}
// Timer function
double rtclock(){
struct timeval tp;
gettimeofday(&tp, NULL);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
int main(int argc, char* *argv){
if(argc != 8) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TZ> <TIME STEPS>\n", argv[0]);
return 1;
}
// program parameters trans
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int tz = atoi(argv[6]);
const int timesteps = atoi(argv[7]);
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
float *h_A;
float *h_B;
float *d_A;
float *d_B;
float *h_A1;
float *h_B1;
// Allocate host buffers
checkCuda(hipHostMalloc((void**)&h_A, xyz_bytes)); // host pinned
checkCuda(hipHostMalloc((void**)&h_B, xyz_bytes));
// for comparison btw CPU and GPU version
checkCuda(hipHostMalloc((void**)&h_A1, xyz_bytes));
checkCuda(hipHostMalloc((void**)&h_B1, xyz_bytes));
// grid data iniatialization
// randomly generaed test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_B[i] = h_A[i];
h_A1[i] = h_A[i];
h_B1[i] = h_A[i];
}
int testIndex = 3 + 3*nx+ 3*nx*ny;
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
// Always use device 0
hipSetDevice(0);
printf("Start computing...\n");
/* set the ratio of cache/shared memory
hipFuncCachePreferNone: Default function cache configuration, no preference
hipFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
hipFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
*/
//checkCuda(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
// Allocate device buffers
checkCuda(hipMalloc((void**)&d_A, xyz_bytes));
checkCuda(hipMalloc((void**)&d_B, xyz_bytes));
// Copy to device
checkCuda(hipMemcpy(d_A, h_A, xyz_bytes, hipMemcpyHostToDevice));
checkCuda(hipMemcpy(d_B, d_A, xyz_bytes, hipMemcpyDeviceToDevice));
// Setup the kernel
float* input = d_A;
float* output = d_B;
dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty, (nz+tz-1)/tz);
dim3 block(tx, ty, tz);
printf("grid:(%d, %d, %d)\n", grid.x, grid.y, grid.z);
printf("block:(%d, %d, %d)\n", tx, ty, tz);
float *tmp;
float *tmp1;
float fac = 6.0/(h_A[0] * h_A[0]);
const int sharedMemSize = (tx + 2) * (ty + 2) * (tz + 2) * sizeof(float);
printf("sharedMemSize:%d\n",sharedMemSize);
float ms;
hipEvent_t startEvent, stopEvent;
checkCuda(hipEventCreate(&startEvent));
checkCuda(hipEventCreate(&stopEvent));
checkCuda(hipEventRecord(startEvent,0) );
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 2) {
hipLaunchKernelGGL(( jacobi3d_7p_shmem_3d_temporal), dim3(grid), dim3(block), sharedMemSize, 0, input, output, nx, ny, nz, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
checkCuda( hipEventRecord(stopEvent, 0));
checkCuda( hipEventSynchronize(stopEvent));
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent));
printf("Time of shared memory version (pure GPU) (ms): %f\n", ms);
double gflop = (xyz * 1e-9) * 7.0 * timesteps;
double gflop_per_sec = gflop * 1e3 / ms;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
// Copy the result to main memory
checkCuda( hipMemcpy(h_A, input, xyz_bytes, hipMemcpyDeviceToHost));
float *gpuResult = h_A;
// Run the CPU version
//double startTime = rtclock();
for(int t = 0; t < timesteps; t += 1) {
jacobi7(nx, ny, nz, h_A1, h_B1, fac);
tmp1 = h_A1;
h_A1 = h_B1;
h_B1 = tmp1;
}
//double endTime = rtclock();
//double elapsedTimeC = endTime - startTime;
float *cpuResult = h_A1;
/*printf("Elapsed Time:%lf\n", elapsedTimeC);
double flops = xyz * 7.0 * timesteps;
double gflops = flops / elapsedTimeC / 1e9;
printf("(CPU) %lf GFlop/s\n", gflops);*/
// compare the results btw CPU and GPU version
double errorNorm, refNorm, diff;
errorNorm = 0.0;
refNorm = 0.0;
int errorCount=0;
i = 0;
for (; i < xyz; ++i){
diff = cpuResult[i] - gpuResult[i];
errorNorm += diff * diff;
refNorm += cpuResult[i] * cpuResult[i];
if (abs(diff)> 1e-4)
{
//printf("GPU[%d]=%f\n", i, gpuResult[i]);
//printf("CPU[%d]=%f\n", i, cpuResult[i]);
errorCount++;
}
}
printf("errorCount:%d\n", errorCount);
errorNorm = sqrt(errorNorm);
refNorm = sqrt(refNorm);
printf("Error Norm:%lf\n", errorNorm);
printf("Ref Norm:%lf\n", refNorm);
if(abs(refNorm) < 1e-7) {
printf("Correctness, FAILED\n");
}
else if((errorNorm / refNorm) > 1e-2) {
printf("Correctness, FAILED\n");
}
else {
printf("Correctness, PASSED\n");
}
testIndex = 3 + 3*nx+ 3*nx*ny;
printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
// cleanup
checkCuda( hipEventDestroy(startEvent));
checkCuda( hipEventDestroy(stopEvent));
hipHostFree(h_A);
hipHostFree(h_B);
hipHostFree(h_A1);
hipHostFree(h_B1);
hipFree(d_A);
hipFree(d_B);
return 0;
} | 84975c0fb7649b80377fb11e4ba25ff8bd7a723b.cu | //jacobi7.cu
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include "getopt.h"
#include "jacobi7_cuda_3d.h"
#include "jacobi7.h"
#include <assert.h>
//#ifndef TIME_TILE_SIZE
//#warning TIME_TILE_SIZE is not set, defaulting to 1
//#define TIME_TILE_SIZE 2
//#endif
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
//#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
//#endif
return result;
}
// Timer function
double rtclock(){
struct timeval tp;
gettimeofday(&tp, NULL);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
int main(int argc, char* *argv){
if(argc != 8) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TZ> <TIME STEPS>\n", argv[0]);
return 1;
}
// program parameters trans
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int tz = atoi(argv[6]);
const int timesteps = atoi(argv[7]);
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
float *h_A;
float *h_B;
float *d_A;
float *d_B;
float *h_A1;
float *h_B1;
// Allocate host buffers
checkCuda(cudaMallocHost((void**)&h_A, xyz_bytes)); // host pinned
checkCuda(cudaMallocHost((void**)&h_B, xyz_bytes));
// for comparison btw CPU and GPU version
checkCuda(cudaMallocHost((void**)&h_A1, xyz_bytes));
checkCuda(cudaMallocHost((void**)&h_B1, xyz_bytes));
// grid data iniatialization
// randomly generaed test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_B[i] = h_A[i];
h_A1[i] = h_A[i];
h_B1[i] = h_A[i];
}
int testIndex = 3 + 3*nx+ 3*nx*ny;
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
// Always use device 0
cudaSetDevice(0);
printf("Start computing...\n");
/* set the ratio of cache/shared memory
cudaFuncCachePreferNone: Default function cache configuration, no preference
cudaFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
cudaFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
*/
//checkCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
// Allocate device buffers
checkCuda(cudaMalloc((void**)&d_A, xyz_bytes));
checkCuda(cudaMalloc((void**)&d_B, xyz_bytes));
// Copy to device
checkCuda(cudaMemcpy(d_A, h_A, xyz_bytes, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(d_B, d_A, xyz_bytes, cudaMemcpyDeviceToDevice));
// Setup the kernel
float* input = d_A;
float* output = d_B;
dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty, (nz+tz-1)/tz);
dim3 block(tx, ty, tz);
printf("grid:(%d, %d, %d)\n", grid.x, grid.y, grid.z);
printf("block:(%d, %d, %d)\n", tx, ty, tz);
float *tmp;
float *tmp1;
float fac = 6.0/(h_A[0] * h_A[0]);
const int sharedMemSize = (tx + 2) * (ty + 2) * (tz + 2) * sizeof(float);
printf("sharedMemSize:%d\n",sharedMemSize);
float ms;
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent));
checkCuda(cudaEventCreate(&stopEvent));
checkCuda(cudaEventRecord(startEvent,0) );
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 2) {
jacobi3d_7p_shmem_3d_temporal<<<grid, block, sharedMemSize>>>(input, output, nx, ny, nz, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
checkCuda( cudaEventRecord(stopEvent, 0));
checkCuda( cudaEventSynchronize(stopEvent));
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent));
printf("Time of shared memory version (pure GPU) (ms): %f\n", ms);
double gflop = (xyz * 1e-9) * 7.0 * timesteps;
double gflop_per_sec = gflop * 1e3 / ms;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
// Copy the result to main memory
checkCuda( cudaMemcpy(h_A, input, xyz_bytes, cudaMemcpyDeviceToHost));
float *gpuResult = h_A;
// Run the CPU version
//double startTime = rtclock();
for(int t = 0; t < timesteps; t += 1) {
jacobi7(nx, ny, nz, h_A1, h_B1, fac);
tmp1 = h_A1;
h_A1 = h_B1;
h_B1 = tmp1;
}
//double endTime = rtclock();
//double elapsedTimeC = endTime - startTime;
float *cpuResult = h_A1;
/*printf("Elapsed Time:%lf\n", elapsedTimeC);
double flops = xyz * 7.0 * timesteps;
double gflops = flops / elapsedTimeC / 1e9;
printf("(CPU) %lf GFlop/s\n", gflops);*/
// compare the results btw CPU and GPU version
double errorNorm, refNorm, diff;
errorNorm = 0.0;
refNorm = 0.0;
int errorCount=0;
i = 0;
for (; i < xyz; ++i){
diff = cpuResult[i] - gpuResult[i];
errorNorm += diff * diff;
refNorm += cpuResult[i] * cpuResult[i];
if (abs(diff)> 1e-4)
{
//printf("GPU[%d]=%f\n", i, gpuResult[i]);
//printf("CPU[%d]=%f\n", i, cpuResult[i]);
errorCount++;
}
}
printf("errorCount:%d\n", errorCount);
errorNorm = sqrt(errorNorm);
refNorm = sqrt(refNorm);
printf("Error Norm:%lf\n", errorNorm);
printf("Ref Norm:%lf\n", refNorm);
if(abs(refNorm) < 1e-7) {
printf("Correctness, FAILED\n");
}
else if((errorNorm / refNorm) > 1e-2) {
printf("Correctness, FAILED\n");
}
else {
printf("Correctness, PASSED\n");
}
testIndex = 3 + 3*nx+ 3*nx*ny;
printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
// cleanup
checkCuda( cudaEventDestroy(startEvent));
checkCuda( cudaEventDestroy(stopEvent));
cudaFreeHost(h_A);
cudaFreeHost(h_B);
cudaFreeHost(h_A1);
cudaFreeHost(h_B1);
cudaFree(d_A);
cudaFree(d_B);
return 0;
} |
499750e2d0f1c5271ad72170c61f37493dd5c734.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "accuracy_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "../accuracy_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void accuracy_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
int output_elem_count_per_entry,
unsigned int top_n,
int entry_count)
{
int start_feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id];
int sum = 0;
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int start_input_offset = (entry_id * input_feature_map_count + start_feature_map_id) * elem_count_per_feature_map + neuron_id;
int max_val_feature_map_id = -1;
float max_val = -1.0e37F;
int warp_count = threadblock_size >> 5;
float * val_sh = arr_sh;
int * fm_sh = (int *)(arr_sh + warp_count);
int * cnt_sh = (int *)(arr_sh + 2 * warp_count);
int lane_id = thread_id & 31;
int input_offset = start_input_offset;
int feature_map_id = start_feature_map_id;
while (feature_map_id < input_feature_map_count)
{
float new_val = actual[input_offset];
if (new_val > max_val)
{
max_val = new_val;
max_val_feature_map_id = feature_map_id;
}
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
float new_val = __shfl_down(max_val, tx);
int feature_map_id = __shfl_down(max_val_feature_map_id, tx);
if ((new_val > max_val) || ((new_val == max_val) && (feature_map_id < max_val_feature_map_id)))
{
max_val = new_val;
max_val_feature_map_id = feature_map_id;
}
}
if (warp_count > 1)
{
if (lane_id == 0)
{
val_sh[thread_id >> 5] = max_val;
fm_sh[thread_id >> 5] = max_val_feature_map_id;
}
__syncthreads();
if (thread_id < 32)
{
max_val = -1.0e37F;
max_val_feature_map_id = -1;
if (thread_id < warp_count)
{
max_val = val_sh[thread_id];
max_val_feature_map_id = fm_sh[thread_id];
}
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
{
float new_val = __shfl_down(max_val, tx);
int feature_map_id = __shfl_down(max_val_feature_map_id, tx);
if ((new_val > max_val) || ((new_val == max_val) && (feature_map_id < max_val_feature_map_id)))
{
max_val = new_val;
max_val_feature_map_id = feature_map_id;
}
}
}
if (thread_id == 0)
{
val_sh[0] = predicted[(entry_id * input_feature_map_count + max_val_feature_map_id) * elem_count_per_feature_map + neuron_id];
fm_sh[0] = max_val_feature_map_id;
}
__syncthreads();
max_val = val_sh[0];
max_val_feature_map_id = fm_sh[0];
} // if (warp_count > 1)
else
{
if (thread_id == 0)
max_val = predicted[(entry_id * input_feature_map_count + max_val_feature_map_id) * elem_count_per_feature_map + neuron_id];
max_val_feature_map_id = __shfl(max_val_feature_map_id, 0);
max_val = __shfl(max_val, 0);
}
// max_val and max_val_feature_map_id set for all threads
// Writing to val_sh and fm_sh is not safe here yet
sum = 0;
input_offset = start_input_offset;
feature_map_id = start_feature_map_id;
while (feature_map_id < input_feature_map_count)
{
float val = predicted[input_offset];
if ((val > max_val) || ((val == max_val) && (feature_map_id < max_val_feature_map_id)))
++sum;
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
sum += __shfl_down(sum, tx);
if (warp_count > 1)
{
if (lane_id == 0)
cnt_sh[thread_id >> 5] = sum;
__syncthreads();
if (thread_id < 32)
{
sum = 0;
if (thread_id < warp_count)
sum = cnt_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
sum += __shfl_down(sum, tx);
}
}
}
if (thread_id == 0)
{
int output_offset = entry_id * output_elem_count_per_entry + neuron_id;
for(int i = 0; i < top_n; ++i)
{
output[output_offset] = ((sum <= i) ? mask : 0.0F);
output_offset += elem_count_per_feature_map;
}
output[output_offset] = mask; // Scale
}
}
accuracy_layer_tester_cuda::accuracy_layer_tester_cuda()
{
}
accuracy_layer_tester_cuda::~accuracy_layer_tester_cuda()
{
}
void accuracy_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
const float * scale_mask = 0;
if (input_buffers.size() > 2)
scale_mask = *input_buffers[2];
int smem_size = ((threadblock_size + 32 - 1) / 32) * (sizeof(float) + 2 * sizeof(int));
hipLaunchKernelGGL(( accuracy_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id,
*output_buffer,
*input_buffers[0],
*input_buffers[1],
scale_mask,
input_configuration_specific_list[0].feature_map_count,
input_elem_count_per_feature_map_list[0],
output_elem_count_per_entry,
top_n,
entry_count);
}
void accuracy_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const accuracy_layer> layer_derived = nnforge_dynamic_pointer_cast<const accuracy_layer>(layer_schema);
top_n = layer_derived->top_n;
}
int accuracy_layer_tester_cuda::get_threadblock_size(int input_feature_map_count)
{
int threadblock_size;
if (input_feature_map_count < 256)
{
threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (input_feature_map_count + 256 - 1) / 256;
threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
| 499750e2d0f1c5271ad72170c61f37493dd5c734.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "accuracy_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "../accuracy_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void accuracy_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
int output_elem_count_per_entry,
unsigned int top_n,
int entry_count)
{
int start_feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id];
int sum = 0;
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int start_input_offset = (entry_id * input_feature_map_count + start_feature_map_id) * elem_count_per_feature_map + neuron_id;
int max_val_feature_map_id = -1;
float max_val = -1.0e37F;
int warp_count = threadblock_size >> 5;
float * val_sh = arr_sh;
int * fm_sh = (int *)(arr_sh + warp_count);
int * cnt_sh = (int *)(arr_sh + 2 * warp_count);
int lane_id = thread_id & 31;
int input_offset = start_input_offset;
int feature_map_id = start_feature_map_id;
while (feature_map_id < input_feature_map_count)
{
float new_val = actual[input_offset];
if (new_val > max_val)
{
max_val = new_val;
max_val_feature_map_id = feature_map_id;
}
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
float new_val = __shfl_down(max_val, tx);
int feature_map_id = __shfl_down(max_val_feature_map_id, tx);
if ((new_val > max_val) || ((new_val == max_val) && (feature_map_id < max_val_feature_map_id)))
{
max_val = new_val;
max_val_feature_map_id = feature_map_id;
}
}
if (warp_count > 1)
{
if (lane_id == 0)
{
val_sh[thread_id >> 5] = max_val;
fm_sh[thread_id >> 5] = max_val_feature_map_id;
}
__syncthreads();
if (thread_id < 32)
{
max_val = -1.0e37F;
max_val_feature_map_id = -1;
if (thread_id < warp_count)
{
max_val = val_sh[thread_id];
max_val_feature_map_id = fm_sh[thread_id];
}
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
{
float new_val = __shfl_down(max_val, tx);
int feature_map_id = __shfl_down(max_val_feature_map_id, tx);
if ((new_val > max_val) || ((new_val == max_val) && (feature_map_id < max_val_feature_map_id)))
{
max_val = new_val;
max_val_feature_map_id = feature_map_id;
}
}
}
if (thread_id == 0)
{
val_sh[0] = predicted[(entry_id * input_feature_map_count + max_val_feature_map_id) * elem_count_per_feature_map + neuron_id];
fm_sh[0] = max_val_feature_map_id;
}
__syncthreads();
max_val = val_sh[0];
max_val_feature_map_id = fm_sh[0];
} // if (warp_count > 1)
else
{
if (thread_id == 0)
max_val = predicted[(entry_id * input_feature_map_count + max_val_feature_map_id) * elem_count_per_feature_map + neuron_id];
max_val_feature_map_id = __shfl(max_val_feature_map_id, 0);
max_val = __shfl(max_val, 0);
}
// max_val and max_val_feature_map_id set for all threads
// Writing to val_sh and fm_sh is not safe here yet
sum = 0;
input_offset = start_input_offset;
feature_map_id = start_feature_map_id;
while (feature_map_id < input_feature_map_count)
{
float val = predicted[input_offset];
if ((val > max_val) || ((val == max_val) && (feature_map_id < max_val_feature_map_id)))
++sum;
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
sum += __shfl_down(sum, tx);
if (warp_count > 1)
{
if (lane_id == 0)
cnt_sh[thread_id >> 5] = sum;
__syncthreads();
if (thread_id < 32)
{
sum = 0;
if (thread_id < warp_count)
sum = cnt_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
sum += __shfl_down(sum, tx);
}
}
}
if (thread_id == 0)
{
int output_offset = entry_id * output_elem_count_per_entry + neuron_id;
for(int i = 0; i < top_n; ++i)
{
output[output_offset] = ((sum <= i) ? mask : 0.0F);
output_offset += elem_count_per_feature_map;
}
output[output_offset] = mask; // Scale
}
}
// Default constructor: no per-instance state to set up here
// (configuration happens later in tester_configured()).
accuracy_layer_tester_cuda::accuracy_layer_tester_cuda()
{
}
// Destructor: nothing to release — all buffers are owned by the framework.
accuracy_layer_tester_cuda::~accuracy_layer_tester_cuda()
{
}
// Launches accuracy_kernel on the given stream: one block per
// (output neuron, entry) pair, with dynamic shared memory sized for one
// float plus two ints per warp (the kernel's val/fm/cnt scratch arrays).
// input_buffers[0] = predicted, input_buffers[1] = actual,
// optional input_buffers[2] = per-element scale mask.
void accuracy_layer_tester_cuda::enqueue_forward_propagation(
	cudaStream_t stream_id,
	cuda_linear_buffer_device::ptr output_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	unsigned int entry_count)
{
	int block_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
	// The scale mask input is optional; a null pointer disables masking.
	const float * mask_ptr = 0;
	if (input_buffers.size() > 2)
		mask_ptr = *input_buffers[2];
	// One float (max value) + two ints (feature map id, count) per warp.
	int warps_per_block = (block_size + 31) / 32;
	int shared_bytes = warps_per_block * (sizeof(float) + 2 * sizeof(int));
	accuracy_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), block_size, shared_bytes, stream_id>>>(
		*output_buffer,
		*input_buffers[0],
		*input_buffers[1],
		mask_ptr,
		input_configuration_specific_list[0].feature_map_count,
		input_elem_count_per_feature_map_list[0],
		output_elem_count_per_entry,
		top_n,
		entry_count);
}
void accuracy_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const accuracy_layer> layer_derived = nnforge_dynamic_pointer_cast<const accuracy_layer>(layer_schema);
top_n = layer_derived->top_n;
}
// Picks a thread block size for accuracy_kernel based on the number of
// input feature maps. Small counts use one warp-rounded block covering all
// maps; larger counts are split into the fewest 256-thread chunks and the
// per-chunk share is rounded up to a multiple of the warp size (32).
int accuracy_layer_tester_cuda::get_threadblock_size(int input_feature_map_count)
{
	if (input_feature_map_count < 256)
		return ((input_feature_map_count + 31) / 32) * 32;
	int chunk_count = (input_feature_map_count + 255) / 256;
	int per_chunk = (input_feature_map_count + chunk_count - 1) / chunk_count;
	return ((per_chunk + 31) / 32) * 32;
}
}
}
|
154b617168537e33ca19f3e900f4c750fba53df6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
// User kernel body (auto-generated by op2.py from the user's initU_formula).
// coords: 2 floats per set element; values: 4 floats per set element;
// time: pointer to the scalar simulation time constant.
__device__ void initU_formula_gpu( const float *coords, float *values, const double *time) {
float x = coords[0]; // unused: the formula below is currently the constant 0
float y = coords[1]; // unused
float t = *time; // unused
float val = 0.0f; // placeholder formula — contributes nothing to values[1]
values[1] += val;
}
// CUDA kernel function: applies initU_formula_gpu to every set element.
// Uses a grid-stride loop, so any grid size covers the whole set.
__global__ void op_cuda_initU_formula(
const float *__restrict arg0, // coords: 2 floats per element
float *arg1, // values: 4 floats per element
const double *arg2, // time constant (device copy)
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initU_formula_gpu(arg0+n*2,
arg1+n*4,
arg2);
}
}
//host stub function: auto-generated OP2 wrapper. Performs halo exchange,
//stages the scalar 'time' constant into the shared OP2 device constant
//buffer, launches the kernel over the set, and records timing/transfer
//statistics in kernel slot 6 of OP_kernels.
void op_par_loop_initU_formula(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
double*arg2h = (double *)arg2.data; // host copy of the time constant
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(6);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[6].name = name;
OP_kernels[6].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initU_formula");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//transfer constants to GPU via the shared OP2 staging buffers
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(double));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OP_consts_h + consts_bytes;
arg2.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((double *)arg2.data)[d] = arg2h[d];
}
consts_bytes += ROUND_UP(1*sizeof(double));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_6
int nthread = OP_BLOCK_SIZE_6;
#else
int nthread = OP_block_size;
#endif
// fixed grid: the kernel's grid-stride loop covers any set size
int nblocks = 200;
hipLaunchKernelGGL(( op_cuda_initU_formula), dim3(nblocks),dim3(nthread), 0, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
(double *) arg2.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[6].time += wall_t2 - wall_t1;
OP_kernels[6].transfer += (float)set->size * arg0.size;
OP_kernels[6].transfer += (float)set->size * arg1.size * 2.0f;
}
| 154b617168537e33ca19f3e900f4c750fba53df6.cu | //
// auto-generated by op2.py
//
//user function
// User kernel body (auto-generated by op2.py; CUDA twin of the HIP file).
// coords: 2 floats per set element; values: 4 floats per set element;
// time: pointer to the scalar simulation time constant.
__device__ void initU_formula_gpu( const float *coords, float *values, const double *time) {
float x = coords[0]; // unused: the formula below is currently the constant 0
float y = coords[1]; // unused
float t = *time; // unused
float val = 0.0f; // placeholder formula — contributes nothing to values[1]
values[1] += val;
}
// CUDA kernel function: applies initU_formula_gpu to every set element.
// Uses a grid-stride loop, so any grid size covers the whole set.
__global__ void op_cuda_initU_formula(
const float *__restrict arg0, // coords: 2 floats per element
float *arg1, // values: 4 floats per element
const double *arg2, // time constant (device copy)
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initU_formula_gpu(arg0+n*2,
arg1+n*4,
arg2);
}
}
//host stub function: auto-generated OP2 wrapper. Performs halo exchange,
//stages the scalar 'time' constant into the shared OP2 device constant
//buffer, launches the kernel over the set, and records timing/transfer
//statistics in kernel slot 6 of OP_kernels.
void op_par_loop_initU_formula(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
double*arg2h = (double *)arg2.data; // host copy of the time constant
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(6);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[6].name = name;
OP_kernels[6].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initU_formula");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//transfer constants to GPU via the shared OP2 staging buffers
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(double));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OP_consts_h + consts_bytes;
arg2.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((double *)arg2.data)[d] = arg2h[d];
}
consts_bytes += ROUND_UP(1*sizeof(double));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_6
int nthread = OP_BLOCK_SIZE_6;
#else
int nthread = OP_block_size;
#endif
// fixed grid: the kernel's grid-stride loop covers any set size
int nblocks = 200;
op_cuda_initU_formula<<<nblocks,nthread>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
(double *) arg2.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[6].time += wall_t2 - wall_t1;
OP_kernels[6].transfer += (float)set->size * arg0.size;
OP_kernels[6].transfer += (float)set->size * arg1.size * 2.0f;
}
|
5ebaf03ff99ac0d5b6e8e4cd2ffdf4fef5e570ca.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include "functions/logisticReg.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
// Parameters for one LogRegLossTest case.
template <typename T>
struct LogRegLossInputs {
T tolerance; // comparison tolerance for losses and gradients
T n_rows; // number of samples (stored as T, used as an integer count)
T n_cols; // number of features (stored as T, used as an integer count)
int len; // total input elements; expected to equal n_rows * n_cols
};
// Typed fixture for logisticRegLoss / logisticRegLossGrads.
// SetUp() evaluates the loss and its gradient for every penalty type
// (NONE, L1, L2, ELASTICNET) on a fixed 3x2 problem and uploads
// hand-computed reference values; the TEST_P bodies compare them.
template <typename T>
class LogRegLossTest: public ::testing::TestWithParam<LogRegLossInputs<T> > {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LogRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
// Device buffers: input, computed outputs, and their references.
allocate(in, len);
allocate(out, 1);
allocate(out_lasso, 1);
allocate(out_ridge, 1);
allocate(out_elasticnet, 1);
allocate(out_grad, n_cols);
allocate(out_lasso_grad, n_cols);
allocate(out_ridge_grad, n_cols);
allocate(out_elasticnet_grad, n_cols);
allocate(out_ref, 1);
allocate(out_lasso_ref, 1);
allocate(out_ridge_ref, 1);
allocate(out_elasticnet_ref, 1);
allocate(out_grad_ref, n_cols);
allocate(out_lasso_grad_ref, n_cols);
allocate(out_ridge_grad_ref, n_cols);
allocate(out_elasticnet_grad_ref, n_cols);
allocate(labels, params.n_rows);
allocate(coef, params.n_cols);
// NOTE(review): T h_in[len] and friends are variable-length arrays —
// a compiler extension, not standard C++.
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
updateDevice(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
updateDevice(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
updateDevice(coef, h_coef, n_cols, stream);
// Hand-computed reference losses and gradients per penalty type.
T h_out_ref[1] = {0.38752545};
updateDevice(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {0.74152};
updateDevice(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {0.4955854};
updateDevice(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {0.618555};
updateDevice(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.58284, 0.207666};
updateDevice(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.0171, -0.39233};
updateDevice(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {-0.16284, -0.080333};
updateDevice(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.07284, -0.23633};
updateDevice(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
// 'in' is re-uploaded after each call — presumably because the
// loss/grad routines may use it as scratch (TODO confirm).
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_grad, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_lasso_grad, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
// NOTE(review): unlike the cases above, 'in' is NOT re-uploaded between
// the L2 (and ELASTICNET) loss and gradient calls below — confirm the
// loss routine leaves 'in' unmodified, otherwise the gradients run on
// stale data.
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_ridge_grad, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet_grad, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
}
// Releases every device buffer allocated in SetUp.
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_lasso));
CUDA_CHECK(hipFree(out_ridge));
CUDA_CHECK(hipFree(out_elasticnet));
CUDA_CHECK(hipFree(out_grad));
CUDA_CHECK(hipFree(out_lasso_grad));
CUDA_CHECK(hipFree(out_ridge_grad));
CUDA_CHECK(hipFree(out_elasticnet_grad));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out_lasso_ref));
CUDA_CHECK(hipFree(out_ridge_ref));
CUDA_CHECK(hipFree(out_elasticnet_ref));
CUDA_CHECK(hipFree(out_grad_ref));
CUDA_CHECK(hipFree(out_lasso_grad_ref));
CUDA_CHECK(hipFree(out_ridge_grad_ref));
CUDA_CHECK(hipFree(out_elasticnet_grad_ref));
}
protected:
LogRegLossInputs<T> params; // current test parameters
T *in; // device input matrix (n_rows x n_cols)
T *out, *out_lasso, *out_ridge, *out_elasticnet; // computed losses
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref; // reference losses
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad; // computed gradients
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref, *out_elasticnet_grad_ref; // reference gradients
std::shared_ptr<deviceAllocator> allocator;
};
// Parameter sets: {tolerance, n_rows, n_cols, len} with len == n_rows * n_cols.
const std::vector<LogRegLossInputs<float> > inputsf = {
{0.01f, 3, 2, 6}
};
const std::vector<LogRegLossInputs<double> > inputsd = {
{0.01, 3, 2, 6}
};
// Single precision: every loss/gradient computed in SetUp must match its
// reference within the configured tolerance.
// NOTE(review): params.n_cols has type T and is implicitly converted to an
// integer element count in the devArrMatch calls.
typedef LogRegLossTest<float> LogRegLossTestF;
TEST_P(LogRegLossTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
}
// Double-precision variant of the same comparisons.
typedef LogRegLossTest<double> LogRegLossTestD;
TEST_P(LogRegLossTestD, Result){
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestD, ::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
| 5ebaf03ff99ac0d5b6e8e4cd2ffdf4fef5e570ca.cu | #include <gtest/gtest.h>
#include "functions/logisticReg.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
// Parameters for one LogRegLossTest case (CUDA twin of the HIP file).
template <typename T>
struct LogRegLossInputs {
T tolerance; // comparison tolerance for losses and gradients
T n_rows; // number of samples (stored as T, used as an integer count)
T n_cols; // number of features (stored as T, used as an integer count)
int len; // total input elements; expected to equal n_rows * n_cols
};
// Typed fixture for logisticRegLoss / logisticRegLossGrads.
// SetUp() evaluates the loss and its gradient for every penalty type
// (NONE, L1, L2, ELASTICNET) on a fixed 3x2 problem and uploads
// hand-computed reference values; the TEST_P bodies compare them.
template <typename T>
class LogRegLossTest: public ::testing::TestWithParam<LogRegLossInputs<T> > {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LogRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
// Device buffers: input, computed outputs, and their references.
allocate(in, len);
allocate(out, 1);
allocate(out_lasso, 1);
allocate(out_ridge, 1);
allocate(out_elasticnet, 1);
allocate(out_grad, n_cols);
allocate(out_lasso_grad, n_cols);
allocate(out_ridge_grad, n_cols);
allocate(out_elasticnet_grad, n_cols);
allocate(out_ref, 1);
allocate(out_lasso_ref, 1);
allocate(out_ridge_ref, 1);
allocate(out_elasticnet_ref, 1);
allocate(out_grad_ref, n_cols);
allocate(out_lasso_grad_ref, n_cols);
allocate(out_ridge_grad_ref, n_cols);
allocate(out_elasticnet_grad_ref, n_cols);
allocate(labels, params.n_rows);
allocate(coef, params.n_cols);
// NOTE(review): T h_in[len] and friends are variable-length arrays —
// a compiler extension, not standard C++.
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
updateDevice(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
updateDevice(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
updateDevice(coef, h_coef, n_cols, stream);
// Hand-computed reference losses and gradients per penalty type.
T h_out_ref[1] = {0.38752545};
updateDevice(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {0.74152};
updateDevice(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {0.4955854};
updateDevice(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {0.618555};
updateDevice(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.58284, 0.207666};
updateDevice(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.0171, -0.39233};
updateDevice(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {-0.16284, -0.080333};
updateDevice(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.07284, -0.23633};
updateDevice(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
// 'in' is re-uploaded after each call — presumably because the
// loss/grad routines may use it as scratch (TODO confirm).
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_grad, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_lasso_grad, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
// NOTE(review): unlike the cases above, 'in' is NOT re-uploaded between
// the L2 (and ELASTICNET) loss and gradient calls below — confirm the
// loss routine leaves 'in' unmodified, otherwise the gradients run on
// stale data.
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_ridge_grad, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet_grad, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
}
// Releases every device buffer allocated in SetUp.
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_lasso));
CUDA_CHECK(cudaFree(out_ridge));
CUDA_CHECK(cudaFree(out_elasticnet));
CUDA_CHECK(cudaFree(out_grad));
CUDA_CHECK(cudaFree(out_lasso_grad));
CUDA_CHECK(cudaFree(out_ridge_grad));
CUDA_CHECK(cudaFree(out_elasticnet_grad));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out_lasso_ref));
CUDA_CHECK(cudaFree(out_ridge_ref));
CUDA_CHECK(cudaFree(out_elasticnet_ref));
CUDA_CHECK(cudaFree(out_grad_ref));
CUDA_CHECK(cudaFree(out_lasso_grad_ref));
CUDA_CHECK(cudaFree(out_ridge_grad_ref));
CUDA_CHECK(cudaFree(out_elasticnet_grad_ref));
}
protected:
LogRegLossInputs<T> params; // current test parameters
T *in; // device input matrix (n_rows x n_cols)
T *out, *out_lasso, *out_ridge, *out_elasticnet; // computed losses
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref; // reference losses
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad; // computed gradients
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref, *out_elasticnet_grad_ref; // reference gradients
std::shared_ptr<deviceAllocator> allocator;
};
// Parameter sets: {tolerance, n_rows, n_cols, len} with len == n_rows * n_cols.
const std::vector<LogRegLossInputs<float> > inputsf = {
{0.01f, 3, 2, 6}
};
const std::vector<LogRegLossInputs<double> > inputsd = {
{0.01, 3, 2, 6}
};
// Single precision: every loss/gradient computed in SetUp must match its
// reference within the configured tolerance.
// NOTE(review): params.n_cols has type T and is implicitly converted to an
// integer element count in the devArrMatch calls.
typedef LogRegLossTest<float> LogRegLossTestF;
TEST_P(LogRegLossTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
}
// Double-precision variant of the same comparisons.
typedef LogRegLossTest<double> LogRegLossTestD;
TEST_P(LogRegLossTestD, Result){
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestD, ::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
65852a87249378dea125d43dd8b75e5133157cf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
CUDA code for GPU optimization of camera placement problem
With support of 2-coverage Region of Interest
Author : Vegard Tveit
Date : 17.04.2018
Comment : The user has to specify:
- Number of sensors to be placed
- Number of possible combinations(nchoosek)
- Modify UNISIZE
- Number of datapoints
- Number of possible placement points
- Number of possible pan angles
- "subsets.txt", "annotations.txt" and "combinations.txt"
*/
// Initial Setup
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
#include <new>
#define UNISIZE 9084
#include <cmath>
#include <algorithm>
#include <numeric>
#include <functional>
#include <fstream>
// Scores one camera-placement combination per thread.
// annotations[j] : required coverage of data point j (2 = ROI, 1 = normal)
// devarr         : len x nsubs matrix of camera indices (one row per combination)
// subs           : per-camera boolean coverage maps, usize points each
// sum            : output, exactly 'len' entries — coverage score minus ROI penalty
// roisum         : total number of ROI data points
__global__ void mykernel(int* annotations, int* devarr, bool* subs, int* sum, unsigned long len, unsigned long nsubs, unsigned long usize, int roisum)
{
	unsigned long th_id = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
	// Guard: the grid is rounded up to a multiple of the block size, so
	// threads past 'len' must not touch sum[] — the previous
	// "else sum[th_id] = 0;" wrote past the end of the len-sized buffer.
	if (th_id >= len)
		return;
	// Per-thread coverage counter for every data point. 9084 must equal
	// usize (== UNISIZE); this large array lives in local memory.
	int barr[9084] = {0};
	int totsum = 0;     // number of sufficiently covered points
	int count_roi = 0;  // number of sufficiently covered ROI points
	int penalty = 0;
	int alpha = 4000;   // penalty weight for missing ROI coverage
	// Accumulate how many of this combination's cameras see each point.
	for(unsigned long i = 0; i < nsubs; i++)
	{
		int ind = devarr[th_id*nsubs + i]; // camera index of the i-th sensor
		for(unsigned long j = 0; j < usize; j++)
		{
			if(subs[ind*usize + j]){
				barr[j] += 1;
			}
		}
	}
	// A point counts as covered when its coverage meets the annotation
	// requirement (ROI points need 2-coverage).
	for(int i = 0 ; i < usize ; i++){
		if(barr[i] >= annotations[i]){
			totsum += 1;
			if(annotations[i] == 2){
				count_roi += 1;
			}
		}
	}
	// Penalize partially covered ROIs (integer division preserves the
	// original scoring behaviour) and fully uncovered ROIs.
	if(count_roi > 0 && count_roi < roisum){
		penalty = alpha*(roisum/count_roi);
	}
	if(count_roi == 0){
		penalty = alpha*roisum;
	}
	sum[th_id] = totsum - penalty;
}
// Host driver: loads the per-camera coverage subsets, candidate camera
// combinations and per-point annotations from text files, evaluates every
// combination on the GPU with mykernel, and prints the best-scoring one.
// Sizes (num_sensors, ncombs, ndp, campos, numpans) are hard-coded and must
// match the generated input files.
void readfromtxt(){
	//int num_sensors = 1;
	//int ncombs = 210;
	int num_sensors = 5;
	int ncombs = 150000000; // == 1.5e8; integer literal avoids a double->int conversion
	//unsigned long num_sensors = 3;
	//unsigned long ncombs = 1521520;
	//unsigned long num_sensors = 4;
	//unsigned long ncombs = 78738660;
	unsigned long ndp = 9084;   // data points per coverage map (== UNISIZE)
	unsigned long campos = 83;  // candidate placement points
	unsigned long numpans = 3;  // pan angles per placement point
	std::cout << "num combs : " << ncombs << std::endl;
	// Dynamically allocate arrays on CPU
	int* array = (int*)malloc(ncombs*num_sensors*sizeof(int));
	// element type is bool — the previous sizeof(bool*) over-allocated 8x
	bool* subs_array = (bool*)malloc(ndp*campos*numpans*sizeof(bool));
	int* annot_array = (int*)malloc(ndp*sizeof(int));
	//Load subsets from txt file and store in 1D array
	std::ifstream subsfile("Subsets_4cam.txt");
	double b;
	unsigned long col_s = 0;
	while (subsfile >> b)
	{
		subs_array[col_s] = (bool) b;
		col_s +=1;
	}
	for(int i = 0; i < 15 ; i++){
		std::cout << subs_array[i] << std::endl;
	}
	std::cout << std::endl << std::endl <<std::endl;
	std::cout << col_s << " is col_s" << std::endl;
	// Store combinations array in a 1D array
	std::ifstream myfile("combtests_1.txt");
	double bb;
	unsigned long col = 0;
	std::cout << "Hei!" << std::endl;
	while (myfile >> bb)
	{
		array[col] = (int) bb;
		if(col < 10) std::cout << bb << std::endl;
		col += 1;
	}
	std::cout << col << " is col" << std::endl;
	// Store annotations in a 1D array
	// The annotation of a point describes whether it is
	// a ROI, obstacle or normal data point
	std::ifstream annotfile("Annotations.txt");
	double an;
	unsigned long col2 = 0;
	while (annotfile >> an)
	{
		annot_array[col2] =(int) an;
		col2 += 1;
	}
	// Make annotation array (to be used inside kernel):
	// required coverage per point — 2 for ROI points, 1 otherwise.
	int* init_cov = (int*)malloc(ndp*sizeof(int));
	int c = 0; // total number of ROI data points
	for(int i = 0 ; i < ndp ; i++){
		if(annot_array[i] == 2){
			c += 1;
			init_cov[i] = 2;
		}else{
			init_cov[i] = 1;
		}
	}
	std::cout << "Num of roi dp: " << c << std::endl;
	//GPU variables
	unsigned long n_threads_per_block = 1024;
	unsigned long n_blocks = (ncombs + n_threads_per_block - 1)/n_threads_per_block;
	std::cout << "Number of blocks :" << n_blocks << std::endl;
	unsigned long data_n = n_blocks*n_threads_per_block; // Total number of available threads
	//Vectorize array for GPU calculations
	unsigned long chop_combs;
	chop_combs = ncombs;
	std::cout << "No. of available threads: " << data_n << std::endl;
	std::cout << "Number of used threads : " << chop_combs << std::endl;
	size_t i_datasize = chop_combs*sizeof(int);
	size_t array_datas = chop_combs*num_sensors*sizeof(int);
	size_t bool_subs_size = ndp*numpans*campos*sizeof(bool);
	size_t annot_size = ndp*sizeof(int);
	std::cout << "i_datasize [bytes] : " << i_datasize << std::endl;
	// Allocate CPU Memory
	int* sum_host = new int[chop_combs];
	std::cout << "Array size : " << array_datas <<" and subs size " << bool_subs_size << std::endl;
	// Allocate GPU Memory
	int* annot_dev;
	bool* subs_dev;
	int* sum_dev;
	int* array_dev;
	hipMalloc(&subs_dev,bool_subs_size);
	hipMalloc(&array_dev, array_datas);
	hipMalloc(&sum_dev,i_datasize);
	hipMalloc(&annot_dev,annot_size);
	// Copy host (CPU) arrays to device (GPU) arrays
	hipMemcpy(subs_dev, subs_array, bool_subs_size, hipMemcpyHostToDevice);
	// (sum_host is uninitialized here; the kernel overwrites every entry,
	// so this upload is unnecessary but harmless)
	hipMemcpy(sum_dev, sum_host, i_datasize, hipMemcpyHostToDevice);
	hipMemcpy(array_dev, array, array_datas, hipMemcpyHostToDevice);
	hipMemcpy(annot_dev,init_cov,annot_size,hipMemcpyHostToDevice);
	// Run "mykernel" function on GPU threads with gpu timing
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start);
	hipLaunchKernelGGL(( mykernel) , dim3(n_blocks),dim3(n_threads_per_block) , 0, 0, annot_dev,array_dev,subs_dev,sum_dev,chop_combs,num_sensors,ndp,c);
	hipDeviceSynchronize();
	hipEventRecord(stop);
	hipEventSynchronize(stop);
	float milliseconds = 0;
	hipEventElapsedTime(&milliseconds, start, stop);
	printf("The elapsed time for kernel execution was %.2f ms\n", milliseconds);
	// Copy results back to cpu memory
	hipMemcpy(sum_host, sum_dev, i_datasize, hipMemcpyDeviceToHost);
	// Post process: find the combination with the highest score
	int max = 0;
	unsigned long ind = 0;
	for (unsigned long i = 0; i < chop_combs ; i++){
		if(sum_host[i] > max){
			max = sum_host[i];
			ind = i;
		}
	}
	std::cout << "Max val : " << max << std::endl;
	printf("Highest coverage value at index %lu. \n",ind);
	std::cout << "The index represents camera index: ";
	for(int m = 0; m < num_sensors ; m++){
		printf("%i ", array[ind*num_sensors + m]);
	}
	std::cout << std::endl;
	//Free allocated memory on CPU and GPU
	hipEventDestroy(start); // events were previously leaked
	hipEventDestroy(stop);
	hipFree(subs_dev);
	hipFree(sum_dev);
	hipFree(array_dev);
	hipFree(annot_dev);     // was previously leaked
	delete[] sum_host;
	free(array);
	free(subs_array);
	free(annot_array);      // was previously leaked
	free(init_cov);         // was previously leaked
}
| 65852a87249378dea125d43dd8b75e5133157cf8.cu | /*
CUDA code for GPU optimization of camera placement problem
With support of 2-coverage Region of Interest
Author : Vegard Tveit
Date : 17.04.2018
Comment : The user has to specify:
- Number of sensors to be placed
- Number of possible combinations(nchoosek)
- Modify UNISIZE
- Number of datapoints
- Number of possible placement points
- Number of possible pan angles
- "subsets.txt", "annotations.txt" and "combinations.txt"
*/
// Initial Setup
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
#include <new>
#define UNISIZE 9084
#include <cmath>
#include <algorithm>
#include <numeric>
#include <functional>
#include <fstream>
// Scores one camera-placement combination per thread (CUDA twin of the HIP file).
// annotations[j] : required coverage of data point j (2 = ROI, 1 = normal)
// devarr         : len x nsubs matrix of camera indices (one row per combination)
// subs           : per-camera boolean coverage maps, usize points each
// sum            : output, exactly 'len' entries — coverage score minus ROI penalty
// roisum         : total number of ROI data points
__global__ void mykernel(int* annotations, int* devarr, bool* subs, int* sum, unsigned long len, unsigned long nsubs, unsigned long usize, int roisum)
{
	unsigned long th_id = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
	// Guard: the grid is rounded up to a multiple of the block size, so
	// threads past 'len' must not touch sum[] — the previous
	// "else sum[th_id] = 0;" wrote past the end of the len-sized buffer.
	if (th_id >= len)
		return;
	// Per-thread coverage counter for every data point. 9084 must equal
	// usize (== UNISIZE); this large array lives in local memory.
	int barr[9084] = {0};
	int totsum = 0;     // number of sufficiently covered points
	int count_roi = 0;  // number of sufficiently covered ROI points
	int penalty = 0;
	int alpha = 4000;   // penalty weight for missing ROI coverage
	// Accumulate how many of this combination's cameras see each point.
	for(unsigned long i = 0; i < nsubs; i++)
	{
		int ind = devarr[th_id*nsubs + i]; // camera index of the i-th sensor
		for(unsigned long j = 0; j < usize; j++)
		{
			if(subs[ind*usize + j]){
				barr[j] += 1;
			}
		}
	}
	// A point counts as covered when its coverage meets the annotation
	// requirement (ROI points need 2-coverage).
	for(int i = 0 ; i < usize ; i++){
		if(barr[i] >= annotations[i]){
			totsum += 1;
			if(annotations[i] == 2){
				count_roi += 1;
			}
		}
	}
	// Penalize partially covered ROIs (integer division preserves the
	// original scoring behaviour) and fully uncovered ROIs.
	if(count_roi > 0 && count_roi < roisum){
		penalty = alpha*(roisum/count_roi);
	}
	if(count_roi == 0){
		penalty = alpha*roisum;
	}
	sum[th_id] = totsum - penalty;
}
// Loads the subset-coverage table, the candidate camera combinations and the
// per-point annotations from text files, evaluates every combination on the
// GPU with `mykernel`, and prints the best-scoring combination.
//
// Fixes relative to the original version:
//  * subs_array was allocated with sizeof(bool*) instead of sizeof(bool)
//  * the device result buffer is now sized for every launched thread
//    (data_n), so writes by the tail threads of the last block stay in bounds
//  * the result buffer is zero-initialised with cudaMemset instead of
//    uploading an uninitialised host array
//  * the file-reading loops are bounds-checked against the allocated sizes
//  * annot_dev, annot_array, init_cov and the CUDA timing events are released
void readfromtxt(){
    int num_sensors = 5;                  // cameras per combination
    int ncombs = 1.5e8;                   // number of candidate combinations
    unsigned long ndp = 9084;             // data points per subset
    unsigned long campos = 83;            // camera positions
    unsigned long numpans = 3;            // pan angles per position
    std::cout << "num combs : " << ncombs << std::endl;

    // Dynamically allocate arrays on CPU (capacities reused as read bounds).
    unsigned long n_array = (unsigned long)ncombs*num_sensors;
    unsigned long n_subs  = ndp*campos*numpans;
    int*  array       = (int*)malloc(n_array*sizeof(int));
    bool* subs_array  = (bool*)malloc(n_subs*sizeof(bool)); // was sizeof(bool*)
    int*  annot_array = (int*)malloc(ndp*sizeof(int));

    // Load subsets from txt file and store in 1D array.
    std::ifstream subsfile("Subsets_4cam.txt");
    if (!subsfile) std::cerr << "warning: could not open Subsets_4cam.txt" << std::endl;
    double b;
    unsigned long col_s = 0;
    while (col_s < n_subs && subsfile >> b)
    {
        subs_array[col_s] = (bool) b;
        col_s += 1;
    }
    for(int i = 0; i < 15 ; i++){
        std::cout << subs_array[i] << std::endl;
    }
    std::cout << std::endl << std::endl << std::endl;
    std::cout << col_s << " is col_s" << std::endl;

    // Store combinations array in a 1D array.
    std::ifstream myfile("combtests_1.txt");
    if (!myfile) std::cerr << "warning: could not open combtests_1.txt" << std::endl;
    double bb;
    unsigned long col = 0;
    std::cout << "Hei!" << std::endl;
    while (col < n_array && myfile >> bb)
    {
        array[col] = (int) bb;
        if(col < 10) std::cout << bb << std::endl;
        col += 1;
    }
    std::cout << col << " is col" << std::endl;

    // Store annotations in a 1D array.  The annotation of a point describes
    // whether it is a ROI, obstacle or normal data point.
    std::ifstream annotfile("Annotations.txt");
    if (!annotfile) std::cerr << "warning: could not open Annotations.txt" << std::endl;
    double an;
    unsigned long col2 = 0;
    while (col2 < ndp && annotfile >> an)
    {
        annot_array[col2] = (int) an;
        col2 += 1;
    }

    // Required-coverage array used inside the kernel: a point annotated 2
    // (ROI) must be covered twice, every other point once.
    int* init_cov = (int*)malloc(ndp*sizeof(int));
    int c = 0;                            // number of ROI data points
    for(unsigned long i = 0 ; i < ndp ; i++){
        if(annot_array[i] == 2){
            c += 1;
            init_cov[i] = 2;
        }else{
            init_cov[i] = 1;
        }
    }
    std::cout << "Num of roi dp: " << c << std::endl;

    // Launch geometry: one thread per combination, rounded up to full blocks.
    unsigned long n_threads_per_block = 1024;
    unsigned long n_blocks = (ncombs + n_threads_per_block - 1)/n_threads_per_block;
    std::cout << "Number of blocks :" << n_blocks << std::endl;
    unsigned long data_n = n_blocks*n_threads_per_block; // total launched threads
    unsigned long chop_combs = ncombs;                   // combinations actually evaluated
    std::cout << "No. of available threads: " << data_n << std::endl;
    std::cout << "Number of used threads : " << chop_combs << std::endl;

    size_t i_datasize     = chop_combs*sizeof(int);      // result bytes copied back
    size_t sum_dev_size   = data_n*sizeof(int);          // sized for ALL launched threads
    size_t array_datas    = chop_combs*num_sensors*sizeof(int);
    size_t bool_subs_size = n_subs*sizeof(bool);
    size_t annot_size     = ndp*sizeof(int);
    std::cout << "i_datasize [bytes] : " << i_datasize << std::endl;

    // Allocate CPU memory for the results.
    int* sum_host = new int[chop_combs];
    std::cout << "Array size : " << array_datas <<" and subs size " << bool_subs_size << std::endl;

    // Allocate GPU memory.
    int*  annot_dev;
    bool* subs_dev;
    int*  sum_dev;
    int*  array_dev;
    cudaMalloc(&subs_dev, bool_subs_size);
    cudaMalloc(&array_dev, array_datas);
    cudaMalloc(&sum_dev, sum_dev_size);
    cudaMalloc(&annot_dev, annot_size);

    // Copy inputs to the device and zero the result buffer (the previous code
    // uploaded an uninitialised host array instead).
    cudaMemcpy(subs_dev, subs_array, bool_subs_size, cudaMemcpyHostToDevice);
    cudaMemcpy(array_dev, array, array_datas, cudaMemcpyHostToDevice);
    cudaMemcpy(annot_dev, init_cov, annot_size, cudaMemcpyHostToDevice);
    cudaMemset(sum_dev, 0, sum_dev_size);

    // Run "mykernel" on the GPU, timed with CUDA events (cudaEventSynchronize
    // on the stop event also waits for the kernel itself).
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    mykernel <<< n_blocks, n_threads_per_block >>> (annot_dev, array_dev, subs_dev, sum_dev, chop_combs, num_sensors, ndp, c);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("The elapsed time for kernel execution was %.2f ms\n", milliseconds);

    // Copy results back to cpu memory.
    cudaMemcpy(sum_host, sum_dev, i_datasize, cudaMemcpyDeviceToHost);

    // Post process: pick the combination with the highest penalised score.
    int max = 0;
    unsigned long ind = 0;
    for (unsigned long i = 0; i < chop_combs ; i++){
        if(sum_host[i] > max){
            max = sum_host[i];
            ind = i;
        }
    }
    std::cout << "Max val : " << max << std::endl;
    printf("Highest coverage value at index %lu. \n",ind);
    std::cout << "The index represents camera index: ";
    for(int m = 0; m < num_sensors ; m++){
        printf("%i ", array[ind*num_sensors + m]);
    }
    std::cout << std::endl;

    // Free allocated memory on CPU and GPU (annot_dev, annot_array, init_cov
    // and the events leaked in the original version).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(subs_dev);
    cudaFree(sum_dev);
    cudaFree(array_dev);
    cudaFree(annot_dev);
    delete[] sum_host;
    free(array);
    free(subs_array);
    free(annot_array);
    free(init_cov);
}
|
567242717015801b7855e4889f0b7b787c005c4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlobpcg_shift.cu normal z -> s, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
// Shifts the residual-vector entries of a row left by `shift` slots: thread
// idx reads x[idx] and, when idx >= shift, writes the value back to
// x[idx - shift].  Launched with blockDim.x == num_vecs (one thread per
// vector entry) on a 2D grid with one block per row; num_vecs itself is
// unused in the body.
// NOTE(review): x is indexed by idx only, with no per-row offset derived from
// `row` -- presumably the caller's pointer/grid layout makes that correct;
// verify for num_rows > 1.
// NOTE(review): the second __syncthreads() sits inside a divergent branch
// (only threads with idx >= shift reach it) -- confirm this is intended.
__global__ void
magma_slobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
float * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
float tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloat_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Launch helper for magma_slobpcg_shift_kernel: one block per row of the
// Block-LOBPCG residual layout, one thread per vector entry, one float of
// dynamic shared memory per thread.  Oversize thread/shared-memory requests
// are only reported via printf (original behavior preserved).
extern "C" magma_int_t
magma_slobpcg_shift(
    magma_int_t num_rows,
    magma_int_t num_vecs,
    magma_int_t shift,
    magmaFloat_ptr x,
    magma_queue_t queue )
{
    // every thread handles one entry of a row
    magma_int_t threads_per_block = num_vecs;
    if ( threads_per_block > 1024 )
        printf("error: too many threads requested.\n");

    // dynamic shared memory: one float per thread
    int shmem_bytes = threads_per_block * sizeof( float );
    if ( shmem_bytes > 1024*8 )
        printf("error: too much shared memory requested.\n");

    // spread the num_rows blocks over a roughly square 2D grid
    int grid_x = int( sqrt( float( num_rows )));
    int grid_y = magma_ceildiv( num_rows, grid_x );

    dim3 threads( threads_per_block, 1, 1 );
    dim3 blocks( grid_x, grid_y, 1 );
    hipLaunchKernelGGL( magma_slobpcg_shift_kernel,
                        blocks, threads, shmem_bytes, queue,
                        num_rows, num_vecs, shift, x );

    return MAGMA_SUCCESS;
}
| 567242717015801b7855e4889f0b7b787c005c4e.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlobpcg_shift.cu normal z -> s, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
// Shifts the residual-vector entries of a row left by `shift` slots: thread
// idx reads x[idx] and, when idx >= shift, writes the value back to
// x[idx - shift].  Launched with blockDim.x == num_vecs on a 2D grid with one
// block per row; num_vecs itself is unused in the body.
// NOTE(review): x is indexed by idx only, with no per-row offset derived from
// `row` -- presumably the caller's pointer/grid layout makes that correct;
// verify for num_rows > 1.
// NOTE(review): the second __syncthreads() sits inside a divergent branch
// (only threads with idx >= shift reach it) -- confirm this is intended.
__global__ void
magma_slobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
float * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
float tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloat_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Launch helper for magma_slobpcg_shift_kernel: one block per row of the
// Block-LOBPCG residual layout, one thread per vector entry, one float of
// dynamic shared memory per thread.  Oversize thread/shared-memory requests
// are only reported via printf (original behavior preserved).
extern "C" magma_int_t
magma_slobpcg_shift(
    magma_int_t num_rows,
    magma_int_t num_vecs,
    magma_int_t shift,
    magmaFloat_ptr x,
    magma_queue_t queue )
{
    // every thread handles one entry of a row
    magma_int_t threads_per_block = num_vecs;
    if ( threads_per_block > 1024 )
        printf("error: too many threads requested.\n");

    // dynamic shared memory: one float per thread
    int shmem_bytes = threads_per_block * sizeof( float );
    if ( shmem_bytes > 1024*8 )
        printf("error: too much shared memory requested.\n");

    // spread the num_rows blocks over a roughly square 2D grid
    int grid_x = int( sqrt( float( num_rows )));
    int grid_y = magma_ceildiv( num_rows, grid_x );

    dim3 threads( threads_per_block, 1, 1 );
    dim3 blocks( grid_x, grid_y, 1 );
    magma_slobpcg_shift_kernel<<< blocks, threads, shmem_bytes, queue >>>
        ( num_rows, num_vecs, shift, x );

    return MAGMA_SUCCESS;
}
|
5952b27057f6681e1b919270f3ccfc2c3b511fb5.hip | // !!! This is a file automatically generated by hipify!!!
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//
//void CHECK(hipError_t error)
//{
// if (error != hipSuccess)
// {
// printf("Error : %s : %d, ", __FILE__, __LINE__);
// printf("code : %d, reason: %s \n", error, hipGetErrorString(error));
// exit(1);
// }
//}
//
//void checkResult(float *host_ref, float *gpu_ref, const int N)
//{
// double epsilon = 0.0000001;
// bool match = 1;
//
// for (size_t i = 0; i < N; i++)
// {
// if (abs(host_ref[i] - gpu_ref[i]) > epsilon)
// {
// match = 0;
// printf("Arrays do not match! \n");
// printf("host %5.2f gpu %5.2f at current %d\n", host_ref[i], gpu_ref[i], N);
// break;
// }
// }
//
// if (match) printf("Arrays match . \n\n");
//}
//
//void initialize_data_s(float * ip, int size)
//{
// time_t t;
// srand((unsigned)time(&t));
//
// for (size_t i = 0; i < size; i++)
// {
// ip[i] = (float)(rand() & 0xFF) / 10.0f;
// }
//}
//
//void sum_array_cpu(float * a, float * b, float * c, const int N)
//{
// for (size_t i = 0; i < N; i++)
// {
// c[i] = a[i] + b[i];
// }
//}
//
//__global__ void sum_array_gpu(float * a, float * b, float * c)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// printf("a =%f b = %f c = %f \n", a[i], b[i], c[i]);
//}
//
//void run_code()
//{
// int element_Count = 32;
// size_t number_bytes = element_Count * sizeof(float);
//
// float *h_a, *h_b, *host_ref, *gpu_ref;
//
// h_a = (float *)malloc(number_bytes);
// h_b = (float *)malloc(number_bytes);
// host_ref = (float *)malloc(number_bytes);
// gpu_ref = (float *)malloc(number_bytes);
//
// initialize_data_s(h_a, element_Count);
// initialize_data_s(h_b, element_Count);
//
// memset(host_ref, 0, number_bytes);
// memset(gpu_ref, 0, number_bytes);
//
// float *d_a, *d_b, *d_c;
// hipMalloc((float **)&d_a, number_bytes);
// hipMalloc((float **)&d_b, number_bytes);
// hipMalloc((float **)&d_c, number_bytes);
//
// hipMemcpy(d_a, h_a, number_bytes, hipMemcpyHostToDevice);
// hipMemcpy(d_b, h_b, number_bytes, hipMemcpyHostToDevice);
//
// dim3 block(element_Count);
// dim3 grid(element_Count / block.x);
//
// sum_array_gpu << <grid, block >> > (d_a, d_b, d_c);
//
// hipMemcpy(gpu_ref, d_c, number_bytes, hipMemcpyDeviceToHost);
//
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c);
//
// free(h_a);
// free(h_b);
// free(host_ref);
// free(gpu_ref);
//}
//
////int main()
////{
//// run_code();
//// system("pause");
//// return 0;
////} | 5952b27057f6681e1b919270f3ccfc2c3b511fb5.cu | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//
//void CHECK(cudaError_t error)
//{
// if (error != cudaSuccess)
// {
// printf("Error : %s : %d, ", __FILE__, __LINE__);
// printf("code : %d, reason: %s \n", error, cudaGetErrorString(error));
// exit(1);
// }
//}
//
//void checkResult(float *host_ref, float *gpu_ref, const int N)
//{
// double epsilon = 0.0000001;
// bool match = 1;
//
// for (size_t i = 0; i < N; i++)
// {
// if (abs(host_ref[i] - gpu_ref[i]) > epsilon)
// {
// match = 0;
// printf("Arrays do not match! \n");
// printf("host %5.2f gpu %5.2f at current %d\n", host_ref[i], gpu_ref[i], N);
// break;
// }
// }
//
// if (match) printf("Arrays match . \n\n");
//}
//
//void initialize_data_s(float * ip, int size)
//{
// time_t t;
// srand((unsigned)time(&t));
//
// for (size_t i = 0; i < size; i++)
// {
// ip[i] = (float)(rand() & 0xFF) / 10.0f;
// }
//}
//
//void sum_array_cpu(float * a, float * b, float * c, const int N)
//{
// for (size_t i = 0; i < N; i++)
// {
// c[i] = a[i] + b[i];
// }
//}
//
//__global__ void sum_array_gpu(float * a, float * b, float * c)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// printf("a =%f b = %f c = %f \n", a[i], b[i], c[i]);
//}
//
//void run_code()
//{
// int element_Count = 32;
// size_t number_bytes = element_Count * sizeof(float);
//
// float *h_a, *h_b, *host_ref, *gpu_ref;
//
// h_a = (float *)malloc(number_bytes);
// h_b = (float *)malloc(number_bytes);
// host_ref = (float *)malloc(number_bytes);
// gpu_ref = (float *)malloc(number_bytes);
//
// initialize_data_s(h_a, element_Count);
// initialize_data_s(h_b, element_Count);
//
// memset(host_ref, 0, number_bytes);
// memset(gpu_ref, 0, number_bytes);
//
// float *d_a, *d_b, *d_c;
// cudaMalloc((float **)&d_a, number_bytes);
// cudaMalloc((float **)&d_b, number_bytes);
// cudaMalloc((float **)&d_c, number_bytes);
//
// cudaMemcpy(d_a, h_a, number_bytes, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b, number_bytes, cudaMemcpyHostToDevice);
//
// dim3 block(element_Count);
// dim3 grid(element_Count / block.x);
//
// sum_array_gpu << <grid, block >> > (d_a, d_b, d_c);
//
// cudaMemcpy(gpu_ref, d_c, number_bytes, cudaMemcpyDeviceToHost);
//
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c);
//
// free(h_a);
// free(h_b);
// free(host_ref);
// free(gpu_ref);
//}
//
////int main()
////{
//// run_code();
//// system("pause");
//// return 0;
////} |
36eca20ce22ea347b6aa1d52a5ff48992d07789e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <typeinfo>
#include <multigrid_helper.cuh>
namespace quda {
#ifdef GPU_MULTIGRID
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
// Argument container passed (by value) to the prolongation kernels: accessors
// for the fine output field, the coarse input field and the transfer vectors
// V, plus the fine->coarse geometry and spin maps.
template <typename Float, typename vFloat, int fineSpin, int fineColor, int coarseSpin, int coarseColor, QudaFieldOrder order>
struct ProlongateArg {
FieldOrderCB<Float,fineSpin,fineColor,1,order> out; // fine-grid output field accessor
const FieldOrderCB<Float,coarseSpin,coarseColor,1,order> in; // coarse-grid input field accessor
const FieldOrderCB<Float,fineSpin,fineColor,coarseColor,order,vFloat> V; // transfer (prolongation) vectors, stored at precision vFloat
const int *geo_map; // need to make a device copy of this
const spin_mapper<fineSpin,coarseSpin> spin_map; // compile-time fine-spin -> coarse-spin map
const int parity; // the parity of the output field (if single parity)
const int nParity; // number of parities of input fine field
ProlongateArg(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &V,
const int *geo_map, const int parity)
: out(out), in(in), V(V), geo_map(geo_map), spin_map(), parity(parity), nParity(out.SiteSubset()) { }
// Copy constructor so the struct can be passed by value at kernel launch.
ProlongateArg(const ProlongateArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,order> &arg)
: out(arg.out), in(arg.in), V(arg.V), geo_map(arg.geo_map), spin_map(),
parity(arg.parity), nParity(arg.nParity) { }
};
/**
Applies the grid prolongation operator (coarse to fine)
*/
// First prolongation step: for fine site (parity, x_cb), look up the matching
// coarse site via geo_map and copy all coarseColor components of each fine
// spin (mapped through spin_map to its coarse spin) into the caller-provided
// array out[].
template <typename Float, int fineSpin, int coarseColor, class Coarse, typename S>
__device__ __host__ inline void prolongate(complex<Float> out[fineSpin*coarseColor], const Coarse &in,
int parity, int x_cb, const int *geo_map, const S& spin_map, int fineVolumeCB) {
int x = parity*fineVolumeCB + x_cb; // full (lexicographic-in-parity) fine index
int x_coarse = geo_map[x]; // full coarse index of this site's aggregate
// split the full coarse index into (parity, checkerboard) coordinates
int parity_coarse = (x_coarse >= in.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*in.VolumeCB();
#pragma unroll
for (int s=0; s<fineSpin; s++) {
#pragma unroll
for (int c=0; c<coarseColor; c++) {
out[s*coarseColor+c] = in(parity_coarse, x_coarse_cb, spin_map(s,parity), c);
}
}
}
/**
Rotates from the coarse-color basis into the fine-color basis. This
is the second step of applying the prolongator.
*/
// Second prolongation step: rotate the coarse-color vector in[] into the
// fine-color basis by multiplying with V.  Each call handles
// fine_colors_per_thread consecutive fine colors starting at
// fine_color_block.
// NOTE(review): the j-loop advances by color_unroll (2), so coarseColor is
// assumed even -- this holds for every instantiation in this file.
template <typename Float, int fineSpin, int fineColor, int coarseColor, int fine_colors_per_thread,
class FineColor, class Rotator>
__device__ __host__ inline void rotateFineColor(FineColor &out, const complex<Float> in[fineSpin*coarseColor],
const Rotator &V, int parity, int nParity, int x_cb, int fine_color_block) {
const int spinor_parity = (nParity == 2) ? parity : 0;
const int v_parity = (V.Nparity() == 2) ? parity : 0;
constexpr int color_unroll = 2; // two independent partial sums per output
#pragma unroll
for (int s=0; s<fineSpin; s++)
#pragma unroll
for (int fine_color_local=0; fine_color_local<fine_colors_per_thread; fine_color_local++)
out(spinor_parity, x_cb, s, fine_color_block+fine_color_local) = 0.0; // global fine color index
#pragma unroll
for (int s=0; s<fineSpin; s++) {
#pragma unroll
for (int fine_color_local=0; fine_color_local<fine_colors_per_thread; fine_color_local++) {
int i = fine_color_block + fine_color_local; // global fine color index
complex<Float> partial[color_unroll]; // separate accumulators expose instruction-level parallelism
#pragma unroll
for (int k=0; k<color_unroll; k++) partial[k] = 0.0;
#pragma unroll
for (int j=0; j<coarseColor; j+=color_unroll) {
// V is a ColorMatrixField with internal dimensions Ns * Nc * Nvec
#pragma unroll
for (int k=0; k<color_unroll; k++)
partial[k] += V(v_parity, x_cb, s, i, j+k) * in[s*coarseColor + j + k];
}
#pragma unroll
for (int k=0; k<color_unroll; k++) out(spinor_parity, x_cb, s, i) += partial[k];
}
}
}
// CPU reference implementation: iterate over parities and fine checkerboard
// sites, applying both prolongation steps at each site.
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int fine_colors_per_thread, typename Arg>
void Prolongate(Arg &arg) {
for (int parity=0; parity<arg.nParity; parity++) {
parity = (arg.nParity == 2) ? parity : arg.parity; // single-parity fields pin parity (loop then runs once)
for (int x_cb=0; x_cb<arg.out.VolumeCB(); x_cb++) {
complex<Float> tmp[fineSpin*coarseColor];
prolongate<Float,fineSpin,coarseColor>(tmp, arg.in, parity, x_cb, arg.geo_map, arg.spin_map, arg.out.VolumeCB());
for (int fine_color_block=0; fine_color_block<fineColor; fine_color_block+=fine_colors_per_thread) {
rotateFineColor<Float,fineSpin,fineColor,coarseColor,fine_colors_per_thread>
(arg.out, tmp, arg.V, parity, arg.nParity, x_cb, fine_color_block);
}
}
}
}
// GPU kernel: thread x -> fine checkerboard site, thread y -> parity (when
// both parities are present), thread z -> block of fine_colors_per_thread
// fine colors.
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int fine_colors_per_thread, typename Arg>
__global__ void ProlongateKernel(Arg arg) {
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
int parity = arg.nParity == 2 ? blockDim.y*blockIdx.y + threadIdx.y : arg.parity;
if (x_cb >= arg.out.VolumeCB()) return; // guard the grid tail
int fine_color_block = (blockDim.z*blockIdx.z + threadIdx.z) * fine_colors_per_thread;
if (fine_color_block >= fineColor) return; // guard the color dimension
complex<Float> tmp[fineSpin*coarseColor];
prolongate<Float,fineSpin,coarseColor>(tmp, arg.in, parity, x_cb, arg.geo_map, arg.spin_map, arg.out.VolumeCB());
rotateFineColor<Float,fineSpin,fineColor,coarseColor,fine_colors_per_thread>
(arg.out, tmp, arg.V, parity, arg.nParity, x_cb, fine_color_block);
}
// Tunable launcher for the prolongation operator: dispatches to the CPU
// reference implementation or the autotuned GPU kernel depending on where the
// fields live.  Tuning vector dims: y = fine parity, z = fine-color blocks.
//
// Fix: the declaration of the coarse input field member was mis-encoded as
// "const ColorSpinorField ∈" (an HTML-entity garbling of "&in;"); restored.
template <typename Float, typename vFloat, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int fine_colors_per_thread>
class ProlongateLaunch : public TunableVectorYZ {

protected:
ColorSpinorField &out;        // fine-grid output field
const ColorSpinorField &in;   // coarse-grid input field
const ColorSpinorField &V;    // transfer (null-space) vectors
const int *fine_to_coarse;    // fine-site -> coarse-site geometry map
int parity;                   // parity of the output field if single parity
QudaFieldLocation location;   // where the fields reside (CPU or GPU)
char vol[TuneKey::volume_n];  // volume string identifying this launch in the tune cache

bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return out.VolumeCB(); } // fine parity is the block y dimension

public:
// Caches the volume/aux strings that key this launch in the autotuner.
ProlongateLaunch(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &V,
const int *fine_to_coarse, int parity)
: TunableVectorYZ(out.SiteSubset(), fineColor/fine_colors_per_thread), out(out), in(in), V(V),
fine_to_coarse(fine_to_coarse), parity(parity), location(checkLocation(out, in, V))
{
strcpy(vol, out.VolString());
strcat(vol, ",");
strcat(vol, in.VolString());
strcpy(aux, out.AuxString());
strcat(aux, ",");
strcat(aux, in.AuxString());
}

virtual ~ProlongateLaunch() { }

// Execute the prolongation: inline on the CPU, or as a tuned kernel launch
// on the given stream for GPU fields.
void apply(const hipStream_t &stream) {
if (location == QUDA_CPU_FIELD_LOCATION) {
if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
ProlongateArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>
arg(out, in, V, fine_to_coarse, parity);
Prolongate<Float,fineSpin,fineColor,coarseSpin,coarseColor,fine_colors_per_thread>(arg);
} else {
errorQuda("Unsupported field order %d", out.FieldOrder());
}
} else {
if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
ProlongateArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_FLOAT2_FIELD_ORDER>
arg(out, in, V, fine_to_coarse, parity);
hipLaunchKernelGGL(( ProlongateKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,fine_colors_per_thread>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else {
errorQuda("Unsupported field order %d", out.FieldOrder());
}
}
}

TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }

// 8 flops (complex multiply-add) per fine spin x fine color x coarse color per site.
long long flops() const { return 8 * fineSpin * fineColor * coarseColor * out.SiteSubset()*(long long)out.VolumeCB(); }

long long bytes() const {
size_t v_bytes = V.Bytes() / (V.SiteSubset() == out.SiteSubset() ? 1 : 2);
return in.Bytes() + out.Bytes() + v_bytes + out.SiteSubset()*out.VolumeCB()*sizeof(int);
}

};
// Dispatch on the storage precision of the transfer vectors V: half precision
// stores V as short, otherwise V must match the spinor precision.
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor>
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
const int *fine_to_coarse, int parity) {
// for all grids use 1 color per thread
constexpr int fine_colors_per_thread = 1;
if (v.Precision() == QUDA_HALF_PRECISION) {
ProlongateLaunch<Float, short, fineSpin, fineColor, coarseSpin, coarseColor, fine_colors_per_thread>
prolongator(out, in, v, fine_to_coarse, parity);
prolongator.apply(0); // 0 = default stream
} else if (v.Precision() == in.Precision()) {
ProlongateLaunch<Float, Float, fineSpin, fineColor, coarseSpin, coarseColor, fine_colors_per_thread>
prolongator(out, in, v, fine_to_coarse, parity);
prolongator.apply(0);
} else {
errorQuda("Unsupported V precision %d", v.Precision());
}
if (checkLocation(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
// Compile-time dispatch on the fine color count and the number of transfer
// vectors nVec (= coarse color).  The coarse field must have two spins, and
// the runtime spin_map must agree with the compile-time spin_mapper.
template <typename Float, int fineSpin>
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int nVec, const int *fine_to_coarse, const int * const * spin_map, int parity) {
if (in.Nspin() != 2) errorQuda("Coarse spin %d is not supported", in.Nspin());
const int coarseSpin = 2;
// first check that the spin_map matches the spin_mapper
spin_mapper<fineSpin,coarseSpin> mapper;
for (int s=0; s<fineSpin; s++)
for (int p=0; p<2; p++)
if (mapper(s,p) != spin_map[s][p]) errorQuda("Spin map does not match spin_mapper");
if (out.Ncolor() == 3) {
const int fineColor = 3;
if (nVec == 4) {
Prolongate<Float,fineSpin,fineColor,coarseSpin,4>(out, in, v, fine_to_coarse, parity);
} else if (nVec == 6) { // Free field Wilson
Prolongate<Float,fineSpin,fineColor,coarseSpin,6>(out, in, v, fine_to_coarse, parity);
} else if (nVec == 24) {
Prolongate<Float,fineSpin,fineColor,coarseSpin,24>(out, in, v, fine_to_coarse, parity);
} else if (nVec == 32) {
Prolongate<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (out.Ncolor() == 6) { // for coarsening coarsened Wilson free field.
const int fineColor = 6;
if (nVec == 6) { // these are probably only for debugging only
Prolongate<Float,fineSpin,fineColor,coarseSpin,6>(out, in, v, fine_to_coarse, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (out.Ncolor() == 24) {
const int fineColor = 24;
if (nVec == 24) { // to keep compilation under control coarse grids have same or more colors
Prolongate<Float,fineSpin,fineColor,coarseSpin,24>(out, in, v, fine_to_coarse, parity);
} else if (nVec == 32) {
Prolongate<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else if (out.Ncolor() == 32) {
const int fineColor = 32;
if (nVec == 32) {
Prolongate<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, parity);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
} else {
errorQuda("Unsupported nColor %d", out.Ncolor());
}
}
// Dispatch on the number of fine spins: 2 (coarse/preconditioned fields),
// 4 (Wilson) and 1 (staggered), the latter two only when the corresponding
// Dirac operators are compiled in.
template <typename Float>
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int * const * spin_map, int parity) {
if (out.Nspin() == 2) {
Prolongate<Float,2>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#ifdef GPU_WILSON_DIRAC
} else if (out.Nspin() == 4) {
Prolongate<Float,4>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#endif
#ifdef GPU_STAGGERED_DIRAC
} else if (out.Nspin() == 1) {
Prolongate<Float,1>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#endif
} else {
errorQuda("Unsupported nSpin %d", out.Nspin());
}
}
#endif // GPU_MULTIGRID
// Public entry point: apply the grid prolongation operator (coarse-to-fine
// transfer) using the transfer vectors v.  Checks field-order consistency,
// then dispatches on the spinor precision.
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int * const * spin_map, int parity) {
#ifdef GPU_MULTIGRID
if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder())
errorQuda("Field orders do not match (out=%d, in=%d, v=%d)",
out.FieldOrder(), in.FieldOrder(), v.FieldOrder());
QudaPrecision precision = checkPrecision(out, in);
if (precision == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
Prolongate<double>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (precision == QUDA_SINGLE_PRECISION) {
Prolongate<float>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
} else {
errorQuda("Unsupported precision %d", out.Precision());
}
if (checkLocation(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
#else
errorQuda("Multigrid has not been built");
#endif
}
} // end namespace quda
| 36eca20ce22ea347b6aa1d52a5ff48992d07789e.cu | #include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <typeinfo>
#include <multigrid_helper.cuh>
namespace quda {
#ifdef GPU_MULTIGRID
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
// Argument container passed by value to the prolongation kernels (CUDA twin
// of the HIP version above).
template <typename Float, typename vFloat, int fineSpin, int fineColor, int coarseSpin, int coarseColor, QudaFieldOrder order>
struct ProlongateArg {
FieldOrderCB<Float,fineSpin,fineColor,1,order> out; // fine-grid output field accessor
const FieldOrderCB<Float,coarseSpin,coarseColor,1,order> in; // coarse-grid input field accessor
const FieldOrderCB<Float,fineSpin,fineColor,coarseColor,order,vFloat> V; // transfer (prolongation) vectors
const int *geo_map; // need to make a device copy of this
const spin_mapper<fineSpin,coarseSpin> spin_map; // compile-time fine-spin -> coarse-spin map
const int parity; // the parity of the output field (if single parity)
const int nParity; // number of parities of input fine field
ProlongateArg(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &V,
const int *geo_map, const int parity)
: out(out), in(in), V(V), geo_map(geo_map), spin_map(), parity(parity), nParity(out.SiteSubset()) { }
// Copy constructor so the struct can be passed by value at kernel launch.
ProlongateArg(const ProlongateArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,order> &arg)
: out(arg.out), in(arg.in), V(arg.V), geo_map(arg.geo_map), spin_map(),
parity(arg.parity), nParity(arg.nParity) { }
};
/**
Applies the grid prolongation operator (coarse to fine)
*/
// First prolongation step: copy the coarse degrees of freedom of this fine
// site's aggregate (located via geo_map, spins mapped via spin_map) into out[].
template <typename Float, int fineSpin, int coarseColor, class Coarse, typename S>
__device__ __host__ inline void prolongate(complex<Float> out[fineSpin*coarseColor], const Coarse &in,
int parity, int x_cb, const int *geo_map, const S& spin_map, int fineVolumeCB) {
int x = parity*fineVolumeCB + x_cb; // full fine index
int x_coarse = geo_map[x]; // full coarse index of this site's aggregate
// split the full coarse index into (parity, checkerboard) coordinates
int parity_coarse = (x_coarse >= in.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*in.VolumeCB();
#pragma unroll
for (int s=0; s<fineSpin; s++) {
#pragma unroll
for (int c=0; c<coarseColor; c++) {
out[s*coarseColor+c] = in(parity_coarse, x_coarse_cb, spin_map(s,parity), c);
}
}
}
/**
Rotates from the coarse-color basis into the fine-color basis. This
is the second step of applying the prolongator.
*/
// Second prolongation step: rotate the coarse-color vector in[] into the fine
// color basis with V, handling fine_colors_per_thread fine colors starting at
// fine_color_block.
// NOTE(review): the j-loop advances by color_unroll (2), so coarseColor is
// assumed even -- this holds for every instantiation in this file.
template <typename Float, int fineSpin, int fineColor, int coarseColor, int fine_colors_per_thread,
class FineColor, class Rotator>
__device__ __host__ inline void rotateFineColor(FineColor &out, const complex<Float> in[fineSpin*coarseColor],
const Rotator &V, int parity, int nParity, int x_cb, int fine_color_block) {
const int spinor_parity = (nParity == 2) ? parity : 0;
const int v_parity = (V.Nparity() == 2) ? parity : 0;
constexpr int color_unroll = 2; // two independent partial sums per output
#pragma unroll
for (int s=0; s<fineSpin; s++)
#pragma unroll
for (int fine_color_local=0; fine_color_local<fine_colors_per_thread; fine_color_local++)
out(spinor_parity, x_cb, s, fine_color_block+fine_color_local) = 0.0; // global fine color index
#pragma unroll
for (int s=0; s<fineSpin; s++) {
#pragma unroll
for (int fine_color_local=0; fine_color_local<fine_colors_per_thread; fine_color_local++) {
int i = fine_color_block + fine_color_local; // global fine color index
complex<Float> partial[color_unroll]; // separate accumulators expose instruction-level parallelism
#pragma unroll
for (int k=0; k<color_unroll; k++) partial[k] = 0.0;
#pragma unroll
for (int j=0; j<coarseColor; j+=color_unroll) {
// V is a ColorMatrixField with internal dimensions Ns * Nc * Nvec
#pragma unroll
for (int k=0; k<color_unroll; k++)
partial[k] += V(v_parity, x_cb, s, i, j+k) * in[s*coarseColor + j + k];
}
#pragma unroll
for (int k=0; k<color_unroll; k++) out(spinor_parity, x_cb, s, i) += partial[k];
}
}
}
// CPU reference implementation: iterate over parities and fine checkerboard
// sites, applying both prolongation steps at each site.
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int fine_colors_per_thread, typename Arg>
void Prolongate(Arg &arg) {
for (int parity=0; parity<arg.nParity; parity++) {
parity = (arg.nParity == 2) ? parity : arg.parity; // single-parity fields pin parity (loop then runs once)
for (int x_cb=0; x_cb<arg.out.VolumeCB(); x_cb++) {
complex<Float> tmp[fineSpin*coarseColor];
prolongate<Float,fineSpin,coarseColor>(tmp, arg.in, parity, x_cb, arg.geo_map, arg.spin_map, arg.out.VolumeCB());
for (int fine_color_block=0; fine_color_block<fineColor; fine_color_block+=fine_colors_per_thread) {
rotateFineColor<Float,fineSpin,fineColor,coarseColor,fine_colors_per_thread>
(arg.out, tmp, arg.V, parity, arg.nParity, x_cb, fine_color_block);
}
}
}
}
// GPU kernel: thread x -> fine checkerboard site, thread y -> parity (when
// both parities are present), thread z -> block of fine colors.
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int fine_colors_per_thread, typename Arg>
__global__ void ProlongateKernel(Arg arg) {
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
int parity = arg.nParity == 2 ? blockDim.y*blockIdx.y + threadIdx.y : arg.parity;
if (x_cb >= arg.out.VolumeCB()) return; // guard the grid tail
int fine_color_block = (blockDim.z*blockIdx.z + threadIdx.z) * fine_colors_per_thread;
if (fine_color_block >= fineColor) return; // guard the color dimension
complex<Float> tmp[fineSpin*coarseColor];
prolongate<Float,fineSpin,coarseColor>(tmp, arg.in, parity, x_cb, arg.geo_map, arg.spin_map, arg.out.VolumeCB());
rotateFineColor<Float,fineSpin,fineColor,coarseColor,fine_colors_per_thread>
(arg.out, tmp, arg.V, parity, arg.nParity, x_cb, fine_color_block);
}
// Tunable launcher for the prolongation operator: dispatches to the CPU
// reference implementation or the autotuned GPU kernel depending on where the
// fields live.  Tuning vector dims: y = fine parity, z = fine-color blocks.
//
// Fix: the declaration of the coarse input field member was mis-encoded as
// "const ColorSpinorField ∈" (an HTML-entity garbling of "&in;"); restored.
template <typename Float, typename vFloat, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int fine_colors_per_thread>
class ProlongateLaunch : public TunableVectorYZ {

protected:
ColorSpinorField &out;        // fine-grid output field
const ColorSpinorField &in;   // coarse-grid input field
const ColorSpinorField &V;    // transfer (null-space) vectors
const int *fine_to_coarse;    // fine-site -> coarse-site geometry map
int parity;                   // parity of the output field if single parity
QudaFieldLocation location;   // where the fields reside (CPU or GPU)
char vol[TuneKey::volume_n];  // volume string identifying this launch in the tune cache

bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return out.VolumeCB(); } // fine parity is the block y dimension

public:
// Caches the volume/aux strings that key this launch in the autotuner.
ProlongateLaunch(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &V,
const int *fine_to_coarse, int parity)
: TunableVectorYZ(out.SiteSubset(), fineColor/fine_colors_per_thread), out(out), in(in), V(V),
fine_to_coarse(fine_to_coarse), parity(parity), location(checkLocation(out, in, V))
{
strcpy(vol, out.VolString());
strcat(vol, ",");
strcat(vol, in.VolString());
strcpy(aux, out.AuxString());
strcat(aux, ",");
strcat(aux, in.AuxString());
}

virtual ~ProlongateLaunch() { }

// Execute the prolongation: inline on the CPU, or as a tuned kernel launch
// on the given stream for GPU fields.
void apply(const cudaStream_t &stream) {
if (location == QUDA_CPU_FIELD_LOCATION) {
if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
ProlongateArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>
arg(out, in, V, fine_to_coarse, parity);
Prolongate<Float,fineSpin,fineColor,coarseSpin,coarseColor,fine_colors_per_thread>(arg);
} else {
errorQuda("Unsupported field order %d", out.FieldOrder());
}
} else {
if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
ProlongateArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_FLOAT2_FIELD_ORDER>
arg(out, in, V, fine_to_coarse, parity);
ProlongateKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,fine_colors_per_thread>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else {
errorQuda("Unsupported field order %d", out.FieldOrder());
}
}
}

TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }

// 8 flops (complex multiply-add) per fine spin x fine color x coarse color per site.
long long flops() const { return 8 * fineSpin * fineColor * coarseColor * out.SiteSubset()*(long long)out.VolumeCB(); }

long long bytes() const {
size_t v_bytes = V.Bytes() / (V.SiteSubset() == out.SiteSubset() ? 1 : 2);
return in.Bytes() + out.Bytes() + v_bytes + out.SiteSubset()*out.VolumeCB()*sizeof(int);
}

};
// Instantiate the prolongator for the null-space vector precision: half
// precision stores V as shorts, otherwise V must match the spinor precision.
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor>
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
                const int *fine_to_coarse, int parity) {
  // For all grids we process a single fine color per thread.
  constexpr int fine_colors_per_thread = 1;
  const QudaPrecision v_prec = v.Precision();
  if (v_prec == QUDA_HALF_PRECISION) {
    ProlongateLaunch<Float, short, fineSpin, fineColor, coarseSpin, coarseColor, fine_colors_per_thread>
      launcher(out, in, v, fine_to_coarse, parity);
    launcher.apply(0);
  } else if (v_prec == in.Precision()) {
    ProlongateLaunch<Float, Float, fineSpin, fineColor, coarseSpin, coarseColor, fine_colors_per_thread>
      launcher(out, in, v, fine_to_coarse, parity);
    launcher.apply(0);
  } else {
    errorQuda("Unsupported V precision %d", v_prec);
  }
  // Surface any asynchronous launch failure on the GPU path.
  if (checkLocation(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
// Dispatch on the compile-time (fine color, null-vector count) combination.
// Only the pairings instantiated below are supported; coarse spin is fixed
// at 2 for all multigrid levels.
template <typename Float, int fineSpin>
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
                int nVec, const int *fine_to_coarse, const int * const * spin_map, int parity) {
  if (in.Nspin() != 2) errorQuda("Coarse spin %d is not supported", in.Nspin());
  const int coarseSpin = 2;
  // Sanity check: the runtime spin_map must agree with the compile-time mapper.
  spin_mapper<fineSpin,coarseSpin> mapper;
  for (int s=0; s<fineSpin; s++)
    for (int p=0; p<2; p++)
      if (mapper(s,p) != spin_map[s][p]) errorQuda("Spin map does not match spin_mapper");
  switch (out.Ncolor()) {
  case 3:
    switch (nVec) {
    case 4:  Prolongate<Float,fineSpin,3,coarseSpin,4>(out, in, v, fine_to_coarse, parity); break;
    case 6:  Prolongate<Float,fineSpin,3,coarseSpin,6>(out, in, v, fine_to_coarse, parity); break; // free-field Wilson
    case 24: Prolongate<Float,fineSpin,3,coarseSpin,24>(out, in, v, fine_to_coarse, parity); break;
    case 32: Prolongate<Float,fineSpin,3,coarseSpin,32>(out, in, v, fine_to_coarse, parity); break;
    default: errorQuda("Unsupported nVec %d", nVec);
    }
    break;
  case 6: // for coarsening a coarsened-Wilson free field (debugging only)
    if (nVec == 6) Prolongate<Float,fineSpin,6,coarseSpin,6>(out, in, v, fine_to_coarse, parity);
    else errorQuda("Unsupported nVec %d", nVec);
    break;
  case 24: // to keep compilation under control coarse grids have same or more colors
    if (nVec == 24)      Prolongate<Float,fineSpin,24,coarseSpin,24>(out, in, v, fine_to_coarse, parity);
    else if (nVec == 32) Prolongate<Float,fineSpin,24,coarseSpin,32>(out, in, v, fine_to_coarse, parity);
    else errorQuda("Unsupported nVec %d", nVec);
    break;
  case 32:
    if (nVec == 32) Prolongate<Float,fineSpin,32,coarseSpin,32>(out, in, v, fine_to_coarse, parity);
    else errorQuda("Unsupported nVec %d", nVec);
    break;
  default:
    errorQuda("Unsupported nColor %d", out.Ncolor());
  }
}
// Dispatch on the fine-grid spin.  The nSpin=4 (Wilson-type) and nSpin=1
// (staggered-type) branches are compiled only when the corresponding dirac
// operator support is enabled at build time.
template <typename Float>
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
                int Nvec, const int *fine_to_coarse, const int * const * spin_map, int parity) {
  if (out.Nspin() == 2) {
    Prolongate<Float,2>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#ifdef GPU_WILSON_DIRAC
  } else if (out.Nspin() == 4) {
    Prolongate<Float,4>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#endif
#ifdef GPU_STAGGERED_DIRAC
  } else if (out.Nspin() == 1) {
    Prolongate<Float,1>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#endif
  } else {
    errorQuda("Unsupported nSpin %d", out.Nspin());
  }
}
#endif // GPU_MULTIGRID
// Public entry point: apply the prolongation operator, interpolating the
// coarse vector <in> onto the fine grid <out> using the null-space vectors
// <v> and the fine->coarse site mapping.  Errors out if multigrid support
// was not compiled in.
void Prolongate(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
                int Nvec, const int *fine_to_coarse, const int * const * spin_map, int parity) {
#ifdef GPU_MULTIGRID
  // All three fields must share a common internal ordering.
  if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder())
    errorQuda("Field orders do not match (out=%d, in=%d, v=%d)",
              out.FieldOrder(), in.FieldOrder(), v.FieldOrder());
  QudaPrecision precision = checkPrecision(out, in);
  if (precision == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
    Prolongate<double>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
#else
    errorQuda("Double precision multigrid has not been enabled");
#endif
  } else if (precision == QUDA_SINGLE_PRECISION) {
    Prolongate<float>(out, in, v, Nvec, fine_to_coarse, spin_map, parity);
  } else {
    errorQuda("Unsupported precision %d", out.Precision());
  }
  // Surface any asynchronous kernel failure before returning.
  if (checkLocation(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
#else
  errorQuda("Multigrid has not been built");
#endif
}
} // end namespace quda
|
77600ebdbad69c2726ce1cb9837a9c7f8f4f6a27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Stage the two leading columns of each block's tile from the main grid
// <dst> into the column halo buffer <shared_cols>.  Each thread handles one
// row (threadIdx.y) and walks tile_z slices; the halo is laid out per slice
// as gridDim.x blocks of 2*n_rows doubles.  shared_rows/shared_slices are
// unused here (presumably filled by sibling kernels -- TODO confirm).
__global__ void gpu_stencil37_hack1_cp_cols(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0)&& threadIdx.x==0 && threadIdx.z==0){
        printf("copy cols: begin\n");
        printf("copy cols: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
        printf("copy cols: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
        printf("copy cols: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
    }
#endif
    // Global coordinates of this block's tile origin.
    int base_global_slice = tile_z * blockIdx.z;
    int base_global_row = blockDim.y * blockIdx.y;
    int base_global_col = tile_x * blockIdx.x;
    int area_dst = n_rows*n_cols;          // elements per slice of the main grid
    int area_shared = gridDim.x*n_rows*2;  // elements per slice of the halo (2 columns per block)
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0&&threadIdx.x==0&&threadIdx.z==0)){
        printf("area_shared=%d\n",area_shared);
    }
#endif
    int base_global_idx = base_global_slice*area_dst + base_global_row * n_cols + base_global_col;
    int nextCol= base_global_col+1;
    bool legalNextCol = (nextCol<n_cols)?1:0;   // second column may fall off the grid
    int ty = threadIdx.y;
    bool legalCurRow = (base_global_row + ty)<n_rows;
    for(int tz=0;tz<tile_z;++tz){
        bool legalCurSlice = (base_global_slice + tz)<n_slices;
        // Source element in dst and destination slot in the halo buffer.
        int idx_dst =base_global_idx + tz*area_dst + ty*n_cols ;
        int idx = (base_global_slice+tz)*area_shared + blockIdx.x*2*n_rows+blockIdx.y*blockDim.y+ty;
        if(legalCurRow && legalCurSlice){
            shared_cols[idx] = dst[idx_dst];
        }
        if(legalCurRow && legalCurSlice && legalNextCol){
            shared_cols[idx + n_rows] = dst[idx_dst + 1];
        }
        // Uniform barrier: tile_z is the same for every thread in the block.
        __syncthreads();
    }
    __syncthreads();
#ifdef CUDA_CUDA_DEBUG
    if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 && (threadIdx.x==0)){
        // printf("shared_cols: addr:%d, val = %f\n", threadIdx.y,shared_cols[threadIdx.y]);
    }
#endif
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0 && threadIdx.x==0 && threadIdx.z==0)){
        printf("copy cols end!\n");
    }
#endif
} | 77600ebdbad69c2726ce1cb9837a9c7f8f4f6a27.cu | #include "includes.h"
// Stage the two leading columns of each block's tile from the main grid
// <dst> into the column halo buffer <shared_cols>.  Each thread handles one
// row (threadIdx.y) and walks tile_z slices; the halo is laid out per slice
// as gridDim.x blocks of 2*n_rows doubles.  shared_rows/shared_slices are
// unused here (presumably filled by sibling kernels -- TODO confirm).
__global__ void gpu_stencil37_hack1_cp_cols(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0)&& threadIdx.x==0 && threadIdx.z==0){
        printf("copy cols: begin\n");
        printf("copy cols: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
        printf("copy cols: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
        printf("copy cols: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
    }
#endif
    // Global coordinates of this block's tile origin.
    int base_global_slice = tile_z * blockIdx.z;
    int base_global_row = blockDim.y * blockIdx.y;
    int base_global_col = tile_x * blockIdx.x;
    int area_dst = n_rows*n_cols;          // elements per slice of the main grid
    int area_shared = gridDim.x*n_rows*2;  // elements per slice of the halo (2 columns per block)
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0&&threadIdx.x==0&&threadIdx.z==0)){
        printf("area_shared=%d\n",area_shared);
    }
#endif
    int base_global_idx = base_global_slice*area_dst + base_global_row * n_cols + base_global_col;
    int nextCol= base_global_col+1;
    bool legalNextCol = (nextCol<n_cols)?1:0;   // second column may fall off the grid
    int ty = threadIdx.y;
    bool legalCurRow = (base_global_row + ty)<n_rows;
    for(int tz=0;tz<tile_z;++tz){
        bool legalCurSlice = (base_global_slice + tz)<n_slices;
        // Source element in dst and destination slot in the halo buffer.
        int idx_dst =base_global_idx + tz*area_dst + ty*n_cols ;
        int idx = (base_global_slice+tz)*area_shared + blockIdx.x*2*n_rows+blockIdx.y*blockDim.y+ty;
        if(legalCurRow && legalCurSlice){
            shared_cols[idx] = dst[idx_dst];
        }
        if(legalCurRow && legalCurSlice && legalNextCol){
            shared_cols[idx + n_rows] = dst[idx_dst + 1];
        }
        // Uniform barrier: tile_z is the same for every thread in the block.
        __syncthreads();
    }
    __syncthreads();
#ifdef CUDA_CUDA_DEBUG
    if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 && (threadIdx.x==0)){
        // printf("shared_cols: addr:%d, val = %f\n", threadIdx.y,shared_cols[threadIdx.y]);
    }
#endif
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0 && threadIdx.x==0 && threadIdx.z==0)){
        printf("copy cols end!\n");
    }
#endif
}
4ea525769546bffe8e0579685c6d0df27b543be6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdint.h>
#ifndef TSIZE
#define TSIZE 32
#endif
#define BSIZE 32
#define PSIZE 4096
#define FPSIZE (4096/sizeof(float))
#define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
#define PXL_GLOBAL_PTR "l"
#else
#define PXL_GLOBAL_PTR "r"
#endif
// Inline-PTX wrappers: request that the cache line containing <ptr> be
// fetched into L1 (global), L1 (uniform/generic), or L2 respectively.
// These are hints only; they do not block.
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l1(const void* const ptr)
{
    asm("prefetch.global.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr)
{
    asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l2(const void* const ptr)
{
    asm("prefetch.global.L2 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
// Read the per-device %globaltimer register (nanosecond-resolution clock).
static __device__ __inline__ uint64_t __nano(){
    uint64_t mclk;
    asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(mclk));
    return mclk ;
}
// Each of the first TSIZE threads sums one element from three regions of
// a/b (spaced FPSIZE*TSIZE floats, i.e. one page per thread, apart) into c,
// issuing an L2 prefetch of the destination before each store.
__global__ void foo(float* a, float* b, float* c)
{
    size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
    // One 4 KiB page (FPSIZE floats) per thread, three regions apart.
    size_t tid1 = tid * FPSIZE;
    size_t tid2 = tid1 + (FPSIZE * TSIZE);
    size_t tid3 = tid2 + (FPSIZE * TSIZE);
    if (tid < TSIZE)
    {
        // NOTE(review): tid2/tid3 read past the first TSIZE*FPSIZE elements,
        // which is the only region main() initializes -- presumably intended
        // for this prefetch experiment; verify the padded allocation covers it.
        __prefetch_global_l2(&c[tid1]);
        c[tid1] = a[tid1] + b[tid1];
        __prefetch_global_l2(&c[tid2]);
        c[tid2] = a[tid2] + b[tid2];
        __prefetch_global_l2(&c[tid3]);
        c[tid3] = a[tid3] + b[tid3];
    }
}
// Round the requested allocation size up to the next multiple of 2 MiB
// (the large-page granularity: PSIZE * 512 = 4096 * 512 bytes).
//
// The previous version returned val / chunk -- a chunk COUNT, not bytes --
// plus, when there was a remainder, one chunk expressed in bytes.  For any
// val >= 2 MiB that is far smaller than the size actually requested.
size_t pad_2MB(size_t val)
{
    const size_t chunk = (size_t)4096 * 512;  // == PSIZE * 512 == 2 MiB
    size_t ret = (val / chunk) * chunk;       // round down to a chunk boundary
    if (val % chunk)
    {
        ret += chunk;                         // any remainder: add one more chunk
    }
    return ret;
}
// Driver: allocate three managed buffers, initialize them on the host, run
// the prefetch kernel once and synchronize.
//
// Error handling is done with explicit checks rather than assert(!...):
// with NDEBUG defined the whole assert expression -- including the
// allocation call itself -- would be compiled away.
int main(void)
{
    float* a = NULL;
    float* b = NULL;
    float* c = NULL;
    // All three buffers get the same size, padded to a 2 MiB boundary.
    const size_t bytes = pad_2MB(2 * sizeof(float) * TSIZE * FPSIZE);
    if (hipMallocManaged(&a, bytes) != hipSuccess ||
        hipMallocManaged(&b, bytes) != hipSuccess ||
        hipMallocManaged(&c, bytes) != hipSuccess)
    {
        fprintf(stderr, "hipMallocManaged failed\n");
        return 1;
    }
    for (size_t i = 0; i < TSIZE * FPSIZE; i++)
    {
        a[i] = i;
        b[i] = i;
        c[i] = i;
    }
    hipLaunchKernelGGL(( foo), dim3(TSIZE/BSIZE), dim3(BSIZE), 0, 0, a, b, c);
    // Synchronize and surface any launch/execution error.
    if (hipDeviceSynchronize() != hipSuccess)
    {
        fprintf(stderr, "kernel launch or execution failed\n");
        return 1;
    }
    hipFree(a);
    hipFree(b);
    hipFree(c);
    return 0;
}
| 4ea525769546bffe8e0579685c6d0df27b543be6.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <stdint.h>
#ifndef TSIZE
#define TSIZE 32
#endif
#define BSIZE 32
#define PSIZE 4096
#define FPSIZE (4096/sizeof(float))
#define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
#define PXL_GLOBAL_PTR "l"
#else
#define PXL_GLOBAL_PTR "r"
#endif
// Inline-PTX wrappers: request that the cache line containing <ptr> be
// fetched into L1 (global), L1 (uniform/generic), or L2 respectively.
// These are hints only; they do not block.
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l1(const void* const ptr)
{
    asm("prefetch.global.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr)
{
    asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l2(const void* const ptr)
{
    asm("prefetch.global.L2 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
// Read the per-device %globaltimer register (nanosecond-resolution clock).
static __device__ __inline__ uint64_t __nano(){
    uint64_t mclk;
    asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(mclk));
    return mclk ;
}
// Each of the first TSIZE threads sums one element from three regions of
// a/b (spaced FPSIZE*TSIZE floats, i.e. one page per thread, apart) into c,
// issuing an L2 prefetch of the destination before each store.
__global__ void foo(float* a, float* b, float* c)
{
    size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
    // One 4 KiB page (FPSIZE floats) per thread, three regions apart.
    size_t tid1 = tid * FPSIZE;
    size_t tid2 = tid1 + (FPSIZE * TSIZE);
    size_t tid3 = tid2 + (FPSIZE * TSIZE);
    if (tid < TSIZE)
    {
        // NOTE(review): tid2/tid3 read past the first TSIZE*FPSIZE elements,
        // which is the only region main() initializes -- presumably intended
        // for this prefetch experiment; verify the padded allocation covers it.
        __prefetch_global_l2(&c[tid1]);
        c[tid1] = a[tid1] + b[tid1];
        __prefetch_global_l2(&c[tid2]);
        c[tid2] = a[tid2] + b[tid2];
        __prefetch_global_l2(&c[tid3]);
        c[tid3] = a[tid3] + b[tid3];
    }
}
// Round the requested allocation size up to the next multiple of 2 MiB
// (the large-page granularity: PSIZE * 512 = 4096 * 512 bytes).
//
// The previous version returned val / chunk -- a chunk COUNT, not bytes --
// plus, when there was a remainder, one chunk expressed in bytes.  For any
// val >= 2 MiB that is far smaller than the size actually requested.
size_t pad_2MB(size_t val)
{
    const size_t chunk = (size_t)4096 * 512;  // == PSIZE * 512 == 2 MiB
    size_t ret = (val / chunk) * chunk;       // round down to a chunk boundary
    if (val % chunk)
    {
        ret += chunk;                         // any remainder: add one more chunk
    }
    return ret;
}
// Driver: allocate three managed buffers, initialize them on the host, run
// the prefetch kernel once and synchronize.
//
// Error handling is done with explicit checks rather than assert(!...):
// with NDEBUG defined the whole assert expression -- including the
// allocation call itself -- would be compiled away.
int main(void)
{
    float* a = NULL;
    float* b = NULL;
    float* c = NULL;
    // All three buffers get the same size, padded to a 2 MiB boundary.
    const size_t bytes = pad_2MB(2 * sizeof(float) * TSIZE * FPSIZE);
    if (cudaMallocManaged(&a, bytes) != cudaSuccess ||
        cudaMallocManaged(&b, bytes) != cudaSuccess ||
        cudaMallocManaged(&c, bytes) != cudaSuccess)
    {
        fprintf(stderr, "cudaMallocManaged failed\n");
        return 1;
    }
    for (size_t i = 0; i < TSIZE * FPSIZE; i++)
    {
        a[i] = i;
        b[i] = i;
        c[i] = i;
    }
    foo<<<TSIZE/BSIZE, BSIZE>>>(a, b, c);
    // Synchronize and surface any launch/execution error.
    if (cudaDeviceSynchronize() != cudaSuccess)
    {
        fprintf(stderr, "kernel launch or execution failed\n");
        return 1;
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
|
7fefc0435e911c85bbfb6a178cd10d0cdd3f3bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <thrust/sort.h>
//#include <inttypes.h>
#include "HashCalculatorGPU.h"
#include "Common.h"
#define BLOCK_SIZE 32
#define TOP_PER_THREAD_HAMMING_LIST_SIZE 32 * 10
// One thread per SIFT point: compute (1) a bucket id per bucket group from
// the first (LSH) projection matrix, (2) a per-dimension binary hash from
// the second projection matrix, and (3) that hash compressed into 64-bit
// words.  All projections are dense dot products over kDimSiftData.
__global__ void compute_hash_kernel(ImageData *ptr, int *d_firstProjMat, int *d_secondProjMat, int imageIndex)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < ptr->cntPoint) {
        // --- first hash: one kCntBucketBit-bit bucket id per group ---
        float sumFirstHash;
        for (int m = 0; m < kCntBucketGroup; m++){
            int bucketid = 0;
            for (int j = 0; j < kCntBucketBit; j++) {
                sumFirstHash = 0.0f;
                for (int k = 0; k < kDimSiftData; k++) {
                    sumFirstHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + k] * d_firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k];
                }
                // Sign of the projection contributes one bit of the bucket id.
                bucketid = (bucketid << 1) + (sumFirstHash > 0 ? 1 : 0);
            }
            ptr->deviceBucketIDSiftPoint[ptr->cntPoint * m + index] = bucketid;
        }
        // --- second hash: one sign bit per descriptor dimension ---
        float sumSecondHash;
        for (int m = 0; m < kDimSiftData; m++) {
            sumSecondHash = 0.0f;
            for (int j = 0; j < kDimSiftData; j++) {
                sumSecondHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + j] * d_secondProjMat[m * kDimSiftData + j];
            }
            ptr->deviceHashDataPtrList[index * kDimSiftData + m] = (sumSecondHash > 0 ? 1 : 0);
        }
        // --- pack the hash bits, kBitInCompHash per uint64_t word ---
        // NOTE(review): this indexes deviceHashDataPtrList with stride
        // kDimSiftData while the _revised kernel uses kDimHashData --
        // fine only if the two constants are equal; confirm.
        for (int dimCompHashIndex = 0; dimCompHashIndex < kDimCompHashData; dimCompHashIndex++)
        {
            uint64_t compHashBitVal = 0;
            int dimHashIndexLBound = dimCompHashIndex * kBitInCompHash;
            int dimHashIndexUBound = (dimCompHashIndex + 1) * kBitInCompHash;
            for (int dimHashIndex = dimHashIndexLBound; dimHashIndex < dimHashIndexUBound; dimHashIndex++)
            {
                compHashBitVal = (compHashBitVal << 1) + ptr->deviceHashDataPtrList[index * kDimSiftData + dimHashIndex]; // set the corresponding bit to 1/0
            }
            ptr->compHashDataPtrList[index * kDimCompHashData + dimCompHashIndex] = compHashBitVal;
        }
    }
}
// Launch one hashing kernel per image: one thread per SIFT point, rounded
// up to whole 1024-thread blocks, then wait for all launches to finish.
void compute_hash_GPU(ImageData **device_ptr, ImageData *host_ptr ,int img_cnt, int *d_firstProjMat, int *d_secondProjMat)
{
    const unsigned threads = 1024;
    for (int img = 0; img < img_cnt; ++img, ++host_ptr) {
        const unsigned blocks = (host_ptr->cntPoint + threads - 1) / threads;
        hipLaunchKernelGGL(( compute_hash_kernel), dim3(blocks), dim3(threads), 0, 0, device_ptr[img], d_firstProjMat, d_secondProjMat, img);
    }
    hipDeviceSynchronize();
}
// Revised hashing kernel: one block per SIFT point, one thread per
// descriptor dimension.  Dot products are computed cooperatively via a
// shared-memory tree reduction instead of per-thread loops.  The first
// projection matrix lives in a 3D pitched allocation (group x bit x dim);
// the second is a 2D pitched matrix.
__global__ void compute_hash_kernel_revised(ImageData *ptr, const hipPitchedPtr devPitchedPtr, const int *d_secondProjMat, const int pitch, int imageIndex)
{
    int index = blockIdx.x;   // uniform within the block, so the guard below does not diverge
    int tid = threadIdx.x;
    __shared__ float sumFirstHash[kDimSiftData];
    __shared__ float sumSecondHash[kDimSiftData];
    if (index < ptr->cntPoint) {
        //Calculate First Hash Values
        char* devptr = (char*)devPitchedPtr.ptr;
        // NOTE(review): this local shadows the 'pitch' parameter, which is
        // therefore never used -- the second-matrix row stride below comes
        // from devPitchedPtr.pitch instead.  Confirm the two are equal or
        // that this is intended.
        size_t pitch = devPitchedPtr.pitch;
        size_t slicepitch = pitch * kCntBucketBit;
        for (int d = 0; d < kCntBucketGroup; d++) {
            uint16_t bucketid = 0;
            char *slice = devptr + d * slicepitch;
            for (int y = 0; y < kCntBucketBit; y++) {
                int *row = (int*)(slice + y * pitch);
                // Per-dimension partial product, then tree-reduce in shared memory.
                sumFirstHash[tid] = ptr->deviceSiftDataPtrList[index * kDimSiftData + tid] * (*(row + tid));
                __syncthreads();
                for (int stride = kDimSiftData / 2; stride > 0; stride >>= 1) {
                    if (tid < stride) {
                        sumFirstHash[tid] += sumFirstHash[tid + stride];
                    }
                    __syncthreads();
                }
                if (tid == 0) {
                    // Sign of the projection contributes one bit of the bucket id.
                    bucketid = (bucketid << 1) + (sumFirstHash[0] > 0 ? 1 : 0);
                    ptr->deviceBucketIDSiftPoint[ptr->cntPoint * d + index] = bucketid;
                }
            }
        }
        //Calculate Second Hash Values
        for (int m = 0; m < kDimSiftData; m++) {
            int *matElement = (int*)((char*)d_secondProjMat + m * pitch);
            sumSecondHash[tid] = ptr->deviceSiftDataPtrList[index * kDimSiftData + tid] * (*(matElement + tid));
            __syncthreads();
            for (int stride = kDimSiftData / 2; stride > 0; stride >>= 1) {
                if (tid < stride) {
                    sumSecondHash[tid] += sumSecondHash[tid + stride];
                }
                __syncthreads();
            }
            if (tid == 0) {
                ptr->deviceHashDataPtrList[index * kDimSiftData + m] = (sumSecondHash[0] > 0 ? 1 : 0);
            }
        }
        // calculate the CompHash code
        // compress <kBitInCompHash> Hash code bits within a single <uint64_t> variable
        if (tid == 0) {
            for (int dimCompHashIndex = 0; dimCompHashIndex < kDimCompHashData; dimCompHashIndex++) {
                uint64_t compHashBitVal = 0;
                int dimHashIndexLBound = dimCompHashIndex * kBitInCompHash;
                int dimHashIndexUBound = (dimCompHashIndex + 1) * kBitInCompHash;
                for (int dimHashIndex = dimHashIndexLBound; dimHashIndex < dimHashIndexUBound; dimHashIndex++) {
                    compHashBitVal = (compHashBitVal << 1) + ptr->deviceHashDataPtrList[index * kDimHashData + dimHashIndex]; // set the corresponding bit to 1/0
                }
                ptr->compHashDataPtrList[index * kDimCompHashData + dimCompHashIndex] = compHashBitVal;
            }
        }
    }
}
// Launch the revised hashing kernel per image: one 128-thread block per
// SIFT point (one thread per descriptor dimension).  Note that
// (n*128 + 127)/128 == n, i.e. grid.x simply equals cntPoint.
void compute_hash_GPU_revised(ImageData **device_ptr, const ImageData *host_ptr, const int img_cnt, const hipPitchedPtr devPitchedPtr, const int *d_secondProjMat, const int pitch)
{
    const dim3 block(128);
    for (int img = 0; img < img_cnt; ++img, ++host_ptr) {
        const dim3 grid((host_ptr->cntPoint * block.x + block.x - 1) / block.x);
        hipLaunchKernelGGL(( compute_hash_kernel_revised) , dim3(grid), dim3(block), 0, 0, device_ptr[img], devPitchedPtr, d_secondProjMat, pitch, img);
    }
    hipDeviceSynchronize();
}
| 7fefc0435e911c85bbfb6a178cd10d0cdd3f3bb1.cu | #include <iostream>
#include <thrust/sort.h>
//#include <inttypes.h>
#include "HashCalculatorGPU.h"
#include "Common.h"
#define BLOCK_SIZE 32
#define TOP_PER_THREAD_HAMMING_LIST_SIZE 32 * 10
// One thread per SIFT point: compute (1) a bucket id per bucket group from
// the first (LSH) projection matrix, (2) a per-dimension binary hash from
// the second projection matrix, and (3) that hash compressed into 64-bit
// words.  All projections are dense dot products over kDimSiftData.
__global__ void compute_hash_kernel(ImageData *ptr, int *d_firstProjMat, int *d_secondProjMat, int imageIndex)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < ptr->cntPoint) {
        // --- first hash: one kCntBucketBit-bit bucket id per group ---
        float sumFirstHash;
        for (int m = 0; m < kCntBucketGroup; m++){
            int bucketid = 0;
            for (int j = 0; j < kCntBucketBit; j++) {
                sumFirstHash = 0.0f;
                for (int k = 0; k < kDimSiftData; k++) {
                    sumFirstHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + k] * d_firstProjMat[m * kDimSiftData * kCntBucketBit + j * kDimSiftData + k];
                }
                // Sign of the projection contributes one bit of the bucket id.
                bucketid = (bucketid << 1) + (sumFirstHash > 0 ? 1 : 0);
            }
            ptr->deviceBucketIDSiftPoint[ptr->cntPoint * m + index] = bucketid;
        }
        // --- second hash: one sign bit per descriptor dimension ---
        float sumSecondHash;
        for (int m = 0; m < kDimSiftData; m++) {
            sumSecondHash = 0.0f;
            for (int j = 0; j < kDimSiftData; j++) {
                sumSecondHash += ptr->deviceSiftDataPtrList[index * kDimSiftData + j] * d_secondProjMat[m * kDimSiftData + j];
            }
            ptr->deviceHashDataPtrList[index * kDimSiftData + m] = (sumSecondHash > 0 ? 1 : 0);
        }
        // --- pack the hash bits, kBitInCompHash per uint64_t word ---
        // NOTE(review): this indexes deviceHashDataPtrList with stride
        // kDimSiftData while the _revised kernel uses kDimHashData --
        // fine only if the two constants are equal; confirm.
        for (int dimCompHashIndex = 0; dimCompHashIndex < kDimCompHashData; dimCompHashIndex++)
        {
            uint64_t compHashBitVal = 0;
            int dimHashIndexLBound = dimCompHashIndex * kBitInCompHash;
            int dimHashIndexUBound = (dimCompHashIndex + 1) * kBitInCompHash;
            for (int dimHashIndex = dimHashIndexLBound; dimHashIndex < dimHashIndexUBound; dimHashIndex++)
            {
                compHashBitVal = (compHashBitVal << 1) + ptr->deviceHashDataPtrList[index * kDimSiftData + dimHashIndex]; // set the corresponding bit to 1/0
            }
            ptr->compHashDataPtrList[index * kDimCompHashData + dimCompHashIndex] = compHashBitVal;
        }
    }
}
// Launch one hashing kernel per image: one thread per SIFT point, rounded
// up to whole 1024-thread blocks, then wait for all launches to finish.
void compute_hash_GPU(ImageData **device_ptr, ImageData *host_ptr ,int img_cnt, int *d_firstProjMat, int *d_secondProjMat)
{
    const unsigned threads = 1024;
    for (int img = 0; img < img_cnt; ++img, ++host_ptr) {
        const unsigned blocks = (host_ptr->cntPoint + threads - 1) / threads;
        compute_hash_kernel<<<blocks, threads>>>(device_ptr[img], d_firstProjMat, d_secondProjMat, img);
    }
    cudaDeviceSynchronize();
}
// Revised hashing kernel: one block per SIFT point, one thread per
// descriptor dimension.  Dot products are computed cooperatively via a
// shared-memory tree reduction instead of per-thread loops.  The first
// projection matrix lives in a 3D pitched allocation (group x bit x dim);
// the second is a 2D pitched matrix.
__global__ void compute_hash_kernel_revised(ImageData *ptr, const cudaPitchedPtr devPitchedPtr, const int *d_secondProjMat, const int pitch, int imageIndex)
{
    int index = blockIdx.x;   // uniform within the block, so the guard below does not diverge
    int tid = threadIdx.x;
    __shared__ float sumFirstHash[kDimSiftData];
    __shared__ float sumSecondHash[kDimSiftData];
    if (index < ptr->cntPoint) {
        //Calculate First Hash Values
        char* devptr = (char*)devPitchedPtr.ptr;
        // NOTE(review): this local shadows the 'pitch' parameter, which is
        // therefore never used -- the second-matrix row stride below comes
        // from devPitchedPtr.pitch instead.  Confirm the two are equal or
        // that this is intended.
        size_t pitch = devPitchedPtr.pitch;
        size_t slicepitch = pitch * kCntBucketBit;
        for (int d = 0; d < kCntBucketGroup; d++) {
            uint16_t bucketid = 0;
            char *slice = devptr + d * slicepitch;
            for (int y = 0; y < kCntBucketBit; y++) {
                int *row = (int*)(slice + y * pitch);
                // Per-dimension partial product, then tree-reduce in shared memory.
                sumFirstHash[tid] = ptr->deviceSiftDataPtrList[index * kDimSiftData + tid] * (*(row + tid));
                __syncthreads();
                for (int stride = kDimSiftData / 2; stride > 0; stride >>= 1) {
                    if (tid < stride) {
                        sumFirstHash[tid] += sumFirstHash[tid + stride];
                    }
                    __syncthreads();
                }
                if (tid == 0) {
                    // Sign of the projection contributes one bit of the bucket id.
                    bucketid = (bucketid << 1) + (sumFirstHash[0] > 0 ? 1 : 0);
                    ptr->deviceBucketIDSiftPoint[ptr->cntPoint * d + index] = bucketid;
                }
            }
        }
        //Calculate Second Hash Values
        for (int m = 0; m < kDimSiftData; m++) {
            int *matElement = (int*)((char*)d_secondProjMat + m * pitch);
            sumSecondHash[tid] = ptr->deviceSiftDataPtrList[index * kDimSiftData + tid] * (*(matElement + tid));
            __syncthreads();
            for (int stride = kDimSiftData / 2; stride > 0; stride >>= 1) {
                if (tid < stride) {
                    sumSecondHash[tid] += sumSecondHash[tid + stride];
                }
                __syncthreads();
            }
            if (tid == 0) {
                ptr->deviceHashDataPtrList[index * kDimSiftData + m] = (sumSecondHash[0] > 0 ? 1 : 0);
            }
        }
        // calculate the CompHash code
        // compress <kBitInCompHash> Hash code bits within a single <uint64_t> variable
        if (tid == 0) {
            for (int dimCompHashIndex = 0; dimCompHashIndex < kDimCompHashData; dimCompHashIndex++) {
                uint64_t compHashBitVal = 0;
                int dimHashIndexLBound = dimCompHashIndex * kBitInCompHash;
                int dimHashIndexUBound = (dimCompHashIndex + 1) * kBitInCompHash;
                for (int dimHashIndex = dimHashIndexLBound; dimHashIndex < dimHashIndexUBound; dimHashIndex++) {
                    compHashBitVal = (compHashBitVal << 1) + ptr->deviceHashDataPtrList[index * kDimHashData + dimHashIndex]; // set the corresponding bit to 1/0
                }
                ptr->compHashDataPtrList[index * kDimCompHashData + dimCompHashIndex] = compHashBitVal;
            }
        }
    }
}
// Launch the revised hashing kernel per image: one 128-thread block per
// SIFT point (one thread per descriptor dimension).  Note that
// (n*128 + 127)/128 == n, i.e. grid.x simply equals cntPoint.
void compute_hash_GPU_revised(ImageData **device_ptr, const ImageData *host_ptr, const int img_cnt, const cudaPitchedPtr devPitchedPtr, const int *d_secondProjMat, const int pitch)
{
    const dim3 block(128);
    for (int img = 0; img < img_cnt; ++img, ++host_ptr) {
        const dim3 grid((host_ptr->cntPoint * block.x + block.x - 1) / block.x);
        compute_hash_kernel_revised <<<grid, block>>> (device_ptr[img], devPitchedPtr, d_secondProjMat, pitch, img);
    }
    cudaDeviceSynchronize();
}
|
56b6f6e5316463dc47289a58aa674549cec3dc4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
#define MUL(x,y) (x*y)
__constant__ uint32_t constAlpha;
__constant__ float constHueColorSpaceMat[9];
// Convert one 10-bit YUV triple (yuvi[0..2]) to floating-point RGB.
// Uses fixed PAL/NTSC-style coefficients:
//   R = Y + 1.140 V,  G = Y - 0.395 U - 0.581 V,  B = Y + 2.032 U
// (the hue-adjustable constHueColorSpaceMat path was dead code and removed).
__device__ void YUV2RGB(uint32_t *yuvi, float *red, float *green, float *blue)
{
    // Chroma samples are centered around the 10-bit midpoint (512).
    const float luma = float(yuvi[0]);
    const float u    = float(yuvi[1]) - 512.0f;
    const float v    = float(yuvi[2]) - 512.0f;
    *red   = luma + 1.140f * v;
    *green = luma - 0.395f * u - 0.581f * v;
    *blue  = luma + 2.032f * u;
}
// Clamp 8-bit-range float channels and pack them as R|G|B|A from the most
// significant byte down (despite the local "ARGB" naming, alpha is the
// low byte).
__device__ uint32_t RGBAPACK_8bit(float red, float green, float blue, uint32_t alpha)
{
    // Clamp each channel to the displayable [0, 255] range.
    red   = min(max(red,   0.0f), 255.0f);
    green = min(max(green, 0.0f), 255.0f);
    blue  = min(max(blue,  0.0f), 255.0f);
    const uint32_t r = (uint32_t)red;
    const uint32_t g = (uint32_t)green;
    const uint32_t b = (uint32_t)blue;
    return (r << 24) | (g << 16) | (b << 8) | alpha;
}
// Clamp 10-bit-range float channels, drop the two low bits of each to get
// 8 bits per channel, and pack as R|G|B|A from the most significant byte
// down (alpha is the low byte).
__device__ uint32_t RGBAPACK_10bit(float red, float green, float blue, uint32_t alpha)
{
    // Clamp each channel to the [0, 1023] 10-bit range.
    red   = min(max(red,   0.0f), 1023.f);
    green = min(max(green, 0.0f), 1023.f);
    blue  = min(max(blue,  0.0f), 1023.f);
    const uint32_t r = (uint32_t)red   >> 2;
    const uint32_t g = (uint32_t)green >> 2;
    const uint32_t b = (uint32_t)blue  >> 2;
    return (r << 24) | (g << 16) | (b << 8) | alpha;
}
// CUDA kernel for outputing the final ARGB output from NV12;
/*extern "C"*/
// Grayscale passthrough: read the luma plane only and replicate it into all
// three color channels of the packed output.  Each thread handles two
// horizontally adjacent pixels.
__global__ void Passthru(uint32_t *srcImage, size_t nSourcePitch,
                         uint32_t *dstImage, size_t nDestPitch,
                         uint32_t width, uint32_t height)
{
    int x, y;
    uint32_t yuv101010Pel[2];
    // NOTE: this aligned-pitch computation is dead -- it is overwritten with
    // nSourcePitch two lines below.
    uint32_t processingPitch = ((width) + 63) & ~63;
    uint32_t dstImagePitch = nDestPitch >> 2;   // bytes -> 32-bit pixels
    uint8_t *srcImageU8 = (uint8_t *)srcImage;
    processingPitch = nSourcePitch;
    // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width)
        return; //x = width - 1;
    if (y >= height)
        return; // y = height - 1;
    // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
    // if we move to texture we could read 4 luminance values
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x    ]);
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]);
    // this steps performs the color conversion
    float luma[2];
    luma[0] = (yuv101010Pel[0] & 0x00FF);
    luma[1] = (yuv101010Pel[1] & 0x00FF);
    // Clamp the results to RGBA
    dstImage[y * dstImagePitch + x     ] = RGBAPACK_8bit(luma[0], luma[0], luma[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_8bit(luma[1], luma[1], luma[1], constAlpha);
}
// CUDA kernel for outputing the final ARGB output from NV12;
/*extern "C"*/
// Convert an NV12 surface (luma plane followed by an interleaved CbCr plane
// at half vertical resolution) to packed 32-bit output.  Each thread
// converts two horizontally adjacent pixels, which share one CbCr sample;
// chroma is linearly interpolated between rows on odd scanlines.
__global__ void NV12ToARGB(uint32_t *srcImage, size_t nSourcePitch,
                           uint32_t *dstImage, size_t nDestPitch,
                           uint32_t width, uint32_t height)
{
    int x, y;
    uint32_t yuv101010Pel[2];
    // NOTE: this aligned-pitch computation is dead -- it is overwritten with
    // nSourcePitch two lines below.
    uint32_t processingPitch = ((width) + 63) & ~63;
    uint32_t dstImagePitch = nDestPitch >> 2;   // bytes -> 32-bit pixels
    uint8_t *srcImageU8 = (uint8_t *)srcImage;
    processingPitch = nSourcePitch;
    // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width)
        return; //x = width - 1;
    if (y >= height)
        return; // y = height - 1;
    // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
    // if we move to texture we could read 4 luminance values
    // Luma is promoted from 8 to 10 bits (<< 2) to match the packed layout.
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x    ]) << 2;
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
    uint32_t chromaOffset    = processingPitch * height;   // start of the CbCr plane
    int y_chroma = y >> 1;
    if (y & 1)  // odd scanline ?
    {
        uint32_t chromaCb;
        uint32_t chromaCr;
        chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x    ];
        chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
        if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
        {
            chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x    ] + 1) >> 1;
            chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
        }
        // Pack Cb/Cr into bits [19:10] and [29:20] alongside the 10-bit luma.
        yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }
    else
    {
        yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }
    // this steps performs the color conversion
    uint32_t yuvi[6];
    float red[2], green[2], blue[2];
    // Unpack the two 10-bit Y/Cb/Cr triples from the packed words.
    yuvi[0] = (yuv101010Pel[0] &   COLOR_COMPONENT_MASK);
    yuvi[1] = ((yuv101010Pel[0] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
    yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
    yuvi[3] = (yuv101010Pel[1] &   COLOR_COMPONENT_MASK);
    yuvi[4] = ((yuv101010Pel[1] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
    yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
    // YUV to RGB Transformation conversion
    YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
    YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
    // Clamp the results to RGBA
    dstImage[y * dstImagePitch + x     ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
}
bool nv12ColorspaceSetup = false;
// cudaNV12ToARGB32
// Host wrapper: convert a pitched NV12 surface on the device to a packed
// 8-bit RGBA image.  Validates arguments, ensures the colorspace constants
// are initialized, and launches NV12ToARGB with two pixels per thread.
hipError_t cudaNV12ToRGBA( uint8_t* srcDev, size_t srcPitch, uchar4* destDev, size_t destPitch, size_t width, size_t height )
{
    if( !srcDev || !destDev )
        return hipErrorInvalidDevicePointer;
    if( srcPitch == 0 || destPitch == 0 || width == 0 || height == 0 )
        return hipErrorInvalidValue;
    // NOTE(review): nv12ColorspaceSetup is never set to true in this file;
    // unless cudaNV12SetupColorspace() flips it, setup reruns on every call.
    if( !nv12ColorspaceSetup )
        cudaNV12SetupColorspace();
    // Grid x is halved because each thread converts two adjacent pixels.
    const dim3 blockDim(32,16,1);
    const dim3 gridDim((width+(2*blockDim.x-1))/(2*blockDim.x), (height+(blockDim.y-1))/blockDim.y, 1);
    hipLaunchKernelGGL(( NV12ToARGB), dim3(gridDim), dim3(blockDim), 0, 0, (uint32_t*)srcDev, srcPitch, (uint32_t*)destDev, destPitch, width, height );
    return CUDA(hipGetLastError());
}
// Convenience overload: assumes tightly-packed rows (pitch == width elements).
hipError_t cudaNV12ToRGBA( uint8_t* srcDev, uchar4* destDev, size_t width, size_t height )
{
	const size_t srcPitch  = width * sizeof(uint8_t);
	const size_t destPitch = width * sizeof(uchar4);
	return cudaNV12ToRGBA(srcDev, srcPitch, destDev, destPitch, width, height);
}
//-------------------------------------------------------------------------------------------------------------------------
// Kernel: convert NV12 (planar Y plane followed by interleaved CbCr at half
// vertical resolution) into float4 RGBA. Each thread converts two
// horizontally adjacent pixels; samples are widened to 10 bits and packed
// into a 10:10:10 word before the matrix conversion.
// NOTE(review): dstImage rows are indexed with `width`, not nDestPitch, so
// the destination pitch parameter is effectively ignored; the unconditional
// write to x+1 also assumes an even width — confirm callers guarantee both.
__global__ void NV12ToRGBAf(uint32_t* srcImage, size_t nSourcePitch,
float4* dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
// The 64-byte-aligned pitch computed above is immediately overridden by the
// caller-supplied source pitch.
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
#if 1
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
// The interleaved CbCr plane starts right after the Y plane.
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this steps performs the color conversion
// Unpack the two 10:10:10 words back into separate 10-bit Y/U/V components.
uint32_t yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
//printf("cuda thread %i %i %f %f %f\n", x, y, red[0], green[0], blue[0]);
// Rescale from the 10-bit range (0..1023) to 0..255; alpha is fixed at 1.
const float s = 1.0f / 1024.0f * 255.0f;
dstImage[y * width + x] = make_float4(red[0] * s, green[0] * s, blue[0] * s, 1.0f);
dstImage[y * width + x + 1] = make_float4(red[1] * s, green[1] * s, blue[1] * s, 1.0f);
#else
//printf("cuda thread %i %i %i %i \n", x, y, width, height);
dstImage[y * width + x] = make_float4(1.0f, 0.0f, 0.0f, 1.0f);
dstImage[y * width + x + 1] = make_float4(1.0f, 0.0f, 0.0f, 1.0f);
#endif
}
// cudaNV12ToRGBA
// Convert a device-resident NV12 image into float4 RGBA (components scaled
// to 0..255, alpha fixed at 1). Validates arguments, lazily initializes the
// colorspace constants, then launches the NV12ToRGBAf kernel.
hipError_t cudaNV12ToRGBAf( uint8_t* srcDev, size_t srcPitch, float4* destDev, size_t destPitch, size_t width, size_t height )
{
	if( !srcDev || !destDev )
		return hipErrorInvalidDevicePointer;
	if( srcPitch == 0 || destPitch == 0 || width == 0 || height == 0 )
		return hipErrorInvalidValue;
	if( !nv12ColorspaceSetup )
		cudaNV12SetupColorspace();
	const dim3 blockDim(8,8,1);
	// The kernel converts 2 horizontal pixels per thread, so size the grid's
	// x dimension for blockDim.x*2 pixels per block. The previous
	// iDivUp(width, blockDim.x) launched twice as many x-blocks as needed;
	// the excess threads only hit the `x >= width` guard and returned, so
	// output is unchanged — this just removes the wasted launches.
	const dim3 gridDim(iDivUp(width, blockDim.x * 2), iDivUp(height, blockDim.y), 1);
	hipLaunchKernelGGL(( NV12ToRGBAf), dim3(gridDim), dim3(blockDim), 0, 0, (uint32_t*)srcDev, srcPitch, destDev, destPitch, width, height );
	return CUDA(hipGetLastError());
}
// Convenience overload: assumes tightly-packed rows (pitch == width elements).
hipError_t cudaNV12ToRGBAf( uint8_t* srcDev, float4* destDev, size_t width, size_t height )
{
	const size_t srcPitch  = width * sizeof(uint8_t);
	const size_t destPitch = width * sizeof(float4);
	return cudaNV12ToRGBAf(srcDev, srcPitch, destDev, destPitch, width, height);
}
// cudaNV12SetupColorspace
// Upload the hue-rotated YUV->RGB conversion matrix and the constant alpha
// byte to device __constant__ memory. Called lazily by the conversion
// wrappers; sets nv12ColorspaceSetup on success.
// NOTE(review): callers invoke this with no argument, so `hue` presumably
// defaults to 0 in the header — confirm against the declaration.
hipError_t cudaNV12SetupColorspace( float hue )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
// ITU-R selection is compiled in, not runtime-configurable.
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
// Copy the matrix into the device-side constHueColorSpaceMat symbol.
if( CUDA_FAILED(hipMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return hipErrorInvalidSymbol;
// constAlpha holds 0xff in the top byte, matching RGBAPACK's layout.
uint32_t cudaAlpha = ((uint32_t)0xff<< 24);
if( CUDA_FAILED(hipMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return hipErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return hipSuccess;
}
| 56b6f6e5316463dc47289a58aa674549cec3dc4f.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
#define MUL(x,y) (x*y)
__constant__ uint32_t constAlpha;
__constant__ float constHueColorSpaceMat[9];
// Convert one 10-bit YUV sample (yuvi[0..2] = Y, U, V) to RGB.
// Chroma is biased by half the 10-bit range (512) before the transform.
// NOTE(review): this uses fixed coefficients (R = Y + 1.140V, etc.) and
// bypasses the hue-adjustable constHueColorSpaceMat uploaded by
// cudaNV12SetupColorspace — confirm that is intentional.
__device__ void YUV2RGB(uint32_t *yuvi, float *red, float *green, float *blue)
{
    const float luma = float(yuvi[0]);
    const float u    = float(yuvi[1]) - 512.0f;
    const float v    = float(yuvi[2]) - 512.0f;
    // R = Y + 1.140 V ; G = Y - 0.395 U - 0.581 V ; B = Y + 2.032 U
    *red   = luma + 1.140f * v;
    *green = luma - 0.395f * u - 0.581f * v;
    *blue  = luma + 2.032f * u;
}
// Clamp float channels to [0, 255] and pack them into one 32-bit word laid
// out as (R << 24 | G << 16 | B << 8 | alpha).
__device__ uint32_t RGBAPACK_8bit(float red, float green, float blue, uint32_t alpha)
{
    const float r = min(max(red,   0.0f), 255.0f);
    const float g = min(max(green, 0.0f), 255.0f);
    const float b = min(max(blue,  0.0f), 255.0f);
    return (((uint32_t)r) << 24)
         | (((uint32_t)g) << 16)
         | (((uint32_t)b) << 8)
         | alpha;
}
// Clamp float channels to the 10-bit range [0, 1023], keep the top 8 bits
// of each (>> 2), and pack them as (R << 24 | G << 16 | B << 8 | alpha).
__device__ uint32_t RGBAPACK_10bit(float red, float green, float blue, uint32_t alpha)
{
    const uint32_t r = (uint32_t)min(max(red,   0.0f), 1023.f);
    const uint32_t g = (uint32_t)min(max(green, 0.0f), 1023.f);
    const uint32_t b = (uint32_t)min(max(blue,  0.0f), 1023.f);
    return ((r >> 2) << 24)
         | ((g >> 2) << 16)
         | ((b >> 2) << 8)
         | alpha;
}
// CUDA kernel for outputing the final ARGB output from NV12;
/*extern "C"*/
// Kernel: luma-only passthrough — reads two Y samples per thread and writes
// them out as grayscale RGBA words (R=G=B=Y, alpha from constAlpha).
// Used as a debugging/diagnostic path alongside NV12ToARGB.
__global__ void Passthru(uint32_t *srcImage, size_t nSourcePitch,
uint32_t *dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
// Destination pitch is in bytes; divide by 4 to index 32-bit pixels.
uint32_t dstImagePitch = nDestPitch >> 2;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
// The aligned-pitch computation above is overridden by the caller's pitch.
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]);
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]);
// this steps performs the color conversion
float luma[2];
luma[0] = (yuv101010Pel[0] & 0x00FF);
luma[1] = (yuv101010Pel[1] & 0x00FF);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_8bit(luma[0], luma[0], luma[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_8bit(luma[1], luma[1], luma[1], constAlpha);
}
// CUDA kernel for outputing the final ARGB output from NV12;
/*extern "C"*/
// Kernel: convert NV12 (planar Y plane followed by interleaved CbCr at half
// vertical resolution) into packed 32-bit RGBA. Each thread converts two
// horizontally adjacent pixels; samples are widened to 10 bits and packed
// into a 10:10:10 word before conversion and final 8-bit packing.
// NOTE(review): the unconditional write to x+1 assumes an even width —
// confirm callers guarantee it.
__global__ void NV12ToARGB(uint32_t *srcImage, size_t nSourcePitch,
uint32_t *dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
// Destination pitch is in bytes; divide by 4 to index 32-bit pixels.
uint32_t dstImagePitch = nDestPitch >> 2;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
// The aligned-pitch computation above is overridden by the caller's pitch.
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
// The interleaved CbCr plane starts right after the Y plane.
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this steps performs the color conversion
// Unpack the two 10:10:10 words back into separate 10-bit Y/U/V components.
uint32_t yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
}
bool nv12ColorspaceSetup = false;
// cudaNV12ToARGB32
// Convert a device-resident NV12 image into packed 8-bit RGBA (uchar4).
// Validates the pointers and dimensions, lazily uploads the colorspace
// constants, then launches NV12ToARGB (which converts 2 pixels per thread
// in the x direction).
cudaError_t cudaNV12ToRGBA( uint8_t* srcDev, size_t srcPitch, uchar4* destDev, size_t destPitch, size_t width, size_t height )
{
	if( srcDev == NULL || destDev == NULL )
		return cudaErrorInvalidDevicePointer;
	if( !(srcPitch && destPitch && width && height) )
		return cudaErrorInvalidValue;
	// One-time upload of the hue matrix and the constant alpha byte.
	if( !nv12ColorspaceSetup )
		cudaNV12SetupColorspace();
	const dim3 block(32, 16, 1);
	const size_t spanX = 2 * block.x;   // each thread covers 2 horizontal pixels
	const dim3 grid((width + spanX - 1) / spanX, (height + block.y - 1) / block.y, 1);
	NV12ToARGB<<<grid, block>>>( (uint32_t*)srcDev, srcPitch, (uint32_t*)destDev, destPitch, width, height );
	return CUDA(cudaGetLastError());
}
// Convenience overload: assumes tightly-packed rows (pitch == width elements).
cudaError_t cudaNV12ToRGBA( uint8_t* srcDev, uchar4* destDev, size_t width, size_t height )
{
	const size_t srcPitch  = width * sizeof(uint8_t);
	const size_t destPitch = width * sizeof(uchar4);
	return cudaNV12ToRGBA(srcDev, srcPitch, destDev, destPitch, width, height);
}
//-------------------------------------------------------------------------------------------------------------------------
// Kernel: convert NV12 (planar Y plane followed by interleaved CbCr at half
// vertical resolution) into float4 RGBA. Each thread converts two
// horizontally adjacent pixels; samples are widened to 10 bits and packed
// into a 10:10:10 word before the matrix conversion.
// NOTE(review): dstImage rows are indexed with `width`, not nDestPitch, so
// the destination pitch parameter is effectively ignored; the unconditional
// write to x+1 also assumes an even width — confirm callers guarantee both.
__global__ void NV12ToRGBAf(uint32_t* srcImage, size_t nSourcePitch,
float4* dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
// The aligned-pitch computation above is overridden by the caller's pitch.
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
#if 1
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
// The interleaved CbCr plane starts right after the Y plane.
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this steps performs the color conversion
// Unpack the two 10:10:10 words back into separate 10-bit Y/U/V components.
uint32_t yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
//printf("cuda thread %i %i %f %f %f\n", x, y, red[0], green[0], blue[0]);
// Rescale from the 10-bit range (0..1023) to 0..255; alpha is fixed at 1.
const float s = 1.0f / 1024.0f * 255.0f;
dstImage[y * width + x] = make_float4(red[0] * s, green[0] * s, blue[0] * s, 1.0f);
dstImage[y * width + x + 1] = make_float4(red[1] * s, green[1] * s, blue[1] * s, 1.0f);
#else
//printf("cuda thread %i %i %i %i \n", x, y, width, height);
dstImage[y * width + x] = make_float4(1.0f, 0.0f, 0.0f, 1.0f);
dstImage[y * width + x + 1] = make_float4(1.0f, 0.0f, 0.0f, 1.0f);
#endif
}
// cudaNV12ToRGBA
// Convert a device-resident NV12 image into float4 RGBA (components scaled
// to 0..255, alpha fixed at 1). Validates arguments, lazily initializes the
// colorspace constants, then launches the NV12ToRGBAf kernel.
cudaError_t cudaNV12ToRGBAf( uint8_t* srcDev, size_t srcPitch, float4* destDev, size_t destPitch, size_t width, size_t height )
{
	if( !srcDev || !destDev )
		return cudaErrorInvalidDevicePointer;
	if( srcPitch == 0 || destPitch == 0 || width == 0 || height == 0 )
		return cudaErrorInvalidValue;
	if( !nv12ColorspaceSetup )
		cudaNV12SetupColorspace();
	const dim3 blockDim(8,8,1);
	// The kernel converts 2 horizontal pixels per thread, so size the grid's
	// x dimension for blockDim.x*2 pixels per block. The previous
	// iDivUp(width, blockDim.x) launched twice as many x-blocks as needed;
	// the excess threads only hit the `x >= width` guard and returned, so
	// output is unchanged — this just removes the wasted launches.
	const dim3 gridDim(iDivUp(width, blockDim.x * 2), iDivUp(height, blockDim.y), 1);
	NV12ToRGBAf<<<gridDim, blockDim>>>( (uint32_t*)srcDev, srcPitch, destDev, destPitch, width, height );
	return CUDA(cudaGetLastError());
}
// Convenience overload: assumes tightly-packed rows (pitch == width elements).
cudaError_t cudaNV12ToRGBAf( uint8_t* srcDev, float4* destDev, size_t width, size_t height )
{
	const size_t srcPitch  = width * sizeof(uint8_t);
	const size_t destPitch = width * sizeof(float4);
	return cudaNV12ToRGBAf(srcDev, srcPitch, destDev, destPitch, width, height);
}
// cudaNV12SetupColorspace
// Upload the hue-rotated YUV->RGB conversion matrix and the constant alpha
// byte to device __constant__ memory. Called lazily by the conversion
// wrappers; sets nv12ColorspaceSetup on success.
// NOTE(review): callers invoke this with no argument, so `hue` presumably
// defaults to 0 in cudaYUV.h — confirm against the declaration.
cudaError_t cudaNV12SetupColorspace( float hue )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
// ITU-R selection is compiled in, not runtime-configurable.
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
// Copy the matrix into the device-side constHueColorSpaceMat symbol.
if( CUDA_FAILED(cudaMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return cudaErrorInvalidSymbol;
// constAlpha holds 0xff in the top byte, matching RGBAPACK's layout.
uint32_t cudaAlpha = ((uint32_t)0xff<< 24);
if( CUDA_FAILED(cudaMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return cudaErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return cudaSuccess;
}
|
ad7ee65780d13bb0b556cf5d9391fc9ed82c2407.hip | // !!! This is a file automatically generated by hipify!!!
// Includes, system
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
#include <gloop/gloop.h>
#include <gloop/statistics.h>
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
unsigned int cuda_calls = 0;
// Debugger hook: CUDA_SAFE_CALL jumps here on failure, so a breakpoint on
// this function stops execution at the first failing CUDA call.
void trap_dbg()
{
    fputs("Trapped\n", stderr);
}
#define CUDA_SAFE_CALL(call) \
do { \
cuda_calls++; \
hipError_t err = call; \
if (hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, hipGetErrorString(err)); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CU_SAFE_CALL_NO_SYNC(call) \
do { \
hipError_t err = call; \
if (hipSuccess != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CUT_DEVICE_INIT_DRV(cuDevice) \
do { \
cuDevice = 0; \
int deviceCount = 0; \
hipError_t err = hipInit(0); \
if (hipSuccess == err) \
CU_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC(hipDeviceComputeCapability(&major, &minor, dev)); \
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} \
else \
CU_SAFE_CALL_NO_SYNC(hipDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) \
do { \
CUDA_SAFE_CALL(hipBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while (0)
#define BIND_TEX_ARRAY(tex, arr, desc) \
do { \
CUDA_SAFE_CALL(hipBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC(ptr, size) \
do { \
hipMalloc(ptr, size); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) \
do { \
hipMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) \
do { \
hipMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
extern "C" void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C" void getReferenceString(const char* filename, char** refstr, size_t* reflen);
extern "C" void createTreeTexture(const char* filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char* dotfilename,
const char* texfilename);
extern "C" void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C" int lookupNumLeaves(ReferencePage* page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C" void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAligments);
// Allocate a zero-initialized Timer_t and hand it back as an opaque char*
// handle (callers pass it to startTimer/stopTimer/getTimerValue/deleteTimer).
char* createTimer()
{
    struct Timer_t* timer = (struct Timer_t*)calloc(1, sizeof(struct Timer_t));
    return (char*)timer;
}
// Record the current wall-clock time as the timer's start point.
void startTimer(char* ptr)
{
    struct Timer_t* timer = (struct Timer_t*)ptr;
    gettimeofday(&timer->start_m, NULL);
}
// Record the current wall-clock time as the timer's end point.
void stopTimer(char* ptr)
{
    struct Timer_t* timer = (struct Timer_t*)ptr;
    gettimeofday(&timer->end_m, NULL);
}
// Return the elapsed time between startTimer and stopTimer in milliseconds.
// A NULL handle yields 0 with a warning; a timer that was never stopped is
// stopped implicitly here.
float getTimerValue(char* ptr)
{
    Timer_t* timer = (Timer_t*)ptr;
    if (timer == NULL) {
        fprintf(stderr, "Uninitialized timer!!!\n");
        return 0.0;
    }
    // end_m.tv_sec == 0 means stopTimer was never called; stop now.
    if (timer->end_m.tv_sec == 0)
        stopTimer(ptr);
    const double sec_part  = 1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec);
    const double usec_part = 0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec);
    return (float)(sec_part + usec_part);
}
// Release a timer handle allocated by createTimer.
void deleteTimer(char* ptr)
{
    free(ptr);
}
// Load the reference string from disk into ref->str/ref->len, accumulating
// the load time into ref->t_load_from_disk. Returns 0 on success, -1 on a
// NULL argument.
extern "C" int createReference(const char* fromFile, Reference* ref)
{
    if (fromFile == NULL || ref == NULL)
        return -1;
    char* timer = createTimer();
    startTimer(timer);
    getReferenceString(fromFile, &(ref->str), &(ref->len));
    stopTimer(timer);
    ref->t_load_from_disk += getTimerValue(timer);
    deleteTimer(timer);
    return 0;
}
// Free all host-side buffers owned by a Reference and reset its string
// fields. Device-side buffers are not touched here. Always returns 0.
extern "C" int destroyReference(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
// NOTE(review): ctx->full_ref aliases ref->str (see createMatchContext) and
// destroyMatchContext frees full_ref — if both teardown paths run on the
// same string this is a double free. Verify the intended ownership.
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
// Open the query FASTA file read-only and stash the descriptor in the
// QuerySet. Exits the process if the file cannot be opened. Returns 0.
extern "C" int createQuerySet(const char* fromFile, QuerySet* queries)
{
    fprintf(stderr, "Opening %s...\n", fromFile);
    const int fd = open(fromFile, O_RDONLY);
    if (fd < 0) {
        fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
        exit(1);
    }
    queries->qfile = fd;
    return 0;
}
// Close the query file descriptor if one is open. Always returns 0.
extern "C" int destroyQuerySet(QuerySet* queries)
{
    const int fd = queries->qfile;
    if (fd)
        close(fd);
    return 0;
}
// Intentionally a no-op: error-code-to-string reporting is not implemented,
// but the symbol is kept because it is part of the exported mummergpu API.
extern "C" void printStringForError(int err)
{
}
// Populate a MatchContext with the reference, query set, and matching
// options, then initialize the gloop host loop/context used to drive GPU
// work. `matches` is currently unused here. Returns 0.
extern "C" int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
bool on_cpu,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
char* dotfilename,
char* texfilename,
MatchContext* ctx)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
ctx->queries = queries;
ctx->ref = ref;
// full_ref aliases ref->str — the context does not copy the string.
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
ctx->on_cpu = on_cpu;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
ctx->dotfilename = dotfilename;
ctx->texfilename = texfilename;
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
// gloop initialization.
ctx->hostLoop = gloop::HostLoop::create(0).release();
// FIXME, choose appropriate physical TBs.
ctx->hostContext = gloop::HostContext::create(*ctx->hostLoop, dim3(448)).release();
}
return 0;
}
// Tear down a MatchContext: free the reference string it aliases, destroy
// the gloop host context/loop, and close the query file. Always returns 0.
extern "C" int destroyMatchContext(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
// full_ref aliases ref->str (set in createMatchContext); freeing it here is
// safe only while the destroyReference call below stays commented out,
// since destroyReference frees the same pointer.
free(ctx->full_ref);
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
delete ctx->hostContext;
delete ctx->hostLoop;
}
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
return 0;
}
// Build the suffix-tree textures for the reference substring
// full_ref[begin, end): copies the substring into ref->str (framed with a
// leading 's' and trailing "$"), builds the node/children textures via
// createTreeTexture, and (under REORDER_REF) reorders the reference string
// into a blocked layout for better texture locality. Timing is accumulated
// into `statistics`.
void buildReferenceTexture(Reference* ref,
char* full_ref,
size_t begin,
size_t end,
int min_match_len,
char* dotfilename,
char* texfilename,
Statistics* statistics)
{
fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren* childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
char* loadreftimer = createTimer();
startTimer(loadreftimer);
// len = substring + sentinel 's' prefix + "$\0" suffix (3 extra bytes).
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
// strncpy does not NUL-terminate; the strcpy of "$" supplies both the
// terminator character and the trailing NUL.
strcpy(ref->str + ref->len - 2, "$");
stopTimer(loadreftimer);
statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width,
&node_height,
&children_height,
&aux_data,
&num_nodes,
min_match_len,
statistics,
dotfilename,
texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
// Re-lay the reference string into 4-row blocks of a 65536-wide 2D array
// so neighboring characters land in nearby texture rows.
char* reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char*)malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
// Fill with a sentinel character so unused padding cells are recognizable.
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y * refpitch + x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
// Query free/total device memory. Under the device emulator (which cannot
// call cuMemGetInfo) fixed 512MB/768MB figures are reported instead.
// NOTE(review): cuMemGetInfo is the CUDA driver-API name left unconverted
// by hipify — confirm it resolves in the HIP build.
void boardMemory(size_t* free_mem, size_t* total_mem)
{
// The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
*free_mem = 512 * 1024 * 1024;
*total_mem = 768 * 1024 * 1024;
#else
CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
// Upload the reference string to the GPU. The storage strategy is chosen at
// compile time: REFTEX selects texture-bound storage (a 2D hipArray when
// REORDER_REF, otherwise a linear texture binding), while the non-REFTEX
// path uses plain (optionally pitched) device memory. On the CPU path the
// device pointer is simply left NULL. Transfer time is accumulated into
// ctx->statistics.t_ref_str_to_board.
void loadReferenceTexture(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
// Serialize against other kernel launches on this host loop.
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
hipChannelFormatDesc refTextureDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned);
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
// 2D texture array holding the block-reordered reference.
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_ref_array),
&refTextureDesc,
ref->pitch,
numrows);
CUDA_SAFE_CALL(hipMemcpyToArray((hipArray*)(ref->d_ref_array),
0,
0,
ref->h_ref_array,
numrows * ref->pitch,
hipMemcpyHostToDevice));
reftex.addressMode[0] = hipAddressModeClamp;
reftex.addressMode[1] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false;
BIND_TEX_ARRAY(reftex, (hipArray*)ref->d_ref_array, refTextureDesc);
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
// Linear device buffer bound to a 1D texture.
CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL(hipMemcpy((void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice));
reftex.addressMode[0] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc refDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
// Pitched global-memory copy of the block-reordered reference.
size_t refpitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_ref_array),
&refpitch,
ref->pitch * sizeof(char),
numrows);
CUDA_SAFE_CALL(hipMemcpy2D((ref->d_ref_array),
refpitch,
ref->h_ref_array,
ref->pitch,
ref->pitch * sizeof(char),
numrows,
hipMemcpyHostToDevice));
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
// Plain linear global-memory copy of the raw reference string.
CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL(hipMemcpy((void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice));
ctx->ref->bytes_on_board += ref->len;
#endif
#endif
stopTimer(toboardtimer);
ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
// CPU matching path: nothing is uploaded.
ref->d_ref_array = NULL;
}
}
// Releases the device-side reference string allocated by
// loadReferenceTexture and, when REFTEX is set, unbinds the texture.
// The REORDER_REF && REFTEX path allocated a CUDA array; every other
// path used linear device memory, hence the matching free call.
void unloadReferenceString(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
#if REFTEX
CUDA_SAFE_CALL(hipUnbindTexture(reftex));
#endif
#if REORDER_REF && REFTEX
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_array)));
#else
CUDA_SAFE_CALL(hipFree((ref->d_ref_array)));
#endif
ref->d_ref_array = NULL;
}
// Frees the device-side suffix-tree node and children buffers that
// loadReference uploaded, unbinding textures first where they were
// bound. The free call must match the allocation style chosen by the
// REORDER_TREE / NODETEX / CHILDTEX flags (CUDA array vs linear/pitched).
void unloadReferenceTree(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
#if REORDER_TREE
// Unload nodetex
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture(nodetex));
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
#endif
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array) {
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture(childrentex));
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
#endif
}
ref->d_children_tex_array = NULL;
#else
// Unreordered tree: node/children buffers are always linear memory.
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture(nodetex));
#endif
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array) {
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture(childrentex));
#endif
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
ref->d_children_tex_array = NULL;
}
#endif
#if TREE_ACCESS_HISTOGRAM
// Histogram counters allocated alongside the tree buffers.
CUDA_SAFE_CALL(hipFree(ref->d_node_hist));
ref->d_node_hist = NULL;
CUDA_SAFE_CALL(hipFree(ref->d_child_hist));
ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
// Uploads the reference string (via loadReferenceTexture) plus the
// flattened suffix-tree node and children textures to the device.
// Layout is controlled by REORDER_TREE (2D CUDA arrays / pitched
// memory vs 1D linear buffers) and NODETEX/CHILDTEX (texture-bound vs
// plain global memory). The TWO_LEVEL_* paths additionally cache the
// top of the tree in constant memory for fast access.
void loadReference(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
// Reordered nodes as a 2D CUDA array bound to nodetex.
// 4x32-bit unsigned channels: one 16-byte node texel per fetch.
hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height);
CUDA_SAFE_CALL(hipMemcpyToArray((hipArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.addressMode[1] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(nodetex, (hipArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
// Reordered nodes in pitched global memory (no texture).
size_t nodepitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height);
CUDA_SAFE_CALL(hipMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
hipMemcpyHostToDevice));
#endif
if (ref->tex_children_height) {
#if CHILDTEX
// Reordered children as a 2D CUDA array bound to childrentex.
hipChannelFormatDesc childrenTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height);
CUDA_SAFE_CALL(hipMemcpyToArray((hipArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.addressMode[1] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(childrentex, (hipArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
// Reordered children in pitched global memory (no texture).
// NOTE(review): the copy width below uses sizeof(PixelOfNode) although
// this is the children buffer; harmless only if both texel types have
// the same size — confirm against the struct definitions.
size_t childpitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height);
CUDA_SAFE_CALL(hipMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
childpitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_children_height,
hipMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_node_hist), 0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_child_hist), 0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL(hipMemcpy((ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
#if NODETEX
hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height * sizeof(PixelOfNode));
#endif
if (ref->tex_children_height) {
// Child tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL(hipMemcpy((ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
#if CHILDTEX
hipChannelFormatDesc childTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height * sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// 1D histograms matching the unreordered tree layout.
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_node_hist), 0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_child_hist), 0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
// Cache the first NODE_THRESH nodes into constant memory
// (node_tree_top) so shallow tree walks avoid texture fetches.
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
// Merged texture: node texels live at even x positions.
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x * 2];
#else
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(hipMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
// Same idea for the children table (child_tree_top in constant memory).
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
// Merged texture: children texels sit one past their node texel.
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[loc + 1];
#elif REORDER_TREE
// NOTE(review): this branch reads ref->h_children while other branches
// use ref->h_children_tex_array — verify the field name is intentional.
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_children))[loc];
#elif MERGETEX
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x * 2 + 1];
#else
child_buf[i] = ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(hipMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
// CPU path: matching uses the host-side tree directly.
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
// Logs the first and last query names of the block being processed.
// Guards against an empty query set: indexing h_names[count - 1] with
// count == 0 would read out of bounds.
void dumpQueryBlockInfo(QuerySet* queries)
{
    if (queries->count == 0) {
        fprintf(stderr, "\tProcessing empty query block\n");
        return;
    }
    fprintf(stderr, "\tProcessing queries %s to %s\n",
            queries->h_names[0],
            queries->h_names[queries->count - 1]);
}
// Uploads the current query block (packed query characters, per-query
// start addresses, and per-query lengths) to the device, optionally
// binding the character buffer to qrytex. On CPU runs the device
// pointers are simply cleared. Updates bytes_on_board and the
// t_queries_to_board timer.
void loadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
fprintf(stderr, "Allocating device memory for queries... ");
char* toboardtimer = createTimer();
startTimer(toboardtimer);
dumpQueryBlockInfo(queries);
CUDA_MALLOC((void**)&queries->d_tex_array, queries->texlen);
queries->bytes_on_board += queries->texlen;
// Copy starts at the first query's offset into the host buffer.
// NOTE(review): assumes h_addrs_tex_array[0] is the base offset of
// this block within h_tex_array — confirm against the reader.
CUDA_SAFE_CALL(hipMemcpy((void*)queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
hipMemcpyHostToDevice));
#if QRYTEX
// Bind query characters as a 1D 8-bit unsigned texture.
qrytex.addressMode[0] = hipAddressModeClamp;
qrytex.filterMode = hipFilterModePoint;
qrytex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc qryDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
queries->texlen);
#endif
// Per-query start addresses within the packed character buffer.
CUDA_MALLOC((void**)&queries->d_addrs_tex_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(hipMemcpy((void*)queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
// Per-query lengths.
CUDA_MALLOC((void**)&queries->d_lengths_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(hipMemcpy((void*)queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
stopTimer(toboardtimer);
ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries * sizeof(int) + queries->texlen);
}
}
// Frees every device-side query buffer uploaded by loadQueries and
// resets the bookkeeping so the set can be reloaded.
void unloadQueries(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    QuerySet* queries = ctx->queries;
    // Release all three device buffers, then drop the stale pointers.
    CUDA_SAFE_CALL(hipFree(queries->d_tex_array));
    CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array));
    CUDA_SAFE_CALL(hipFree(queries->d_lengths_array));
    queries->d_tex_array = NULL;
    queries->d_addrs_tex_array = NULL;
    queries->d_lengths_array = NULL;
    queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
    // Each preceding query occupies (match_length + 1) fewer coord slots
    // than characters, so subtract that overhead per query id.
    const int per_query_overhead = match_length + 1;
    return qry_addrs - qryid * per_query_overhead;
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
// Outputs: *num_coords receives the total MatchCoord count; 
// *h_coord_offset_array receives a calloc'd per-query offset table that
// the caller owns and must free.
void buildCoordOffsetArray(MatchContext* ctx,
int** h_coord_offset_array,
unsigned int* num_coords)
{
int numCoords = 0;
int match_length = ctx->min_match_length;
int numQueries = ctx->queries->count;
int* lengths = ctx->queries->h_lengths_array;
int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
// Coalesced layout: queries are grouped WARP_SIZE at a time; each
// group reserves max_num_coords slots per lane so lane j of the warp
// starts at numCoords + j and strides by WARP_SIZE.
// NOTE(review): this branch writes ctx->results.h_coord_tex_array and
// leaves coord_offsets zero-filled (it is still returned below) —
// confirm that callers use h_coord_tex_array in this configuration.
for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) {
// Every query in this warp will need at least this many coords
int max_num_coords = 0;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
int num_coords = lengths[i + j] - match_length + 1;
if (max_num_coords < num_coords)
max_num_coords = num_coords;
}
unsigned int block_size = max_num_coords * WARP_SIZE;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
ctx->results.h_coord_tex_array[i + j] = numCoords + j;
}
numCoords += block_size;
}
#else
// Sequential layout: each query's coords start right where its
// characters start, minus the per-query overhead (see match_coord_addrs).
for (unsigned int i = 0; i < numQueries; ++i) {
int qryoffset = ctx->queries->h_addrs_tex_array[i];
coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
}
if (numQueries > 0) {
// Total = last query's offset plus its own coord count.
unsigned int last_qry = numQueries - 1;
unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
numCoords = coord_offsets[last_qry] + last_qry_len;
fprintf(stderr, "Need %d match coords for this result array\n",
numCoords);
}
#endif
*num_coords = numCoords;
*h_coord_offset_array = coord_offsets;
}
// Builds the per-query coord offset table, then allocates the host and
// (unless running on CPU) device result buffers for the match kernel.
// Device buffers are zero-initialized so unmatched slots read as empty.
// Fix: size_t values were previously printed with %d/%u, which is
// undefined behavior on LP64 platforms; they now use %zu.
void loadResultBuffer(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    unsigned int numQueries = ctx->queries->count;
    assert(numQueries);
    char* offsettimer = createTimer();
    startTimer(offsettimer);
    buildCoordOffsetArray(ctx,
                          &(ctx->results.h_coord_tex_array),
                          &(ctx->results.numCoords));
    stopTimer(offsettimer);
    ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
    deleteTimer(offsettimer);
    unsigned int numCoords = ctx->results.numCoords;
    // %zu matches the size_t byte count; %u matches the unsigned count.
    fprintf(stderr, "Allocating result array for %u queries (%zu bytes) ...",
            numQueries, numCoords * sizeof(MatchCoord));
    size_t boardFreeMemory = 0;
    size_t total_mem = 0;
    boardMemory(&boardFreeMemory, &total_mem);
    fprintf(stderr, "board free memory: %zu total memory: %zu\n",
            boardFreeMemory, total_mem);
    ctx->results.h_match_coords = (MatchCoord*)calloc(numCoords, sizeof(MatchCoord));
    if (ctx->results.h_match_coords == NULL) {
        trap_dbg();
        exit(EXIT_FAILURE);
    }
    if (!ctx->on_cpu) {
        char* toboardtimer = createTimer();
        startTimer(toboardtimer);
        ctx->results.bytes_on_board = 0;
        // Device-side coords, zeroed to mark every slot as "no match yet".
        CUDA_MALLOC((void**)&ctx->results.d_match_coords,
                    numCoords * sizeof(MatchCoord));
        ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
        CUDA_SAFE_CALL(hipMemset((void*)ctx->results.d_match_coords, 0,
                                 numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
        // The coalesced kernel reads the per-query offsets on-device.
        CUDA_MALLOC((void**)&ctx->results.d_coord_tex_array,
                    numQueries * sizeof(int));
        ctx->results.bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(hipMemcpy((void*)ctx->results.d_coord_tex_array,
                                 ctx->results.h_coord_tex_array,
                                 numQueries * sizeof(int),
                                 hipMemcpyHostToDevice));
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
    }
    else {
        ctx->results.d_match_coords = NULL;
    }
    fprintf(stderr, "done\n");
}
// Frees the device-side result buffers allocated by loadResultBuffer.
// Fix: the COALESCED_QUERIES branch previously freed d_match_coords a
// second time and leaked d_coord_tex_array, which loadResultBuffer
// allocates under the same flag.
void unloadResultBuffer(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords));
    ctx->results.d_match_coords = NULL;
    ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
    CUDA_SAFE_CALL(hipFree(ctx->results.d_coord_tex_array));
    ctx->results.d_coord_tex_array = NULL;
#endif
}
// Copies the match coordinates (and, with TREE_ACCESS_HISTOGRAM, the
// node/children access histograms) from the device back to the host,
// then accumulates histogram counts into the running statistics.
// Fixes in the histogram path: 'temp' was referenced in the child
// block without a declaration (it was scoped to the node block above),
// and the copy guarded/read 'hist_size' where 'child_hist_size' was
// clearly intended — both broke builds/runs with the flag enabled.
void transferResultsFromDevice(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    if (!ctx->on_cpu) {
        char* fromboardtimer = createTimer();
        startTimer(fromboardtimer);
        CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords,
                                 ctx->results.d_match_coords,
                                 ctx->results.numCoords * sizeof(MatchCoord),
                                 hipMemcpyDeviceToHost));
#if TREE_ACCESS_HISTOGRAM
        CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_node_hist,
                                 ctx->ref->d_node_hist,
                                 ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
                                 hipMemcpyDeviceToHost));
        CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_child_hist,
                                 ctx->ref->d_child_hist,
                                 ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
                                 hipMemcpyDeviceToHost));
        // Grow the accumulated node histogram when this reference needs
        // more bins, preserving existing counts.
        // NOTE(review): the old buffer is replaced without free(); kept
        // as-is since initialization of node_hist when size==0 is not
        // visible here — confirm before freeing.
        if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height) {
            int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
            if (ctx->statistics.node_hist_size)
                memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
            ctx->statistics.node_hist = temp;
            ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
        }
        // Grow the accumulated child histogram likewise.
        if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height) {
            int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
            if (ctx->statistics.child_hist_size)
                memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
            ctx->statistics.child_hist = temp;
            ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
        }
        // Fold this round's device counts into the running totals.
        for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) {
            ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
        }
        for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) {
            ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
        }
#endif
        stopTimer(fromboardtimer);
        ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
        deleteTimer(fromboardtimer);
    }
}
int flushOutput();
int addToBuffer(char* string);
char numbuffer[32];
// Returns the host-side MatchCoord slot for character qrychar of query
// qryid, using the per-query offset table built by buildCoordOffsetArray.
MatchCoord* coordForQueryChar(MatchContext* ctx,
                              unsigned int qryid,
                              unsigned int qrychar)
{
    MatchResults* results = &(ctx->results);
    MatchCoord* base = results->h_match_coords;
    int query_offset = results->h_coord_tex_array[qryid];
#if COALESCED_QUERIES
    // Coalesced layout interleaves WARP_SIZE queries, so successive
    // characters of one query are WARP_SIZE slots apart.
    return base + query_offset + qrychar * WARP_SIZE;
#else
    return base + query_offset + qrychar;
#endif
}
// Packs as many pending match coords as fit in mem_avail into a
// MatchInfo array and sizes the matching Alignment buffer. Resumable:
// *coord_idx / *nextqry / *nextqrychar carry the scan position across
// calls so getExactAlignments can process the results in rounds.
// Outputs: *matches (calloc'd, caller frees), *match_idx (count),
// *align_idx (total leaves/alignments), *alignments (calloc'd).
void coordsToPrintBuffers(MatchContext* ctx,
ReferencePage* page,
MatchInfo** matches,
Alignment** alignments,
unsigned int mem_avail,
unsigned int* coord_idx,
unsigned int* match_idx,
unsigned int* align_idx,
unsigned int* nextqry,
unsigned int* nextqrychar)
{
unsigned int numQueries = ctx->queries->count;
int match_length = ctx->min_match_length;
unsigned int cidx = *coord_idx;
unsigned int midx = 0;
unsigned int numCoords = ctx->results.numCoords;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
int DEBUG = 0;
if (DEBUG && cidx == 0) {
// Debug dump: print every forward-match node and its leaf count, then quit.
for (int j = 0; j < numCoords; ++j) {
MatchCoord* coord = ctx->results.h_match_coords + j;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
//fprintf(stdout, "node: %d\n",
// coord->node);
fprintf(stdout, "node: %d leaves:%d\n",
coord->node.data, lookupNumLeaves(page, coord->node));
}
}
exit(0);
}
// How much can we fit into mem_avail?
// First pass: advance cidx greedily, counting matches and alignments,
// until either memory or the kernel's grid-dimension limit is hit.
for (int j = cidx; j < numCoords; ++j) {
MatchCoord* coord = ctx->results.h_match_coords + j;
int queryAlignments = 0;
int queryMatches = 0;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
int numLeaves = lookupNumLeaves(page, coord->node);
queryAlignments += numLeaves;
queryMatches++;
}
int allMatches = numMatches + queryMatches;
int allAlignments = numAlignments + queryAlignments;
int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
if (neededSize > mem_avail || (allMatches / BLOCKSIZE) >= MAX_GRID_DIMENSION) {
// adding this match won't fit on the board
break;
}
++cidx;
numMatches = allMatches;
numAlignments = allAlignments;
}
// Second pass: walk queries/characters from the resume point and fill
// the MatchInfo records, assigning each its alignment output offset.
MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
unsigned int alignmentOffset = 0;
int qry = *nextqry;
int qrychar = *nextqrychar;
bool set_full = false;
while (qry < numQueries) {
// h_lengths_array doesn't count the 'q' at the beginning of each query
int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
while (qrychar < qlen) {
if (midx >= numMatches) {
// Buffer full: remember where to resume next round.
set_full = true;
break;
}
MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
MatchInfo m;
m.resultsoffset = alignmentOffset;
m.qrystartpos = qrychar;
m.matchnode = coord->node;
m.edgematch = coord->edge_match_length;
m.numLeaves = lookupNumLeaves(page, m.matchnode);
m.queryid = qry;
alignmentOffset += m.numLeaves;
M[midx++] = m;
}
++qrychar;
}
if (set_full)
break;
++qry;
qrychar = 0;
}
*coord_idx = cidx;
*match_idx = midx;
*align_idx = alignmentOffset;
*matches = M;
*nextqry = qry;
*nextqrychar = qrychar;
// NOTE(review): %d receives a size_t expression here; should be %zu.
fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
*alignments = (struct Alignment*)calloc(alignmentOffset, sizeof(Alignment));
//hipHostMalloc((void**)alignments, numAlignments * sizeof(Alignment));
}
// Runs the print kernel on the device: uploads the MatchInfo records,
// launches printKernel through the gloop host loop, and copies the
// resulting Alignment records back into 'alignments'. Every CUDA call
// is serialized under the host loop's kernel lock.
void runPrintKernel(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
MatchInfo* d_matches;
size_t matchesSize = numMatches * sizeof(MatchInfo);
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
CUDA_MALLOC((void**)&d_matches, matchesSize);
}
// Alignment output buffer, zeroed so unwritten slots read as empty.
struct Alignment* d_alignments;
size_t alignmentSize = numAlignments * sizeof(Alignment);
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
CUDA_MALLOC((void**)&d_alignments, alignmentSize);
CUDA_SAFE_CALL(hipMemset((void*)d_alignments, 0, alignmentSize));
}
char* atimer = createTimer();
startTimer(atimer);
// Copy matches to card
fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
int DEBUG = 0;
if (DEBUG) {
// Debug dump of every MatchInfo record, then quit.
for (int i = 0; i < numMatches; i++) {
printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
i,
h_matches[i].resultsoffset,
h_matches[i].queryid,
h_matches[i].matchnode.data,
h_matches[i].numLeaves,
h_matches[i].edgematch,
h_matches[i].qrystartpos);
}
exit(0);
}
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
CUDA_SAFE_CALL(hipMemcpy(d_matches, h_matches, matchesSize, hipMemcpyHostToDevice));
}
stopTimer(atimer);
float mtime = getTimerValue(atimer);
// Launch the kernel
// One thread per match; grid sized by ceil-division over BLOCKSIZE.
int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
fprintf(stderr, " Calling print kernel... ");
fprintf(stderr, "printKernel threads:(%d),blocks(%d)\n", dimBlock.x, dimGrid.x);
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
// gloop launch: the device lambda forwards all arguments to printKernel.
ctx->hostLoop->launch(*ctx->hostContext, dim3(60), dimGrid, dimBlock, [] __device__(
gloop::DeviceLoop<> * loop,
MatchInfo * matches,
int totalMatches,
Alignment* alignments,
char* queries,
const int* queryAddrs,
const int* queryLengths,
const int page_begin,
const int page_end,
const int page_shadow_left,
const int page_shadow_right,
const int min_match_length) {
printKernel(loop, matches, totalMatches, alignments, queries, queryAddrs, queryLengths, page_begin, page_end, page_shadow_left, page_shadow_right, min_match_length);
}, d_matches, numMatches, d_alignments, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, page->begin, page->end, page->shadow_left, page->shadow_right, ctx->min_match_length);
// hipDeviceSynchronize();
{
// Surface any launch/execution error before reading results.
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
}
startTimer(atimer);
// Copy the results back to the host
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
CUDA_SAFE_CALL(hipMemcpy((void*)alignments,
(void*)d_alignments,
alignmentSize,
hipMemcpyDeviceToHost));
}
// hipDeviceSynchronize();
stopTimer(atimer);
float atime = getTimerValue(atimer);
fprintf(stderr, "memcpy time= %f\n", atime + mtime);
deleteTimer(atimer);
// Cleanup
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
CUDA_SAFE_CALL(hipFree(d_alignments));
CUDA_SAFE_CALL(hipFree(d_matches));
}
}
// TODO: need reverse-complement printing support
// CPU fallback for the print kernel: walks the MatchInfo records in
// order and prints the alignments for every forward (non-FRMASK) match.
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
    unsigned int min_match_length = ctx->min_match_length;
    int* addrs = ctx->queries->h_addrs_tex_array;
    int* lengths = ctx->queries->h_lengths_array;
    char* qrychars = ctx->queries->h_tex_array;
    if (!numMatches)
        return;
    // Matches arrive grouped by query; refresh the cached length only
    // when the query id changes.
    int current_qry = -1;
    unsigned int current_len;
    for (int i = 0; i < numMatches; ++i) {
        MatchInfo& match = h_matches[i];
        if (match.queryid != current_qry) {
            current_qry = match.queryid;
            current_len = lengths[current_qry];
        }
        // Reverse/forward-masked matches are skipped here.
        if (match.edgematch & FRMASK)
            continue;
#if COALESCED_QUERIES
        // Coalesced layout stores addresses in units of int.
        char* qstart = qrychars + sizeof(int) * addrs[current_qry];
#else
        char* qstart = qrychars + addrs[current_qry];
#endif
        printAlignments(page,
                        alignments + match.resultsoffset,
                        qstart,
                        current_len,
                        match.matchnode,
                        match.qrystartpos,
                        match.edgematch,
                        min_match_length,
                        0,
                        ctx->forwardcoordinates);
    }
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
// Drains ctx->results into printed alignments, in rounds sized to the
// available board memory: each round packs coords into buffers
// (coordsToPrintBuffers), runs the print kernel (or the CPU fallback),
// and writes the resulting alignments to the output buffer/disk.
// Frees the result arrays when all coords have been consumed.
void getExactAlignments(MatchContext* ctx, ReferencePage* page, bool on_cpu)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
// Forward-only mode is assumed by the printing path below.
assert(!ctx->reverse && !ctx->forwardreverse);
size_t boardFreeMemory;
size_t total_mem;
if (!on_cpu) {
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
}
else {
// CPU path: pretend a fixed 256 MB budget for round sizing.
boardFreeMemory = 256 * 1024 * 1024;
total_mem = boardFreeMemory;
}
#ifdef __DEVICE_EMULATION__
boardFreeMemory = 512 * 1024 * 1024;
#endif
// Keep BREATHING_ROOM bytes free for the kernel's own allocations.
boardFreeMemory -= BREATHING_ROOM;
fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
int rTotalMatches = 0;
int rTotalAlignments = 0;
int totalRounds = 0;
unsigned int last_coord = ctx->results.numCoords;
unsigned int next_coord = 0;
unsigned int nextqry = 0;
unsigned int nextqrychar = 0;
int lastqry = -1;
while (next_coord < last_coord) {
// see how many queries will fit on the board
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
totalRounds++;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
MatchInfo* h_matches = NULL;
Alignment* h_alignments = NULL;
int coord_left = next_coord;
char* btimer = createTimer();
startTimer(btimer);
coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
&next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
stopTimer(btimer);
float btime = getTimerValue(btimer);
ctx->statistics.t_coords_to_buffers += btime;
fprintf(stderr, "buffer prep time= %f\n", btime);
deleteTimer(btimer);
fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
// NOTE(review): if no match fits, next_coord may not advance and this
// 'continue' could loop forever; also h_matches/h_alignments are not
// freed on this path — confirm coordsToPrintBuffers always progresses.
if (numMatches == 0)
continue;
char buf[256];
//assert(qryend > qrystart);
rTotalAlignments += numAlignments;
rTotalMatches += numMatches;
// Periodically reset the device to work around texture-binding
// resource exhaustion, then re-upload the reference and queries.
if (num_bind_tex_calls > 100) {
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
hipDeviceReset();
}
num_bind_tex_calls = 0;
loadReference(ctx);
loadQueries(ctx);
}
char* ktimer = createTimer();
startTimer(ktimer);
if (on_cpu) {
runPrintOnCPU(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
else {
runPrintKernel(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_print_kernel += ktime;
fprintf(stderr, "print kernel time= %f\n", ktime);
deleteTimer(ktimer);
// char* stimer = createTimer();
// startTimer(stimer);
// mapQueriesEndToEnd(ctx,
// page,
// h_matches,
// numMatches,
// h_alignments,
// numAlignments);
//
// stopTimer(stimer);
//
// float stime = getTimerValue(stimer);
// fprintf(stderr, "postprocess time= %f\n", stime);
// deleteTimer(stimer);
//flushOutput();
//Process the alignments
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
char* otimer = createTimer();
startTimer(otimer);
for (int m = 0; m < numMatches; m++) {
int base = h_matches[m].resultsoffset;
for (int i = 0; i < h_matches[m].numLeaves; i++) {
// See if there are any more left maximal alignments for this match
if (h_alignments[base + i].left_in_ref == 0) {
break;
}
// New query: emit its FASTA-style header once.
if (h_matches[m].queryid != lastqry) {
lastqry = h_matches[m].queryid;
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + lastqry));
addToBuffer("\n");
}
sprintf(buf, "%d\t%d\t%d\n",
h_alignments[base + i].left_in_ref,
h_matches[m].qrystartpos + 1,
h_alignments[base + i].matchlen);
addToBuffer(buf);
// addMatchToBuffer(h_alignments[base+i].left_in_ref,
// h_matches[m].qrystartpos + 1,
// h_alignments[base+i].matchlen);
}
}
flushOutput();
stopTimer(otimer);
ctx->statistics.t_results_to_disk += getTimerValue(otimer);
deleteTimer(otimer);
free(h_matches);
free(h_alignments);
//hipHostFree((void*)h_alignments);
}
}
// All coords consumed: release the per-block result arrays.
free(ctx->results.h_coord_tex_array);
free(ctx->results.h_match_coords);
ctx->results.h_coord_tex_array = NULL;
ctx->results.h_match_coords = NULL;
fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
rTotalMatches, rTotalAlignments, totalRounds);
}
// Reads the next block of queries from disk into host-side arrays,
// sized to fit in device_mem_avail, and records how many match coords
// the block will require. Returns the number of queries read.
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
    QuerySet* queries = ctx->queries;
    // Outputs filled in by getQueriesTexture.
    char* queryTex = NULL;
    int* queryAddrs = NULL;
    int* queryLengths = NULL;
    char** names = NULL;
    unsigned int numQueries = 0;
    unsigned int num_match_coords = 0;
    size_t queryLen = 0;
    fprintf(stderr, "Loading query block... ");
    char* readTimer = createTimer();
    startTimer(readTimer);
    getQueriesTexture(queries->qfile,
                      &queryTex,
                      &queryLen,
                      &queryAddrs,
                      &names,
                      &queryLengths,
                      &numQueries,
                      &num_match_coords,
                      device_mem_avail,
                      ctx->min_match_length,
                      ctx->reverse || ctx->forwardreverse);
    stopTimer(readTimer);
    ctx->statistics.t_queries_from_disk += getTimerValue(readTimer);
    deleteTimer(readTimer);
    // Publish the freshly read block into the shared query set.
    queries->h_tex_array = queryTex;
    queries->h_addrs_tex_array = queryAddrs;
    queries->h_lengths_array = queryLengths;
    queries->h_names = names;
    queries->count = numQueries;
    queries->texlen = queryLen;
    ctx->results.numCoords = num_match_coords;
    fprintf(stderr, "done.\n");
    return numQueries;
}
// Frees every host-side array of a query block and resets the set to
// an empty state. Fix: h_names is now cleared after being freed — it
// was previously left dangling while the sibling pointers were nulled.
void destroyQueryBlock(QuerySet* queries)
{
    free(queries->h_tex_array);
    queries->h_tex_array = NULL;
    // Each name string was individually allocated by the reader.
    for (int i = 0; i < queries->count; ++i)
        free(queries->h_names[i]);
    free(queries->h_names);
    queries->h_names = NULL;
    queries->count = 0;
    queries->texlen = 0;
    free(queries->h_addrs_tex_array);
    queries->h_addrs_tex_array = NULL;
    free(queries->h_lengths_array);
    queries->h_lengths_array = NULL;
}
// Zeroes every accumulated timing/metric field and, when histograms are
// compiled in, releases any previously collected histogram buffers.
// Fields are cleared individually (not memset) because the struct may hold
// pointers that need explicit handling.
void resetStats(Statistics* stats)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;

    stats->t_end_to_end = stats->t_match_kernel = stats->t_print_kernel = 0.0;
    stats->t_queries_to_board = stats->t_match_coords_to_board = 0.0;
    stats->t_match_coords_from_board = stats->t_tree_to_board = 0.0;
    stats->t_ref_str_to_board = stats->t_queries_from_disk = 0.0;
    stats->t_ref_from_disk = stats->t_results_to_disk = 0.0;
    stats->t_tree_construction = stats->t_tree_reorder = stats->t_tree_flatten = 0.0;
    stats->t_reorder_ref_str = stats->t_build_coord_offsets = 0.0;
    stats->t_coords_to_buffers = stats->bp_avg_query_length = 0.0;

#if TREE_ACCESS_HISTOGRAM
    if (stats->node_hist_size) {
        free(stats->node_hist);
        stats->node_hist = NULL;
        stats->node_hist_size = 0;
    }
    if (stats->child_hist_size) {
        free(stats->child_hist);
        stats->child_hist = NULL;
        stats->child_hist_size = 0;
    }
#endif
}
// Writes run configuration and timing statistics as a two-row CSV to
// stats_filename. When TREE_ACCESS_HISTOGRAM is enabled, also dumps the
// node/child access histograms to the optional histogram files and prints
// tree-top hit ratios to stderr.
//
// Fixes: histogram FILE handles are now closed (they leaked), and the
// histogram code reads the 'stats' parameter — the previous code referenced
// 'ctx', which is not in scope here and could not compile with the macro on.
void writeStatisticsFile(Statistics* stats,
    char* stats_filename,
    char* node_hist_filename = NULL,
    char* child_hist_filename = NULL)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
    if (stats_filename) {
        FILE* f = fopen(stats_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
        }
        else {
            // Row 1: build-configuration flags, then the timing column names.
            fprintf(f, "Q");
            fprintf(f, ",R");
            fprintf(f, ",T");
            fprintf(f, ",m");
            fprintf(f, ",r");
            fprintf(f, ",t");
            fprintf(f, ",n");
            fprintf(f, ",Total");
            fprintf(f, ",Match kernel");
            fprintf(f, ",Print Kernel");
            fprintf(f, ",Queries to board");
            fprintf(f, ",Match coords to board");
            fprintf(f, ",Match coords from board");
            fprintf(f, ",Tree to board");
            fprintf(f, ",Ref str to board");
            fprintf(f, ",Queries from disk");
            fprintf(f, ",Ref from disk");
            fprintf(f, ",Output to disk");
            fprintf(f, ",Tree construction");
            fprintf(f, ",Tree reorder");
            fprintf(f, ",Tree flatten");
            fprintf(f, ",Ref reorder");
            fprintf(f, ",Build coord table");
            fprintf(f, ",Coords to buffers");
            fprintf(f, ",Avg qry length");
            fprintf(f, "\n");
            // Row 2: the corresponding values.
            fprintf(f, "%d", QRYTEX);
            fprintf(f, ",%d", REFTEX);
            fprintf(f, ",%d", TREETEX);
            fprintf(f, ",%d", MERGETEX);
            fprintf(f, ",%d", REORDER_REF);
            fprintf(f, ",%d", REORDER_TREE);
            fprintf(f, ",%d", RENUMBER_TREE);
            fprintf(f, ",%f", stats->t_end_to_end);
            fprintf(f, ",%f", stats->t_match_kernel);
            fprintf(f, ",%f", stats->t_print_kernel);
            fprintf(f, ",%f", stats->t_queries_to_board);
            fprintf(f, ",%f", stats->t_match_coords_to_board);
            fprintf(f, ",%f", stats->t_match_coords_from_board);
            fprintf(f, ",%f", stats->t_tree_to_board);
            fprintf(f, ",%f", stats->t_ref_str_to_board);
            fprintf(f, ",%f", stats->t_queries_from_disk);
            fprintf(f, ",%f", stats->t_ref_from_disk);
            fprintf(f, ",%f", stats->t_results_to_disk);
            fprintf(f, ",%f", stats->t_tree_construction);
            fprintf(f, ",%f", stats->t_tree_reorder);
            fprintf(f, ",%f", stats->t_tree_flatten);
            fprintf(f, ",%f", stats->t_reorder_ref_str);
            fprintf(f, ",%f", stats->t_build_coord_offsets);
            fprintf(f, ",%f", stats->t_coords_to_buffers);
            fprintf(f, ",%f", stats->bp_avg_query_length);
            fprintf(f, "\n");
            fclose(f);
        }
    }
#if TREE_ACCESS_HISTOGRAM
    if (node_hist_filename) {
        FILE* f = fopen(node_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
        }
        else {
            for (unsigned int i = 0; i < stats->node_hist_size; ++i)
                fprintf(f, "%d\t%d\n", i, stats->node_hist[i]);
            fclose(f); // previously leaked
        }
    }
    if (child_hist_filename) {
        FILE* f = fopen(child_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
        }
        else {
            for (unsigned int i = 0; i < stats->child_hist_size; ++i)
                fprintf(f, "%d\t%d\n", i, stats->child_hist[i]);
            fclose(f); // previously leaked
        }
    }
    // Report what fraction of all tree accesses hit the first 256 texels
    // (the "tree top"), which the kernels cache separately.
    float total_node_hits = 0;
    float tree_top_node_hits = 0;
    float total_child_hits = 0;
    float tree_top_child_hits = 0;
    for (unsigned int i = 0; i < stats->node_hist_size; ++i) {
        total_node_hits += stats->node_hist[i];
        if (i < 256) {
            tree_top_node_hits += stats->node_hist[i];
        }
    }
    for (unsigned int i = 0; i < stats->child_hist_size; ++i) {
        total_child_hits += stats->child_hist[i];
        if (i < 256) {
            tree_top_child_hits += stats->child_hist[i];
        }
    }
    fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n", (int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits / total_node_hits);
    fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n", (int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits / total_child_hits);
#endif
}
// Runs the reference CPU matcher over the current query block.
// doRC selects reverse-complement matching; otherwise forward.
//TODO: CPU is matching is disabled.
void matchOnCPU(MatchContext* ctx, bool doRC)
{
    // The forward and reverse-complement paths differ only in the final
    // direction flag, so a single call suffices.
    computeGold(&ctx->results,
        ctx->ref->str,
        ctx->queries->h_tex_array,
        ctx->queries->h_addrs_tex_array,
        ctx->queries->h_lengths_array,
        (PixelOfNode*)(ctx->ref->h_node_tex_array),
        (PixelOfChildren*)(ctx->ref->h_children_tex_array),
        ctx->queries->count,
        ctx->min_match_length,
        doRC ? REVERSE : FORWARD);
}
// Launches the GPU matching kernel for the current query block.
// doRC selects the (currently disabled) reverse-complement kernel path;
// the forward path goes through the gloop host loop. Exits the process if
// the launch reports an error.
void matchOnGPU(MatchContext* ctx, bool doRC)
{
    int numQueries = ctx->queries->count;
    // One thread per query, capped at BLOCKSIZE threads per block.
    int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
    // Match the reverse complement of the queries to the ref
    if (doRC) {
        //TODO: GPU RC is disabled
        {
            std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
            hipLaunchKernelGGL(( mummergpuRCKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ctx->results.d_match_coords,
                ctx->queries->d_tex_array,
                ctx->queries->d_addrs_tex_array,
                ctx->queries->d_lengths_array,
                numQueries,
                ctx->min_match_length);
        }
    } else {
        fprintf(stderr, "mummergpuKernel threads:(%d),blocks(%d)\n", dimBlock.x, dimGrid.x);
        // The physical thread-block count dim3(30) is a placeholder (see
        // the inline FIXME); the device lambda simply forwards its
        // arguments to mummergpuKernel under the gloop device loop.
        ctx->hostLoop->launch(*ctx->hostContext, /* FIXME */ dim3(30), dimGrid, dimBlock, [] __device__(
            gloop::DeviceLoop<> * loop,
            void* match_coords,
            char* queries,
            char* ref,
            const int* queryAddrs,
            const int* queryLengths,
            const int numQueries,
            const int min_match_len) {
            mummergpuKernel(
                loop,
                match_coords,
                queries,
                ref,
                queryAddrs,
                queryLengths,
                numQueries,
                min_match_len);
        }, ctx->results.d_match_coords, ctx->queries->d_tex_array, (char*)ctx->ref->d_ref_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length);
    }
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        // check if kernel execution generated an error
        hipError_t err = hipGetLastError();
        if (hipSuccess != err) {
            fprintf(stderr, "Kernel execution failed: %s.\n",
                hipGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }
}
// Copies match results for the given reference page back from the device.
// page_num is currently unused; the transfer covers the whole result set.
void getMatchResults(MatchContext* ctx,
    unsigned int page_num)
{
    transferResultsFromDevice(ctx);
}
// Matches the currently loaded query block against one reference page,
// timing the match kernel, then pulls the results back and releases the
// device-side result buffer.
void matchQueryBlockToReferencePage(MatchContext* ctx,
    ReferencePage* page,
    bool reverse_complement)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;

    char* timer = createTimer();
    fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
        ctx->queries->bytes_on_board,
        ctx->ref->bytes_on_board,
        ctx->results.bytes_on_board);

    startTimer(timer);
    if (ctx->on_cpu)
        matchOnCPU(ctx, reverse_complement);
    else
        matchOnGPU(ctx, reverse_complement);
    stopTimer(timer);

    const float elapsed = getTimerValue(timer);
    ctx->statistics.t_match_kernel += elapsed;
    fprintf(stderr, "match kernel time= %f\n", elapsed);
    deleteTimer(timer);

    getMatchResults(ctx, page->id);
    unloadResultBuffer(ctx);
}
// Loads one block of queries, matches it against the given reference page,
// extracts the exact alignments (on GPU when the print kernel is enabled,
// otherwise on CPU), and flushes the output. Returns 0.
int matchSubset(MatchContext* ctx,
    ReferencePage* page)
{
    loadQueries(ctx);

    fprintf(stderr,
        "Matching queries %s - %s against ref coords %d - %d\n",
        ctx->queries->h_names[0],
        ctx->queries->h_names[ctx->queries->count - 1],
        page->begin,
        page->end);

    loadResultBuffer(ctx);

    // TODO: renable RC support by calling this twice /w reverse/fwdreverse
    // idiom.
    matchQueryBlockToReferencePage(ctx, page, false);

    // Alignment runs on the CPU unless the print kernel is in use on GPU.
    const bool alignOnCpu = !(USE_PRINT_KERNEL && !ctx->on_cpu);
    getExactAlignments(ctx, page, alignOnCpu);

    flushOutput();
    unloadQueries(ctx);
    return 0;
}
// Returns the number of bytes of free device memory, or a fixed pretend
// value when running on the CPU. A throwaway alloc/free "primes" the CUDA
// context first because cuMemGetInfo reports zeroes before any allocation.
// NOTE(review): the int return truncates sizes >= 2 GiB; kept for interface
// compatibility with callers that budget with ints — TODO widen to size_t.
int getFreeDeviceMemory(MatchContext* ctx, bool on_cpu)
{
    size_t free_mem = 0;
    size_t total_mem = 0;
    // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
    // will return zeroes until we do a malloc.
    int* p = NULL;
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
        CUDA_SAFE_CALL(hipMalloc((void**)&p, sizeof(int)));
        CUDA_SAFE_CALL(hipFree(p));
    }
    if (!on_cpu) {
        boardMemory(&free_mem, &total_mem);
        // %zu matches size_t; the previous %u was wrong on LP64 platforms.
        fprintf(stderr, "board free memory: %zu total memory: %zu\n",
            free_mem, total_mem);
    }
    else {
        total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
    }
    return free_mem;
}
// Matches every query block in the query file against one reference page,
// then unloads the page from the device and rewinds the query file so the
// next page can rescan all queries. Returns 0.
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    fprintf(stderr, "Beginning reference page %p\n", page);
    int free_mem = getFreeDeviceMemory(ctx, ctx->on_cpu);
    // Device memory left for query blocks after the reference page and a
    // safety margin are accounted for.
    int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
    ctx->ref = &(page->ref);
    loadReference(ctx);
    while (getQueryBlock(ctx, available_mem)) {
        matchSubset(ctx, page);
        ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2;
        destroyQueryBlock(ctx->queries);
        if (num_bind_tex_calls > 100) {
            // NOTE(review): std::abort() makes the device-reset/reload
            // sequence below unreachable — looks like a deliberate debug
            // trap left in; confirm intent before removing.
            std::abort();
            hipDeviceReset();
            num_bind_tex_calls = 0;
            loadReference(ctx);
        }
    }
    unloadReferenceString(ctx->ref);
    unloadReferenceTree(ctx);
    {
        gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
        // Rewind so the next reference page sees the full query file again.
        lseek(ctx->queries->qfile, 0, SEEK_SET);
    }
    return 0;
}
// Splits the full reference into overlapping pages, each small enough for a
// suffix-tree page on the device. Adjacent pages overlap by MAX_QUERY_LEN+1
// bases so a match spanning a boundary is found on at least one page; the
// shadow_left/shadow_right fields record those overlap boundaries.
// *pages_out is calloc'd here; the caller owns and frees it.
void initReferencePages(MatchContext* ctx, int* num_pages, ReferencePage** pages_out)
{
    // presumably -3 accounts for the 's' prefix and "$" terminator padding
    // added around the reference string — TODO confirm.
    unsigned int bases_in_ref = ctx->full_ref_len - 3;
    unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
    unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
    fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
        num_reference_pages, bases_in_ref, page_size);
    unsigned int page_overlap = MAX_QUERY_LEN + 1;
    ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages,
        sizeof(ReferencePage));
    // First page starts at 1 to skip the leading sentinel character.
    pages[0].begin = 1;
    pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
    pages[0].shadow_left = -1;
    pages[0].id = 0;
    // Middle pages: each begins page_overlap before the previous page ends.
    for (int i = 1; i < num_reference_pages - 1; ++i) {
        pages[i].begin = pages[i - 1].end - page_overlap;
        pages[i].end = pages[i].begin + page_size + page_overlap;
        pages[i - 1].shadow_right = pages[i].begin;
        pages[i].shadow_left = pages[i - 1].end;
        pages[i].id = i;
    }
    // Last page is clamped to the end of the reference.
    if (num_reference_pages > 1) {
        int last_page = num_reference_pages - 1;
        pages[last_page].begin = pages[last_page - 1].end - page_overlap;
        pages[last_page].end = ctx->full_ref_len - 1;
        pages[last_page - 1].shadow_right = pages[last_page].begin;
        pages[last_page].shadow_right = -1;
        pages[last_page].shadow_left = pages[last_page - 1].end;
        pages[last_page].id = last_page;
    }
    *pages_out = pages;
    *num_pages = num_reference_pages;
}
// Streams every reference page against the full query set: builds each
// page's suffix-tree texture, matches all queries against it, then tears
// the page down before moving on. Only the first page receives the
// optional dot/texture dump filenames. Returns 0.
int streamReferenceAgainstQueries(MatchContext* ctx)
{
    int num_reference_pages = 0;
    ReferencePage* pages = NULL;
    initReferencePages(ctx, &num_reference_pages, &pages);

    for (int i = 0; i < num_reference_pages; ++i) {
        buildReferenceTexture(&(pages[i].ref),
            ctx->full_ref,
            pages[i].begin,
            pages[i].end,
            ctx->min_match_length,
            i == 0 ? ctx->dotfilename : NULL,
            i == 0 ? ctx->texfilename : NULL,
            &(ctx->statistics));
        matchQueriesToReferencePage(ctx, &pages[i]);
        destroyReference(&(pages[i].ref));
    }

    free(pages);
    return 0;
}
// Top-level entry point: matches all queries against the whole reference,
// accumulating end-to-end time and writing the statistics files at the end.
// Returns the result of streamReferenceAgainstQueries().
extern "C" int matchQueries(MatchContext* ctx)
{
    // The device code reinterprets these structs as uint4 texels, so their
    // sizes must agree exactly.
    assert(sizeof(struct PixelOfNode) == sizeof(uint4));
    assert(sizeof(struct PixelOfChildren) == sizeof(uint4));

#if TREE_ACCESS_HISTOGRAM
    ctx->statistics.node_hist_size = 0;
    ctx->statistics.child_hist_size = 0;
#endif

    resetStats(&ctx->statistics);

    char* totalTimer = createTimer();
    startTimer(totalTimer);
    fprintf(stderr, "Streaming reference pages against all queries\n");
    const int ret = streamReferenceAgainstQueries(ctx);
    stopTimer(totalTimer);
    ctx->statistics.t_end_to_end += getTimerValue(totalTimer);
    deleteTimer(totalTimer);

    writeStatisticsFile(&ctx->statistics, ctx->stats_file, "node_hist.out", "child_hist.out");
    return ret;
}
| ad7ee65780d13bb0b556cf5d9391fc9ed82c2407.cu | // Includes, system
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <cuda.h>
#include <vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
#include <gloop/gloop.h>
#include <gloop/statistics.h>
// Nonzero => use the GPU print kernel for alignment extraction.
int USE_PRINT_KERNEL = 1;
// Device-memory safety margin (bytes) left free when sizing query blocks.
#define BREATHING_ROOM (16 * 1024 * 1024)
// Reference bases covered by a single suffix-tree page.
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
// Threads per block used by the matching kernels.
#define BLOCKSIZE 256
// Running count of checked CUDA runtime calls (see CUDA_SAFE_CALL).
unsigned int cuda_calls = 0;
// Debugger hook: set a breakpoint here to stop at the point where
// CUDA_SAFE_CALL detects a failure, before the process exits.
void trap_dbg()
{
    fprintf(stderr, "Trapped\n");
}
// Checks a CUDA runtime call: on error, prints file/line/message, calls the
// trap_dbg() breakpoint hook, and exits. Also counts every checked call.
#define CUDA_SAFE_CALL(call) \
    do { \
        cuda_calls++; \
        cudaError err = call; \
        if (cudaSuccess != err) { \
            fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
                __FILE__, __LINE__, err, cudaGetErrorString(err)); \
            trap_dbg(); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
// Checks a CUDA *driver* API call (CUresult); exits on failure.
#define CU_SAFE_CALL_NO_SYNC(call) \
    do { \
        CUresult err = call; \
        if (CUDA_SUCCESS != err) { \
            fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
                err, __FILE__, __LINE__); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
// Initializes the driver API and selects the first device with compute
// capability >= 1.x, storing its handle in cuDevice. Exits if none exists.
#define CUT_DEVICE_INIT_DRV(cuDevice) \
    do { \
        cuDevice = 0; \
        int deviceCount = 0; \
        CUresult err = cuInit(0); \
        if (CUDA_SUCCESS == err) \
            CU_SAFE_CALL_NO_SYNC(cuDeviceGetCount(&deviceCount)); \
        if (deviceCount == 0) { \
            fprintf(stderr, "There is no device.\n"); \
            exit(EXIT_FAILURE); \
        } \
        int dev; \
        for (dev = 0; dev < deviceCount; ++dev) { \
            int major, minor; \
            CU_SAFE_CALL_NO_SYNC(cuDeviceComputeCapability(&major, &minor, dev)); \
            if (major >= 1) \
                break; \
        } \
        if (dev == deviceCount) { \
            fprintf(stderr, "There is no device supporting CUDA.\n"); \
            exit(EXIT_FAILURE); \
        } \
        else \
            CU_SAFE_CALL_NO_SYNC(cuDeviceGet(&cuDevice, dev)); \
    } while (0)
// Count of texture-bind/allocation calls; matchQueriesToReferencePage uses
// it as a trigger to recycle device state.
unsigned int num_bind_tex_calls = 0;
// Wrappers that mirror the corresponding CUDA calls while incrementing
// num_bind_tex_calls for the recycle heuristic above.
#define BIND_TEX(offset, tex, arr, desc, len) \
    do { \
        CUDA_SAFE_CALL(cudaBindTexture(offset, tex, arr, desc, len)); \
        ++num_bind_tex_calls; \
    } while (0)
#define BIND_TEX_ARRAY(tex, arr, desc) \
    do { \
        CUDA_SAFE_CALL(cudaBindTextureToArray(tex, arr, desc)); \
        ++num_bind_tex_calls; \
    } while (0)
#define CUDA_MALLOC(ptr, size) \
    do { \
        cudaMalloc(ptr, size); \
        ++num_bind_tex_calls; \
    } while (0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) \
    do { \
        cudaMallocPitch(ptr, out_pitch, rowsize, numrows); \
        ++num_bind_tex_calls; \
    } while (0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) \
    do { \
        cudaMallocArray(ptr, desc, pitch, rows); \
        ++num_bind_tex_calls; \
    } while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
extern "C" void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C" void getReferenceString(const char* filename, char** refstr, size_t* reflen);
extern "C" void createTreeTexture(const char* filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char* dotfilename,
const char* texfilename);
extern "C" void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C" int lookupNumLeaves(ReferencePage* page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C" void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAligments);
// Allocates a zero-initialized Timer_t and returns it as an opaque char*
// handle. Pair with deleteTimer() to release.
char* createTimer()
{
    // calloc zero-fills in one step, replacing the malloc+memset pair.
    struct Timer_t* timer = (struct Timer_t*)calloc(1, sizeof(struct Timer_t));
    return (char*)timer;
}
// Records the current wall-clock time as the timer's start point.
// ptr must be a handle obtained from createTimer().
void startTimer(char* ptr)
{
    gettimeofday(&(((struct Timer_t*)ptr)->start_m), NULL);
}
// Records the current wall-clock time as the timer's end point.
// ptr must be a handle obtained from createTimer().
void stopTimer(char* ptr)
{
    gettimeofday(&(((struct Timer_t*)ptr)->end_m), NULL);
}
// Returns the elapsed time between start and end in milliseconds.
// If the timer was never stopped (end seconds still zero), it is stopped
// now, so a bare start/read pair still yields a sensible value.
float getTimerValue(char* ptr)
{
    Timer_t* timer = (Timer_t*)ptr;
    if (timer == NULL) {
        fprintf(stderr, "Uninitialized timer!!!\n");
        return 0.0;
    }
    if (timer->end_m.tv_sec == 0) {
        stopTimer(ptr);
    }
    // seconds -> ms, plus microseconds -> ms.
    return (float)(1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
        + (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
// Releases a timer handle allocated by createTimer().
void deleteTimer(char* ptr)
{
    free((Timer_t*)ptr);
}
// Loads the reference string from fromFile into ref, timing the disk read
// and accumulating it into ref->t_load_from_disk.
// Returns 0 on success, -1 on a NULL argument.
extern "C" int createReference(const char* fromFile, Reference* ref)
{
    if (fromFile == NULL || ref == NULL)
        return -1;

    char* timer = createTimer();
    startTimer(timer);
    getReferenceString(fromFile, &ref->str, &ref->len);
    stopTimer(timer);

    ref->t_load_from_disk += getTimerValue(timer);
    deleteTimer(timer);
    return 0;
}
// Frees all host-side buffers owned by a Reference and resets its string
// and length. Device-side buffers are released separately by the
// unloadReference* functions. Returns 0.
extern "C" int destroyReference(Reference* ref)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
    free(ref->h_node_tex_array);
    free(ref->h_children_tex_array);
    free(ref->str);
#if REORDER_REF
    free(ref->h_ref_array);
#endif
    free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
    free(ref->h_node_hist);
    free(ref->h_child_hist);
#endif
    ref->str = NULL;
    ref->len = 0;
    return 0;
}
// Opens the query file read-only and stores its descriptor in queries.
// Exits the process if the file cannot be opened. Returns 0 on success.
extern "C" int createQuerySet(const char* fromFile, QuerySet* queries)
{
    fprintf(stderr, "Opening %s...\n", fromFile);

    const int fd = open(fromFile, O_RDONLY);
    if (fd < 0) {
        fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
        exit(1);
    }

    queries->qfile = fd;
    return 0;
}
// Closes the query file descriptor if one was opened. Returns 0.
// NOTE(review): fd 0 (stdin) is treated as "no file" here — confirm open()
// can never hand back descriptor 0 in this program.
extern "C" int destroyQuerySet(QuerySet* queries)
{
    if (queries->qfile)
        close(queries->qfile);
    return 0;
}
// Stub kept for the mummergpu public API; intentionally does nothing.
extern "C" void printStringForError(int err)
{
}
// Populates a MatchContext from the given reference/query set and option
// flags, then initializes the gloop host loop and context. Returns 0.
// Note: the 'matches' parameter is currently unused.
extern "C" int createMatchContext(Reference* ref,
    QuerySet* queries,
    MatchResults* matches,
    bool on_cpu,
    int min_match_length,
    char* stats_file,
    bool reverse,
    bool forwardreverse,
    bool forwardcoordinates,
    bool showQueryLength,
    char* dotfilename,
    char* texfilename,
    MatchContext* ctx)
{
    {
        gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
        ctx->queries = queries;
        ctx->ref = ref;
        ctx->full_ref = ref->str;
        ctx->full_ref_len = ref->len;
        ctx->on_cpu = on_cpu;
        ctx->min_match_length = min_match_length;
        ctx->stats_file = stats_file;
        ctx->reverse = reverse;
        ctx->forwardreverse = forwardreverse;
        ctx->forwardcoordinates = forwardcoordinates;
        ctx->show_query_length = showQueryLength;
        ctx->dotfilename = dotfilename;
        ctx->texfilename = texfilename;
    }
    {
        gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
        // gloop initialization.
        // release() hands ownership to these raw pointers; they are
        // deleted in destroyMatchContext().
        ctx->hostLoop = gloop::HostLoop::create(0).release();
        // FIXME, choose appropriate physical TBs.
        ctx->hostContext = gloop::HostContext::create(*ctx->hostLoop, dim3(448)).release();
    }
    return 0;
}
// Tears down a MatchContext: frees the full reference string, deletes the
// gloop context/loop created in createMatchContext, and closes the query
// set. Returns 0.
extern "C" int destroyMatchContext(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
    free(ctx->full_ref);
    {
        gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
        delete ctx->hostContext;
        delete ctx->hostLoop;
    }
    //destroyReference(ctx->ref);
    destroyQuerySet(ctx->queries);
    return 0;
}
// Builds the host-side suffix-tree textures for the reference slice
// [begin, end): copies the slice into ref->str framed by a leading 's'
// sentinel and trailing "$", builds the node/children texel arrays via
// createTreeTexture, and (with REORDER_REF) produces a cache-friendly
// reordered copy of the reference string. Accumulates timings into
// 'statistics'. Nothing is uploaded to the device here.
void buildReferenceTexture(Reference* ref,
    char* full_ref,
    size_t begin,
    size_t end,
    int min_match_len,
    char* dotfilename,
    char* texfilename,
    Statistics* statistics)
{
    fprintf(stderr, "Building reference texture...\n");
    PixelOfNode* nodeTexture = NULL;
    PixelOfChildren* childrenTexture = NULL;
    unsigned int width = 0;
    unsigned int node_height = 0;
    unsigned int children_height = 0;
    AuxiliaryNodeData* aux_data = NULL;
    int num_nodes;
    char* loadreftimer = createTimer();
    startTimer(loadreftimer);
    // +3 = leading 's' sentinel, trailing '$', and its NUL terminator.
    ref->len = end - begin + 3;
    ref->str = (char*)malloc(ref->len);
    ref->str[0] = 's';
    strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
    strcpy(ref->str + ref->len - 2, "$");
    stopTimer(loadreftimer);
    statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
    deleteTimer(loadreftimer);
    createTreeTexture(ref->str,
        &nodeTexture,
        &childrenTexture,
        &width,
        &node_height,
        &children_height,
        &aux_data,
        &num_nodes,
        min_match_len,
        statistics,
        dotfilename,
        texfilename);
    ref->h_node_tex_array = nodeTexture;
    ref->h_children_tex_array = childrenTexture;
    ref->tex_width = width;
    ref->tex_node_height = node_height;
    ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
    ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
    ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
    ref->aux_data = aux_data;
    ref->num_nodes = num_nodes;
    ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren));
    // NOTE(review): %d assumes bytes_on_board is int-sized — confirm its
    // declared type; large pages may overflow the format.
    fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
    // Reorder the reference string into 4-row blocks of a 65536-wide 2D
    // layout so neighboring kernel accesses hit nearby texels.
    char* reordertimer = createTimer();
    startTimer(reordertimer);
    unsigned int refpitch = ref->pitch = 65536;
    int numrows = ceil(ref->len / ((float)refpitch));
    int blocksize = 4;
    numrows += blocksize;
    int refstrsize = numrows * refpitch;
    ref->h_ref_array = (char*)malloc(refstrsize);
    ref->bytes_on_board += refstrsize;
    fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
    // Fill with a sentinel so unused padding cells are recognizable.
    int z_max = numrows * refpitch;
    for (int z = 0; z < z_max; z++) {
        ref->h_ref_array[z] = 'Z';
    }
    int x, y;
    int maxx = 0, maxy = 0;
    size_t reflen = ref->len;
    char* refstr = ref->str;
    int block_dim = refpitch * blocksize;
    for (int i = 0; i < reflen; i++) {
        int bigx = i % (block_dim); // ref string reorder
        int bigy = i / (block_dim);
        y = bigy * blocksize + bigx % blocksize;
        x = bigx / blocksize;
        // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
        assert(x < refpitch);
        assert(y < numrows);
        ref->h_ref_array[y * refpitch + x] = refstr[i];
        if (x > maxx) {
            maxx = x;
        }
        if (y > maxy) {
            maxy = y;
        }
    }
    if ((maxx >= refpitch) || (maxy >= numrows)) {
        fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
            maxx, refpitch, maxy, numrows);
        exit(1);
    }
    stopTimer(reordertimer);
    if (statistics)
        statistics->t_reorder_ref_str += getTimerValue(reordertimer);
    deleteTimer(reordertimer);
#else
    fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
    ref->bytes_on_board += ref->len;
#endif
}
// Reports free and total device memory in bytes via the driver API.
// Under the device emulator, returns fixed pretend values instead.
void boardMemory(size_t* free_mem, size_t* total_mem)
{
    // The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
    *free_mem = 512 * 1024 * 1024;
    *total_mem = 768 * 1024 * 1024;
#else
    CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
// Uploads the reference string to the device. The storage and access path
// depend on build flags: REFTEX selects texture binding (a cudaArray when
// REORDER_REF, a linear binding otherwise); without REFTEX the string goes
// into plain (possibly pitched) global memory. On the CPU path nothing is
// uploaded. Upload time is accumulated into t_ref_str_to_board.
void loadReferenceTexture(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
    Reference* ref = ctx->ref;
    int numrows = ceil(ref->len / ((float)ref->pitch));
    int blocksize = 4;
    numrows += blocksize;
    cudaChannelFormatDesc refTextureDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned);
    if (!ctx->on_cpu) {
        char* toboardtimer = createTimer();
        startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
        // Reordered string lives in a 2D cudaArray bound to reftex.
        CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_ref_array),
            &refTextureDesc,
            ref->pitch,
            numrows);
        CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray*)(ref->d_ref_array),
            0,
            0,
            ref->h_ref_array,
            numrows * ref->pitch,
            cudaMemcpyHostToDevice));
        reftex.addressMode[0] = cudaAddressModeClamp;
        reftex.addressMode[1] = cudaAddressModeClamp;
        reftex.filterMode = cudaFilterModePoint;
        reftex.normalized = false;
        BIND_TEX_ARRAY(reftex, (cudaArray*)ref->d_ref_array, refTextureDesc);
        ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
        // Flat string in linear memory bound to a 1D texture.
        CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
        CUDA_SAFE_CALL(cudaMemcpy((void*)(ref->d_ref_array),
            ref->str,
            ref->len,
            cudaMemcpyHostToDevice));
        reftex.addressMode[0] = cudaAddressModeClamp;
        reftex.filterMode = cudaFilterModePoint;
        reftex.normalized = false; // access with normalized texture coordinates
        cudaChannelFormatDesc refDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
        BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
        ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
        // No texture: pitched global memory for the reordered layout.
        size_t refpitch;
        CUDA_MALLOC_PITCH((void**)(&ref->d_ref_array),
            &refpitch,
            ref->pitch * sizeof(char),
            numrows);
        CUDA_SAFE_CALL(cudaMemcpy2D((ref->d_ref_array),
            refpitch,
            ref->h_ref_array,
            ref->pitch,
            ref->pitch * sizeof(char),
            numrows,
            cudaMemcpyHostToDevice));
        ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
        CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
        CUDA_SAFE_CALL(cudaMemcpy((void*)(ref->d_ref_array),
            ref->str,
            ref->len,
            cudaMemcpyHostToDevice));
        ctx->ref->bytes_on_board += ref->len;
#endif
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
    }
    else {
        ref->d_ref_array = NULL;
    }
}
// Releases the device-side reference string, unbinding the texture first
// when REFTEX is enabled. The free call must match how the storage was
// allocated in loadReferenceTexture (cudaArray vs. linear memory).
void unloadReferenceString(Reference* ref)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
#if REFTEX
    CUDA_SAFE_CALL(cudaUnbindTexture(reftex));
#endif
#if REORDER_REF && REFTEX
    CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_array)));
#else
    CUDA_SAFE_CALL(cudaFree((ref->d_ref_array)));
#endif
    ref->d_ref_array = NULL;
}
// Releases the device-side suffix-tree node/children textures, unbinding
// textures and choosing cudaFreeArray vs. cudaFree to match the allocation
// strategy implied by REORDER_TREE / NODETEX / CHILDTEX. Also frees the
// access histograms when TREE_ACCESS_HISTOGRAM is enabled.
void unloadReferenceTree(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    Reference* ref = ctx->ref;
#if REORDER_TREE
    // Unload nodetex
#if NODETEX
    CUDA_SAFE_CALL(cudaUnbindTexture(nodetex));
    CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array)));
#else
    CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
#endif
    ref->d_node_tex_array = NULL;
    // Unload childrentex
    if (ref->d_children_tex_array) {
#if CHILDTEX
        CUDA_SAFE_CALL(cudaUnbindTexture(childrentex));
        CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array)));
#else
        CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
#endif
    }
    ref->d_children_tex_array = NULL;
#else
#if NODETEX
    CUDA_SAFE_CALL(cudaUnbindTexture(nodetex));
#endif
    CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
    ref->d_node_tex_array = NULL;
    // Unload childrentex
    if (ref->d_children_tex_array) {
#if CHILDTEX
        CUDA_SAFE_CALL(cudaUnbindTexture(childrentex));
#endif
        CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
        ref->d_children_tex_array = NULL;
    }
#endif
#if TREE_ACCESS_HISTOGRAM
    CUDA_SAFE_CALL(cudaFree(ref->d_node_hist));
    ref->d_node_hist = NULL;
    CUDA_SAFE_CALL(cudaFree(ref->d_child_hist));
    ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height);
CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.addressMode[1] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(nodetex, (cudaArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
size_t nodepitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height);
CUDA_SAFE_CALL(cudaMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
cudaMemcpyHostToDevice));
#endif
if (ref->tex_children_height) {
#if CHILDTEX
cudaChannelFormatDesc childrenTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height);
CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.addressMode[1] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(childrentex, (cudaArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
size_t childpitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height);
CUDA_SAFE_CALL(cudaMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
childpitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_children_height,
cudaMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_node_hist), 0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_child_hist), 0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL(cudaMemcpy((ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height * sizeof(PixelOfNode));
#endif
if (ref->tex_children_height) {
// Child tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL(cudaMemcpy((ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
#if CHILDTEX
cudaChannelFormatDesc childTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height * sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_node_hist), 0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_child_hist), 0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x * 2];
#else
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(cudaMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[loc + 1];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_children))[loc];
#elif MERGETEX
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x * 2 + 1];
#else
child_buf[i] = ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(cudaMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
// Print a short progress line naming the first and last query of the
// block currently being processed. Reads queries->h_names, which is
// expected to hold queries->count entries.
void dumpQueryBlockInfo(QuerySet* queries)
{
    // Guard against an empty block: the original indexed
    // h_names[count - 1], i.e. h_names[-1], when count == 0.
    if (queries->count == 0) {
        fprintf(stderr, "\tProcessing empty query block\n");
        return;
    }
    fprintf(stderr, "\tProcessing queries %s to %s\n",
            queries->h_names[0],
            queries->h_names[queries->count - 1]);
}
// Copy the current query block to the device: the concatenated query
// characters, the per-query start offsets, and the per-query lengths.
// Updates queries->bytes_on_board and accumulates the transfer time into
// ctx->statistics.t_queries_to_board. On a CPU-only run the device
// pointers are simply cleared.
void loadQueries(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    QuerySet* queries = ctx->queries;
    queries->bytes_on_board = 0;
    unsigned int numQueries = queries->count;
    if (!ctx->on_cpu) {
        fprintf(stderr, "Allocating device memory for queries... ");
        char* toboardtimer = createTimer();
        startTimer(toboardtimer);
        dumpQueryBlockInfo(queries);
        // One flat byte array holding every query's characters.
        CUDA_MALLOC((void**)&queries->d_tex_array, queries->texlen);
        queries->bytes_on_board += queries->texlen;
        // NOTE(review): the host source pointer is offset by the first
        // query's address -- presumably h_addrs_tex_array[0] is 0 for a
        // freshly read block; confirm against getQueriesTexture.
        CUDA_SAFE_CALL(cudaMemcpy((void*)queries->d_tex_array,
                                  queries->h_tex_array + queries->h_addrs_tex_array[0],
                                  queries->texlen,
                                  cudaMemcpyHostToDevice));
#if QRYTEX
        // Bind the query bytes to a 1D texture (8-bit unsigned channel).
        qrytex.addressMode[0] = cudaAddressModeClamp;
        qrytex.filterMode = cudaFilterModePoint;
        qrytex.normalized = false; // use unnormalized (integer) texture coordinates
        cudaChannelFormatDesc qryDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
        BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
                 queries->texlen);
#endif
        // Per-query start offsets into the character array.
        CUDA_MALLOC((void**)&queries->d_addrs_tex_array,
                    numQueries * sizeof(int));
        queries->bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(cudaMemcpy((void*)queries->d_addrs_tex_array,
                                  queries->h_addrs_tex_array,
                                  numQueries * sizeof(int),
                                  cudaMemcpyHostToDevice));
        // Per-query lengths.
        CUDA_MALLOC((void**)&queries->d_lengths_array,
                    numQueries * sizeof(int));
        queries->bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(cudaMemcpy((void*)queries->d_lengths_array,
                                  queries->h_lengths_array,
                                  numQueries * sizeof(int),
                                  cudaMemcpyHostToDevice));
        stopTimer(toboardtimer);
        ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
        fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
    }
    else {
        // CPU path: no device buffers; report what a GPU run would need.
        queries->d_addrs_tex_array = NULL;
        queries->d_tex_array = NULL;
        queries->d_lengths_array = NULL;
        fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries * sizeof(int) + queries->texlen);
    }
}
// Release the device-side query buffers allocated by loadQueries and
// reset the bookkeeping so the QuerySet can be reloaded safely.
void unloadQueries(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    QuerySet* queries = ctx->queries;

    // The three buffers are independent; free them all, then clear the
    // pointers so stale handles cannot be reused.
    CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array));
    CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array));
    CUDA_SAFE_CALL(cudaFree(queries->d_tex_array));

    queries->d_lengths_array = NULL;
    queries->d_addrs_tex_array = NULL;
    queries->d_tex_array = NULL;
    queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
    // Each preceding query contributes (match_length + 1) fewer coords
    // than characters; subtract that accumulated slack from the query's
    // character address to get its coord address.
    const int slack_per_query = match_length + 1;
    return qry_addrs - qryid * slack_per_query;
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
//
// Outputs: *h_coord_offset_array receives a calloc'd array of one offset
// per query (caller frees); *num_coords receives the total number of
// MatchCoord slots the result buffer must hold.
void buildCoordOffsetArray(MatchContext* ctx,
                           int** h_coord_offset_array,
                           unsigned int* num_coords)
{
    int numCoords = 0;
    int match_length = ctx->min_match_length;
    int numQueries = ctx->queries->count;
    int* lengths = ctx->queries->h_lengths_array;
    // One slot per query; calloc so untouched entries read as 0.
    int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
    // Fix: the allocation was previously used unchecked.
    if (coord_offsets == NULL) {
        fprintf(stderr, "Unable to allocate coord offset array\n");
        exit(EXIT_FAILURE);
    }
#if COALESCED_QUERIES
    // Coalesced layout: each warp's queries share an interleaved block
    // sized by the longest query in the warp.
    for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) {
        // Every query in this warp will need at least this many coords
        int max_num_coords = 0;
        for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
            int num_coords = lengths[i + j] - match_length + 1;
            if (max_num_coords < num_coords)
                max_num_coords = num_coords;
        }
        unsigned int block_size = max_num_coords * WARP_SIZE;
        // Query j of the warp starts at lane j; its coords stride by
        // WARP_SIZE within the block.
        for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
            ctx->results.h_coord_tex_array[i + j] = numCoords + j;
        }
        numCoords += block_size;
    }
#else
    // Contiguous layout: derive each query's coord offset from its
    // character address (see match_coord_addrs).
    for (unsigned int i = 0; i < numQueries; ++i) {
        int qryoffset = ctx->queries->h_addrs_tex_array[i];
        coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
    }
    if (numQueries > 0) {
        unsigned int last_qry = numQueries - 1;
        unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
        numCoords = coord_offsets[last_qry] + last_qry_len;
        fprintf(stderr, "Need %d match coords for this result array\n",
                numCoords);
    }
#endif
    *num_coords = numCoords;
    *h_coord_offset_array = coord_offsets;
}
// Build the coord offset table for the current query block, allocate the
// host result array, and (on GPU runs) allocate and zero the device
// MatchCoord buffer. Accumulates timing into t_build_coord_offsets and
// t_match_coords_to_board.
void loadResultBuffer(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    unsigned int numQueries = ctx->queries->count;
    assert(numQueries);
    char* offsettimer = createTimer();
    startTimer(offsettimer);
    buildCoordOffsetArray(ctx,
                          &(ctx->results.h_coord_tex_array),
                          &(ctx->results.numCoords));
    stopTimer(offsettimer);
    ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
    deleteTimer(offsettimer);
    unsigned int numCoords = ctx->results.numCoords;
    // Fix: the byte count is size_t -- the old "%d" was a format
    // mismatch on 64-bit hosts.
    fprintf(stderr, "Allocating result array for %d queries (%zu bytes) ...",
            numQueries, numCoords * sizeof(MatchCoord));
    size_t boardFreeMemory = 0;
    size_t total_mem = 0;
    boardMemory(&boardFreeMemory, &total_mem);
    // Fix: size_t values need %zu, not %u.
    fprintf(stderr, "board free memory: %zu total memory: %zu\n",
            boardFreeMemory, total_mem);
    ctx->results.h_match_coords = (MatchCoord*)calloc(numCoords, sizeof(MatchCoord));
    if (ctx->results.h_match_coords == NULL) {
        trap_dbg();
        exit(EXIT_FAILURE);
    }
    if (!ctx->on_cpu) {
        char* toboardtimer = createTimer();
        startTimer(toboardtimer);
        ctx->results.bytes_on_board = 0;
        // Device coord buffer, zeroed so "no match" entries read as 0.
        CUDA_MALLOC((void**)&ctx->results.d_match_coords,
                    numCoords * sizeof(MatchCoord));
        ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
        CUDA_SAFE_CALL(cudaMemset((void*)ctx->results.d_match_coords, 0,
                                  numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
        // The matching kernel needs the per-query coord offsets on-device.
        CUDA_MALLOC((void**)&ctx->results.d_coord_tex_array,
                    numQueries * sizeof(int));
        ctx->results.bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(cudaMemcpy((void*)ctx->results.d_coord_tex_array,
                                  ctx->results.h_coord_tex_array,
                                  numQueries * sizeof(int),
                                  cudaMemcpyHostToDevice));
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
    }
    else {
        ctx->results.d_match_coords = NULL;
    }
    fprintf(stderr, "done\n");
}
// Free the device-side result buffers allocated by loadResultBuffer.
void unloadResultBuffer(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords));
    ctx->results.d_match_coords = NULL;
    ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
    // BUG FIX: the original freed d_match_coords a second time here (a
    // no-op, since it was already NULL) and leaked d_coord_tex_array,
    // which loadResultBuffer allocates under COALESCED_QUERIES.
    CUDA_SAFE_CALL(cudaFree(ctx->results.d_coord_tex_array));
    ctx->results.d_coord_tex_array = NULL;
#endif
}
// Copy the match coordinates (and, when TREE_ACCESS_HISTOGRAM is enabled,
// the per-texel tree access histograms) from the device back to host
// buffers, accumulating the transfer time into
// t_match_coords_from_board. Does nothing on a CPU run.
void transferResultsFromDevice(MatchContext* ctx)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    if (!ctx->on_cpu) {
        char* fromboardtimer = createTimer();
        startTimer(fromboardtimer);
        CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords,
                                  ctx->results.d_match_coords,
                                  ctx->results.numCoords * sizeof(MatchCoord),
                                  cudaMemcpyDeviceToHost));
#if TREE_ACCESS_HISTOGRAM
        CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_node_hist,
                                  ctx->ref->d_node_hist,
                                  ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
                                  cudaMemcpyDeviceToHost));
        CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_child_hist,
                                  ctx->ref->d_child_hist,
                                  ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
                                  cudaMemcpyDeviceToHost));
        // Grow the accumulated node histogram if this page needs more
        // entries than we have so far, preserving existing counts.
        if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height) {
            int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
            if (ctx->statistics.node_hist_size) {
                memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
                free(ctx->statistics.node_hist); // fix: don't leak the old buffer
            }
            ctx->statistics.node_hist = temp;
            ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
        }
        // Same growth logic for the child histogram.
        // BUG FIX: `temp` was used here without a declaration (it was
        // scoped to the block above), and the size/guard read
        // `statistics.hist_size` instead of `child_hist_size`.
        if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height) {
            int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
            if (ctx->statistics.child_hist_size) {
                memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
                free(ctx->statistics.child_hist);
            }
            ctx->statistics.child_hist = temp;
            ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
        }
        // Accumulate this round's access counts into the running totals.
        for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) {
            ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
        }
        for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) {
            ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
        }
#endif
        stopTimer(fromboardtimer);
        ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
        deleteTimer(fromboardtimer);
    }
}
// Forward declarations for the buffered-output helpers defined elsewhere
// in this file.
int flushOutput();
int addToBuffer(char* string);
// Scratch buffer for formatting numbers into the output stream.
char numbuffer[32];
// Return a pointer to the host-side MatchCoord for character `qrychar` of
// query `qryid`. Under COALESCED_QUERIES a warp's coords are interleaved
// with stride WARP_SIZE (see buildCoordOffsetArray); otherwise each
// query's coords are contiguous.
MatchCoord* coordForQueryChar(MatchContext* ctx,
                              unsigned int qryid,
                              unsigned int qrychar)
{
    MatchResults* results = &(ctx->results);
    MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
    return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
    return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
// Pack as many pending match coordinates as fit in mem_avail into a
// host-side MatchInfo array (one entry per real match), and allocate the
// Alignment buffer the print kernel will fill. Scanning resumes from
// *coord_idx / (*nextqry, *nextqrychar) and the updated positions are
// written back so the caller can loop until every coord is printed.
// Outputs: *matches (calloc'd, caller frees), *alignments (calloc'd,
// caller frees), *match_idx (matches filled), *align_idx (total leaves).
void coordsToPrintBuffers(MatchContext* ctx,
                          ReferencePage* page,
                          MatchInfo** matches,
                          Alignment** alignments,
                          unsigned int mem_avail,
                          unsigned int* coord_idx,
                          unsigned int* match_idx,
                          unsigned int* align_idx,
                          unsigned int* nextqry,
                          unsigned int* nextqrychar)
{
    unsigned int numQueries = ctx->queries->count;
    int match_length = ctx->min_match_length;
    unsigned int cidx = *coord_idx;
    unsigned int midx = 0;
    unsigned int numCoords = ctx->results.numCoords;
    unsigned int numMatches = 0;
    unsigned int numAlignments = 0;
    int DEBUG = 0;
    // Debug dump: print every matching node and its leaf count, then quit.
    if (DEBUG && cidx == 0) {
        for (int j = 0; j < numCoords; ++j) {
            MatchCoord* coord = ctx->results.h_match_coords + j;
            // node.data > 0 marks a real match; FRMASK flags reverse hits.
            if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
                //fprintf(stdout, "node: %d\n",
                //        coord->node);
                fprintf(stdout, "node: %d leaves:%d\n",
                        coord->node.data, lookupNumLeaves(page, coord->node));
            }
        }
        exit(0);
    }
    // How much can we fit into mem_avail?
    // Pass 1: advance cidx while the accumulated matches + alignments
    // still fit in device memory and under the kernel grid-size limit.
    for (int j = cidx; j < numCoords; ++j) {
        MatchCoord* coord = ctx->results.h_match_coords + j;
        int queryAlignments = 0;
        int queryMatches = 0;
        if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
            int numLeaves = lookupNumLeaves(page, coord->node);
            queryAlignments += numLeaves;
            queryMatches++;
        }
        int allMatches = numMatches + queryMatches;
        int allAlignments = numAlignments + queryAlignments;
        int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
        if (neededSize > mem_avail || (allMatches / BLOCKSIZE) >= MAX_GRID_DIMENSION) {
            // adding this match won't fit on the board
            break;
        }
        ++cidx;
        numMatches = allMatches;
        numAlignments = allAlignments;
    }
    MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
    unsigned int alignmentOffset = 0;
    int qry = *nextqry;
    int qrychar = *nextqrychar;
    bool set_full = false;
    // Pass 2: walk queries/characters and fill one MatchInfo per accepted
    // coord, giving each match a slice of the alignment buffer via
    // resultsoffset.
    while (qry < numQueries) {
        // h_lengths_array doesn't count the 'q' at the beginning of each query
        int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
        while (qrychar < qlen) {
            // Stop once we've filled everything pass 1 budgeted for.
            if (midx >= numMatches) {
                set_full = true;
                break;
            }
            MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
            if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
                MatchInfo m;
                m.resultsoffset = alignmentOffset;
                m.qrystartpos = qrychar;
                m.matchnode = coord->node;
                m.edgematch = coord->edge_match_length;
                m.numLeaves = lookupNumLeaves(page, m.matchnode);
                m.queryid = qry;
                alignmentOffset += m.numLeaves;
                M[midx++] = m;
            }
            ++qrychar;
        }
        if (set_full)
            break;
        ++qry;
        qrychar = 0;
    }
    // Write resume positions and outputs back to the caller.
    *coord_idx = cidx;
    *match_idx = midx;
    *align_idx = alignmentOffset;
    *matches = M;
    *nextqry = qry;
    *nextqrychar = qrychar;
    // NOTE(review): the byte count is a size_t printed with %d, and the
    // buffer is sized by alignmentOffset while the message reports
    // numAlignments -- confirm both are intentional.
    fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
    *alignments = (struct Alignment*)calloc(alignmentOffset, sizeof(Alignment));
    //cudaMallocHost((void**)alignments, numAlignments * sizeof(Alignment));
}
// Copy the packed MatchInfo records to the device, launch the gloop-based
// print kernel to expand each match into its alignments, and copy the
// resulting Alignment array back into `alignments`. Device allocations
// and launches are serialized under the host loop's kernel lock.
void runPrintKernel(MatchContext* ctx,
                    ReferencePage* page,
                    MatchInfo* h_matches,
                    unsigned int numMatches,
                    Alignment* alignments,
                    unsigned int numAlignments)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
    MatchInfo* d_matches;
    size_t matchesSize = numMatches * sizeof(MatchInfo);
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        CUDA_MALLOC((void**)&d_matches, matchesSize);
    }
    struct Alignment* d_alignments;
    size_t alignmentSize = numAlignments * sizeof(Alignment);
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        CUDA_MALLOC((void**)&d_alignments, alignmentSize);
        // Zero so unwritten alignment slots read as left_in_ref == 0,
        // which the output loop treats as "no more alignments".
        CUDA_SAFE_CALL(cudaMemset((void*)d_alignments, 0, alignmentSize));
    }
    char* atimer = createTimer();
    startTimer(atimer);
    // Copy matches to card
    fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
    fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
    int DEBUG = 0;
    // Debug dump of every MatchInfo record, then quit.
    if (DEBUG) {
        for (int i = 0; i < numMatches; i++) {
            printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
                   i,
                   h_matches[i].resultsoffset,
                   h_matches[i].queryid,
                   h_matches[i].matchnode.data,
                   h_matches[i].numLeaves,
                   h_matches[i].edgematch,
                   h_matches[i].qrystartpos);
        }
        exit(0);
    }
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        CUDA_SAFE_CALL(cudaMemcpy(d_matches, h_matches, matchesSize, cudaMemcpyHostToDevice));
    }
    stopTimer(atimer);
    float mtime = getTimerValue(atimer);
    // Launch the kernel
    // One thread per match, capped at BLOCKSIZE threads per block.
    int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
    fprintf(stderr, " Calling print kernel... ");
    fprintf(stderr, "printKernel threads:(%d),blocks(%d)\n", dimBlock.x, dimGrid.x);
    {
        gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
        // gloop launch: the device lambda forwards all arguments to
        // printKernel inside the device loop.
        ctx->hostLoop->launch(*ctx->hostContext, dim3(60), dimGrid, dimBlock, [] __device__(
            gloop::DeviceLoop<> * loop,
            MatchInfo * matches,
            int totalMatches,
            Alignment* alignments,
            char* queries,
            const int* queryAddrs,
            const int* queryLengths,
            const int page_begin,
            const int page_end,
            const int page_shadow_left,
            const int page_shadow_right,
            const int min_match_length) {
            printKernel(loop, matches, totalMatches, alignments, queries, queryAddrs, queryLengths, page_begin, page_end, page_shadow_left, page_shadow_right, min_match_length);
        }, d_matches, numMatches, d_alignments, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, page->begin, page->end, page->shadow_left, page->shadow_right, ctx->min_match_length);
        // cudaThreadSynchronize();
        {
            std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
            // Surface any launch failure before reading results back.
            cudaError_t err = cudaGetLastError();
            if (cudaSuccess != err) {
                fprintf(stderr, "Kernel execution failed: %s.\n",
                        cudaGetErrorString(err));
                exit(EXIT_FAILURE);
            }
        }
    }
    startTimer(atimer);
    // Copy the results back to the host
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        CUDA_SAFE_CALL(cudaMemcpy((void*)alignments,
                                  (void*)d_alignments,
                                  alignmentSize,
                                  cudaMemcpyDeviceToHost));
    }
    // cudaThreadSynchronize();
    stopTimer(atimer);
    float atime = getTimerValue(atimer);
    fprintf(stderr, "memcpy time= %f\n", atime + mtime);
    deleteTimer(atimer);
    // Cleanup
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        CUDA_SAFE_CALL(cudaFree(d_alignments));
        CUDA_SAFE_CALL(cudaFree(d_matches));
    }
}
// TODO: need reverse-complement printing support
// CPU fallback for the print kernel: walk the MatchInfo list and print
// each match's alignments directly via printAlignments.
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
                   MatchInfo* h_matches,
                   unsigned int numMatches,
                   Alignment* alignments,
                   unsigned int numAlignments)
{
    unsigned int min_match_length = ctx->min_match_length;
    int* addrs = ctx->queries->h_addrs_tex_array;
    int* lengths = ctx->queries->h_lengths_array;
    char* qrychars = ctx->queries->h_tex_array;
    if (!numMatches)
        return;
    int qry = -1;
    unsigned int qrylen;
    for (int i = 0; i < numMatches; ++i) {
        MatchInfo& match = h_matches[i];
        // Refresh the cached query length only when we move to a new query.
        if (match.queryid != qry) {
            qry = match.queryid;
            qrylen = lengths[qry];
        }
        // Skip reverse-flagged matches (FRMASK); print the rest.
        if (!(match.edgematch & FRMASK)) {
            printAlignments(page,
                            alignments + match.resultsoffset,
#if COALESCED_QUERIES
                            qrychars + sizeof(int) * addrs[qry],
#else
                            qrychars + addrs[qry],
#endif
                            qrylen,
                            match.matchnode,
                            match.qrystartpos,
                            match.edgematch,
                            min_match_length,
                            0,
                            ctx->forwardcoordinates);
        }
    }
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
// Expand every match coordinate in ctx->results into printed alignments,
// in as many rounds as needed to fit within device memory. Each round
// packs a batch of coords (coordsToPrintBuffers), runs the print kernel
// (or its CPU fallback), and streams the alignments to the output buffer.
// Frees the result arrays when done.
void getExactAlignments(MatchContext* ctx, ReferencePage* page, bool on_cpu)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
    assert(!ctx->reverse && !ctx->forwardreverse);
    size_t boardFreeMemory;
    size_t total_mem;
    if (!on_cpu) {
        boardMemory(&boardFreeMemory, &total_mem);
        // NOTE(review): %u with size_t arguments is a format mismatch on
        // 64-bit hosts -- should be %zu.
        fprintf(stderr, "board free memory: %u total memory: %u\n",
                boardFreeMemory, total_mem);
    }
    else {
        // CPU path: pretend we have a fixed 256 MB budget.
        boardFreeMemory = 256 * 1024 * 1024;
        total_mem = boardFreeMemory;
    }
#ifdef __DEVICE_EMULATION__
    boardFreeMemory = 512 * 1024 * 1024;
#endif
    // Leave headroom for allocations made outside this loop.
    boardFreeMemory -= BREATHING_ROOM;
    fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
    int rTotalMatches = 0;
    int rTotalAlignments = 0;
    int totalRounds = 0;
    unsigned int last_coord = ctx->results.numCoords;
    unsigned int next_coord = 0;
    unsigned int nextqry = 0;
    unsigned int nextqrychar = 0;
    int lastqry = -1;
    // Round-robin until every coord has been consumed.
    while (next_coord < last_coord) {
        // see how many queries will fit on the board
        gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
        totalRounds++;
        unsigned int numMatches = 0;
        unsigned int numAlignments = 0;
        MatchInfo* h_matches = NULL;
        Alignment* h_alignments = NULL;
        int coord_left = next_coord;
        char* btimer = createTimer();
        startTimer(btimer);
        coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
                             &next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
        stopTimer(btimer);
        float btime = getTimerValue(btimer);
        ctx->statistics.t_coords_to_buffers += btime;
        fprintf(stderr, "buffer prep time= %f\n", btime);
        deleteTimer(btimer);
        fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
                totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
        if (numMatches == 0)
            continue;
        char buf[256];
        //assert(qryend > qrystart);
        rTotalAlignments += numAlignments;
        rTotalMatches += numMatches;
        // Periodically tear down and rebuild the CUDA context to work
        // around accumulating texture binds.
        if (num_bind_tex_calls > 100) {
            {
                std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
                cudaThreadExit();
            }
            num_bind_tex_calls = 0;
            loadReference(ctx);
            loadQueries(ctx);
        }
        char* ktimer = createTimer();
        startTimer(ktimer);
        if (on_cpu) {
            runPrintOnCPU(ctx, page, h_matches, numMatches,
                          h_alignments, numAlignments);
        }
        else {
            runPrintKernel(ctx, page, h_matches, numMatches,
                           h_alignments, numAlignments);
        }
        stopTimer(ktimer);
        float ktime = getTimerValue(ktimer);
        ctx->statistics.t_print_kernel += ktime;
        fprintf(stderr, "print kernel time= %f\n", ktime);
        deleteTimer(ktimer);
        // char* stimer = createTimer();
        // startTimer(stimer);
        // mapQueriesEndToEnd(ctx,
        //                    page,
        //                    h_matches,
        //                    numMatches,
        //                    h_alignments,
        //                    numAlignments);
        //
        // stopTimer(stimer);
        //
        // float stime = getTimerValue(stimer);
        // fprintf(stderr, "postprocess time= %f\n", stime);
        // deleteTimer(stimer);
        //flushOutput();
        //Process the alignments
        // Stream each match's alignments to the output buffer, emitting a
        // "> name" header whenever the query changes.
        {
            gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
            char* otimer = createTimer();
            startTimer(otimer);
            for (int m = 0; m < numMatches; m++) {
                int base = h_matches[m].resultsoffset;
                for (int i = 0; i < h_matches[m].numLeaves; i++) {
                    // See if there are any more left maximal alignments for this match
                    if (h_alignments[base + i].left_in_ref == 0) {
                        break;
                    }
                    if (h_matches[m].queryid != lastqry) {
                        lastqry = h_matches[m].queryid;
                        addToBuffer("> ");
                        addToBuffer(*(ctx->queries->h_names + lastqry));
                        addToBuffer("\n");
                    }
                    sprintf(buf, "%d\t%d\t%d\n",
                            h_alignments[base + i].left_in_ref,
                            h_matches[m].qrystartpos + 1,
                            h_alignments[base + i].matchlen);
                    addToBuffer(buf);
                    // addMatchToBuffer(h_alignments[base+i].left_in_ref,
                    //                  h_matches[m].qrystartpos + 1,
                    //                  h_alignments[base+i].matchlen);
                }
            }
            flushOutput();
            stopTimer(otimer);
            ctx->statistics.t_results_to_disk += getTimerValue(otimer);
            deleteTimer(otimer);
            free(h_matches);
            free(h_alignments);
            //cudaFreeHost((void*)h_alignments);
        }
    }
    // All coords consumed: release the per-block result arrays.
    free(ctx->results.h_coord_tex_array);
    free(ctx->results.h_match_coords);
    ctx->results.h_coord_tex_array = NULL;
    ctx->results.h_match_coords = NULL;
    fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
            rTotalMatches, rTotalAlignments, totalRounds);
}
// Read the next block of queries from disk, sized to fit in
// device_mem_avail, and install the resulting host arrays into
// ctx->queries. Returns the number of queries read (0 when the input is
// exhausted, which terminates the caller's loop).
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
    QuerySet* queries = ctx->queries;
    char* queryTex = NULL;
    int* queryAddrs = NULL;
    int* queryLengths = NULL;
    unsigned int numQueries;
    unsigned int num_match_coords;
    size_t queryLen;
    char** names;
    fprintf(stderr, "Loading query block... ");
    char* queryreadtimer = createTimer();
    startTimer(queryreadtimer);
    // Reads as many queries as fit; also computes the coord count the
    // result buffer will need. The last flag requests reverse-complement
    // sequence generation when RC matching is enabled.
    getQueriesTexture(queries->qfile,
                      &queryTex,
                      &queryLen,
                      &queryAddrs,
                      &names,
                      &queryLengths,
                      &numQueries,
                      &num_match_coords,
                      device_mem_avail,
                      ctx->min_match_length,
                      ctx->reverse || ctx->forwardreverse);
    stopTimer(queryreadtimer);
    ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
    deleteTimer(queryreadtimer);
    // Hand ownership of the freshly read arrays to the QuerySet.
    queries->h_tex_array = queryTex;
    queries->count = numQueries;
    queries->h_addrs_tex_array = queryAddrs;
    queries->texlen = queryLen;
    queries->h_names = names;
    queries->h_lengths_array = queryLengths;
    ctx->results.numCoords = num_match_coords;
    fprintf(stderr, "done.\n");
    return numQueries;
}
// Free every host-side buffer of the current query block and reset the
// QuerySet to an empty state so getQueryBlock can refill it.
void destroyQueryBlock(QuerySet* queries)
{
    free(queries->h_tex_array);
    queries->h_tex_array = NULL;
    for (int i = 0; i < queries->count; ++i)
        free(queries->h_names[i]);
    free(queries->h_names);
    // Consistency fix: NULL the name table like every other pointer in
    // this function so a stale pointer cannot be dereferenced later.
    queries->h_names = NULL;
    queries->count = 0;
    queries->texlen = 0;
    free(queries->h_addrs_tex_array);
    queries->h_addrs_tex_array = NULL;
    free(queries->h_lengths_array);
    queries->h_lengths_array = NULL;
}
// Zero every accumulated timing/statistics field (and release any
// histograms) so a fresh run starts from a clean slate.
void resetStats(Statistics* stats)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
    // All accumulators are cleared to 0.0 in one chained assignment.
    stats->t_end_to_end
        = stats->t_match_kernel
        = stats->t_print_kernel
        = stats->t_queries_to_board
        = stats->t_match_coords_to_board
        = stats->t_match_coords_from_board
        = stats->t_tree_to_board
        = stats->t_ref_str_to_board
        = stats->t_queries_from_disk
        = stats->t_ref_from_disk
        = stats->t_results_to_disk
        = stats->t_tree_construction
        = stats->t_tree_reorder
        = stats->t_tree_flatten
        = stats->t_reorder_ref_str
        = stats->t_build_coord_offsets
        = stats->t_coords_to_buffers
        = stats->bp_avg_query_length
        = 0.0;
#if TREE_ACCESS_HISTOGRAM
    // Drop histograms left over from a previous run.
    if (stats->node_hist_size) {
        free(stats->node_hist);
        stats->node_hist = NULL;
        stats->node_hist_size = 0;
    }
    if (stats->child_hist_size) {
        free(stats->child_hist);
        stats->child_hist = NULL;
        stats->child_hist_size = 0;
    }
#endif
}
// Write the collected statistics as a one-row CSV to stats_filename, and
// (when TREE_ACCESS_HISTOGRAM is enabled) dump the node/child access
// histograms and print tree-top hit ratios. NULL filenames skip the
// corresponding output.
void writeStatisticsFile(Statistics* stats,
                         char* stats_filename,
                         char* node_hist_filename = NULL,
                         char* child_hist_filename = NULL)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
    if (stats_filename) {
        FILE* f = fopen(stats_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
        }
        else {
            // Header row: build configuration flags, then timing columns.
            fprintf(f, "Q");
            fprintf(f, ",R");
            fprintf(f, ",T");
            fprintf(f, ",m");
            fprintf(f, ",r");
            fprintf(f, ",t");
            fprintf(f, ",n");
            fprintf(f, ",Total");
            fprintf(f, ",Match kernel");
            fprintf(f, ",Print Kernel");
            fprintf(f, ",Queries to board");
            fprintf(f, ",Match coords to board");
            fprintf(f, ",Match coords from board");
            fprintf(f, ",Tree to board");
            fprintf(f, ",Ref str to board");
            fprintf(f, ",Queries from disk");
            fprintf(f, ",Ref from disk");
            fprintf(f, ",Output to disk");
            fprintf(f, ",Tree construction");
            fprintf(f, ",Tree reorder");
            fprintf(f, ",Tree flatten");
            fprintf(f, ",Ref reorder");
            fprintf(f, ",Build coord table");
            fprintf(f, ",Coords to buffers");
            fprintf(f, ",Avg qry length");
            fprintf(f, "\n");
            // Data row, in the same column order as the header.
            fprintf(f, "%d", QRYTEX);
            fprintf(f, ",%d", REFTEX);
            fprintf(f, ",%d", TREETEX);
            fprintf(f, ",%d", MERGETEX);
            fprintf(f, ",%d", REORDER_REF);
            fprintf(f, ",%d", REORDER_TREE);
            fprintf(f, ",%d", RENUMBER_TREE);
            fprintf(f, ",%f", stats->t_end_to_end);
            fprintf(f, ",%f", stats->t_match_kernel);
            fprintf(f, ",%f", stats->t_print_kernel);
            fprintf(f, ",%f", stats->t_queries_to_board);
            fprintf(f, ",%f", stats->t_match_coords_to_board);
            fprintf(f, ",%f", stats->t_match_coords_from_board);
            fprintf(f, ",%f", stats->t_tree_to_board);
            fprintf(f, ",%f", stats->t_ref_str_to_board);
            fprintf(f, ",%f", stats->t_queries_from_disk);
            fprintf(f, ",%f", stats->t_ref_from_disk);
            fprintf(f, ",%f", stats->t_results_to_disk);
            fprintf(f, ",%f", stats->t_tree_construction);
            fprintf(f, ",%f", stats->t_tree_reorder);
            fprintf(f, ",%f", stats->t_tree_flatten);
            fprintf(f, ",%f", stats->t_reorder_ref_str);
            fprintf(f, ",%f", stats->t_build_coord_offsets);
            fprintf(f, ",%f", stats->t_coords_to_buffers);
            fprintf(f, ",%f", stats->bp_avg_query_length);
            fprintf(f, "\n");
            fclose(f);
        }
    }
#if TREE_ACCESS_HISTOGRAM
    // BUG FIX: this section referenced ctx->statistics, but `ctx` is not
    // a parameter of this function -- it could never compile with
    // TREE_ACCESS_HISTOGRAM enabled. The histograms live on `stats`.
    if (node_hist_filename) {
        FILE* f = fopen(node_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
        }
        else {
            for (unsigned int i = 0; i < stats->node_hist_size; ++i)
                fprintf(f, "%d\t%d\n", i, stats->node_hist[i]);
        }
    }
    if (child_hist_filename) {
        FILE* f = fopen(child_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
        }
        else {
            for (unsigned int i = 0; i < stats->child_hist_size; ++i)
                fprintf(f, "%d\t%d\n", i, stats->child_hist[i]);
        }
    }
    // Report what fraction of accesses hit the first 256 texels (the
    // "tree top" kept in constant memory by the two-level tree caches).
    float total_node_hits = 0;
    float tree_top_node_hits = 0;
    float total_child_hits = 0;
    float tree_top_child_hits = 0;
    for (unsigned int i = 0; i < stats->node_hist_size; ++i) {
        total_node_hits += stats->node_hist[i];
        if (i < 256) {
            tree_top_node_hits += stats->node_hist[i];
        }
    }
    for (unsigned int i = 0; i < stats->child_hist_size; ++i) {
        total_child_hits += stats->child_hist[i];
        if (i < 256) {
            tree_top_child_hits += stats->child_hist[i];
        }
    }
    fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n", (int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits / total_node_hits);
    fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n", (int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits / total_child_hits);
#endif
}
// Run the CPU reference matcher over the current query block.
// doRC selects matching the reverse complement of the queries (REVERSE)
// instead of the forward direction (FORWARD).
void matchOnCPU(MatchContext* ctx, bool doRC)
{
    //TODO: CPU is matching is disabled.
    // The two branches differed only in the direction flag, so a single
    // call with a conditional final argument is equivalent.
    computeGold(&ctx->results,
                ctx->ref->str,
                ctx->queries->h_tex_array,
                ctx->queries->h_addrs_tex_array,
                ctx->queries->h_lengths_array,
                (PixelOfNode*)(ctx->ref->h_node_tex_array),
                (PixelOfChildren*)(ctx->ref->h_children_tex_array),
                ctx->queries->count,
                ctx->min_match_length,
                doRC ? REVERSE : FORWARD);
}
// Launch the GPU matching kernel over the current query block: one
// thread per query, blocks of up to BLOCKSIZE threads. doRC selects the
// (disabled) reverse-complement kernel. Exits the process if the launch
// reports an error.
void matchOnGPU(MatchContext* ctx, bool doRC)
{
    int numQueries = ctx->queries->count;
    int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
    // Match the reverse complement of the queries to the ref
    if (doRC) {
        //TODO: GPU RC is disabled
        {
            std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
            mummergpuRCKernel<<<dimGrid, dimBlock, 0>>>(ctx->results.d_match_coords,
                                                        ctx->queries->d_tex_array,
                                                        ctx->queries->d_addrs_tex_array,
                                                        ctx->queries->d_lengths_array,
                                                        numQueries,
                                                        ctx->min_match_length);
        }
    } else {
        fprintf(stderr, "mummergpuKernel threads:(%d),blocks(%d)\n", dimBlock.x, dimGrid.x);
        // gloop launch: the device lambda forwards all arguments to
        // mummergpuKernel inside the device loop.
        ctx->hostLoop->launch(*ctx->hostContext, /* FIXME */ dim3(30), dimGrid, dimBlock, [] __device__(
            gloop::DeviceLoop<> * loop,
            void* match_coords,
            char* queries,
            char* ref,
            const int* queryAddrs,
            const int* queryLengths,
            const int numQueries,
            const int min_match_len) {
            mummergpuKernel(
                loop,
                match_coords,
                queries,
                ref,
                queryAddrs,
                queryLengths,
                numQueries,
                min_match_len);
        }, ctx->results.d_match_coords, ctx->queries->d_tex_array, (char*)ctx->ref->d_ref_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length);
    }
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        // check if kernel execution generated an error
        cudaError_t err = cudaGetLastError();
        if (cudaSuccess != err) {
            fprintf(stderr, "Kernel execution failed: %s.\n",
                    cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }
}
// Retrieve match results for the given reference page. Currently just
// copies the coords back from the device; page_num is unused.
void getMatchResults(MatchContext* ctx,
                     unsigned int page_num)
{
    transferResultsFromDevice(ctx);
}
// Run the match kernel (GPU or CPU) for the already-loaded query block
// against the already-loaded reference page, record the kernel time in
// t_match_kernel, pull the results back to the host, and release the
// device result buffer.
void matchQueryBlockToReferencePage(MatchContext* ctx,
                                    ReferencePage* page,
                                    bool reverse_complement)
{
    gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
    char* ktimer = createTimer();
    // Fix: bytes_on_board fields are printed with %ld everywhere else in
    // this file; the old %d was a format mismatch.
    fprintf(stderr, "Memory footprint is:\n\tqueries: %ld\n\tref: %ld\n\tresults: %ld\n",
            ctx->queries->bytes_on_board,
            ctx->ref->bytes_on_board,
            ctx->results.bytes_on_board);
    startTimer(ktimer);
    if (ctx->on_cpu) {
        matchOnCPU(ctx, reverse_complement);
    }
    else {
        matchOnGPU(ctx, reverse_complement);
        // cudaThreadSynchronize();
    }
    stopTimer(ktimer);
    float ktime = getTimerValue(ktimer);
    ctx->statistics.t_match_kernel += ktime;
    fprintf(stderr, "match kernel time= %f\n", ktime);
    deleteTimer(ktimer);
    getMatchResults(ctx, page->id);
    unloadResultBuffer(ctx);
}
// Matches the currently loaded query subset against one reference page:
// load queries and a result buffer, run the match, then convert matches to
// alignments and flush output. Always returns 0.
int matchSubset(MatchContext* ctx,
ReferencePage* page)
{
loadQueries(ctx);
fprintf(stderr,
"Matching queries %s - %s against ref coords %d - %d\n",
ctx->queries->h_names[0],
ctx->queries->h_names[ctx->queries->count - 1],
page->begin,
page->end);
loadResultBuffer(ctx);
// TODO: renable RC support by calling this twice /w reverse/fwdreverse
// idiom.
matchQueryBlockToReferencePage(ctx, page, false);
// When the print kernel is enabled and we are on the GPU, alignments are
// emitted without the RC pass; otherwise take the fallback path.
if (USE_PRINT_KERNEL && !ctx->on_cpu) {
getExactAlignments(ctx, page, false);
}
else {
getExactAlignments(ctx, page, true);
}
flushOutput();
unloadQueries(ctx);
return 0;
}
// Returns the number of bytes of free device memory (or a fixed 8800 GTX-like
// figure when running on the CPU). Performs a throwaway cudaMalloc first to
// initialize the CUDA context, since memory queries report zero before the
// first allocation.
// NOTE(review): the int return truncates for GPUs with >2 GB free; the
// signature is kept for existing callers.
int getFreeDeviceMemory(MatchContext* ctx, bool on_cpu)
{
    size_t free_mem = 0;
    size_t total_mem = 0;
    // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
    // will return zeroes until we do a malloc.
    int* p = NULL;
    {
        std::lock_guard<gloop::HostLoop::KernelLock> lock(ctx->hostLoop->kernelLock());
        gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
        CUDA_SAFE_CALL(cudaMalloc((void**)&p, sizeof(int)));
        CUDA_SAFE_CALL(cudaFree(p));
    }
    if (!on_cpu) {
        boardMemory(&free_mem, &total_mem);
        // fix: free_mem/total_mem are size_t; %u misreads them on LP64 —
        // use %zu, the correct size_t conversion.
        fprintf(stderr, "board free memory: %zu total memory: %zu\n",
                free_mem, total_mem);
    }
    else {
        total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
    }
    return free_mem;
}
// Streams every query block (sized to fit remaining device memory) against a
// single reference page, then unloads the reference and rewinds the query
// file for the next page. Always returns 0.
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
fprintf(stderr, "Beginning reference page %p\n", page);
int free_mem = getFreeDeviceMemory(ctx, ctx->on_cpu);
// Budget for query blocks: free memory minus the page itself and headroom.
int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
ctx->ref = &(page->ref);
loadReference(ctx);
while (getQueryBlock(ctx, available_mem)) {
matchSubset(ctx, page);
ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
if (num_bind_tex_calls > 100) {
// NOTE(review): std::abort() terminates the process here, making the
// device reset / reload below dead code — looks like a debugging
// tripwire left in; confirm whether the reset path should be restored.
std::abort();
cudaThreadExit();
num_bind_tex_calls = 0;
loadReference(ctx);
}
}
unloadReferenceString(ctx->ref);
unloadReferenceTree(ctx);
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
// Rewind so the next reference page re-reads all queries from the start.
lseek(ctx->queries->qfile, 0, SEEK_SET);
}
return 0;
}
// Splits the full reference into fixed-size pages with MAX_QUERY_LEN+1 bases
// of overlap between neighbors so matches spanning a page boundary are not
// lost. Outputs the page array (caller frees) and the page count.
void initReferencePages(MatchContext* ctx, int* num_pages, ReferencePage** pages_out)
{
    unsigned int bases_in_ref = ctx->full_ref_len - 3;
    unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
    unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
    // fix: all three arguments are unsigned int — %u, not %d.
    fprintf(stderr, "Stream will use %u pages for %u bases, page size = %u\n",
        num_reference_pages, bases_in_ref, page_size);
    unsigned int page_overlap = MAX_QUERY_LEN + 1;
    ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages,
        sizeof(ReferencePage));
    // First page: starts at 1 (skipping the sentinel) and extends by half the
    // overlap so the second page's left shadow is covered.
    pages[0].begin = 1;
    pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
    pages[0].shadow_left = -1;
    pages[0].id = 0;
    // Interior pages: each begins page_overlap bases before its predecessor
    // ends; shadow fields record the overlapped regions on both sides.
    for (int i = 1; i < num_reference_pages - 1; ++i) {
        pages[i].begin = pages[i - 1].end - page_overlap;
        pages[i].end = pages[i].begin + page_size + page_overlap;
        pages[i - 1].shadow_right = pages[i].begin;
        pages[i].shadow_left = pages[i - 1].end;
        pages[i].id = i;
    }
    // Last page is clamped to the true end of the reference.
    if (num_reference_pages > 1) {
        int last_page = num_reference_pages - 1;
        pages[last_page].begin = pages[last_page - 1].end - page_overlap;
        pages[last_page].end = ctx->full_ref_len - 1;
        pages[last_page - 1].shadow_right = pages[last_page].begin;
        pages[last_page].shadow_right = -1;
        pages[last_page].shadow_left = pages[last_page - 1].end;
        pages[last_page].id = last_page;
    }
    *pages_out = pages;
    *num_pages = num_reference_pages;
}
// Top-level streaming loop: builds the suffix-tree texture for each reference
// page in turn (first, interior, last), matches all queries against it, and
// tears it down before moving on. Only the first page gets the optional
// dot/tex debug filenames. Always returns 0.
int streamReferenceAgainstQueries(MatchContext* ctx)
{
int num_reference_pages = 0;
ReferencePage* pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
// First page — may emit dot/tex debug output.
buildReferenceTexture(&(pages[0].ref),
ctx->full_ref,
pages[0].begin,
pages[0].end,
ctx->min_match_length,
ctx->dotfilename,
ctx->texfilename,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
// Interior pages.
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref),
ctx->full_ref,
pages[i].begin,
pages[i].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
// Last page (only exists when there is more than one page).
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
// Public entry point: validates texel-struct layout assumptions, resets and
// times the end-to-end run, streams all reference pages against all queries,
// and writes the statistics file. Returns the streaming result code.
extern "C" int matchQueries(MatchContext* ctx)
{
// Texture fetches read these structs as uint4 texels; layout must match.
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char* ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out");
return ret;
}
|
cc40d5a6a7035fd5b58eaa32fad03417b9ef7c01.hip | // !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <kyle.r.lutz@gmail.com>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://kylelutz.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/reverse.h>
#include "perf.hpp"
// Benchmark thrust::reverse on the device: for each trial, refresh the
// device copy, time the reverse (including a device sync), and report the
// fastest trial in milliseconds.
int main(int argc, char *argv[])
{
    perf_parse_args(argc, argv);
    std::cout << "size: " << PERF_N << std::endl;

    thrust::host_vector<int> host_data = generate_random_vector<int>(PERF_N);

    // device-side working copy, refreshed before every trial
    thrust::device_vector<int> device_data;

    perf_timer timer;
    for (size_t run = 0; run < PERF_TRIALS; ++run) {
        device_data = host_data;
        timer.start();
        thrust::reverse(device_data.begin(), device_data.end());
        hipDeviceSynchronize();   // reverse is async; include completion in the timing
        timer.stop();
    }
    std::cout << "time: " << timer.min_time() / 1e6 << " ms" << std::endl;

    // bring the reversed data back to the host
    thrust::copy(device_data.begin(), device_data.end(), host_data.begin());
    return 0;
}
| cc40d5a6a7035fd5b58eaa32fad03417b9ef7c01.cu | //---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <kyle.r.lutz@gmail.com>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://kylelutz.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/reverse.h>
#include "perf.hpp"
// Benchmark thrust::reverse on the device. Each trial refreshes the device
// copy from the host, times the reverse (plus a sync, since the call is
// asynchronous), and the minimum trial time is reported in ms.
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
d_vec = h_vec;
t.start();
thrust::reverse(d_vec.begin(), d_vec.end());
cudaDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
|
f599d952897e699a7302d39e0b6973e0d1e2f606.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//make sure numbers above match the matlab script
__constant__ int num_row;
__constant__ int num_col;
// Element-wise integer matrix addition, column-major layout: x indexes rows
// (0..num_row), y indexes columns (0..num_col), both taken from __constant__s.
__global__ void matrix_addition (int* a, int* b, int* c)//each block calculates a row
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    //c[x * num_col + y] = a[x * num_col + y] + b[x * num_col + y];
    // fix: guard against out-of-range threads — the launch grid may
    // over-cover the matrix when block dims don't divide it exactly.
    if (x < num_row && y < num_col)
        c[y * num_row + x] = a[y * num_row + x] + b[y * num_row + x];
}
| f599d952897e699a7302d39e0b6973e0d1e2f606.cu | //make sure numbers above match the matlab script
__constant__ int num_row;
__constant__ int num_col;
// Element-wise integer matrix addition, column-major layout: x indexes rows
// (0..num_row), y indexes columns (0..num_col), both taken from __constant__s.
__global__ void matrix_addition (int* a, int* b, int* c)//each block calculates a row
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    //c[x * num_col + y] = a[x * num_col + y] + b[x * num_col + y];
    // fix: guard against out-of-range threads — the launch grid may
    // over-cover the matrix when block dims don't divide it exactly.
    if (x < num_row && y < num_col)
        c[y * num_row + x] = a[y * num_row + x] + b[y * num_row + x];
}
|
35dcb0b105b9db3549807b37df8fb2ba3f51dd5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <float.h>
#include "cuda_auxiliary.h"
// Element-wise matrix addition on the device, row-major ny-by-nx layout.
// One thread per element; expects a 2D launch.
__global__ void gpu_mat_add(double *A, double *B, double *C, const int nx, const int ny)
{
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // fix: bounds guard — main() builds the grid with ceil-division over
    // user-supplied block dims, so extra threads may exist past the edges
    // and previously wrote out of bounds.
    if (ix < (unsigned int)nx && iy < (unsigned int)ny) {
        unsigned int idx = iy * nx + ix;
        C[idx] = A[idx] + B[idx];
    }
}
// Host reference implementation: C = A + B for an ny-by-nx matrix stored
// row-major. Uses explicit index arithmetic instead of bumping the pointers
// after each row.
void cpu_mat_add(double *A, double *B, double *C, const int nx, const int ny)
{
    for (int row = 0; row < ny; ++row) {
        const int base = row * nx;
        for (int col = 0; col < nx; ++col)
            C[base + col] = A[base + col] + B[base + col];
    }
}
// Compares CPU and GPU result arrays element-wise and reports the first
// mismatch (or success) on stdout.
void check_results(double *cpu_array, double *gpu_array, int size)
{
    for (int ix = 0; ix < size; ++ix) {
        // fix: the original called abs() on a double difference, which can
        // resolve to the integer overload and truncate — hiding every
        // mismatch smaller than 1.0. Compute the magnitude explicitly.
        double diff = cpu_array[ix] - gpu_array[ix];
        if (diff < 0.0)
            diff = -diff;
        if (diff >= DBL_EPSILON) {
            printf("CPU and GPU results differ at element %d\n", ix);
            printf("CPU value: %lg\n", cpu_array[ix]);
            printf("GPU value: %lg\n", gpu_array[ix]);
            return;
        }
    }
    printf("GPU result is correct\n");
}
// Driver: adds two 8192x8192 double matrices on both CPU and GPU, verifies
// the GPU result against the CPU reference, and reports both timings.
// argv[1]/argv[2] supply the 2D block dimensions.
int main(int argc, char **argv)
{
double *hst_A = NULL;
double *hst_B = NULL;
double *hst_C = NULL;
double *dev_A = NULL;
double *dev_B = NULL;
double *dev_C = NULL;
int nx = 1 << 13;
int ny = 1 << 13;
double cpu_time = 0.0;
double gpu_time = 0.0;
dim3 block_size;
dim3 grid_size;
if (argc != 3) {
fprintf(stderr, "usage: %s dimx dimy\n", argv[0]);
exit(EXIT_FAILURE);
}
host_alloc(hst_A, double, nx * ny);
host_alloc(hst_B, double, nx * ny);
host_alloc(hst_C, double, nx * ny);
cuda_exec(hipMalloc(&dev_A, nx * ny * sizeof(double)));
cuda_exec(hipMalloc(&dev_B, nx * ny * sizeof(double)));
cuda_exec(hipMalloc(&dev_C, nx * ny * sizeof(double)));
init_matrix(hst_A, nx, ny, nx);
init_matrix(hst_B, nx, ny, nx);
cuda_exec(hipMemcpy(dev_A, hst_A, nx * ny * sizeof(double), hipMemcpyHostToDevice));
cuda_exec(hipMemcpy(dev_B, hst_B, nx * ny * sizeof(double), hipMemcpyHostToDevice));
// NOTE(review): atoi returns 0 on non-numeric input, which would make the
// grid-size division below divide by zero — inputs are unvalidated.
block_size.x = atoi(argv[1]);
block_size.y = atoi(argv[2]);
grid_size.x = min((nx + block_size.x - 1) / block_size.x, 65535);
grid_size.y = min((ny + block_size.y - 1) / block_size.y, 65535);
gpu_time -= timer();
hipLaunchKernelGGL(( gpu_mat_add), dim3(grid_size), dim3(block_size), 0, 0, dev_A, dev_B, dev_C, nx, ny);
cuda_exec(hipDeviceSynchronize());
gpu_time += timer();
cpu_time -= timer();
cpu_mat_add(hst_A, hst_B, hst_C, nx, ny);
cpu_time += timer();
// hst_B is reused as the landing buffer for the GPU result before compare.
cuda_exec(hipMemcpy(hst_B, dev_C, nx * ny * sizeof(double), hipMemcpyDeviceToHost));
check_results(hst_C, hst_B, nx * ny);
printf("Execution configuration: grid (%d, %d), block (%d, %d)\n", grid_size.x, grid_size.y, block_size.x, block_size.y);
printf("CPU time: %.3lgms\n", 1000 * cpu_time);
printf("GPU time: %.3lgms\n", 1000 * gpu_time);
free(hst_A);
free(hst_B);
free(hst_C);
hipFree(dev_A);
hipFree(dev_B);
hipFree(dev_C);
return 0;
}
| 35dcb0b105b9db3549807b37df8fb2ba3f51dd5e.cu | #include <stdio.h>
#include <float.h>
#include "cuda_auxiliary.h"
// Element-wise matrix addition on the device, row-major ny-by-nx layout.
// One thread per element; expects a 2D launch.
__global__ void gpu_mat_add(double *A, double *B, double *C, const int nx, const int ny)
{
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // fix: bounds guard — main() builds the grid with ceil-division over
    // user-supplied block dims, so extra threads may exist past the edges
    // and previously wrote out of bounds.
    if (ix < (unsigned int)nx && iy < (unsigned int)ny) {
        unsigned int idx = iy * nx + ix;
        C[idx] = A[idx] + B[idx];
    }
}
// Host reference implementation: C = A + B for an ny-by-nx matrix stored
// row-major. Uses explicit index arithmetic instead of bumping the pointers
// after each row.
void cpu_mat_add(double *A, double *B, double *C, const int nx, const int ny)
{
    for (int row = 0; row < ny; ++row) {
        const int base = row * nx;
        for (int col = 0; col < nx; ++col)
            C[base + col] = A[base + col] + B[base + col];
    }
}
// Compares CPU and GPU result arrays element-wise and reports the first
// mismatch (or success) on stdout.
void check_results(double *cpu_array, double *gpu_array, int size)
{
    for (int ix = 0; ix < size; ++ix) {
        // fix: the original called abs() on a double difference, which can
        // resolve to the integer overload and truncate — hiding every
        // mismatch smaller than 1.0. Compute the magnitude explicitly.
        double diff = cpu_array[ix] - gpu_array[ix];
        if (diff < 0.0)
            diff = -diff;
        if (diff >= DBL_EPSILON) {
            printf("CPU and GPU results differ at element %d\n", ix);
            printf("CPU value: %lg\n", cpu_array[ix]);
            printf("GPU value: %lg\n", gpu_array[ix]);
            return;
        }
    }
    printf("GPU result is correct\n");
}
// Driver: adds two 8192x8192 double matrices on both CPU and GPU, verifies
// the GPU result against the CPU reference, and reports both timings.
// argv[1]/argv[2] supply the 2D block dimensions.
int main(int argc, char **argv)
{
double *hst_A = NULL;
double *hst_B = NULL;
double *hst_C = NULL;
double *dev_A = NULL;
double *dev_B = NULL;
double *dev_C = NULL;
int nx = 1 << 13;
int ny = 1 << 13;
double cpu_time = 0.0;
double gpu_time = 0.0;
dim3 block_size;
dim3 grid_size;
if (argc != 3) {
fprintf(stderr, "usage: %s dimx dimy\n", argv[0]);
exit(EXIT_FAILURE);
}
host_alloc(hst_A, double, nx * ny);
host_alloc(hst_B, double, nx * ny);
host_alloc(hst_C, double, nx * ny);
cuda_exec(cudaMalloc(&dev_A, nx * ny * sizeof(double)));
cuda_exec(cudaMalloc(&dev_B, nx * ny * sizeof(double)));
cuda_exec(cudaMalloc(&dev_C, nx * ny * sizeof(double)));
init_matrix(hst_A, nx, ny, nx);
init_matrix(hst_B, nx, ny, nx);
cuda_exec(cudaMemcpy(dev_A, hst_A, nx * ny * sizeof(double), cudaMemcpyHostToDevice));
cuda_exec(cudaMemcpy(dev_B, hst_B, nx * ny * sizeof(double), cudaMemcpyHostToDevice));
// NOTE(review): atoi returns 0 on non-numeric input, which would make the
// grid-size division below divide by zero — inputs are unvalidated.
block_size.x = atoi(argv[1]);
block_size.y = atoi(argv[2]);
grid_size.x = min((nx + block_size.x - 1) / block_size.x, 65535);
grid_size.y = min((ny + block_size.y - 1) / block_size.y, 65535);
gpu_time -= timer();
gpu_mat_add<<<grid_size, block_size>>>(dev_A, dev_B, dev_C, nx, ny);
cuda_exec(cudaDeviceSynchronize());
gpu_time += timer();
cpu_time -= timer();
cpu_mat_add(hst_A, hst_B, hst_C, nx, ny);
cpu_time += timer();
// hst_B is reused as the landing buffer for the GPU result before compare.
cuda_exec(cudaMemcpy(hst_B, dev_C, nx * ny * sizeof(double), cudaMemcpyDeviceToHost));
check_results(hst_C, hst_B, nx * ny);
printf("Execution configuration: grid (%d, %d), block (%d, %d)\n", grid_size.x, grid_size.y, block_size.x, block_size.y);
printf("CPU time: %.3lgms\n", 1000 * cpu_time);
printf("GPU time: %.3lgms\n", 1000 * gpu_time);
free(hst_A);
free(hst_B);
free(hst_C);
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
return 0;
}
|
8834bdd2537cb4f65ef7da60b625cc220a76393b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zmgeellmv.cu normal z -> c, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// ELLPACK multi-vector SpMV: one thread per matrix row; each thread keeps
// num_vecs partial dot products in dynamic shared memory (indexed
// threadIdx.x + i*blockDim.x — the wrapper sizes it to num_vecs*blockDim.x
// elements) and writes dy = alpha*A*dx + beta*dy for every vector.
__global__ void
cmgeellmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ magmaFloatComplex dot[];
if(row < num_rows ){
// Zero this thread's accumulator slot for each right-hand-side vector.
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
// Explicit zero padding is skipped (ELLPACK pads short rows).
if( val != 0){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
}
// NOTE(review): vectors are laid out with stride num_cols in dx/dy —
// i.e. column-major with leading dimension num_cols; confirm callers.
for( int i=0; i<num_vecs; i++)
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i * num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs mama_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
// Host wrapper: launches cmgeellmv_kernel with one thread per row
// (BLOCK_SIZE threads/block) and dynamic shared memory sized for num_vecs
// accumulators per thread. Always returns MAGMA_SUCCESS.
extern "C" magma_int_t
magma_cmgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
hipLaunchKernelGGL(( cmgeellmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
| 8834bdd2537cb4f65ef7da60b625cc220a76393b.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zmgeellmv.cu normal z -> c, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// ELLPACK multi-vector SpMV: one thread per matrix row; each thread keeps
// num_vecs partial dot products in dynamic shared memory (indexed
// threadIdx.x + i*blockDim.x — the wrapper sizes it to num_vecs*blockDim.x
// elements) and writes dy = alpha*A*dx + beta*dy for every vector.
__global__ void
cmgeellmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ magmaFloatComplex dot[];
if(row < num_rows ){
// Zero this thread's accumulator slot for each right-hand-side vector.
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
// Explicit zero padding is skipped (ELLPACK pads short rows).
if( val != 0){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
}
// NOTE(review): vectors are laid out with stride num_cols in dx/dy —
// i.e. column-major with leading dimension num_cols; confirm callers.
for( int i=0; i<num_vecs; i++)
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i * num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs mama_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
// Host wrapper: launches cmgeellmv_kernel with one thread per row
// (BLOCK_SIZE threads/block) and dynamic shared memory sized for num_vecs
// accumulators per thread. Always returns MAGMA_SUCCESS.
extern "C" magma_int_t
magma_cmgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
cmgeellmv_kernel<<< grid, threads, MEM_SIZE, queue >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
|
1ca8c919eb22db9f9f5b62948871ebbc8b141e0f.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/*
This example demonstrates how to use the Cuda OpenGL bindings to
dynamically modify a vertex buffer using a Cuda kernel.
The steps are:
1. Create an empty vertex buffer object (VBO)
2. Register the VBO with Cuda
3. Map the VBO for writing from Cuda
4. Run Cuda kernel to modify the vertex positions
5. Unmap the VBO
6. Render the results using OpenGL
Host code
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <helper_gl.h>
#if defined (__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#ifndef glutCloseFunc
#define glutCloseFunc glutWMCloseFunc
#endif
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
//#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include <hip/hip_vector_types.h>
#define MAX_EPSILON_ERROR 10.0f
#define THRESHOLD 0.30f
#define REFRESH_DELAY 10 //ms
////////////////////////////////////////////////////////////////////////////////
// constants
const unsigned int window_width = 512;
const unsigned int window_height = 512;
const unsigned int mesh_width = 256;
const unsigned int mesh_height = 256;
// vbo variables
GLuint vbo;
struct cudaGraphicsResource *cuda_vbo_resource;
void *d_vbo_buffer = NULL;
float g_fAnim = 0.0;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -3.0;
StopWatchInterface *timer = NULL;
// Auto-Verification Code
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
int g_Index = 0;
float avgFPS = 0.0f;
unsigned int frameCount = 0;
unsigned int g_TotalErrors = 0;
bool g_bQAReadback = false;
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX(a,b) ((a > b) ? a : b)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
bool runTest(int argc, char **argv, char *ref_file);
void cleanup();
// GL functionality
bool initGL(int *argc, char **argv);
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res,
unsigned int vbo_res_flags);
void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res);
// rendering callbacks
void display();
void keyboard(unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void timerEvent(int value);
// Cuda functionality
void runCuda(struct cudaGraphicsResource **vbo_resource);
void runAutoTest(int devID, char **argv, char *ref_file);
void checkResultCuda(int argc, char **argv, const GLuint &vbo);
const char *sSDKsample = "simpleGL (VBO)";
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern.
//! One thread per (x, y) vertex of the width-by-height mesh; expects a 2D
//! launch. Writes pos[y*width+x] = (u, sin-wave height, v, 1).
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    // fix: bounds guard. launch_kernel's 8x8 blocks divide the 256x256 mesh
    // exactly today, but the kernel should not write out of bounds if the
    // mesh size or launch configuration ever changes.
    if (x >= width || y >= height)
        return;

    // calculate uv coordinates in [-1, 1]
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;

    // calculate simple sine wave pattern
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;

    // write output vertex
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
// Launches simple_vbo_kernel over the whole mesh with 8x8 thread blocks.
// NOTE(review): integer division of mesh dims by block dims assumes the
// block size divides the mesh exactly (true for 256x256 / 8x8).
void launch_kernel(float4 *pos, unsigned int mesh_width,
unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
hipLaunchKernelGGL(( simple_vbo_kernel), dim3(grid), dim3(block), 0, 0, pos, mesh_width, mesh_height, time);
}
// Copies the reported name of device `dev` into `name` (caller's buffer must
// be large enough) and returns whether the name starts with `gpuType`,
// compared case-insensitively.
bool checkHW(char *name, const char *gpuType, int dev)
{
    hipDeviceProp_t props;
    hipGetDeviceProperties(&props, dev);
    strcpy(name, props.name);
    return STRNCASECMP(props.name, gpuType, strlen(gpuType)) == 0;
}
// Enumerates CUDA devices, classifying any non-"Tesla" board as a graphics
// GPU. Returns the count of graphics GPUs and copies the first one's name
// into `name` (or "this hardware" when none is found). Exits the process on
// device-enumeration failure.
int findGraphicsGPU(char *name)
{
int nGraphicsGPU = 0;
int deviceCount = 0;
bool bFoundGraphics = false;
char firstGraphicsName[256], temp[256];
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
printf("> FAILED %s sample finished, exiting...\n", sSDKsample);
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("> There are no device(s) supporting CUDA\n");
return false;
}
else
{
printf("> Found %d CUDA Capable Device(s)\n", deviceCount);
}
for (int dev = 0; dev < deviceCount; ++dev)
{
// "Graphics" here simply means "name does not start with Tesla".
bool bGraphics = !checkHW(temp, (const char *)"Tesla", dev);
printf("> %s\t\tGPU %d: %s\n", (bGraphics ? "Graphics" : "Compute"), dev, temp);
if (bGraphics)
{
if (!bFoundGraphics)
{
strcpy(firstGraphicsName, temp);
}
nGraphicsGPU++;
}
}
if (nGraphicsGPU)
{
strcpy(name, firstGraphicsName);
}
else
{
strcpy(name, "this hardware");
}
return nGraphicsGPU;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: optionally parses -file=<ref> for non-interactive VBO
// verification, runs the test, and exits with the accumulated error count.
int main(int argc, char **argv)
{
char *ref_file = NULL;
pArgc = &argc;
pArgv = argv;
#if defined(__linux__)
// Ensure an X display is set so the GL context can be created headlessly.
setenv ("DISPLAY", ":0", 0);
#endif
printf("%s starting...\n", sSDKsample);
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **)argv, "file"))
{
// In this mode, we are running non-OpenGL and doing a compare of the VBO was generated correctly
getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file);
}
}
printf("\n");
runTest(argc, argv, ref_file);
printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!");
exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Updates the rolling FPS estimate once every fpsLimit frames and pushes it
// into the window title. fpsLimit adapts to roughly one update per second.
void computeFPS()
{
frameCount++;
fpsCount++;
if (fpsCount == fpsLimit)
{
avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f);
fpsCount = 0;
fpsLimit = (int)MAX(avgFPS, 1.f);
sdkResetTimer(&timer);
}
char fps[256];
sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS);
glutSetWindowTitle(fps);
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL: creates the GLUT window, registers display/keyboard/motion
//! callbacks, verifies OpenGL 2.0 support, and sets up the viewport and
//! perspective projection. Returns false if required GL support is missing.
////////////////////////////////////////////////////////////////////////////////
bool initGL(int *argc, char **argv)
{
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("Cuda GL Interop (VBO)");
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMotionFunc(motion);
glutTimerFunc(REFRESH_DELAY, timerEvent,0);
// initialize necessary OpenGL extensions
if (! isGLVersionSupported(2,0))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
return false;
}
// default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// viewport
glViewport(0, 0, window_width, window_height);
// projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0);
SDK_CHECK_ERROR_GL();
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA. Two modes:
//! - ref_file != NULL: headless — run the kernel into a plain device buffer
//!   and compare the dump against the reference file.
//! - otherwise: initialize GL first (required for CUDA/GL interop), create
//!   and register the VBO, then enter the GLUT main loop.
////////////////////////////////////////////////////////////////////////////////
bool runTest(int argc, char **argv, char *ref_file)
{
// Create the CUTIL timer
sdkCreateTimer(&timer);
// command line mode only
if (ref_file != NULL)
{
// This will pick the best possible CUDA capable device
int devID = findCudaDevice(argc, (const char **)argv);
// create VBO
checkCudaErrors(hipMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float)));
// run the cuda part
runAutoTest(devID, argv, ref_file);
// check result of Cuda step
checkResultCuda(argc, argv, vbo);
hipFree(d_vbo_buffer);
d_vbo_buffer = NULL;
}
else
{
// First initialize OpenGL context, so we can properly set the GL for CUDA.
// This is necessary in order to achieve optimal performance with OpenGL/CUDA interop.
if (false == initGL(&argc, argv))
{
return false;
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//if (checkCmdLineFlag(argc, (const char **)argv, "device"))
//{
// if (gpuGLDeviceInit(argc, (const char **)argv) == -1)
// {
// return false;
// }
//}
//else
//{
// hipGLSetGLDevice(gpuGetMaxGflopsDeviceId());
//}
// register callbacks
//glutDisplayFunc(display);
//glutKeyboardFunc(keyboard);
glutMouseFunc(mouse);
//glutMotionFunc(motion);
#if defined (__APPLE__) || defined(MACOSX)
atexit(cleanup);
#else
glutCloseFunc(cleanup);
#endif
// create VBO
createVBO(&vbo, &cuda_vbo_resource, hipGraphicsMapFlagsWriteDiscard);
// run the cuda part
runCuda(&cuda_vbo_resource);
// start rendering mainloop
glutMainLoop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation: map the registered VBO into the
//! CUDA address space, regenerate the vertex positions for the current
//! animation time, then unmap so GL can render it.
////////////////////////////////////////////////////////////////////////////////
void runCuda(struct cudaGraphicsResource **vbo_resource)
{
// map OpenGL buffer object for writing from CUDA
float4 *dptr;
checkCudaErrors(hipGraphicsMapResources(1, vbo_resource, 0));
size_t num_bytes;
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes,
*vbo_resource));
//printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes);
// execute the kernel
// dim3 block(8, 8, 1);
// dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
// kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim);
launch_kernel(dptr, mesh_width, mesh_height, g_fAnim);
// unmap buffer object (must happen before GL uses the VBO again)
checkCudaErrors(hipGraphicsUnmapResources(1, vbo_resource, 0));
}
#ifdef _WIN32
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode)
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode))
#endif
#endif
// Writes `bytes` bytes of `data` to `filename` as a binary dump.
void sdkDumpBin2(void *data, unsigned int bytes, const char *filename)
{
    printf("sdkDumpBin: <%s>\n", filename);
    FILE *fp;
    FOPEN(fp, filename, "wb");
    // fix: FOPEN can leave fp NULL (unwritable path, permissions); the
    // original passed NULL straight into fwrite and crashed.
    if (fp == NULL)
    {
        fprintf(stderr, "sdkDumpBin2: failed to open <%s> for writing\n", filename);
        return;
    }
    fwrite(data, bytes, 1, fp);
    fflush(fp);
    fclose(fp);
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation headlessly: generate vertices into
//! the plain device buffer, dump them to simpleGL.bin, and compare against
//! the reference file (incrementing g_TotalErrors on mismatch).
////////////////////////////////////////////////////////////////////////////////
void runAutoTest(int devID, char **argv, char *ref_file)
{
    char *reference_file = NULL;
    void *imageData = malloc(mesh_width*mesh_height*sizeof(float));
    // execute the kernel
    // NOTE(review): the buffer holds mesh_width*mesh_height float4 vertices,
    // but only mesh_width*mesh_height floats (one quarter) are copied and
    // compared — presumably matching the reference dump format; confirm.
    launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim);
    hipDeviceSynchronize();
    getLastCudaError("launch_kernel failed");
    checkCudaErrors(hipMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), hipMemcpyDeviceToHost));
    sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGL.bin");
    reference_file = sdkFindFilePath(ref_file, argv[0]);
    if (reference_file &&
        !sdkCompareBin2BinFloat("simpleGL.bin", reference_file,
                                mesh_width*mesh_height*sizeof(float),
                                MAX_EPSILON_ERROR, THRESHOLD, pArgv[0]))
    {
        g_TotalErrors++;
    }
    free(imageData);  // fix: was leaked on every auto-test run
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
// Allocates a GL buffer sized for mesh_width * mesh_height float4 vertices
// and registers it with CUDA using the requested map flags.
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res,
unsigned int vbo_res_flags)
{
assert(vbo);
// create buffer object
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
// initialize buffer object (4 floats per vertex)
unsigned int size = mesh_width * mesh_height * 4 * sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// register this buffer object with CUDA
checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
SDK_CHECK_ERROR_GL();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
// Releases the VBO: unregisters it from CUDA, deletes the GL buffer and
// zeroes the handle so cleanup() will not release it twice.
void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res)
{
    // unregister this buffer object with CUDA before the GL object goes away
    checkCudaErrors(hipGraphicsUnregisterResource(vbo_res));
    // Fix: the original passed the raw value 1 as the binding target, which
    // is not a valid buffer-binding enum; GL_ARRAY_BUFFER matches createVBO.
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    glDeleteBuffers(1, vbo);
    *vbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
// Per-frame callback: updates the vertices via CUDA, then draws them as a
// point cloud with the fixed-function pipeline and advances the animation.
void display()
{
sdkStartTimer(&timer);
// run CUDA kernel to generate vertex positions
runCuda(&cuda_vbo_resource);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix (dolly + orbit driven by the mouse handlers)
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 1.0, 0.0);
// render from the vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1.0, 0.0, 0.0);
glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
g_fAnim += 0.01f;
sdkStopTimer(&timer);
computeFPS();
}
// GLUT timer callback: posts a redisplay and re-arms itself every
// REFRESH_DELAY ms for as long as a window exists.
void timerEvent(int value)
{
if (glutGetWindow())
{
glutPostRedisplay();
glutTimerFunc(REFRESH_DELAY, timerEvent,0);
}
}
// Exit/close handler: releases the timer and, if a VBO was created, its GL
// buffer and CUDA registration.
void cleanup()
{
sdkDeleteTimer(&timer);
if (vbo)
{
deleteVBO(&vbo, cuda_vbo_resource);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
// Keyboard handler: ESC (27) quits -- via exit() on Apple, otherwise by
// destroying the GLUT window so the registered close handler runs.
void keyboard(unsigned char key, int /*x*/, int /*y*/)
{
switch (key)
{
case (27) :
#if defined(__APPLE__) || defined(MACOSX)
exit(EXIT_SUCCESS);
#else
glutDestroyWindow(glutGetWindow());
return;
#endif
}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
// Records which buttons are held (as a bitmask) and remembers the cursor
// position so motion() can compute deltas on the next event.
void mouse(int button, int state, int x, int y)
{
    switch (state)
    {
        case GLUT_DOWN:
            mouse_buttons |= 1 << button;
            break;
        case GLUT_UP:
            mouse_buttons = 0;
            break;
    }
    mouse_old_x = x;
    mouse_old_y = y;
}
// Mouse-drag handler: button bit 0 orbits the view, bit 2 dollies in/out.
void motion(int x, int y)
{
    // cursor deltas since the previous mouse event
    const float dx = (float)(x - mouse_old_x);
    const float dy = (float)(y - mouse_old_y);
    if (mouse_buttons & 1)
    {
        // rotate the scene
        rotate_x += dy * 0.2f;
        rotate_y += dx * 0.2f;
    }
    else if (mouse_buttons & 4)
    {
        // move the camera along z
        translate_z += dy * 0.01f;
    }
    mouse_old_x = x;
    mouse_old_y = y;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if the result is correct or write data to file for external
//! regression testing
////////////////////////////////////////////////////////////////////////////////
// Regression hook: only meaningful in interop mode (d_vbo_buffer == NULL).
// Temporarily unregisters the VBO from CUDA, maps it through GL, optionally
// dumps its contents, then re-registers it for further CUDA use.
void checkResultCuda(int argc, char **argv, const GLuint &vbo)
{
if (!d_vbo_buffer)
{
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource));
// map buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo);
float *data = (float *) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
// check result
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// write file for regression test
// NOTE(review): writes width*height*3 floats although the VBO holds 4
// floats per vertex -- confirm the intended element count.
sdkWriteFile<float>("./data/regression.dat",
data, mesh_width * mesh_height * 3, 0.0, false);
}
// unmap GL buffer object
if (!glUnmapBuffer(GL_ARRAY_BUFFER))
{
fprintf(stderr, "Unmap buffer failed.\n");
fflush(stderr);
}
checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo,
hipGraphicsMapFlagsWriteDiscard));
SDK_CHECK_ERROR_GL();
}
}
| 1ca8c919eb22db9f9f5b62948871ebbc8b141e0f.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/*
This example demonstrates how to use the Cuda OpenGL bindings to
dynamically modify a vertex buffer using a Cuda kernel.
The steps are:
1. Create an empty vertex buffer object (VBO)
2. Register the VBO with Cuda
3. Map the VBO for writing from Cuda
4. Run Cuda kernel to modify the vertex positions
5. Unmap the VBO
6. Render the results using OpenGL
Host code
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <helper_gl.h>
#if defined (__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#ifndef glutCloseFunc
#define glutCloseFunc glutWMCloseFunc
#endif
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
//#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include <vector_types.h>
#define MAX_EPSILON_ERROR 10.0f
#define THRESHOLD 0.30f
#define REFRESH_DELAY 10 //ms
////////////////////////////////////////////////////////////////////////////////
// constants
const unsigned int window_width = 512;
const unsigned int window_height = 512;
const unsigned int mesh_width = 256;
const unsigned int mesh_height = 256;
// vbo variables
GLuint vbo;
struct cudaGraphicsResource *cuda_vbo_resource;
void *d_vbo_buffer = NULL;
float g_fAnim = 0.0;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -3.0;
StopWatchInterface *timer = NULL;
// Auto-Verification Code
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
int g_Index = 0;
float avgFPS = 0.0f;
unsigned int frameCount = 0;
unsigned int g_TotalErrors = 0;
bool g_bQAReadback = false;
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX(a,b) ((a > b) ? a : b)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
bool runTest(int argc, char **argv, char *ref_file);
void cleanup();
// GL functionality
bool initGL(int *argc, char **argv);
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res,
unsigned int vbo_res_flags);
void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res);
// rendering callbacks
void display();
void keyboard(unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void timerEvent(int value);
// Cuda functionality
void runCuda(struct cudaGraphicsResource **vbo_resource);
void runAutoTest(int devID, char **argv, char *ref_file);
void checkResultCuda(int argc, char **argv, const GLuint &vbo);
const char *sSDKsample = "simpleGL (VBO)";
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
// Expects a 2D launch covering a width x height mesh; each thread writes one
// float4 vertex (u, waveHeight, v, 1).
__global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    // Guard against a grid that over-covers the mesh; the original relied on
    // width/height being exact multiples of the block dimensions.
    if (x >= width || y >= height)
    {
        return;
    }
    // calculate uv coordinates, remapped to [-1, 1]
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;
    // calculate simple sine wave pattern
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
    // write output vertex (the y component carries the wave height)
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
// Host wrapper: launches simple_vbo_kernel over the whole mesh.
// NOTE(review): grid dims use integer division, so mesh_width/mesh_height
// are assumed to be exact multiples of the 8x8 block (true for the 256x256
// defaults) -- confirm before changing the mesh size.
void launch_kernel(float4 *pos, unsigned int mesh_width,
unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
simple_vbo_kernel<<< grid, block>>>(pos, mesh_width, mesh_height, time);
}
// Copies the device's name into `name` for the caller and reports whether it
// starts with the given gpuType prefix (case-insensitive).
bool checkHW(char *name, const char *gpuType, int dev)
{
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    strcpy(name, deviceProp.name);
    return STRNCASECMP(deviceProp.name, gpuType, strlen(gpuType)) == 0;
}
// Counts CUDA devices that look like graphics (non-"Tesla") parts.
// Returns the count and copies the first such device's name into `name`
// ("this hardware" if none found).  Exits the process on CUDA API failure.
int findGraphicsGPU(char *name)
{
int nGraphicsGPU = 0;
int deviceCount = 0;
bool bFoundGraphics = false;
char firstGraphicsName[256], temp[256];
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
printf("> FAILED %s sample finished, exiting...\n", sSDKsample);
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("> There are no device(s) supporting CUDA\n");
return false;
}
else
{
printf("> Found %d CUDA Capable Device(s)\n", deviceCount);
}
for (int dev = 0; dev < deviceCount; ++dev)
{
// a device is "graphics" if its name does not start with "Tesla"
bool bGraphics = !checkHW(temp, (const char *)"Tesla", dev);
printf("> %s\t\tGPU %d: %s\n", (bGraphics ? "Graphics" : "Compute"), dev, temp);
if (bGraphics)
{
if (!bFoundGraphics)
{
strcpy(firstGraphicsName, temp);
}
nGraphicsGPU++;
}
}
if (nGraphicsGPU)
{
strcpy(name, firstGraphicsName);
}
else
{
strcpy(name, "this hardware");
}
return nGraphicsGPU;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: optionally parses -file=<ref> to run the non-OpenGL auto test,
// then runs runTest() and exits with the accumulated error count.
int main(int argc, char **argv)
{
char *ref_file = NULL;
pArgc = &argc;
pArgv = argv;
#if defined(__linux__)
setenv ("DISPLAY", ":0", 0);
#endif
printf("%s starting...\n", sSDKsample);
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **)argv, "file"))
{
// In this mode, we are running non-OpenGL and doing a compare of the VBO was generated correctly
getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file);
}
}
printf("\n");
runTest(argc, argv, ref_file);
printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!");
exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Tracks frames and periodically recomputes the average FPS shown in the
// window title; fpsLimit adapts so the title refresh rate tracks the FPS.
void computeFPS()
{
    frameCount++;
    fpsCount++;
    if (fpsCount == fpsLimit)
    {
        avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f);
        fpsCount = 0;
        fpsLimit = (int)MAX(avgFPS, 1.f);
        sdkResetTimer(&timer);
    }
    char fps[256];
    // snprintf instead of sprintf: guarantees the title buffer cannot overflow
    snprintf(fps, sizeof(fps), "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS);
    glutSetWindowTitle(fps);
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
// Creates the GLUT window, registers display/keyboard/motion/timer callbacks,
// verifies OpenGL 2.0 support and sets up a fixed-function perspective view.
// Returns false if the required GL version is missing.
bool initGL(int *argc, char **argv)
{
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("Cuda GL Interop (VBO)");
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMotionFunc(motion);
glutTimerFunc(REFRESH_DELAY, timerEvent,0);
// initialize necessary OpenGL extensions
if (! isGLVersionSupported(2,0))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
return false;
}
// default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// viewport
glViewport(0, 0, window_width, window_height);
// projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0);
SDK_CHECK_ERROR_GL();
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Dispatches between the two run modes: with ref_file, a headless auto test
// against a plain device buffer; otherwise the interactive GL interop demo.
bool runTest(int argc, char **argv, char *ref_file)
{
// Create the CUTIL timer
sdkCreateTimer(&timer);
// command line mode only
if (ref_file != NULL)
{
// This will pick the best possible CUDA capable device
int devID = findCudaDevice(argc, (const char **)argv);
// create VBO (plain device buffer, no GL involved in this mode)
checkCudaErrors(cudaMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float)));
// run the cuda part
runAutoTest(devID, argv, ref_file);
// check result of Cuda step
checkResultCuda(argc, argv, vbo);
cudaFree(d_vbo_buffer);
d_vbo_buffer = NULL;
}
else
{
// First initialize OpenGL context, so we can properly set the GL for CUDA.
// This is necessary in order to achieve optimal performance with OpenGL/CUDA interop.
if (false == initGL(&argc, argv))
{
return false;
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//if (checkCmdLineFlag(argc, (const char **)argv, "device"))
//{
//    if (gpuGLDeviceInit(argc, (const char **)argv) == -1)
//    {
//        return false;
//    }
//}
//else
//{
//    cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId());
//}
// register callbacks
//glutDisplayFunc(display);
//glutKeyboardFunc(keyboard);
glutMouseFunc(mouse);
//glutMotionFunc(motion);
#if defined (__APPLE__) || defined(MACOSX)
atexit(cleanup);
#else
glutCloseFunc(cleanup);
#endif
// create VBO
createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard);
// run the cuda part
runCuda(&cuda_vbo_resource);
// start rendering mainloop (blocks until the window closes)
glutMainLoop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
// Maps the shared VBO for device access, runs the animation kernel on it,
// then unmaps so OpenGL can render the updated vertices.
void runCuda(struct cudaGraphicsResource **vbo_resource)
{
// map OpenGL buffer object for writing from CUDA
float4 *dptr;
checkCudaErrors(cudaGraphicsMapResources(1, vbo_resource, 0));
size_t num_bytes;
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes,
*vbo_resource));
//printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes);
// execute the kernel
// dim3 block(8, 8, 1);
// dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
// kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim);
launch_kernel(dptr, mesh_width, mesh_height, g_fAnim);
// unmap buffer object so GL can use the buffer again
checkCudaErrors(cudaGraphicsUnmapResources(1, vbo_resource, 0));
}
#ifdef _WIN32
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode)
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode))
#endif
#endif
// Writes `bytes` bytes of `data` to `filename` as a raw binary dump.
// Used by the auto test to produce "simpleGL.bin" for bin-compare.
void sdkDumpBin2(void *data, unsigned int bytes, const char *filename)
{
    printf("sdkDumpBin: <%s>\n", filename);
    FILE *fp;
    FOPEN(fp, filename, "wb");
    // Fix: the original dereferenced fp unconditionally; fail gracefully if
    // the file could not be opened (e.g. read-only working directory).
    if (fp == NULL)
    {
        fprintf(stderr, "sdkDumpBin2: failed to open <%s> for writing\n", filename);
        return;
    }
    fwrite(data, bytes, 1, fp);
    fflush(fp);
    fclose(fp);
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
// Non-OpenGL auto test: runs the kernel into d_vbo_buffer, reads it back,
// dumps it to "simpleGL.bin" and bin-compares against ref_file, bumping
// g_TotalErrors on mismatch.
void runAutoTest(int devID, char **argv, char *ref_file)
{
    char *reference_file = NULL;
    void *imageData = malloc(mesh_width*mesh_height*sizeof(float));
    // execute the kernel
    launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim);
    cudaDeviceSynchronize();
    getLastCudaError("launch_kernel failed");
    // read back and dump for the bin-compare below
    checkCudaErrors(cudaMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), cudaMemcpyDeviceToHost));
    sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGL.bin");
    reference_file = sdkFindFilePath(ref_file, argv[0]);
    if (reference_file &&
        !sdkCompareBin2BinFloat("simpleGL.bin", reference_file,
                                mesh_width*mesh_height*sizeof(float),
                                MAX_EPSILON_ERROR, THRESHOLD, pArgv[0]))
    {
        g_TotalErrors++;
    }
    // fix: the readback buffer was leaked
    free(imageData);
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
// Allocates a GL buffer sized for mesh_width * mesh_height float4 vertices
// and registers it with CUDA using the requested map flags.
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res,
unsigned int vbo_res_flags)
{
assert(vbo);
// create buffer object
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
// initialize buffer object (4 floats per vertex)
unsigned int size = mesh_width * mesh_height * 4 * sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// register this buffer object with CUDA
checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
SDK_CHECK_ERROR_GL();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
// Releases the VBO: unregisters it from CUDA, deletes the GL buffer and
// zeroes the handle so cleanup() will not release it twice.
void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res)
{
    // unregister this buffer object with CUDA before the GL object goes away
    checkCudaErrors(cudaGraphicsUnregisterResource(vbo_res));
    // Fix: the original passed the raw value 1 as the binding target, which
    // is not a valid buffer-binding enum; GL_ARRAY_BUFFER matches createVBO.
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    glDeleteBuffers(1, vbo);
    *vbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
// Per-frame callback: updates the vertices via CUDA, then draws them as a
// point cloud with the fixed-function pipeline and advances the animation.
void display()
{
sdkStartTimer(&timer);
// run CUDA kernel to generate vertex positions
runCuda(&cuda_vbo_resource);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix (dolly + orbit driven by the mouse handlers)
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 1.0, 0.0);
// render from the vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1.0, 0.0, 0.0);
glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
g_fAnim += 0.01f;
sdkStopTimer(&timer);
computeFPS();
}
// GLUT timer callback: posts a redisplay and re-arms itself every
// REFRESH_DELAY ms for as long as a window exists.
void timerEvent(int value)
{
if (glutGetWindow())
{
glutPostRedisplay();
glutTimerFunc(REFRESH_DELAY, timerEvent,0);
}
}
// Exit/close handler: releases the timer and, if a VBO was created, its GL
// buffer and CUDA registration.
void cleanup()
{
sdkDeleteTimer(&timer);
if (vbo)
{
deleteVBO(&vbo, cuda_vbo_resource);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
// Keyboard handler: ESC (27) quits -- via exit() on Apple, otherwise by
// destroying the GLUT window so the registered close handler runs.
void keyboard(unsigned char key, int /*x*/, int /*y*/)
{
switch (key)
{
case (27) :
#if defined(__APPLE__) || defined(MACOSX)
exit(EXIT_SUCCESS);
#else
glutDestroyWindow(glutGetWindow());
return;
#endif
}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
// Records which buttons are held (as a bitmask) and remembers the cursor
// position so motion() can compute deltas on the next event.
void mouse(int button, int state, int x, int y)
{
    switch (state)
    {
        case GLUT_DOWN:
            mouse_buttons |= 1 << button;
            break;
        case GLUT_UP:
            mouse_buttons = 0;
            break;
    }
    mouse_old_x = x;
    mouse_old_y = y;
}
// Mouse-drag handler: button bit 0 orbits the view, bit 2 dollies in/out.
void motion(int x, int y)
{
    // cursor deltas since the previous mouse event
    const float dx = (float)(x - mouse_old_x);
    const float dy = (float)(y - mouse_old_y);
    if (mouse_buttons & 1)
    {
        // rotate the scene
        rotate_x += dy * 0.2f;
        rotate_y += dx * 0.2f;
    }
    else if (mouse_buttons & 4)
    {
        // move the camera along z
        translate_z += dy * 0.01f;
    }
    mouse_old_x = x;
    mouse_old_y = y;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if the result is correct or write data to file for external
//! regression testing
////////////////////////////////////////////////////////////////////////////////
// Regression hook: only meaningful in interop mode (d_vbo_buffer == NULL).
// Temporarily unregisters the VBO from CUDA, maps it through GL, optionally
// dumps its contents, then re-registers it for further CUDA use.
void checkResultCuda(int argc, char **argv, const GLuint &vbo)
{
if (!d_vbo_buffer)
{
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource));
// map buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo);
float *data = (float *) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
// check result
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// write file for regression test
// NOTE(review): writes width*height*3 floats although the VBO holds 4
// floats per vertex -- confirm the intended element count.
sdkWriteFile<float>("./data/regression.dat",
data, mesh_width * mesh_height * 3, 0.0, false);
}
// unmap GL buffer object
if (!glUnmapBuffer(GL_ARRAY_BUFFER))
{
fprintf(stderr, "Unmap buffer failed.\n");
fflush(stderr);
}
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo,
cudaGraphicsMapFlagsWriteDiscard));
SDK_CHECK_ERROR_GL();
}
}
|
d90368d4cdbd02936835fa2645928e0a8898c603.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <tuple>
#include <SFML/Graphics.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/videoio/videoio.hpp"
#include "cud_defs.h"
using namespace cv;
using namespace std;
// Evaluates the bilinear mapping  out = c0*x + c1*y + c2*x*y + c3  for each
// axis; the float result is truncated to integer pixel coordinates by the
// implicit conversion on assignment.
__device__ void d_apply_bilineal(float* coeff_x, float* coeff_y, int x_i, int y_i, int& x_o, int& y_o){
x_o = coeff_x[0] * x_i + coeff_x[1] * y_i + coeff_x[2] * (x_i * y_i) + coeff_x[3];
y_o = coeff_y[0] * x_i + coeff_y[1] * y_i + coeff_y[2] * (x_i * y_i) + coeff_y[3];
}
// Scatters each pixel of a 3-channel uchar image to its bilinearly
// transformed position.  h/w: input dims, h_o/w_o: output dims.  Expects a
// 2D launch with i covering rows (x dimension) and j covering columns.
__global__ void apply_bilineal_mat(uchar* mat_i, uchar* mat_o, float* d_coeff_x, float* d_coeff_y, int h, int w, int h_o, int w_o){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < h && j < w) {
        int i_o, j_o;
        d_apply_bilineal(d_coeff_x, d_coeff_y, j, i, j_o, i_o);
        // Fix: the transform can map outside the output canvas (the caller
        // sizes it from the transformed corners but never shifts coordinates
        // by x_min/y_min), which produced out-of-bounds writes.  Drop such
        // pixels instead of corrupting memory.
        if (i_o < 0 || i_o >= h_o || j_o < 0 || j_o >= w_o) {
            return;
        }
        mat_o[i_o * w_o * 3 + j_o * 3]     = mat_i[i * w * 3 + j * 3];
        mat_o[i_o * w_o * 3 + j_o * 3 + 1] = mat_i[i * w * 3 + j * 3 + 1];
        mat_o[i_o * w_o * 3 + j_o * 3 + 2] = mat_i[i * w * 3 + j * 3 + 2];
    }
}
void apply_bilineal(float* coeff_x, float* coeff_y, int x_i, int y_i, int& x_o, int& y_o);
void get_mat_bilineal(vector<pair<float, float> > input_points, vector<pair<float, float> > output_points, vector<float>& coeff_x, vector<float>& coeff_y);
// Loads an image, builds a bilinear warp from 4 point correspondences,
// applies it on the GPU and shows original + warped images in an SFML window.
// NOTE(review): no check that imread() succeeded; kernel coordinates are not
// shifted by -x_min/-y_min, so negative outputs fall outside the canvas --
// confirm intended behaviour.
int main()
{
sf::RenderWindow window(sf::VideoMode(1200, 900), "RenderWindow");
sf::Image image;
sf::Texture texture;
sf::Event event;
sf::Sprite sprite;
cv::Mat frameRGB, frameRGBA;
sf::Image image2;
sf::Texture texture2;
sf::Sprite sprite2;
cv::Mat res2;
frameRGB = imread("stark.jpeg");
// four input->output correspondences defining the bilinear warp
vector<pair<float, float> > input_points = {make_pair(0, 0), make_pair(0, 5), make_pair(5, 0), make_pair(5, 5)};
vector<pair<float, float> > output_points = {make_pair(5, 5), make_pair(10, 10), make_pair(10, 5), make_pair(15, 10)};
// vector<pair<float, float> > input_points = {make_pair(0, 0), make_pair(5, 0), make_pair(0, 5), make_pair(5, 5)};
// vector<pair<float, float> > output_points = {make_pair(5, 5), make_pair(10, 10), make_pair(5, 10), make_pair(10, 15)};
vector<float> coeff_x, coeff_y;
get_mat_bilineal(input_points, output_points, coeff_x, coeff_y);
// transform the image corners to size the output canvas
int x_min, x_max, y_min, y_max;
x_min = 0; x_max = 0; y_min = 0; y_max = 0;
int x_o, y_o;
vector<pair<float, float> > esquinas = {make_pair(0, 0), make_pair(frameRGB.cols - 1, 0), make_pair(0, frameRGB.rows - 1), make_pair(frameRGB.cols - 1, frameRGB.rows - 1)};
for (size_t i = 0; i < esquinas.size(); i++) {
apply_bilineal(coeff_x.data(), coeff_y.data(), esquinas[i].first, esquinas[i].second, x_o, y_o);
if(x_o > x_max)
x_max = x_o;
if(x_o < x_min)
x_min = x_o;
if(y_o > y_max)
y_max = y_o;
if(y_o < y_min)
y_min = y_o;
}
cout<<x_min<<" - "<<x_max<<endl;
cout<<y_min<<" - "<<y_max<<endl;
Mat res = Mat::zeros(y_max - y_min + 1, x_max - x_min + 1, CV_8UC3);
// Mat res = Mat::zeros(frameRGB.rows, frameRGB.cols, CV_8UC3);
cout<<frameRGB.cols<<" - "<<frameRGB.rows<<endl;
cout<<res.cols<<" - "<<res.rows<<endl;
// upload input/output images and coefficients, then run the warp kernel
float block_size = 16;
dim3 block = dim3(block_size, block_size , 1);
dim3 grid = dim3(ceil(frameRGB.rows / block_size), ceil(frameRGB.cols / block_size), 1);
uchar* d_mat_i = cuda_array<uchar>(frameRGB.cols * frameRGB.rows * 3);
cuda_H2D<uchar>(frameRGB.data, d_mat_i, frameRGB.cols * frameRGB.rows * 3);
CHECK(hipDeviceSynchronize());
uchar* d_mat_o = cuda_array<uchar>(res.cols * res.rows * 3);
cuda_H2D<uchar>(res.data, d_mat_o, res.cols * res.rows * 3);
CHECK(hipDeviceSynchronize());
float *d_coeff_x, *d_coeff_y;
d_coeff_x = cuda_array<float>(coeff_x.size());
d_coeff_y = cuda_array<float>(coeff_y.size());
cuda_H2D<float>(coeff_x.data(), d_coeff_x, coeff_x.size());
cuda_H2D<float>(coeff_y.data(), d_coeff_y, coeff_y.size());
hipLaunchKernelGGL(( apply_bilineal_mat), dim3(grid), dim3(block), 0, 0, d_mat_i, d_mat_o, d_coeff_x, d_coeff_y, frameRGB.rows, frameRGB.cols, res.rows, res.cols);
CHECK(hipDeviceSynchronize());
cuda_D2H(d_mat_o, res.data, res.cols * res.rows * 3);
CHECK(hipDeviceSynchronize());
// imshow("orig",frameRGB);
// imshow("orig_m",res);
// waitKey(0);
// convert both images to RGBA and hand them to SFML for display
cv::cvtColor(frameRGB, frameRGBA, cv::COLOR_BGR2RGBA);
image.create(frameRGBA.cols, frameRGBA.rows, frameRGBA.ptr());
texture.loadFromImage(image);
sprite.setTexture(texture);
cv::cvtColor(res, res2, cv::COLOR_BGR2RGBA);
image2.create(res2.cols, res2.rows, res2.ptr());
texture2.loadFromImage(image2);
sprite2.setTexture(texture2);
sprite2.setPosition(0,frameRGB.rows);
// event loop: redraw both sprites until the window is closed
while (window.isOpen()) {
// cap >> frameRGB;
if(frameRGB.empty()) {
break;
}
while (window.pollEvent(event)) {
if (event.type == sf::Event::Closed)
window.close();
}
window.draw(sprite2);
window.draw(sprite);
window.display();
}
return 0;
}
| d90368d4cdbd02936835fa2645928e0a8898c603.cu | #include <iostream>
#include <vector>
#include <tuple>
#include <SFML/Graphics.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/videoio/videoio.hpp"
#include "cud_defs.h"
using namespace cv;
using namespace std;
// Evaluates the bilinear mapping  out = c0*x + c1*y + c2*x*y + c3  for each
// axis; the float result is truncated to integer pixel coordinates by the
// implicit conversion on assignment.
__device__ void d_apply_bilineal(float* coeff_x, float* coeff_y, int x_i, int y_i, int& x_o, int& y_o){
x_o = coeff_x[0] * x_i + coeff_x[1] * y_i + coeff_x[2] * (x_i * y_i) + coeff_x[3];
y_o = coeff_y[0] * x_i + coeff_y[1] * y_i + coeff_y[2] * (x_i * y_i) + coeff_y[3];
}
// Scatters each pixel of a 3-channel uchar image to its bilinearly
// transformed position.  h/w: input dims, h_o/w_o: output dims.  Expects a
// 2D launch with i covering rows (x dimension) and j covering columns.
__global__ void apply_bilineal_mat(uchar* mat_i, uchar* mat_o, float* d_coeff_x, float* d_coeff_y, int h, int w, int h_o, int w_o){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < h && j < w) {
        int i_o, j_o;
        d_apply_bilineal(d_coeff_x, d_coeff_y, j, i, j_o, i_o);
        // Fix: the transform can map outside the output canvas (the caller
        // sizes it from the transformed corners but never shifts coordinates
        // by x_min/y_min), which produced out-of-bounds writes.  Drop such
        // pixels instead of corrupting memory.
        if (i_o < 0 || i_o >= h_o || j_o < 0 || j_o >= w_o) {
            return;
        }
        mat_o[i_o * w_o * 3 + j_o * 3]     = mat_i[i * w * 3 + j * 3];
        mat_o[i_o * w_o * 3 + j_o * 3 + 1] = mat_i[i * w * 3 + j * 3 + 1];
        mat_o[i_o * w_o * 3 + j_o * 3 + 2] = mat_i[i * w * 3 + j * 3 + 2];
    }
}
void apply_bilineal(float* coeff_x, float* coeff_y, int x_i, int y_i, int& x_o, int& y_o);
void get_mat_bilineal(vector<pair<float, float> > input_points, vector<pair<float, float> > output_points, vector<float>& coeff_x, vector<float>& coeff_y);
// Loads an image, builds a bilinear warp from 4 point correspondences,
// applies it on the GPU and shows original + warped images in an SFML window.
// NOTE(review): no check that imread() succeeded; kernel coordinates are not
// shifted by -x_min/-y_min, so negative outputs fall outside the canvas --
// confirm intended behaviour.
int main()
{
sf::RenderWindow window(sf::VideoMode(1200, 900), "RenderWindow");
sf::Image image;
sf::Texture texture;
sf::Event event;
sf::Sprite sprite;
cv::Mat frameRGB, frameRGBA;
sf::Image image2;
sf::Texture texture2;
sf::Sprite sprite2;
cv::Mat res2;
frameRGB = imread("stark.jpeg");
// four input->output correspondences defining the bilinear warp
vector<pair<float, float> > input_points = {make_pair(0, 0), make_pair(0, 5), make_pair(5, 0), make_pair(5, 5)};
vector<pair<float, float> > output_points = {make_pair(5, 5), make_pair(10, 10), make_pair(10, 5), make_pair(15, 10)};
// vector<pair<float, float> > input_points = {make_pair(0, 0), make_pair(5, 0), make_pair(0, 5), make_pair(5, 5)};
// vector<pair<float, float> > output_points = {make_pair(5, 5), make_pair(10, 10), make_pair(5, 10), make_pair(10, 15)};
vector<float> coeff_x, coeff_y;
get_mat_bilineal(input_points, output_points, coeff_x, coeff_y);
// transform the image corners to size the output canvas
int x_min, x_max, y_min, y_max;
x_min = 0; x_max = 0; y_min = 0; y_max = 0;
int x_o, y_o;
vector<pair<float, float> > esquinas = {make_pair(0, 0), make_pair(frameRGB.cols - 1, 0), make_pair(0, frameRGB.rows - 1), make_pair(frameRGB.cols - 1, frameRGB.rows - 1)};
for (size_t i = 0; i < esquinas.size(); i++) {
apply_bilineal(coeff_x.data(), coeff_y.data(), esquinas[i].first, esquinas[i].second, x_o, y_o);
if(x_o > x_max)
x_max = x_o;
if(x_o < x_min)
x_min = x_o;
if(y_o > y_max)
y_max = y_o;
if(y_o < y_min)
y_min = y_o;
}
cout<<x_min<<" - "<<x_max<<endl;
cout<<y_min<<" - "<<y_max<<endl;
Mat res = Mat::zeros(y_max - y_min + 1, x_max - x_min + 1, CV_8UC3);
// Mat res = Mat::zeros(frameRGB.rows, frameRGB.cols, CV_8UC3);
cout<<frameRGB.cols<<" - "<<frameRGB.rows<<endl;
cout<<res.cols<<" - "<<res.rows<<endl;
// upload input/output images and coefficients, then run the warp kernel
float block_size = 16;
dim3 block = dim3(block_size, block_size , 1);
dim3 grid = dim3(ceil(frameRGB.rows / block_size), ceil(frameRGB.cols / block_size), 1);
uchar* d_mat_i = cuda_array<uchar>(frameRGB.cols * frameRGB.rows * 3);
cuda_H2D<uchar>(frameRGB.data, d_mat_i, frameRGB.cols * frameRGB.rows * 3);
CHECK(cudaDeviceSynchronize());
uchar* d_mat_o = cuda_array<uchar>(res.cols * res.rows * 3);
cuda_H2D<uchar>(res.data, d_mat_o, res.cols * res.rows * 3);
CHECK(cudaDeviceSynchronize());
float *d_coeff_x, *d_coeff_y;
d_coeff_x = cuda_array<float>(coeff_x.size());
d_coeff_y = cuda_array<float>(coeff_y.size());
cuda_H2D<float>(coeff_x.data(), d_coeff_x, coeff_x.size());
cuda_H2D<float>(coeff_y.data(), d_coeff_y, coeff_y.size());
apply_bilineal_mat<<<grid, block>>>(d_mat_i, d_mat_o, d_coeff_x, d_coeff_y, frameRGB.rows, frameRGB.cols, res.rows, res.cols);
CHECK(cudaDeviceSynchronize());
cuda_D2H(d_mat_o, res.data, res.cols * res.rows * 3);
CHECK(cudaDeviceSynchronize());
// imshow("orig",frameRGB);
// imshow("orig_m",res);
// waitKey(0);
// convert both images to RGBA and hand them to SFML for display
cv::cvtColor(frameRGB, frameRGBA, cv::COLOR_BGR2RGBA);
image.create(frameRGBA.cols, frameRGBA.rows, frameRGBA.ptr());
texture.loadFromImage(image);
sprite.setTexture(texture);
cv::cvtColor(res, res2, cv::COLOR_BGR2RGBA);
image2.create(res2.cols, res2.rows, res2.ptr());
texture2.loadFromImage(image2);
sprite2.setTexture(texture2);
sprite2.setPosition(0,frameRGB.rows);
// event loop: redraw both sprites until the window is closed
while (window.isOpen()) {
// cap >> frameRGB;
if(frameRGB.empty()) {
break;
}
while (window.pollEvent(event)) {
if (event.type == sf::Event::Closed)
window.close();
}
window.draw(sprite2);
window.draw(sprite);
window.display();
}
return 0;
}
|
63b70b3ef864063975abfcbb209523dc2815e858.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#include "stdio.h"
#include "stdlib.h"
#define blockSize 256
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Efficient {
// Flat global thread index for a 1D launch.
__device__ int threadIndex() {
return (blockIdx.x * blockDim.x) + threadIdx.x;
}
// One up-sweep (reduce) pass of the work-efficient (Blelloch) scan.
// Every element whose 1-based index is a multiple of 2*d accumulates its
// sibling d positions to the left; all other elements are copied through to
// odata unchanged, so the arrays can be ping-ponged between passes.
__global__ void kernUpSweep(int n, int d, int *odata, int *idata) {
int index = threadIndex();
if (index >= n) return;
int addTerm = (index + 1) % (d * 2) == 0 ? idata[index - d] : 0;
odata[index] = idata[index] + addTerm;
}
// One down-sweep pass of the Blelloch exclusive scan.  `length` must be the
// padded power-of-two size.  On the first pass (d == length) the last element
// is replaced with the identity (0); each pass then pushes partial sums down
// the implicit tree: the right node keeps parent + left child, the left child
// receives the parent's value.
__global__ void kernDownSweep(int length, int d, int *odata, int *idata) {
int index = threadIndex();
if (index >= length) return;
// On the first iteration, and using only one thread, set the last element to 0.
if ((index + 1) % d == 0) {
int swapIndex = index - (d / 2);
int term = (length == d) && (index == d - 1) ? 0 : idata[index];
odata[index] = term + idata[swapIndex];
odata[swapIndex] = term;
}
}
// Rounds n up to the nearest power of 2 (returns 1 for n <= 1).
// Uses integer doubling instead of pow/ceil/log2: the floating-point
// round-trip can mis-round for large n, and log2(0) is undefined.
int bufferToPow2(int n) {
int p = 1;
while (p < n) p <<= 1;
return p;
}
// Runs the full Blelloch scan (up-sweep then down-sweep) on device buffers.
// Both buffers must hold at least bufferToPow2(n) elements. The pointers
// are ping-ponged locally after every launch (passed by value, so the
// caller's pointers are untouched); the buffer holding the final result
// therefore depends on the total iteration count.
// NOTE(review): callers read the result from their *idata* pointer — confirm
// the swap parity works out for all n.
void dev_scan(int n, int *dev_odata, int *dev_idata) {
int bufferedLength = bufferToPow2(n);
int numBlocks = getNumBlocks(blockSize, n); // enough blocks to allocate one thread to each array element
// upsweep
// NOTE(review): bound is n while the down-sweep walks bufferedLength —
// presumably these should match for non-power-of-2 n; verify.
for (int d = 1; d <= n; d *= 2) {
kernUpSweep << <numBlocks, blockSize >> >(n, d, dev_odata, dev_idata);
// swap dev_idata with dev_odata
int *swap = dev_idata;
dev_idata = dev_odata;
dev_odata = swap;
}
// downsweep: stride shrinks from the whole buffered range down to 1
for (int d = bufferedLength; d >= 1; d /= 2) {
kernDownSweep << <numBlocks, blockSize >> >(bufferedLength, d, dev_odata, dev_idata);
// swap dev_idata with dev_odata
int *swap = dev_idata;
dev_idata = dev_odata;
dev_odata = swap;
}
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 *
 * Device buffers are padded up to the next power of 2 because the
 * down-sweep walks the entire padded range; the buffers are zero-filled
 * before use so the kernels never read uninitialized device memory
 * (hipMalloc does not zero its allocation).
 */
void scan(int n, int *odata, const int *idata) {
// declare arrays
int* dev_idata;
int* dev_odata;
int bufferedLength = bufferToPow2(n);
// allocate memory
hipMalloc((void**)&dev_idata, bufferedLength * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
hipMalloc((void**)&dev_odata, bufferedLength * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
// zero-fill so the padded tail [n, bufferedLength) holds the scan identity
hipMemset(dev_idata, 0, bufferedLength * sizeof(int));
hipMemset(dev_odata, 0, bufferedLength * sizeof(int));
// copy memory and run the algorithm
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
dev_scan(n, dev_odata, dev_idata);
// NOTE(review): the result is read back through dev_idata because
// dev_scan ping-pongs its local copies of the pointers — confirm the
// swap parity for all n.
hipMemcpy(odata, dev_idata, n * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_idata);
hipFree(dev_odata);
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded. Pipeline: map-to-boolean, scan the flags to
 * get output positions, scatter the surviving elements. Prints the GPU
 * pipeline time (ms) to stdout as a side effect.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
// device buffers
int* dev_idata;    // input values
int* dev_odata;    // compacted output
int* dev_bools;    // 0/1 flag per element (non-zero -> 1)
int* dev_pingPong; // scratch copy of the flags consumed by dev_scan
int* dev_indices;  // scan of the flags -> output position per element
// host staging buffers for computing the compacted length
int* bools = (int*)calloc(n, sizeof(int));
int* indices = (int*)calloc(n, sizeof(int));
// allocate device memory
hipMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
hipMalloc((void**)&dev_bools, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_bools failed!");
hipMalloc((void**)&dev_pingPong, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_pingPong failed!");
hipMalloc((void**)&dev_indices, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_indices failed!");
hipMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
// copy input data to device
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
// time the map/scan/scatter pipeline
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// enough blocks to allocate one thread to each array element
int numBlocks = (n / blockSize) + 1;
// flag every non-zero element with a 1
Common::kernMapToBoolean << <numBlocks, blockSize >> > (n, dev_bools, dev_idata);
// dev_scan consumes its input, so scan a scratch copy and keep dev_bools
hipMemcpy(dev_pingPong, dev_bools, n * sizeof(int), hipMemcpyDeviceToDevice);
dev_scan(n, dev_indices, dev_pingPong);
// scatter each flagged element to its scanned index
Common::kernScatter << <numBlocks, blockSize >> > (n, dev_odata, dev_idata, dev_bools, dev_indices);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("%f\n", milliseconds);
hipEventDestroy(start);
hipEventDestroy(stop);
// copy flags and scanned indices back; the compacted length is the last
// scan value plus the last flag
hipMemcpy(indices, dev_indices, n * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(bools, dev_bools, n * sizeof(int), hipMemcpyDeviceToHost);
int newLength = indices[n - 1] + bools[n - 1]; // return value
hipMemcpy(odata, dev_odata, newLength * sizeof(int), hipMemcpyDeviceToHost);
// free memory (dev_pingPong was previously leaked)
hipFree(dev_idata);
hipFree(dev_odata);
hipFree(dev_bools);
hipFree(dev_pingPong);
hipFree(dev_indices);
free(indices);
free(bools);
return newLength;
}
}
}
| 63b70b3ef864063975abfcbb209523dc2815e858.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#include "stdio.h"
#include "stdlib.h"
#define blockSize 256
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Efficient {
// Flattened global index of the calling thread for a 1-D launch.
__device__ int threadIndex() {
return threadIdx.x + blockDim.x * blockIdx.x;
}
// One up-sweep (reduce) step of the work-efficient Blelloch scan.
// d is the current stride: every element whose 1-based index is a
// multiple of 2*d accumulates the partial sum d slots to its left;
// all other elements are copied through unchanged (addTerm == 0).
// Writes go to odata so idata stays intact for the whole step (ping-pong).
__global__ void kernUpSweep(int n, int d, int *odata, int *idata) {
int index = threadIndex();
if (index >= n) return; // guard the grid tail
int addTerm = (index + 1) % (d * 2) == 0 ? idata[index - d] : 0;
odata[index] = idata[index] + addTerm;
}
// One down-sweep step of the Blelloch scan. Only elements whose 1-based
// index is a multiple of d participate: each swaps its value down to the
// element d/2 slots to its left and keeps the sum of the two.
// NOTE(review): non-participating elements of odata are not written on
// this step — correctness relies on the caller's ping-pong ordering; verify.
__global__ void kernDownSweep(int length, int d, int *odata, int *idata) {
int index = threadIndex();
if (index >= length) return;
// On the first iteration (d == length), the single last element is seeded
// with the scan identity 0 instead of its reduced value.
if ((index + 1) % d == 0) {
int swapIndex = index - (d / 2);
int term = (length == d) && (index == d - 1) ? 0 : idata[index];
odata[index] = term + idata[swapIndex];
odata[swapIndex] = term;
}
}
// Rounds n up to the nearest power of 2 (returns 1 for n <= 1).
// Uses integer doubling instead of pow/ceil/log2: the floating-point
// round-trip can mis-round for large n, and log2(0) is undefined.
int bufferToPow2(int n) {
int p = 1;
while (p < n) p <<= 1;
return p;
}
// Runs the full Blelloch scan (up-sweep then down-sweep) on device buffers.
// Both buffers must hold at least bufferToPow2(n) elements. The pointers
// are ping-ponged locally after every launch (passed by value, so the
// caller's pointers are untouched); the buffer holding the final result
// therefore depends on the total iteration count.
// NOTE(review): callers read the result from their *idata* pointer — confirm
// the swap parity works out for all n.
void dev_scan(int n, int *dev_odata, int *dev_idata) {
int bufferedLength = bufferToPow2(n);
int numBlocks = getNumBlocks(blockSize, n); // enough blocks to allocate one thread to each array element
// upsweep
// NOTE(review): bound is n while the down-sweep walks bufferedLength —
// presumably these should match for non-power-of-2 n; verify.
for (int d = 1; d <= n; d *= 2) {
kernUpSweep << <numBlocks, blockSize >> >(n, d, dev_odata, dev_idata);
// swap dev_idata with dev_odata
int *swap = dev_idata;
dev_idata = dev_odata;
dev_odata = swap;
}
// downsweep: stride shrinks from the whole buffered range down to 1
for (int d = bufferedLength; d >= 1; d /= 2) {
kernDownSweep << <numBlocks, blockSize >> >(bufferedLength, d, dev_odata, dev_idata);
// swap dev_idata with dev_odata
int *swap = dev_idata;
dev_idata = dev_odata;
dev_odata = swap;
}
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 *
 * Device buffers are padded up to the next power of 2 because the
 * down-sweep walks the entire padded range; the buffers are zero-filled
 * before use so the kernels never read uninitialized device memory
 * (cudaMalloc does not zero its allocation).
 */
void scan(int n, int *odata, const int *idata) {
// declare arrays
int* dev_idata;
int* dev_odata;
int bufferedLength = bufferToPow2(n);
// allocate memory
cudaMalloc((void**)&dev_idata, bufferedLength * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
cudaMalloc((void**)&dev_odata, bufferedLength * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
// zero-fill so the padded tail [n, bufferedLength) holds the scan identity
cudaMemset(dev_idata, 0, bufferedLength * sizeof(int));
cudaMemset(dev_odata, 0, bufferedLength * sizeof(int));
// copy memory and run the algorithm
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
dev_scan(n, dev_odata, dev_idata);
// NOTE(review): the result is read back through dev_idata because
// dev_scan ping-pongs its local copies of the pointers — confirm the
// swap parity for all n.
cudaMemcpy(odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_idata);
cudaFree(dev_odata);
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded. Pipeline: map-to-boolean, scan the flags to
 * get output positions, scatter the surviving elements. Prints the GPU
 * pipeline time (ms) to stdout as a side effect.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
// device buffers
int* dev_idata;    // input values
int* dev_odata;    // compacted output
int* dev_bools;    // 0/1 flag per element (non-zero -> 1)
int* dev_pingPong; // scratch copy of the flags consumed by dev_scan
int* dev_indices;  // scan of the flags -> output position per element
// host staging buffers for computing the compacted length
int* bools = (int*)calloc(n, sizeof(int));
int* indices = (int*)calloc(n, sizeof(int));
// allocate device memory
cudaMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
cudaMalloc((void**)&dev_bools, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_bools failed!");
cudaMalloc((void**)&dev_pingPong, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_pingPong failed!");
cudaMalloc((void**)&dev_indices, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_indices failed!");
cudaMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
// copy input data to device
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
// time the map/scan/scatter pipeline
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// enough blocks to allocate one thread to each array element
int numBlocks = (n / blockSize) + 1;
// flag every non-zero element with a 1
Common::kernMapToBoolean << <numBlocks, blockSize >> > (n, dev_bools, dev_idata);
// dev_scan consumes its input, so scan a scratch copy and keep dev_bools
cudaMemcpy(dev_pingPong, dev_bools, n * sizeof(int), cudaMemcpyDeviceToDevice);
dev_scan(n, dev_indices, dev_pingPong);
// scatter each flagged element to its scanned index
Common::kernScatter << <numBlocks, blockSize >> > (n, dev_odata, dev_idata, dev_bools, dev_indices);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%f\n", milliseconds);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// copy flags and scanned indices back; the compacted length is the last
// scan value plus the last flag
cudaMemcpy(indices, dev_indices, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(bools, dev_bools, n * sizeof(int), cudaMemcpyDeviceToHost);
int newLength = indices[n - 1] + bools[n - 1]; // return value
cudaMemcpy(odata, dev_odata, newLength * sizeof(int), cudaMemcpyDeviceToHost);
// free memory (dev_pingPong was previously leaked)
cudaFree(dev_idata);
cudaFree(dev_odata);
cudaFree(dev_bools);
cudaFree(dev_pingPong);
cudaFree(dev_indices);
free(indices);
free(bools);
return newLength;
}
}
}
|
f9c0aa8a74c80e320eb6d97c2e2eb7eb6d54a121.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* sudokuSolver.cu
*
* Command used for accessing compute node:
* qrsh -l gpus=1 -l gpu_type=M2070
*
* Command for compiling:
* nvcc sudokuSolver.cu -o sudokuSolver
*
*
* Author: Maxwell Gates
*
* A parallelized backtracking sudoku solver. Solves sodoku puzzles
* using the backtracking method for the CPU and parallelization
* techniques using GPU then compares the two. Accepts input with each
* new puzzle separated by newline.
*
* Puzzles:
* 0-199 EASY (45 Given Numbers)
* 200-499 MEDIUM (30 Given Numbers)
* 500-999 HARD (25 Given Numbers)
*
*/
#include <cstdio>
#include <cstdlib>
#include <unistd.h>
#include <time.h>
#include <math.h>
// CPU & GPU Directives
#define NUM_PUZZLES 1 // Number of h_puzzles to solve
#define PUZZLE_SIZE 81
#define OPTIONS 2 // CPU & GPU
#include "cpu_functions.cuh"
#include "cuda_functions.cuh"
// Reads NUM_PUZZLES sudoku puzzles from test.txt, solves each on the CPU
// (backtracking) and on the GPU, and prints per-puzzle timings.
// Fixes vs. original: `allocSize` was undeclared (now allocSize_int),
// atoi(&temp_ch) was UB on a lone char, bitmap/empties/file were leaked.
int main(int argc, char **argv)
{
    // CPU timing variables
    struct timespec diff(struct timespec start, struct timespec end);
    struct timespec time1, time2;
    struct timespec time_stamp[OPTIONS][NUM_PUZZLES + 1];
    int clock_gettime(clockid_t clk_id, struct timespec * tp);
    // GPU timing variables
    hipEvent_t start, stop;
    float elapsed_gpu;
    int OPTION;
    // Host-side puzzle storage: puzzles are read here, then copied to the GPU.
    int h_puzzle[9][9];
    char temp_ch;
    const char *fileName;
    FILE *sudokuFile;
    int i, j, num;
    int cpu_success[NUM_PUZZLES];
    // GPU buffers
    int *d_puzzle;
    bool *bitmap;
    bool *empties;
    fileName = "test.txt"; // Where the test h_puzzles will be.
    sudokuFile = fopen(fileName, "r");
    if (sudokuFile == NULL)
    {
        printf("Couldn't open test file for reading!");
        return 1;
    }
    OPTION = 0;
    for (num = 0; num < NUM_PUZZLES; num++)
    {
        // Select GPU
        CUDA_SAFE_CALL(hipSetDevice(0));
        // Allocate GPU memory
        size_t allocSize_int = PUZZLE_SIZE * sizeof(int);
        size_t allocSize_bool = 9 * PUZZLE_SIZE * sizeof(bool);
        CUDA_SAFE_CALL(hipMalloc((void **)&d_puzzle, allocSize_int));
        CUDA_SAFE_CALL(hipMalloc((void **)&bitmap, allocSize_bool));
        CUDA_SAFE_CALL(hipMalloc((void **)&empties, allocSize_bool));
        // Parse one 81-digit puzzle (row-major, newline after the last cell).
        for (i = 0; i < 9; i++)
        {
            for (j = 0; j < 9; j++)
            {
                if ((j == 8) && (i == 8))
                    fscanf(sudokuFile, "%c\n", &temp_ch);
                else
                    fscanf(sudokuFile, "%c", &temp_ch);
                // was atoi(&temp_ch): UB on a non-null-terminated char
                h_puzzle[i][j] = temp_ch - '0';
            }
        }
        // Transfer the puzzle to the GPU memory (was undeclared `allocSize`)
        CUDA_SAFE_CALL(hipMemcpy(d_puzzle, h_puzzle, allocSize_int, hipMemcpyHostToDevice));
        printf("\nSolving Puzzle #%d (CPU)\n", num);
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
        cpu_success[num] = cpu_solveSudoku(h_puzzle);
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
        time_stamp[OPTION][num] = diff(time1, time2);
        // NOTE(review): OPTION grows once per puzzle; with NUM_PUZZLES > 1 it
        // would exceed OPTIONS — confirm intended indexing before scaling up.
        OPTION++;
        // Call CUDA Kernel
        printf("Solving Puzzle #%d (GPU)\n\n", num);
        // Set up thread/block hierarchy: one thread per cell
        dim3 threadsPerBlock(9, 9);
        int numBlocks = 9;
        // Create the cuda events
        hipEventCreate(&start);
        hipEventCreate(&stop);
        // Record event on the default stream
        hipEventRecord(start, 0);
        hipLaunchKernelGGL(( gpu_solveSudoku), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_puzzle, bitmap, empties);
        hipEventRecord(stop, 0);
        // Check for errors during launch
        CUDA_SAFE_CALL(hipPeekAtLastError());
        // Transfer the results back to the host (was undeclared `allocSize`)
        CUDA_SAFE_CALL(hipMemcpy(h_puzzle, d_puzzle, allocSize_int, hipMemcpyDeviceToHost));
        hipFree(d_puzzle);
        hipFree(bitmap);  // previously leaked every iteration
        hipFree(empties); // previously leaked every iteration
        hipEventSynchronize(stop);
        hipEventElapsedTime(&elapsed_gpu, start, stop);
        printf("\nGPU time: %f (nsec)\n", 1000000 * elapsed_gpu);
        hipEventDestroy(start);
        hipEventDestroy(stop);
    }
    fclose(sudokuFile); // was never closed
    /* Output times */
    printf("\n\nPuzzle #, CPU, GPU\n");
    for (i = 0; i < NUM_PUZZLES; i++)
    {
        printf("\nPuzzle #%d, ", i);
        for (j = 0; j < OPTIONS; j++)
        {
            if (j != 0)
                printf(", ");
            printf("%ld", (long int)((double)(GIG * time_stamp[j][i].tv_sec + time_stamp[j][i].tv_nsec)));
        }
    }
    // Checks to make sure the h_puzzles were solved correctly
    for (i = 0; i < NUM_PUZZLES; i++)
    {
        if (cpu_success[i] != 1)
        {
            printf("\nError in solving h_puzzle (CPU): %d", i);
        }
        // GPU Case
    }
    printf("\n");
    return 0;
}
| f9c0aa8a74c80e320eb6d97c2e2eb7eb6d54a121.cu | /*
* sudokuSolver.cu
*
* Command used for accessing compute node:
* qrsh -l gpus=1 -l gpu_type=M2070
*
* Command for compiling:
* nvcc sudokuSolver.cu -o sudokuSolver
*
*
* Author: Maxwell Gates
*
* A parallelized backtracking sudoku solver. Solves sodoku puzzles
* using the backtracking method for the CPU and parallelization
* techniques using GPU then compares the two. Accepts input with each
* new puzzle separated by newline.
*
* Puzzles:
* 0-199 EASY (45 Given Numbers)
* 200-499 MEDIUM (30 Given Numbers)
* 500-999 HARD (25 Given Numbers)
*
*/
#include <cstdio>
#include <cstdlib>
#include <unistd.h>
#include <time.h>
#include <math.h>
// CPU & GPU Directives
#define NUM_PUZZLES 1 // Number of h_puzzles to solve
#define PUZZLE_SIZE 81
#define OPTIONS 2 // CPU & GPU
#include "cpu_functions.cuh"
#include "cuda_functions.cuh"
// Reads NUM_PUZZLES sudoku puzzles from test.txt, solves each on the CPU
// (backtracking) and on the GPU, and prints per-puzzle timings.
// Fixes vs. original: `allocSize` was undeclared (now allocSize_int),
// atoi(&temp_ch) was UB on a lone char, bitmap/empties/file were leaked.
int main(int argc, char **argv)
{
    // CPU timing variables
    struct timespec diff(struct timespec start, struct timespec end);
    struct timespec time1, time2;
    struct timespec time_stamp[OPTIONS][NUM_PUZZLES + 1];
    int clock_gettime(clockid_t clk_id, struct timespec * tp);
    // GPU timing variables
    cudaEvent_t start, stop;
    float elapsed_gpu;
    int OPTION;
    // Host-side puzzle storage: puzzles are read here, then copied to the GPU.
    int h_puzzle[9][9];
    char temp_ch;
    const char *fileName;
    FILE *sudokuFile;
    int i, j, num;
    int cpu_success[NUM_PUZZLES];
    // GPU buffers
    int *d_puzzle;
    bool *bitmap;
    bool *empties;
    fileName = "test.txt"; // Where the test h_puzzles will be.
    sudokuFile = fopen(fileName, "r");
    if (sudokuFile == NULL)
    {
        printf("Couldn't open test file for reading!");
        return 1;
    }
    OPTION = 0;
    for (num = 0; num < NUM_PUZZLES; num++)
    {
        // Select GPU
        CUDA_SAFE_CALL(cudaSetDevice(0));
        // Allocate GPU memory
        size_t allocSize_int = PUZZLE_SIZE * sizeof(int);
        size_t allocSize_bool = 9 * PUZZLE_SIZE * sizeof(bool);
        CUDA_SAFE_CALL(cudaMalloc((void **)&d_puzzle, allocSize_int));
        CUDA_SAFE_CALL(cudaMalloc((void **)&bitmap, allocSize_bool));
        CUDA_SAFE_CALL(cudaMalloc((void **)&empties, allocSize_bool));
        // Parse one 81-digit puzzle (row-major, newline after the last cell).
        for (i = 0; i < 9; i++)
        {
            for (j = 0; j < 9; j++)
            {
                if ((j == 8) && (i == 8))
                    fscanf(sudokuFile, "%c\n", &temp_ch);
                else
                    fscanf(sudokuFile, "%c", &temp_ch);
                // was atoi(&temp_ch): UB on a non-null-terminated char
                h_puzzle[i][j] = temp_ch - '0';
            }
        }
        // Transfer the puzzle to the GPU memory (was undeclared `allocSize`)
        CUDA_SAFE_CALL(cudaMemcpy(d_puzzle, h_puzzle, allocSize_int, cudaMemcpyHostToDevice));
        printf("\nSolving Puzzle #%d (CPU)\n", num);
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
        cpu_success[num] = cpu_solveSudoku(h_puzzle);
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
        time_stamp[OPTION][num] = diff(time1, time2);
        // NOTE(review): OPTION grows once per puzzle; with NUM_PUZZLES > 1 it
        // would exceed OPTIONS — confirm intended indexing before scaling up.
        OPTION++;
        // Call CUDA Kernel
        printf("Solving Puzzle #%d (GPU)\n\n", num);
        // Set up thread/block hierarchy: one thread per cell
        dim3 threadsPerBlock(9, 9);
        int numBlocks = 9;
        // Create the cuda events
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        // Record event on the default stream
        cudaEventRecord(start, 0);
        gpu_solveSudoku<<<numBlocks, threadsPerBlock>>>(d_puzzle, bitmap, empties);
        cudaEventRecord(stop, 0);
        // Check for errors during launch
        CUDA_SAFE_CALL(cudaPeekAtLastError());
        // Transfer the results back to the host (was undeclared `allocSize`)
        CUDA_SAFE_CALL(cudaMemcpy(h_puzzle, d_puzzle, allocSize_int, cudaMemcpyDeviceToHost));
        cudaFree(d_puzzle);
        cudaFree(bitmap);  // previously leaked every iteration
        cudaFree(empties); // previously leaked every iteration
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed_gpu, start, stop);
        printf("\nGPU time: %f (nsec)\n", 1000000 * elapsed_gpu);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    fclose(sudokuFile); // was never closed
    /* Output times */
    printf("\n\nPuzzle #, CPU, GPU\n");
    for (i = 0; i < NUM_PUZZLES; i++)
    {
        printf("\nPuzzle #%d, ", i);
        for (j = 0; j < OPTIONS; j++)
        {
            if (j != 0)
                printf(", ");
            printf("%ld", (long int)((double)(GIG * time_stamp[j][i].tv_sec + time_stamp[j][i].tv_nsec)));
        }
    }
    // Checks to make sure the h_puzzles were solved correctly
    for (i = 0; i < NUM_PUZZLES; i++)
    {
        if (cpu_success[i] != 1)
        {
            printf("\nError in solving h_puzzle (CPU): %d", i);
        }
        // GPU Case
    }
    printf("\n");
    return 0;
}
|
79f753877bb7ba0a09b46b58aad4c41eebf14341.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Tuowen Zhao on 2/17/19.
// Experiments using unified memory (through ATS)
//
#include <brick-cuda.h>
#include <brick-mpi.h>
#include <brick.h>
#include <bricksetup.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <mpi.h>
#include "bitset.h"
#include "stencils/cudaarray.h"
#include "stencils/cudavfold.h"
#include "stencils/fake.h"
#include "stencils/stencils.h"
#include <brickcompare.h>
#include <multiarray.h>
#include "args.h"
#include <array-mpi.h>
#include <unistd.h>
typedef Brick<Dim<BDIM>, Dim<VFOLD>> Brick3D;
// Flat-array stencil kernel: one thread per grid point inside a TILE^3
// block, offset by PADDING to skip the ghost region. ST_GPU is a macro
// (presumably from the stencils headers) that expands to the stencil body
// using in_ptr/out_ptr/pos — TODO confirm its exact expansion.
__global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, unsigned *stride) {
long k = PADDING + blockIdx.z * TILE + threadIdx.z;
long j = PADDING + blockIdx.y * TILE + threadIdx.y;
long i = PADDING + blockIdx.x * TILE + threadIdx.x;
// linear index into the padded array; stride[1]/stride[2] are row/plane strides
long pos = i + j * stride[1] + k * stride[2];
ST_GPU;
}
// Brick-layout stencil kernel: one thread block per brick. `grid` maps
// logical brick coordinates to brick indices; here `stride` holds the
// brick-grid dimensions, not element strides. `brick(...)` is presumably a
// code-generation macro expanding the stencil over the brick — TODO confirm.
__global__ void brick_kernel(unsigned *grid, Brick3D in, Brick3D out, unsigned *stride) {
unsigned bk = blockIdx.z;
unsigned bj = blockIdx.y;
unsigned bi = blockIdx.x;
unsigned b = grid[bi + (bj + bk * stride[1]) * stride[0]];
brick(ST_SCRTPT, VSVEC, (BDIM), (VFOLD), b);
}
// Benchmark driver: runs a d3pt7-style stencil over an MPI Cartesian
// decomposition twice — once on a flat padded array, once on the brick
// layout — timing compute (CUDA events) and MPI exchange separately, then
// prints per-phase statistics and validates the brick result against the
// array result. Uses unified-memory advice/prefetch (see file header).
int main(int argc, char **argv) {
MPI_ITER = 100;
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
if (provided != MPI_THREAD_SERIALIZED) {
MPI_Finalize();
return 1;
}
MPI_Comm cart = parseArgs(argc, argv, "cuda-mmap");
if (cart != MPI_COMM_NULL) {
int rank;
MPI_Comm_rank(cart, &rank);
MEMFD::setup_prefix("mpi-main", rank);
int prd[3] = {1, 1, 1};
int coo[3];
MPI_Cart_get(cart, 3, (int *)dim_size.data(), prd, coo);
// stride: padded array extents; strideg: ghost-included extents;
// strideb: extents in units of bricks/tiles.
std::vector<long> stride(3), strideb(3), strideg(3);
for (int i = 0; i < 3; ++i) {
stride[i] = dom_size[i] + 2 * TILE + 2 * GZ;
strideg[i] = dom_size[i] + 2 * TILE;
strideb[i] = strideg[i] / TILE;
}
bElem *in_ptr = randomArray(stride);
hipDevice_t device = 0;
hipCtx_t pctx;
gpuCheck((hipError_t)hipSetDevice(device));
gpuCheck((hipError_t)hipCtxCreate(&pctx, HIP_CTX_SCHED_AUTO | HIP_CTX_MAP_HOST, device));
// Set up the brick decomposition and its ghost/skin metadata.
BrickDecomp<3, BDIM> bDecomp(dom_size, GZ);
bDecomp.comm = cart;
populate(cart, bDecomp, 0, 1, coo);
auto bSize = cal_size<BDIM>::value;
bDecomp.initialize(skin3d_good);
BrickInfo<3> bInfo = bDecomp.getBrickInfo();
auto bStorage = bInfo.mmap_alloc(bSize);
auto bStorageInt0 = bInfo.allocate(bSize);
auto bStorageInt1 = bInfo.allocate(bSize);
auto grid_ptr = (unsigned *)malloc(sizeof(unsigned) * strideb[2] * strideb[1] * strideb[0]);
auto grid = (unsigned(*)[strideb[1]][strideb[0]])grid_ptr;
for (long k = 0; k < strideb[2]; ++k)
for (long j = 0; j < strideb[1]; ++j)
for (long i = 0; i < strideb[0]; ++i)
grid[k][j][i] = bDecomp[k][j][i];
// Sanity check: adjacency must be symmetric (neighbor's mirror link
// points back) for every interior brick.
for (long k = 1; k < strideb[2] - 1; ++k)
for (long j = 1; j < strideb[1] - 1; ++j)
for (long i = 1; i < strideb[0] - 1; ++i) {
auto l = grid[k][j][i];
for (long id = 0; id < 27; ++id)
if (bInfo.adj[bInfo.adj[l][id]][26 - id] != l)
throw std::runtime_error("err");
}
Brick3D bIn(&bInfo, bStorage, 0);
copyToBrick<3>(strideg, {PADDING, PADDING, PADDING}, {0, 0, 0}, in_ptr, grid_ptr, bIn);
bElem *out_ptr = zeroArray(stride);
unsigned *arr_stride_dev = nullptr;
{
// Precompute row/plane strides of the padded array for the device.
unsigned arr_stride_tmp[3];
unsigned s = 1;
for (int i = 0; i < 3; ++i) {
arr_stride_tmp[i] = s;
s *= stride[i];
}
copyToDevice({3}, arr_stride_dev, arr_stride_tmp);
}
bElem *in_ptr_dev = nullptr;
bElem *out_ptr_dev = nullptr;
copyToDevice(stride, in_ptr_dev, in_ptr);
copyToDevice(stride, out_ptr_dev, out_ptr);
// Total bytes moved per exchange (send + receive), for bandwidth stats.
size_t tsize = 0;
for (int i = 0; i < bDecomp.ghost.size(); ++i)
tsize += bDecomp.ghost[i].len * bStorage.step * sizeof(bElem) * 2;
std::unordered_map<uint64_t, MPI_Datatype> stypemap;
std::unordered_map<uint64_t, MPI_Datatype> rtypemap;
exchangeArrPrepareTypes<3>(stypemap, rtypemap, {dom_size[0], dom_size[1], dom_size[2]},
{PADDING, PADDING, PADDING}, {GZ, GZ, GZ});
{
// Pin the unified-memory arrays to the host, then prefetch to device.
long arr_size = stride[0] * stride[1] * stride[2] * sizeof(bElem);
gpuCheck(hipMemAdvise(in_ptr, arr_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));
hipMemPrefetchAsync(in_ptr, arr_size, device);
gpuCheck(
hipMemAdvise(out_ptr, arr_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));
hipMemPrefetchAsync(out_ptr, arr_size, device);
}
// One timed iteration of the array benchmark: exchange ghosts, then run
// ST_ITER stencil sweeps ping-ponging between buffers. Note the first and
// last sweeps touch the unified-memory in_ptr directly.
auto arr_func = [&]() -> void {
float elapsed;
hipEvent_t c_0, c_1;
hipEventCreate(&c_0);
hipEventCreate(&c_1);
#ifdef USE_TYPES
exchangeArrTypes<3>(in_ptr, cart, bDecomp.rank_map, stypemap, rtypemap);
#else
exchangeArr<3>(in_ptr, cart, bDecomp.rank_map, {dom_size[0], dom_size[1], dom_size[2]},
{PADDING, PADDING, PADDING}, {GZ, GZ, GZ});
#endif
hipEventRecord(c_0);
dim3 block(strideb[0], strideb[1], strideb[2]), thread(TILE, TILE, TILE);
hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread), 0, 0, in_ptr, out_ptr_dev, arr_stride_dev);
for (int i = 0; i < ST_ITER / 2 - 1; ++i) {
hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread), 0, 0, out_ptr_dev, in_ptr_dev, arr_stride_dev);
hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread), 0, 0, in_ptr_dev, out_ptr_dev, arr_stride_dev);
}
hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread), 0, 0, out_ptr_dev, in_ptr, arr_stride_dev);
hipEventRecord(c_1);
hipEventSynchronize(c_1);
hipEventElapsedTime(&elapsed, c_0, c_1);
calctime += elapsed / 1000.0;
};
if (rank == 0)
std::cout << "d3pt7 MPI decomp" << std::endl;
int cnt;
double total;
total = time_mpi(arr_func, cnt, bDecomp);
cnt *= ST_ITER;
{
// Reduce per-phase timings across ranks and report (array layout).
mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD);
mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD);
mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD);
mpi_stats mspd_s =
mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD);
mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD);
mpi_stats pack_s = mpi_statistics(packtime / cnt, MPI_COMM_WORLD);
mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD);
if (rank == 0) {
total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg + pack_s.avg;
std::cout << "Arr: " << total << std::endl;
std::cout << "calc " << calc_s << std::endl;
std::cout << "pack " << pack_s << std::endl;
std::cout << "move " << move_s << std::endl;
std::cout << "call " << call_s << std::endl;
std::cout << "wait " << wait_s << std::endl;
std::cout << " | MPI size (MB): " << size_s << std::endl;
std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl;
double perf = (double)tot_elems * 1.0e-9;
perf = perf / total;
std::cout << "perf " << perf << " GStencil/s" << std::endl;
std::cout << std::endl;
}
}
// setup brick on device
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, hipMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
hipMalloc(&bInfo_dev, size);
hipMemcpy(bInfo_dev, &_bInfo_dev, size, hipMemcpyHostToDevice);
}
// Note: bIn_dev aliases the mmap-backed host storage (unified access);
// the two interior buffers live in device copies.
BrickStorage bStorageInt0_dev = movBrickStorage(bStorageInt0, hipMemcpyHostToDevice);
BrickStorage bStorageInt1_dev = movBrickStorage(bStorageInt1, hipMemcpyHostToDevice);
Brick3D bIn_dev(bInfo_dev, bStorage, 0);
Brick3D bInt0_dev(bInfo_dev, bStorageInt0_dev, 0);
Brick3D bInt1_dev(bInfo_dev, bStorageInt1_dev, 0);
unsigned *grid_dev_ptr = nullptr;
copyToDevice(strideb, grid_dev_ptr, grid_ptr);
unsigned *grid_stride_dev = nullptr;
{
unsigned grid_stride_tmp[3];
for (int i = 0; i < 3; ++i)
grid_stride_tmp[i] = strideb[i];
copyToDevice({3}, grid_stride_dev, grid_stride_tmp);
}
#ifndef DECOMP_PAGEUNALIGN
ExchangeView ev = bDecomp.exchangeView(bStorage);
#endif
// Keep the interior portion of the brick storage resident on the device.
gpuCheck(hipMemAdvise(bStorage.dat.get(), bStorage.step * bDecomp.sep_pos[2] * sizeof(bElem),
hipMemAdviseSetPreferredLocation, device));
hipMemPrefetchAsync(bStorage.dat.get(), bStorage.step * bDecomp.sep_pos[2] * sizeof(bElem),
device);
hipMemPrefetchAsync(grid_ptr, STRIDEB * STRIDEB * STRIDEB * sizeof(unsigned), device);
// One timed iteration of the brick benchmark, mirroring arr_func.
auto brick_func = [&]() -> void {
float elapsed;
hipEvent_t c_0, c_1;
hipEventCreate(&c_0);
hipEventCreate(&c_1);
#ifdef DECOMP_PAGEUNALIGN
bDecomp.exchange(bStorage);
#else
ev.exchange();
#endif
dim3 block(strideb[0], strideb[1], strideb[2]), thread(32);
hipEventRecord(c_0);
hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread), 0, 0, grid_dev_ptr, bIn_dev, bInt0_dev, grid_stride_dev);
for (int i = 0; i < ST_ITER / 2 - 1; ++i) {
hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread), 0, 0, grid_dev_ptr, bInt0_dev, bInt1_dev, grid_stride_dev);
hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread), 0, 0, grid_dev_ptr, bInt1_dev, bInt0_dev, grid_stride_dev);
}
hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread), 0, 0, grid_dev_ptr, bInt0_dev, bIn_dev, grid_stride_dev);
hipEventRecord(c_1);
hipEventSynchronize(c_1);
hipEventElapsedTime(&elapsed, c_0, c_1);
calctime += elapsed / 1000.0;
};
// NOTE(review): calctime is not reset between the array and brick runs —
// confirm time_mpi resets it, otherwise brick stats include array time.
total = time_mpi(brick_func, cnt, bDecomp);
cnt *= ST_ITER;
{
// Reduce per-phase timings across ranks and report (brick layout).
mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD);
mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD);
mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD);
mpi_stats mspd_s =
mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD);
mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD);
#ifndef DECOMP_PAGEUNALIGN
size_t opt_size = 0;
for (auto s : ev.seclen)
opt_size += s * 2;
mpi_stats opt_size_s = mpi_statistics((double)opt_size * 1.0e-6, MPI_COMM_WORLD);
#endif
mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD);
if (rank == 0) {
total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg;
std::cout << "Bri: " << total << std::endl;
std::cout << "calc " << calc_s << std::endl;
std::cout << "move " << move_s << std::endl;
std::cout << "call " << call_s << std::endl;
std::cout << "wait " << wait_s << std::endl;
std::cout << " | MPI size (MB): " << size_s << std::endl;
#ifndef DECOMP_PAGEUNALIGN
std::cout << " | Opt MPI size (MB): " << opt_size_s << std::endl;
#endif
std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl;
double perf = (double)tot_elems * 1.0e-9;
perf = perf / total;
std::cout << "perf " << perf << " GStencil/s" << std::endl;
}
}
// Validate: the brick result must match the flat-array result.
if (!compareBrick<3>({dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING},
{GZ, GZ, GZ}, in_ptr, grid_ptr, bIn))
std::cout << "result mismatch!" << std::endl;
free(bInfo.adj);
free(out_ptr);
free(in_ptr);
((MEMFD *)bStorage.mmap_info)->cleanup();
}
MPI_Finalize();
return 0;
}
| 79f753877bb7ba0a09b46b58aad4c41eebf14341.cu | //
// Created by Tuowen Zhao on 2/17/19.
// Experiments using unified memory (through ATS)
//
#include <brick-cuda.h>
#include <brick-mpi.h>
#include <brick.h>
#include <bricksetup.h>
#include <cuda.h>
#include <iostream>
#include <mpi.h>
#include "bitset.h"
#include "stencils/cudaarray.h"
#include "stencils/cudavfold.h"
#include "stencils/fake.h"
#include "stencils/stencils.h"
#include <brickcompare.h>
#include <multiarray.h>
#include "args.h"
#include <array-mpi.h>
#include <unistd.h>
typedef Brick<Dim<BDIM>, Dim<VFOLD>> Brick3D;
// Flat-array stencil kernel: one thread per grid point inside a TILE^3
// block, offset by PADDING to skip the ghost region. ST_GPU is a macro
// (presumably from the stencils headers) that expands to the stencil body
// using in_ptr/out_ptr/pos — TODO confirm its exact expansion.
__global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, unsigned *stride) {
long k = PADDING + blockIdx.z * TILE + threadIdx.z;
long j = PADDING + blockIdx.y * TILE + threadIdx.y;
long i = PADDING + blockIdx.x * TILE + threadIdx.x;
// linear index into the padded array; stride[1]/stride[2] are row/plane strides
long pos = i + j * stride[1] + k * stride[2];
ST_GPU;
}
// Brick-layout stencil kernel: one thread block per brick. `grid` maps
// logical brick coordinates to brick indices; here `stride` holds the
// brick-grid dimensions, not element strides. `brick(...)` is presumably a
// code-generation macro expanding the stencil over the brick — TODO confirm.
__global__ void brick_kernel(unsigned *grid, Brick3D in, Brick3D out, unsigned *stride) {
unsigned bk = blockIdx.z;
unsigned bj = blockIdx.y;
unsigned bi = blockIdx.x;
unsigned b = grid[bi + (bj + bk * stride[1]) * stride[0]];
brick(ST_SCRTPT, VSVEC, (BDIM), (VFOLD), b);
}
// Entry point for the "cuda-mmap" MPI stencil benchmark: runs a 3D 7-point
// stencil both on a flat array layout and on a brick layout, exchanges ghost
// zones over a 3D Cartesian communicator, and reports per-phase timing
// statistics (calc / pack / move / call / wait) for each variant.
int main(int argc, char **argv) {
MPI_ITER = 100;
int provided;
// The exchange helpers assume serialized threading support from MPI.
MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
if (provided != MPI_THREAD_SERIALIZED) {
MPI_Finalize();
return 1;
}
// parseArgs builds the 3D Cartesian communicator; ranks outside the process
// grid receive MPI_COMM_NULL and skip straight to finalization.
MPI_Comm cart = parseArgs(argc, argv, "cuda-mmap");
if (cart != MPI_COMM_NULL) {
int rank;
MPI_Comm_rank(cart, &rank);
MEMFD::setup_prefix("mpi-main", rank);
int prd[3] = {1, 1, 1};
int coo[3];
MPI_Cart_get(cart, 3, (int *)dim_size.data(), prd, coo);
// stride: padded per-axis extent of the flat arrays; strideg: ghost-extended
// extent; strideb: the same extent measured in bricks (TILE-sized blocks).
std::vector<long> stride(3), strideb(3), strideg(3);
for (int i = 0; i < 3; ++i) {
stride[i] = dom_size[i] + 2 * TILE + 2 * GZ;
strideg[i] = dom_size[i] + 2 * TILE;
strideb[i] = strideg[i] / TILE;
}
bElem *in_ptr = randomArray(stride);
CUdevice device = 0;
CUcontext pctx;
gpuCheck((cudaError_t)cudaSetDevice(device));
// NOTE(review): cuCtxCreate returns a CUresult (driver API), not a
// cudaError_t; the cast makes gpuCheck decode the wrong error space — confirm
// this is intentional.
gpuCheck((cudaError_t)cuCtxCreate(&pctx, CU_CTX_SCHED_AUTO | CU_CTX_MAP_HOST, device));
BrickDecomp<3, BDIM> bDecomp(dom_size, GZ);
bDecomp.comm = cart;
populate(cart, bDecomp, 0, 1, coo);
auto bSize = cal_size<BDIM>::value;
bDecomp.initialize(skin3d_good);
BrickInfo<3> bInfo = bDecomp.getBrickInfo();
// The main brick storage is mmap-backed so ghost sections can be exchanged
// in place (see ExchangeView below); the two Int storages are scratch.
auto bStorage = bInfo.mmap_alloc(bSize);
auto bStorageInt0 = bInfo.allocate(bSize);
auto bStorageInt1 = bInfo.allocate(bSize);
auto grid_ptr = (unsigned *)malloc(sizeof(unsigned) * strideb[2] * strideb[1] * strideb[0]);
auto grid = (unsigned(*)[strideb[1]][strideb[0]])grid_ptr;
for (long k = 0; k < strideb[2]; ++k)
for (long j = 0; j < strideb[1]; ++j)
for (long i = 0; i < strideb[0]; ++i)
grid[k][j][i] = bDecomp[k][j][i];
// Sanity check: brick adjacency must be symmetric — neighbour `id` of brick l
// must list l back as its mirror neighbour (index 26 - id).
for (long k = 1; k < strideb[2] - 1; ++k)
for (long j = 1; j < strideb[1] - 1; ++j)
for (long i = 1; i < strideb[0] - 1; ++i) {
auto l = grid[k][j][i];
for (long id = 0; id < 27; ++id)
if (bInfo.adj[bInfo.adj[l][id]][26 - id] != l)
throw std::runtime_error("err");
}
Brick3D bIn(&bInfo, bStorage, 0);
copyToBrick<3>(strideg, {PADDING, PADDING, PADDING}, {0, 0, 0}, in_ptr, grid_ptr, bIn);
bElem *out_ptr = zeroArray(stride);
unsigned *arr_stride_dev = nullptr;
{
// Linearized element strides for the flat-array kernel, mirrored on device.
unsigned arr_stride_tmp[3];
unsigned s = 1;
for (int i = 0; i < 3; ++i) {
arr_stride_tmp[i] = s;
s *= stride[i];
}
copyToDevice({3}, arr_stride_dev, arr_stride_tmp);
}
bElem *in_ptr_dev = nullptr;
bElem *out_ptr_dev = nullptr;
copyToDevice(stride, in_ptr_dev, in_ptr);
copyToDevice(stride, out_ptr_dev, out_ptr);
// Total bytes exchanged per iteration (send + receive), for bandwidth stats.
size_t tsize = 0;
for (int i = 0; i < bDecomp.ghost.size(); ++i)
tsize += bDecomp.ghost[i].len * bStorage.step * sizeof(bElem) * 2;
std::unordered_map<uint64_t, MPI_Datatype> stypemap;
std::unordered_map<uint64_t, MPI_Datatype> rtypemap;
exchangeArrPrepareTypes<3>(stypemap, rtypemap, {dom_size[0], dom_size[1], dom_size[2]},
{PADDING, PADDING, PADDING}, {GZ, GZ, GZ});
{
// Keep the arrays preferentially CPU-resident (MPI touches them on the
// host) but prefetch them to the GPU before the kernels run.
// NOTE(review): this only works if randomArray/zeroArray return managed
// (unified) memory, and the prefetch results are unchecked — TODO confirm.
long arr_size = stride[0] * stride[1] * stride[2] * sizeof(bElem);
gpuCheck(cudaMemAdvise(in_ptr, arr_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));
cudaMemPrefetchAsync(in_ptr, arr_size, device);
gpuCheck(
cudaMemAdvise(out_ptr, arr_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));
cudaMemPrefetchAsync(out_ptr, arr_size, device);
}
// One timed iteration of the flat-array variant: ghost exchange first, then
// ST_ITER stencil applications ping-ponging between the device buffers.
// Only the kernel span (c_0..c_1) is accumulated into calctime (seconds).
auto arr_func = [&]() -> void {
float elapsed;
cudaEvent_t c_0, c_1;
cudaEventCreate(&c_0);
cudaEventCreate(&c_1);
#ifdef USE_TYPES
exchangeArrTypes<3>(in_ptr, cart, bDecomp.rank_map, stypemap, rtypemap);
#else
exchangeArr<3>(in_ptr, cart, bDecomp.rank_map, {dom_size[0], dom_size[1], dom_size[2]},
{PADDING, PADDING, PADDING}, {GZ, GZ, GZ});
#endif
cudaEventRecord(c_0);
dim3 block(strideb[0], strideb[1], strideb[2]), thread(TILE, TILE, TILE);
arr_kernel<<<block, thread>>>(in_ptr, out_ptr_dev, arr_stride_dev);
for (int i = 0; i < ST_ITER / 2 - 1; ++i) {
arr_kernel<<<block, thread>>>(out_ptr_dev, in_ptr_dev, arr_stride_dev);
arr_kernel<<<block, thread>>>(in_ptr_dev, out_ptr_dev, arr_stride_dev);
}
// Last application writes back into in_ptr so the host-side comparison
// at the end of main can read the result.
arr_kernel<<<block, thread>>>(out_ptr_dev, in_ptr, arr_stride_dev);
cudaEventRecord(c_1);
cudaEventSynchronize(c_1);
cudaEventElapsedTime(&elapsed, c_0, c_1);
calctime += elapsed / 1000.0;
};
if (rank == 0)
std::cout << "d3pt7 MPI decomp" << std::endl;
int cnt;
double total;
total = time_mpi(arr_func, cnt, bDecomp);
cnt *= ST_ITER;
{
// Reduce per-rank timers into cross-rank statistics and print them once.
mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD);
mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD);
mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD);
mpi_stats mspd_s =
mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD);
mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD);
mpi_stats pack_s = mpi_statistics(packtime / cnt, MPI_COMM_WORLD);
mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD);
if (rank == 0) {
total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg + pack_s.avg;
std::cout << "Arr: " << total << std::endl;
std::cout << "calc " << calc_s << std::endl;
std::cout << "pack " << pack_s << std::endl;
std::cout << "move " << move_s << std::endl;
std::cout << "call " << call_s << std::endl;
std::cout << "wait " << wait_s << std::endl;
std::cout << " | MPI size (MB): " << size_s << std::endl;
std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl;
double perf = (double)tot_elems * 1.0e-9;
perf = perf / total;
std::cout << "perf " << perf << " GStencil/s" << std::endl;
std::cout << std::endl;
}
}
// Mirror the brick metadata and intermediate storages on the device.
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, cudaMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
cudaMalloc(&bInfo_dev, size);
cudaMemcpy(bInfo_dev, &_bInfo_dev, size, cudaMemcpyHostToDevice);
}
BrickStorage bStorageInt0_dev = movBrickStorage(bStorageInt0, cudaMemcpyHostToDevice);
BrickStorage bStorageInt1_dev = movBrickStorage(bStorageInt1, cudaMemcpyHostToDevice);
// NOTE(review): bIn_dev is built on the host-side mmap storage (prefetched
// below) rather than a moved copy — confirm it is device-accessible.
Brick3D bIn_dev(bInfo_dev, bStorage, 0);
Brick3D bInt0_dev(bInfo_dev, bStorageInt0_dev, 0);
Brick3D bInt1_dev(bInfo_dev, bStorageInt1_dev, 0);
unsigned *grid_dev_ptr = nullptr;
copyToDevice(strideb, grid_dev_ptr, grid_ptr);
unsigned *grid_stride_dev = nullptr;
{
unsigned grid_stride_tmp[3];
for (int i = 0; i < 3; ++i)
grid_stride_tmp[i] = strideb[i];
copyToDevice({3}, grid_stride_dev, grid_stride_tmp);
}
#ifndef DECOMP_PAGEUNALIGN
// Page-aligned exchange view: lets MPI send brick ghost sections directly
// out of the mmap-backed storage without packing.
ExchangeView ev = bDecomp.exchangeView(bStorage);
#endif
gpuCheck(cudaMemAdvise(bStorage.dat.get(), bStorage.step * bDecomp.sep_pos[2] * sizeof(bElem),
cudaMemAdviseSetPreferredLocation, device));
cudaMemPrefetchAsync(bStorage.dat.get(), bStorage.step * bDecomp.sep_pos[2] * sizeof(bElem),
device);
// NOTE(review): this prefetch sizes grid_ptr with the compile-time STRIDEB
// constant while the allocation above used the runtime strideb[] values —
// confirm these agree.
cudaMemPrefetchAsync(grid_ptr, STRIDEB * STRIDEB * STRIDEB * sizeof(unsigned), device);
// One timed iteration of the brick variant: ghost exchange on the brick
// storage, then ST_ITER stencil applications ping-ponging between bricks.
auto brick_func = [&]() -> void {
float elapsed;
cudaEvent_t c_0, c_1;
cudaEventCreate(&c_0);
cudaEventCreate(&c_1);
#ifdef DECOMP_PAGEUNALIGN
bDecomp.exchange(bStorage);
#else
ev.exchange();
#endif
dim3 block(strideb[0], strideb[1], strideb[2]), thread(32);
cudaEventRecord(c_0);
brick_kernel<<<block, thread>>>(grid_dev_ptr, bIn_dev, bInt0_dev, grid_stride_dev);
for (int i = 0; i < ST_ITER / 2 - 1; ++i) {
brick_kernel<<<block, thread>>>(grid_dev_ptr, bInt0_dev, bInt1_dev, grid_stride_dev);
brick_kernel<<<block, thread>>>(grid_dev_ptr, bInt1_dev, bInt0_dev, grid_stride_dev);
}
brick_kernel<<<block, thread>>>(grid_dev_ptr, bInt0_dev, bIn_dev, grid_stride_dev);
cudaEventRecord(c_1);
cudaEventSynchronize(c_1);
cudaEventElapsedTime(&elapsed, c_0, c_1);
calctime += elapsed / 1000.0;
};
total = time_mpi(brick_func, cnt, bDecomp);
cnt *= ST_ITER;
{
mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD);
mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD);
mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD);
mpi_stats mspd_s =
mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD);
mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD);
#ifndef DECOMP_PAGEUNALIGN
// Bytes actually moved by the page-aligned exchange (send + receive).
size_t opt_size = 0;
for (auto s : ev.seclen)
opt_size += s * 2;
mpi_stats opt_size_s = mpi_statistics((double)opt_size * 1.0e-6, MPI_COMM_WORLD);
#endif
mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD);
if (rank == 0) {
total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg;
std::cout << "Bri: " << total << std::endl;
std::cout << "calc " << calc_s << std::endl;
std::cout << "move " << move_s << std::endl;
std::cout << "call " << call_s << std::endl;
std::cout << "wait " << wait_s << std::endl;
std::cout << " | MPI size (MB): " << size_s << std::endl;
#ifndef DECOMP_PAGEUNALIGN
std::cout << " | Opt MPI size (MB): " << opt_size_s << std::endl;
#endif
std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl;
double perf = (double)tot_elems * 1.0e-9;
perf = perf / total;
std::cout << "perf " << perf << " GStencil/s" << std::endl;
}
}
// Cross-check: compare the flat-array result (in in_ptr) with the brick
// result (in bIn) element-wise.
if (!compareBrick<3>({dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING},
{GZ, GZ, GZ}, in_ptr, grid_ptr, bIn))
std::cout << "result mismatch!" << std::endl;
free(bInfo.adj);
free(out_ptr);
free(in_ptr);
// NOTE(review): grid_ptr and the device-side allocations are never released,
// and free() is only correct here if in_ptr/out_ptr are malloc-compatible —
// harmless right before MPI_Finalize, but worth confirming.
((MEMFD *)bStorage.mmap_info)->cleanup();
}
MPI_Finalize();
return 0;
}
|
f92c3f093065995e5297f36ff6eac78e95f4923b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------200
// plasmaKernel_gpu_2
//----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------200
// One logical block computes the pairwise particle interactions of a single
// "home" box against itself and its neighbour boxes (lavaMD-style molecular
// dynamics kernel, scheduled through gloop's logical block index).
//
//   loop      - gloop device loop supplying the logical block index
//   d_par_gpu - global parameters (only .alpha is used here)
//   d_dim_gpu - dimensions (.number_boxes bounds the logical block index)
//   d_box_gpu - per-box metadata: particle offset and neighbour list (.nn/.nei)
//   d_rv_gpu  - particle position/"v" vectors, indexed by box offset
//   d_qv_gpu  - particle charges, indexed by box offset
//   d_fv_gpu  - accumulated force output, indexed by box offset
//
// Each thread strides over the NUMBER_PAR_PER_BOX particles of the home box
// in steps of NUMBER_THREADS, so blockDim.x is assumed to be NUMBER_THREADS.
// Accumulation into fA needs no atomics: every wtx is owned by exactly one
// thread of this block (presumably box offsets are disjoint across blocks —
// TODO confirm).
__device__ void kernel_gpu_cuda(
gloop::DeviceLoop<>* loop,
par_str d_par_gpu,
dim_str d_dim_gpu,
box_str* d_box_gpu,
FOUR_VECTOR* d_rv_gpu,
fp* d_qv_gpu,
FOUR_VECTOR* d_fv_gpu)
{
// Thread parameters: bx is the (logical) box index, wtx the work index that
// strides across particles.
int bx = loop->logicalBlockIdx().x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int wtx = tx;
// Process one box per logical block; surplus blocks do nothing.
if (bx < d_dim_gpu.number_boxes) {
// Precomputed 2*alpha^2 used in the exponential weight below.
fp a2 = 2.0 * d_par_gpu.alpha * d_par_gpu.alpha;
// home box
int first_i;
FOUR_VECTOR* rA;
FOUR_VECTOR* fA;
// Home-box vectors staged in shared memory once and reused for every
// neighbour box.
__shared__ FOUR_VECTOR rA_shared[NUMBER_PAR_PER_BOX];
// nei box
int pointer;
int k = 0;
int first_j;
FOUR_VECTOR* rB;
fp* qB;
int j = 0;
// Neighbour-box vectors and charges, re-staged once per neighbour box.
__shared__ FOUR_VECTOR rB_shared[NUMBER_PAR_PER_BOX];
__shared__ double qB_shared[NUMBER_PAR_PER_BOX];
// Per-pair temporaries.
fp r2;
fp u2;
fp vij;
fp fs;
fp fxij;
fp fyij;
fp fzij;
THREE_VECTOR d;
// Home box: particles live at a contiguous offset in the global arrays.
first_i = d_box_gpu[bx].offset;
rA = &d_rv_gpu[first_i];
fA = &d_fv_gpu[first_i];
// Cooperatively copy home-box vectors to shared memory.
while (wtx < NUMBER_PAR_PER_BOX) {
rA_shared[wtx] = rA[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// Barrier before any thread reads data staged by another thread.
__syncthreads();
// Loop over the home box itself (k == 0) and its nn neighbour boxes.
for (k = 0; k < (1 + d_box_gpu[bx].nn); k++) {
if (k == 0) {
pointer = bx; // set first box to be processed to home box
}
else {
pointer = d_box_gpu[bx].nei[k - 1].number; // remaining boxes are nei boxes
}
// Neighbour box offset and source pointers.
first_j = d_box_gpu[pointer].offset;
rB = &d_rv_gpu[first_j];
qB = &d_qv_gpu[first_j];
// Cooperatively stage the neighbour box in shared memory.
while (wtx < NUMBER_PAR_PER_BOX) {
rB_shared[wtx] = rB[wtx];
qB_shared[wtx] = qB[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// Barrier: the next section reads data brought in by other threads.
__syncthreads();
// Each thread handles home particles wtx, wtx+NUMBER_THREADS, ... and
// accumulates the contribution of every neighbour particle j.
while (wtx < NUMBER_PAR_PER_BOX) {
for (j = 0; j < NUMBER_PAR_PER_BOX; j++) {
// DOT is an externally defined macro; the (fp) casts narrow the
// double shared-memory values to the configured precision.
r2 = (fp)rA_shared[wtx].v + (fp)rB_shared[j].v - DOT((fp)rA_shared[wtx], (fp)rB_shared[j]);
u2 = a2 * r2;
vij = exp(-u2);
fs = 2 * vij;
d.x = (fp)rA_shared[wtx].x - (fp)rB_shared[j].x;
fxij = fs * d.x;
d.y = (fp)rA_shared[wtx].y - (fp)rB_shared[j].y;
fyij = fs * d.y;
d.z = (fp)rA_shared[wtx].z - (fp)rB_shared[j].z;
fzij = fs * d.z;
// Charge-weighted accumulation into the global force array.
fA[wtx].v += (double)((fp)qB_shared[j] * vij);
fA[wtx].x += (double)((fp)qB_shared[j] * fxij);
fA[wtx].y += (double)((fp)qB_shared[j] * fyij);
fA[wtx].z += (double)((fp)qB_shared[j] * fzij);
}
// increment work thread index
wtx = wtx + NUMBER_THREADS;
}
// reset work index
wtx = tx;
// Barrier before the next neighbour box overwrites rB_shared/qB_shared.
__syncthreads();
}
}
}
| f92c3f093065995e5297f36ff6eac78e95f4923b.cu | //----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------200
// plasmaKernel_gpu_2
//----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------200
// One logical block computes the pairwise particle interactions of a single
// "home" box against itself and its neighbour boxes (lavaMD-style molecular
// dynamics kernel, scheduled through gloop's logical block index).
//
//   loop      - gloop device loop supplying the logical block index
//   d_par_gpu - global parameters (only .alpha is used here)
//   d_dim_gpu - dimensions (.number_boxes bounds the logical block index)
//   d_box_gpu - per-box metadata: particle offset and neighbour list (.nn/.nei)
//   d_rv_gpu  - particle position/"v" vectors, indexed by box offset
//   d_qv_gpu  - particle charges, indexed by box offset
//   d_fv_gpu  - accumulated force output, indexed by box offset
//
// Each thread strides over the NUMBER_PAR_PER_BOX particles of the home box
// in steps of NUMBER_THREADS, so blockDim.x is assumed to be NUMBER_THREADS.
// Accumulation into fA needs no atomics: every wtx is owned by exactly one
// thread of this block (presumably box offsets are disjoint across blocks —
// TODO confirm).
__device__ void kernel_gpu_cuda(
gloop::DeviceLoop<>* loop,
par_str d_par_gpu,
dim_str d_dim_gpu,
box_str* d_box_gpu,
FOUR_VECTOR* d_rv_gpu,
fp* d_qv_gpu,
FOUR_VECTOR* d_fv_gpu)
{
// Thread parameters: bx is the (logical) box index, wtx the work index that
// strides across particles.
int bx = loop->logicalBlockIdx().x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int wtx = tx;
// Process one box per logical block; surplus blocks do nothing.
if (bx < d_dim_gpu.number_boxes) {
// Precomputed 2*alpha^2 used in the exponential weight below.
fp a2 = 2.0 * d_par_gpu.alpha * d_par_gpu.alpha;
// home box
int first_i;
FOUR_VECTOR* rA;
FOUR_VECTOR* fA;
// Home-box vectors staged in shared memory once and reused for every
// neighbour box.
__shared__ FOUR_VECTOR rA_shared[NUMBER_PAR_PER_BOX];
// nei box
int pointer;
int k = 0;
int first_j;
FOUR_VECTOR* rB;
fp* qB;
int j = 0;
// Neighbour-box vectors and charges, re-staged once per neighbour box.
__shared__ FOUR_VECTOR rB_shared[NUMBER_PAR_PER_BOX];
__shared__ double qB_shared[NUMBER_PAR_PER_BOX];
// Per-pair temporaries.
fp r2;
fp u2;
fp vij;
fp fs;
fp fxij;
fp fyij;
fp fzij;
THREE_VECTOR d;
// Home box: particles live at a contiguous offset in the global arrays.
first_i = d_box_gpu[bx].offset;
rA = &d_rv_gpu[first_i];
fA = &d_fv_gpu[first_i];
// Cooperatively copy home-box vectors to shared memory.
while (wtx < NUMBER_PAR_PER_BOX) {
rA_shared[wtx] = rA[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// Barrier before any thread reads data staged by another thread.
__syncthreads();
// Loop over the home box itself (k == 0) and its nn neighbour boxes.
for (k = 0; k < (1 + d_box_gpu[bx].nn); k++) {
if (k == 0) {
pointer = bx; // set first box to be processed to home box
}
else {
pointer = d_box_gpu[bx].nei[k - 1].number; // remaining boxes are nei boxes
}
// Neighbour box offset and source pointers.
first_j = d_box_gpu[pointer].offset;
rB = &d_rv_gpu[first_j];
qB = &d_qv_gpu[first_j];
// Cooperatively stage the neighbour box in shared memory.
while (wtx < NUMBER_PAR_PER_BOX) {
rB_shared[wtx] = rB[wtx];
qB_shared[wtx] = qB[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// Barrier: the next section reads data brought in by other threads.
__syncthreads();
// Each thread handles home particles wtx, wtx+NUMBER_THREADS, ... and
// accumulates the contribution of every neighbour particle j.
while (wtx < NUMBER_PAR_PER_BOX) {
for (j = 0; j < NUMBER_PAR_PER_BOX; j++) {
// DOT is an externally defined macro; the (fp) casts narrow the
// double shared-memory values to the configured precision.
r2 = (fp)rA_shared[wtx].v + (fp)rB_shared[j].v - DOT((fp)rA_shared[wtx], (fp)rB_shared[j]);
u2 = a2 * r2;
vij = exp(-u2);
fs = 2 * vij;
d.x = (fp)rA_shared[wtx].x - (fp)rB_shared[j].x;
fxij = fs * d.x;
d.y = (fp)rA_shared[wtx].y - (fp)rB_shared[j].y;
fyij = fs * d.y;
d.z = (fp)rA_shared[wtx].z - (fp)rB_shared[j].z;
fzij = fs * d.z;
// Charge-weighted accumulation into the global force array.
fA[wtx].v += (double)((fp)qB_shared[j] * vij);
fA[wtx].x += (double)((fp)qB_shared[j] * fxij);
fA[wtx].y += (double)((fp)qB_shared[j] * fyij);
fA[wtx].z += (double)((fp)qB_shared[j] * fzij);
}
// increment work thread index
wtx = wtx + NUMBER_THREADS;
}
// reset work index
wtx = tx;
// Barrier before the next neighbour box overwrites rB_shared/qB_shared.
__syncthreads();
}
}
}
|
cdddca7f7513a0c7ffaac2152a569e79d6d47fd8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vector_powx.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness for the vector_powx kernel: sweeps the
// first argv[1] entries of matrices_ and all 20 launch configurations in
// blocks_, timing 1000 back-to-back kernel launches per configuration after a
// 10-launch warm-up.  Prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" lines.
// Note: the timed loop measures launch/enqueue time only (no sync before the
// end timestamp), matching the original harness behaviour.
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Guard: the harness dereferences argv[1]; fail cleanly when it is absent.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_len>" << endl;
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int n = 1;
            const REAL *x = NULL;
            // FIX: size the buffers in bytes (elements * sizeof(REAL)); the
            // generated code allocated only XSIZE*YSIZE bytes, which is too
            // small for any REAL wider than one byte.
            hipMalloc(&x, XSIZE * YSIZE * sizeof(REAL));
            const int offset_x = 1;
            const int stride_x = 1;
            const REAL b = 1;
            REAL *y = NULL;
            hipMalloc(&y, XSIZE * YSIZE * sizeof(REAL));
            const int offset_y = 1;
            const int stride_y = 1;
            // Round the problem size up to a multiple of the block shape so the
            // grid dimensions divide evenly.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
                iXSIZE++;
            while (iYSIZE % BLOCKY != 0)
                iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // no-op call to make sure the context is initialized
            hipLaunchKernelGGL(vector_powx, gridBlock, threadBlock, 0, 0,
                               n, x, offset_x, stride_x, b, y, offset_y, stride_y);
            hipDeviceSynchronize();
            // Warm-up launches, excluded from the timing below.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(vector_powx, gridBlock, threadBlock, 0, 0,
                                   n, x, offset_x, stride_x, b, y, offset_y, stride_y);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(vector_powx, gridBlock, threadBlock, 0, 0,
                                   n, x, offset_x, stride_x, b, y, offset_y, stride_y);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // FIX: release the device buffers; the generated code leaked both
            // allocations on every one of the 20 * matrix_len iterations.
            hipFree((void *)x);
            hipFree(y);
        }
    }
    return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vector_powx.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness for the vector_powx kernel: sweeps the
// first argv[1] entries of matrices_ and all 20 launch configurations in
// blocks_, timing 1000 back-to-back kernel launches per configuration after a
// 10-launch warm-up.  Prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" lines.
// Note: the timed loop measures launch/enqueue time only (no sync before the
// end timestamp), matching the original harness behaviour.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard: the harness dereferences argv[1]; fail cleanly when it is absent.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_len>" << endl;
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int n = 1;
            const REAL *x = NULL;
            // FIX: size the buffers in bytes (elements * sizeof(REAL)); the
            // generated code allocated only XSIZE*YSIZE bytes, which is too
            // small for any REAL wider than one byte.
            cudaMalloc(&x, XSIZE * YSIZE * sizeof(REAL));
            const int offset_x = 1;
            const int stride_x = 1;
            const REAL b = 1;
            REAL *y = NULL;
            cudaMalloc(&y, XSIZE * YSIZE * sizeof(REAL));
            const int offset_y = 1;
            const int stride_y = 1;
            // Round the problem size up to a multiple of the block shape so the
            // grid dimensions divide evenly.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
                iXSIZE++;
            while (iYSIZE % BLOCKY != 0)
                iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // no-op call to make sure the context is initialized
            vector_powx<<<gridBlock, threadBlock>>>(n, x, offset_x, stride_x, b, y, offset_y, stride_y);
            cudaDeviceSynchronize();
            // Warm-up launches, excluded from the timing below.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                vector_powx<<<gridBlock, threadBlock>>>(n, x, offset_x, stride_x, b, y, offset_y, stride_y);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                vector_powx<<<gridBlock, threadBlock>>>(n, x, offset_x, stride_x, b, y, offset_y, stride_y);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // FIX: release the device buffers; the generated code leaked both
            // allocations on every one of the 20 * matrix_len iterations.
            cudaFree((void *)x);
            cudaFree(y);
        }
    }
    return 0;
}
70c9b53b1626b0060fd66d036b301d27d381b69f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "magma.h"
#include "magma_lapack.h"
#include <sys/time.h>
#include<hiprand/hiprand.h>
#include<hiprand/hiprand_kernel.h>
// Function to measure the time in miliseconds
// Returns the elapsed time from x to y in MICROseconds (despite the legacy
// comment mentioning milliseconds): (y - x) as a double.  Negative when y
// precedes x.
double time_diff(struct timeval x , struct timeval y)
{
    double start_us = (double)x.tv_sec * 1000000 + (double)x.tv_usec;
    double end_us = (double)y.tv_sec * 1000000 + (double)y.tv_usec;
    return end_us - start_us;
}
//Function to handle cuda errors
// Aborts the process with a descriptive message when a HIP API call fails.
// Intended to be invoked through the HANDLE_ERROR macro, which supplies the
// call site's file name and line number.  Succeeding calls return silently.
static void HandleError( hipError_t err,
                         const char *file,
                         int line ) {
    if (err == hipSuccess)
        return;
    printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Needed for magma_dgels
# define max(a,b) (((a)<(b ))?( b):(a))
// Device function which calculates the vector and constructs the matrix
// which participate in magma_dgels function.
// Builds, in one launch, both operands of the least-squares circle fit solved
// by magma_dgels in fitring():
//   block 0: right-hand side  b[i] = -(x[i]^2 + y[i]^2)
//   block 1: design matrix    A = [x | y | 1], stored column-major with
//            column stride numofpoints (MAGMA convention)
// Launched as <<<2, l>>> with l == numofpoints, so one thread handles one
// point and blockIdx.x selects which output to fill.
__global__ void vecprod(double *x, double *y,double *b, double *A, int numofpoints){
double h1,h2;
int block = threadIdx.x;
if(blockIdx.x==0){
if(block<numofpoints){
h1=x[block]*x[block];
h2=y[block]*y[block];
b[block]=-(h1+h2);
}
}
if(blockIdx.x==1){
if(block<numofpoints){
A[block] = x[block];
A[block+numofpoints] = y[block];
A[block+2*numofpoints] = 1;
}
}
}
// Fitring function calculates the center and radius of the circles
// Least-squares circle fit through l points (algebraic/Kasa fit): solves
// min || [x y 1] * a + (x^2 + y^2) || for a = (a0, a1, a2) with MAGMA's
// GPU dgels, then converts to center (-a0/2, -a1/2) and radius
// sqrt((a0^2 + a1^2)/4 - a2), written into results[0..2].
// pts decays to a pointer, so only the first l rows are read; callers pass
// VLAs of exactly l rows (see fitCircles).
void fitring(double pts[50][2], double results[], int l){
int j;
double x[l],y[l],b[l],A[l*3];
double *dev_x,*dev_y,*dev_b, *dev_A;
// Split the point list into separate x/y arrays for the kernel.
for(j=0; j<l; j++){
x[j]=pts[j][0];
y[j]=pts[j][1];
}
// Stage x/y on the device and build b = -(x^2+y^2) and A = [x y 1]
// (column-major) with the vecprod kernel.
HANDLE_ERROR( hipMalloc( (void**)&dev_x, l * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_y, l * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b, l * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_A, l * 3 * sizeof(double) ) );
HANDLE_ERROR(hipMemcpy(dev_x, x, l * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_y, y, l * sizeof(double), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( vecprod), dim3(2),dim3(l), 0, 0, dev_x, dev_y, dev_b,dev_A, l);
HANDLE_ERROR(hipMemcpy(b, dev_b, l * sizeof(double),hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(A, dev_A, l * 3 * sizeof(double),hipMemcpyDeviceToHost));
// NOTE(review): dev_b is never freed (only dev_x/dev_y/dev_A are) — leak.
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_A);
// MAGMA least-squares setup.  NOTE(review): magma_init() is re-run on every
// call, and `a` below is declared but never used.
double a[3];
double *mag_A, *mag_b;
magma_init();
magma_int_t m,n;
m=l;
n=3;
magma_int_t info, nb, lworkgpu,l1,l2,lhwork;
double *hwork, tmp[1];
nb = magma_get_dgeqrf_nb (m);
lworkgpu = (m-n + nb )*(1 +2* nb);
magma_dmalloc (&mag_b , m*1);
magma_dmalloc (&mag_A , m*n);
// NOTE(review): tmp[0] is read UNINITIALIZED here — the usual lwork = -1
// workspace-size query call to magma_dgels_gpu appears to be missing, so
// l1/l2 are garbage; lhwork may become arbitrarily large (or small only
// because lworkgpu wins the max).  TODO confirm against the MAGMA examples.
l1 = ( magma_int_t ) MAGMA_D_REAL ( tmp [0] );
l2 = ( magma_int_t ) MAGMA_D_REAL ( tmp [0] );
lhwork = max ( max ( l1 , l2 ), lworkgpu );
magma_dmalloc_cpu (& hwork , lhwork );
// Upload A (m x 3) and b (m x 1), solve, and pull back the 3 coefficients.
magma_dsetmatrix ( m, n, A, m, mag_A , m );
magma_dsetmatrix ( m, 1 , b, m, mag_b , m );
magma_dgels_gpu(MagmaNoTrans, m, n, 1,mag_A, m, mag_b, m, hwork, lworkgpu, &info);
// NOTE(review): info is not checked, and mag_A/mag_b/hwork are never freed.
magma_dgetmatrix ( n, 1 , mag_b , m , b, n );
// Convert the algebraic solution to center and radius.
results[0] = -0.5* b[0];
results[1] = -0.5* b[1];
results[2] = sqrt((b[0]*b[0]+b[1]*b[1])/4-b[2]);
}
//Fitcircles function fits circles to points
// Fits one circle per cluster label: for each label c in [0, i), gathers the
// points whose assignment in point[] equals c and runs the least-squares ring
// fit on them.  A cluster with fewer than 4 members cannot determine a circle
// and receives the sentinel (0, 0, 100) instead.
void fitCircles(double points[][2],double point[], int i, double circles[][3],int numofpoints){
    double pts[numofpoints][2];
    for (int c = 0; c < i; c++) {
        // Gather the points currently assigned to circle c.
        int count = 0;
        for (int p = 0; p < numofpoints; p++) {
            if (point[p] == c) {
                pts[count][0] = points[p][0];
                pts[count][1] = points[p][1];
                count++;
            }
        }
        if (count < 4) {
            // Too few points: park the circle far away with a huge radius.
            circles[c][0] = 0;
            circles[c][1] = 0;
            circles[c][2] = 100;
        } else {
            double res[3];
            // Least-squares fit over the gathered subset.
            fitring(pts, res, count);
            circles[c][0] = res[0];
            circles[c][1] = res[1];
            circles[c][2] = res[2];
        }
    }
}
//circle Dist function calculates the distance of the points from each circle. Called only from device
// Device helper: squared algebraic distance of the point (po1, po2) to each
// of the i candidate circles.  circles1 is a flattened i x 3 array of
// (cx, cy, r); d[c] receives ((px-cx)^2 + (py-cy)^2 - r^2)^2 for circle c.
// The caller's d buffer must hold at least i floats (callers use d[5]).
__device__ void circleDist(double po1, double po2, double *circles1, int i, float d[]){
    for (int c = 0; c < i; c++) {
        double dx = po1 - circles1[c * 3];
        double dy = po2 - circles1[c * 3 + 1];
        double rad = circles1[c * 3 + 2];
        double alg = dx * dx + dy * dy - rad * rad;
        d[c] = alg * alg;   // narrowed to float on assignment, as before
    }
}
//findPoints finds the points of a circle by calling circleDist and assigning each point to its nearest circle
// Assignment step of the circle-clustering loop: one block per point
// (launched as <<<numofpoints, 1>>>; the numofpoints parameter itself is
// unused here).  Each block computes the distance of its (x, y) point —
// stored interleaved in points[] — to all i circles, reassigns the point to
// the nearest one in points1[], and counts label changes in res[0] via
// atomicAdd so the host can detect convergence (res[0] == 0).
// The local buffer d[5] caps the supported circle count at 5.
__global__ void findPoints(double *points, double *circles1, double *points1, int numofpoints, int i, int *res){
int block = blockIdx.x,j,pos;
float d[5], min;
circleDist(points[2*block],points[2*block+1],circles1,i,d);
// Arg-min over the i candidate circles.
min = d[0];
pos = 0;
for(j=0; j<i; j++){
if(d[j]<min){
min = d[j];
pos = j;
}
}
// Count reassignments across all blocks (double == int comparison on the
// stored label, as written by the previous iteration).
if (points1[block]!=pos){
atomicAdd(&res[0], 1);
}
points1[block] = pos;
}
// Fill perm[0..n-1] with a random permutation of 0..n-1, Fisher-Yates style,
// driven by rand(). Used to generate a random assignment of points.
void randperm(int n, int perm[])
{
    for (int k = 0; k < n; k++) {
        perm[k] = k;
    }
    for (int k = 0; k < n; k++) {
        // Pick a random slot in the not-yet-fixed suffix and swap it in.
        int pick  = rand() % (n - k) + k;
        int saved = perm[pick];
        perm[pick] = perm[k];
        perm[k] = saved;
    }
}
//Setting up kernel for generating random numbers: one hiprand RNG state per
//thread, all seeded with the same seed but distinct subsequence ids.
__global__ void setup_kernel ( hiprandState_t * state, unsigned long seed )
{
    // threadIdx.x doubles as the per-thread RNG subsequence id.
    int id = threadIdx.x;
    hiprand_init ( seed, id, 0, &state[id] );
}
//Generating random numbers in the device using hiprand's functions.
//hiprand_uniform returns values in (0, 1]; even-indexed threads store the
//value as-is and odd-indexed threads store its negation, so outputs lie in
//[-1, 1] overall.
__global__ void random(double *N, hiprandState_t* globalState)
{
    hiprandState_t localState = globalState[threadIdx.x];
    float random = hiprand_uniform(&localState);
    // Write the advanced state back so later launches continue the stream.
    globalState[threadIdx.x] = localState;
    //Half of the numbers will be negative
    if((threadIdx.x % 2)==0){
        N[threadIdx.x] = random;
    }else{
        N[threadIdx.x] = -random;
    }
}
// closestCircles runs Lloyd-style iterations: assign each point to its
// nearest circle on the GPU (findPoints) and refit the circles on the host
// (fitCircles) until no point changes cluster or maxIter is exhausted.
//   initializeCirclesFirst == 1 : start from random circles (hiprand on device)
//   otherwise                   : start from a random partition of the points
// circles1 / points1 are the in/out circle parameters and point labels.
void closestCircles(double points[][2], int i, int maxIter, int initializeCirclesFirst, int numofevent, int numofpoints, double circles1[][3], double points1[]){
    int j, k, N, numChanges, u;
    N = numofpoints;
    //In first attempt generate random circles
    if(initializeCirclesFirst==1){
        hiprandState_t* devStates;
        double *dev_c1;
        HANDLE_ERROR( hipMalloc( (void**)&devStates, 3 * 5 * sizeof(hiprandState_t) ) );
        hipLaunchKernelGGL(( setup_kernel), dim3(1),dim3(15), 0, 0, devStates,unsigned(time(NULL)));
        HANDLE_ERROR( hipMalloc( (void**)&dev_c1, 3 * 5 * sizeof(double) ) );
        hipLaunchKernelGGL(( random), dim3(1),dim3(15), 0, 0, dev_c1,devStates);
        HANDLE_ERROR(hipMemcpy(circles1, dev_c1, 3 * 5 * sizeof(double),hipMemcpyDeviceToHost));
        // Fix: the RNG states and the staging buffer used to leak; free them.
        hipFree(dev_c1);
        hipFree(devStates);
        for(j=0; j<numofpoints; j++){
            points1[j]=0;
        }
    }
    //in second attempt generate random points (round-robin random partition)
    else{
        int idx[N];
        randperm(N,idx);
        int cIdx = 0;
        for(k=0; k<N; k++){
            u=idx[k];
            points1[u] = cIdx;
            cIdx = cIdx+1;
            if(cIdx > i-1){
                cIdx = 0;
            }
        }
        fitCircles(points, points1, i, circles1, numofpoints);
    }
    // Device buffers are allocated once and reused across iterations
    // (previously they were hipMalloc'ed and freed on every loop pass).
    int res[1], *dev_res;
    double *dev_points, *dev_circles1, *dev_points1;
    HANDLE_ERROR( hipMalloc( (void**)&dev_points, numofpoints * 2 * sizeof(double) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_circles1, 5 * 3 * sizeof(double) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_points1, numofpoints * sizeof(double) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_res, 1 * sizeof(int) ) );
    // The point coordinates never change; upload them once.
    HANDLE_ERROR(hipMemcpy(dev_points, points, numofpoints * 2 * sizeof(double), hipMemcpyHostToDevice));
    numChanges = 1;
    while ((numChanges >0) && (maxIter>0)){
        res[0] = 0;
        // circles1 and points1 change on the host every iteration, so they
        // (and the change counter) must be re-uploaded each pass.
        HANDLE_ERROR(hipMemcpy(dev_circles1, circles1, 5 * 3 * sizeof(double), hipMemcpyHostToDevice));
        HANDLE_ERROR(hipMemcpy(dev_points1, points1, numofpoints * sizeof(double), hipMemcpyHostToDevice));
        HANDLE_ERROR(hipMemcpy(dev_res, res, 1 *sizeof(int), hipMemcpyHostToDevice));
        hipLaunchKernelGGL(( findPoints), dim3(N),dim3(1), 0, 0, dev_points, dev_circles1, dev_points1, N, i, dev_res);
        HANDLE_ERROR(hipMemcpy(points1, dev_points1, numofpoints * sizeof(double),hipMemcpyDeviceToHost));
        HANDLE_ERROR(hipMemcpy(res, dev_res, 1 * sizeof(int),hipMemcpyDeviceToHost));
        numChanges = res[0];
        maxIter = maxIter - 1;
        fitCircles(points, points1, i, circles1, numofpoints);
    }
    hipFree(dev_points);
    hipFree(dev_circles1);
    hipFree(dev_points1);
    hipFree(dev_res);
}
// pruneCircles removes circles failing the acceptance criteria (radius at
// least radiusThreshold and at least 4 assigned points) and compacts the
// survivors to the front of circles[]. points[] is rewritten: points of a
// surviving circle keep that circle's ORIGINAL index, everything else gets
// label 0. size[0] receives (number of survivors) + 1, mirroring the
// bookkeeping the callers rely on.
void pruneCircles(double circles[][3], double points[], float radiusThreshold, int i, int numofpoints, int size[]){
    double keptC[5][3];
    double newLabels[numofpoints], memberIdx[numofpoints];
    int kept = 0;
    for (int p = 0; p < numofpoints; p++) {
        newLabels[p] = 0;
    }
    for (int c = 0; c < i; c++) {
        // Collect the indices of the points currently labelled c.
        int count = 0;
        for (int p = 0; p < numofpoints; p++) {
            if (points[p] == c) {
                memberIdx[count] = p;
                count++;
            }
        }
        // Reject circles that are too small or too sparsely supported.
        if (circles[c][2] < radiusThreshold) { continue; }
        if (count < 4) { continue; }
        for (int p = 0; p < count; p++) {
            int orig = memberIdx[p];
            newLabels[orig] = c;
        }
        keptC[kept][0] = circles[c][0];
        keptC[kept][1] = circles[c][1];
        keptC[kept][2] = circles[c][2];
        kept++;
    }
    size[0] = kept + 1;
    for (int c = 0; c < kept; c++) {
        circles[c][0] = keptC[c][0];
        circles[c][1] = keptC[c][1];
        circles[c][2] = keptC[c][2];
    }
    for (int p = 0; p < numofpoints; p++) {
        points[p] = newLabels[p];
    }
}
//Calculates the total fit error: each point (one block per point) atomically
//adds its distance to the nearest circle into err[0]; block 0 additionally
//adds an overfit penalty of overfitPenalty * i * i once.
// NOTE(review): circleDist is asked for `size` distances but the min scan
// runs over `i` entries — if i > size the scan reads uninitialized d[] slots;
// confirm the intended relation between i and size.
__global__ void circleFitError(double *points, double *circles1, float overfitPenalty,int i, int size, int numofpoints, float *err){
    int block=blockIdx.x, j;
    float d[5], min;
    float h1;
    circleDist(points[2*block],points[2*block+1],circles1,size,d);
    // Distance to the nearest circle.
    min = d[0];
    for(j=0; j<i; j++){
        if(d[j]<min){
            min = d[j];
        }
    }
    // Accumulate this point's error into the single global total.
    atomicAdd(&err[0],min);
    if(block==0){
        // Penalize larger circle counts, added once per launch.
        h1 = overfitPenalty * i * i;
        atomicAdd(&err[0],h1);
    }
}
//KCC algorithm: runs the circle-clustering pipeline twice (once seeded with
//random circles, once with a random point partition), scores both candidate
//circle sets on the GPU with circleFitError, and returns the set with the
//lower total error. Outputs: cir (circles), err[0] (error), s[0] (count).
void kcc(double points[][2], int i, int maxIter, float radiusThreshold, float overfitPenalty, int numofevent, int numofpoints, double cir[][3], float err[],int s[]){
    int size1[1], size2[1], j;
    double circles1[5][3], circles2[5][3];
    double points1[numofpoints], points2[numofpoints];
    float err1[1], err2[1];
    double *dev_points, *dev_circles1, *dev_circles2;
    float *dev_err1, *dev_err2;
    err1[0]=err2[0]=0.0;
    // First attempt: random circles are generated first.
    closestCircles(points, i, maxIter, 1, numofevent, numofpoints,circles1,points1);
    pruneCircles(circles1,points1,radiusThreshold,i,numofpoints,size1);
    HANDLE_ERROR( hipMalloc( (void**)&dev_points, numofpoints * 2 * sizeof(double) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_circles1, 5 * 3 * sizeof(double) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_err1, 1 * sizeof(float) ) );
    HANDLE_ERROR(hipMemcpy(dev_points, points, numofpoints * 2 * sizeof(double), hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(dev_circles1, circles1, 5 * 3 * sizeof(double), hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(dev_err1, err1, 1 * sizeof(float), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( circleFitError), dim3(numofpoints),dim3(1), 0, 0, dev_points,dev_circles1,overfitPenalty,i,size1[0],numofpoints,dev_err1);
    HANDLE_ERROR(hipMemcpy(err1, dev_err1, 1 * sizeof(float), hipMemcpyDeviceToHost));
    // Fix: the original freed dev_points1 here, a pointer that was never
    // allocated (undefined behaviour). dev_points stays alive for reuse below.
    hipFree(dev_circles1);
    hipFree(dev_err1);
    //Second attempt in which random points are generated first
    closestCircles(points, i, maxIter, 0, numofevent, numofpoints,circles2,points2);
    pruneCircles(circles2,points2,radiusThreshold,i,numofpoints,size2);
    HANDLE_ERROR( hipMalloc( (void**)&dev_circles2, 5 * 3 * sizeof(double) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_err2, 1 * sizeof(float) ) );
    HANDLE_ERROR(hipMemcpy(dev_circles2, circles2, 5 * 3 * sizeof(double), hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(dev_err2, err2, 1 * sizeof(float), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( circleFitError), dim3(numofpoints),dim3(1), 0, 0, dev_points,dev_circles2,overfitPenalty,i,size2[0],numofpoints,dev_err2);
    HANDLE_ERROR(hipMemcpy(err2, dev_err2, 1 * sizeof(float), hipMemcpyDeviceToHost));
    hipFree(dev_points);
    hipFree(dev_circles2);
    hipFree(dev_err2);
    //decide which set is returned by comparing errors
    if(err1[0]<=err2[0]){
        for(j=0; j<size1[0]; j++){
            cir[j][0] = circles1[j][0];
            cir[j][1] = circles1[j][1];
            cir[j][2] = circles1[j][2];
        }
        err[0] = err1[0];
        // Fix: this line was `s[0] - size1[0];` — a no-effect expression
        // that left s[0] uninitialized for the caller.
        s[0] = size1[0];
    }else{
        for(j=0; j<size2[0]; j++){
            cir[j][0] = circles2[j][0];
            cir[j][1] = circles2[j][1];
            cir[j][2] = circles2[j][2];
        }
        err[0] = err2[0];
        s[0] = size2[0];
    }
}
// Reads "batch00.dat" (event count, then per event a point count and the
// points as "x y" lines), runs kcc with K = 2..5 for every event, keeps the
// circle set with the smallest |error|, prints it and the LAD, then the
// total elapsed time.
int main( int argc, char* argv[] ) {
    FILE* file = fopen("batch00.dat", "r");
    // Fix: guard against a missing input file instead of dereferencing NULL.
    if (file == NULL) {
        fprintf(stderr, "Cannot open batch00.dat\n");
        return 1;
    }
    char line[128];
    int numofevents,i,j,k;
    struct timeval tv[2];
    fgets(line,sizeof(line),file);
    numofevents = atoi(line);
    int numofpoints[numofevents];
    gettimeofday (&tv[0], NULL);
    //for loop to read progressively points from file and call kcc 4 times per event
    for (i=0; i<numofevents; i++){
        fgets(line,sizeof(line),file);
        numofpoints[i] = atoi(line);
        double points[numofpoints[i]][2];
        for (j=0; j<numofpoints[i]; j++){
            fgets(line,sizeof(line),file);
            // Parse one whitespace-separated "x y" coordinate pair.
            char* buffer;
            buffer = strtok(line," ");
            int z = 0;
            while(buffer){
                points[j][z] = atof(buffer);
                buffer = strtok(NULL," ");
                z++;
            }
        }
        int maxK = 5;
        int minK = 2;
        int Ks[maxK-minK+1];
        for (k=minK; k<=maxK; k++ ){
            Ks[k-minK] = k;
        }
        float radiusThreshold = 0.1;
        int maxIter = 100;
        float overfitPenalty = 0.001;
        double circles[5][3];
        double error;
        int K,size;
        for (k=0; k<4; k++){
            double cir[5][3];
            float err[1];
            int s[1];
            kcc(points,Ks[k],maxIter, radiusThreshold, overfitPenalty, i, numofpoints[i],cir,err,s);
            // Keep the circle set with the smallest absolute error so far.
            // Fix: use fabs() — plain abs() is the integer overload and
            // silently truncated the float error before comparing.
            if (k==0 || fabs(err[0]) < fabs(error)){
                for(j=0; j<s[0]; j++){
                    circles[j][0] = cir[j][0];
                    circles[j][1] = cir[j][1];
                    circles[j][2] = cir[j][2];
                }
                error = fabs(err[0]);
                size = s[0];
                K = Ks[k];
            }
        }
        //Printing results
        printf("\n\nEvent: %d \n",i);
        for (k=0; k<size; k++){
            printf("%.8f %.8f %.8f \n",circles[k][0],circles[k][1], circles[k][2]);
        }
        float LAD;
        LAD = error - overfitPenalty *(float)K*(float)K;
        printf("LAD: %.12f",LAD);
    }
    //Printing time
    gettimeofday (&tv[1], NULL);
    printf("\n\nTime elapsed: %.0lf microsec\n\n", time_diff(tv[0],tv[1]) );
    // Fix: the input file was never closed.
    fclose(file);
    return 0;
}
| 70c9b53b1626b0060fd66d036b301d27d381b69f.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include "magma.h"
#include "magma_lapack.h"
#include <sys/time.h>
#include<curand.h>
#include<curand_kernel.h>
// Returns the elapsed time from x to y in MICROSECONDS (the unit is
// microseconds despite the "_ms" naming convention used elsewhere).
double time_diff(struct timeval x , struct timeval y)
{
    double start_us = (double)x.tv_sec * 1000000 + (double)x.tv_usec;
    double end_us   = (double)y.tv_sec * 1000000 + (double)y.tv_usec;
    return end_us - start_us;
}
//Function to handle cuda errors: if err is not cudaSuccess, prints the CUDA
//error string with its source location and terminates the process.
//Intended to be used through the HANDLE_ERROR macro below.
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err != cudaSuccess) {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
                file, line );
        exit( EXIT_FAILURE );
    }
}
// Wraps a CUDA runtime call and reports failures with file/line context.
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Needed for magma_dgels
# define max(a,b) (((a)<(b ))?( b):(a))
// Device function which calculates the vector and constructs the matrix
// which participate in the magma_dgels least-squares solve.
// Launch layout: 2 blocks, one thread per point (threadIdx.x = point index).
//   block 0 fills b[k] = -(x_k^2 + y_k^2)                (right-hand side)
//   block 1 fills A column-major with numofpoints rows:  [x | y | 1]
__global__ void vecprod(double *x, double *y,double *b, double *A, int numofpoints){
    double h1,h2;
    int block = threadIdx.x;
    if(blockIdx.x==0){
        if(block<numofpoints){
            h1=x[block]*x[block];
            h2=y[block]*y[block];
            b[block]=-(h1+h2);
        }
    }
    if(blockIdx.x==1){
        if(block<numofpoints){
            // Columns of A are stored contiguously with stride numofpoints.
            A[block] = x[block];
            A[block+numofpoints] = y[block];
            A[block+2*numofpoints] = 1;
        }
    }
}
// Fitring function calculates the center and radius of a circle through the
// given points via an algebraic least-squares fit:
//   minimize || A*[a0 a1 a2]^T - b ||, A = [x | y | 1], b = -(x^2 + y^2),
// then center = (-a0/2, -a1/2) and radius = sqrt((a0^2 + a1^2)/4 - a2).
//   pts     : input points (l valid rows)
//   results : output {center_x, center_y, radius}
//   l       : number of valid points
void fitring(double pts[50][2], double results[], int l){
    int j;
    double x[l],y[l],b[l],A[l*3];
    double *dev_x,*dev_y,*dev_b, *dev_A;
    for(j=0; j<l; j++){
        x[j]=pts[j][0];
        y[j]=pts[j][1];
    }
    //allocating memory in device, copying x and y, and calling function vecprod to generate matrix and vector for magma_dgels
    HANDLE_ERROR( cudaMalloc( (void**)&dev_x, l * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_y, l * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, l * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_A, l * 3 * sizeof(double) ) );
    HANDLE_ERROR(cudaMemcpy(dev_x, x, l * sizeof(double), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_y, y, l * sizeof(double), cudaMemcpyHostToDevice));
    vecprod<<<2,l>>>(dev_x, dev_y, dev_b,dev_A, l);
    HANDLE_ERROR(cudaMemcpy(b, dev_b, l * sizeof(double),cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaMemcpy(A, dev_A, l * 3 * sizeof(double),cudaMemcpyDeviceToHost));
    // NOTE(review): dev_b is never freed here — leak; a[] below is unused.
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_A);
    //Setting up magma parameters
    double a[3];
    double *mag_A, *mag_b;
    magma_init();
    magma_int_t m,n;
    m=l;
    n=3;
    magma_int_t info, nb, lworkgpu,l1,l2,lhwork;
    double *hwork, tmp[1];
    nb = magma_get_dgeqrf_nb (m);
    lworkgpu = (m-n + nb )*(1 +2* nb);
    //Allocating memory on device for matrix and vector
    magma_dmalloc (&mag_b , m*1);
    magma_dmalloc (&mag_A , m*n);
    // NOTE(review): tmp[0] is read UNINITIALIZED here — the usual pattern is
    // a workspace query (lwork = -1) filling tmp first; lworkgpu dominates
    // the max() below only by luck. Confirm against the MAGMA dgels example.
    l1 = ( magma_int_t ) MAGMA_D_REAL ( tmp [0] );
    l2 = ( magma_int_t ) MAGMA_D_REAL ( tmp [0] );
    lhwork = max ( max ( l1 , l2 ), lworkgpu );
    magma_dmalloc_cpu (& hwork , lhwork );
    //Setting matrices in device for magma_dgels
    magma_dsetmatrix ( m, n, A, m, mag_A , m );
    magma_dsetmatrix ( m, 1 , b, m, mag_b , m );
    //magma_dgels solving the least squares problem
    magma_dgels_gpu(MagmaNoTrans, m, n, 1,mag_A, m, mag_b, m, hwork, lworkgpu, &info);
    //Getting the solution vector back; first n entries of mag_b hold [a0 a1 a2]
    // NOTE(review): mag_A, mag_b and hwork are never freed and magma_finalize
    // is never called — leaks on every invocation.
    magma_dgetmatrix ( n, 1 , mag_b , m , b, n );
    results[0] = -0.5* b[0];
    results[1] = -0.5* b[1];
    results[2] = sqrt((b[0]*b[0]+b[1]*b[1])/4-b[2]);
}
// Fits one circle per cluster label: the points carrying label c (taken from
// point[]) are gathered and handed to fitring; clusters with fewer than 4
// members get a sentinel circle (center 0,0, radius 100) instead of a fit.
void fitCircles(double points[][2], double point[], int i, double circles[][3], int numofpoints){
    double pts[numofpoints][2];
    for (int c = 0; c < i; c++) {
        // Gather the members of cluster c into pts.
        int members = 0;
        for (int p = 0; p < numofpoints; p++) {
            if (point[p] == c) {
                pts[members][0] = points[p][0];
                pts[members][1] = points[p][1];
                members++;
            }
        }
        if (members >= 4) {
            double res[3];
            fitring(pts, res, members);
            circles[c][0] = res[0];
            circles[c][1] = res[1];
            circles[c][2] = res[2];
        } else {
            // Too few supporting points: mark the circle as degenerate.
            circles[c][0] = 0;
            circles[c][1] = 0;
            circles[c][2] = 100;
        }
    }
}
// Squared algebraic distance of point (po1, po2) from each of the first i
// circles. circles1 holds the circles flat as {cx, cy, r} triples; the i
// results are written to d[]. Called only from device code.
__device__ void circleDist(double po1, double po2, double *circles1, int i, float d[]){
    for (int idx = 0; idx < i; idx++) {
        double dx  = po1 - circles1[idx*3];
        double dy  = po2 - circles1[idx*3 + 1];
        double rad = circles1[idx*3 + 2];
        // (x^2 + y^2 - r^2) is zero on the circle; square it so the
        // measure is non-negative on both sides.
        double alg = dx*dx + dy*dy - rad*rad;
        d[idx] = alg*alg;
    }
}
//findPoints assigns each point to its nearest circle (smallest circleDist
//value) and counts how many points changed label.
//Launch layout: one block per point, one thread per block (blockIdx.x = point index).
//  points   : flat (x, y) pairs, 2 doubles per point
//  circles1 : flat {cx, cy, r} triples for the i circles
//  points1  : in/out per-point cluster label (stored as double)
//  res[0]   : incremented once per point whose label changed
// NOTE(review): d[] has room for 5 circles only — assumes i <= 5; confirm callers.
__global__ void findPoints(double *points, double *circles1, double *points1, int numofpoints, int i, int *res){
    int block = blockIdx.x,j,pos;
    float d[5], min;
    circleDist(points[2*block],points[2*block+1],circles1,i,d);
    // Pick the circle with the smallest squared distance.
    min = d[0];
    pos = 0;
    for(j=0; j<i; j++){
        if(d[j]<min){
            min = d[j];
            pos = j;
        }
    }
    //Using atomicAdd to count label changes across all blocks/threads
    if (points1[block]!=pos){
        atomicAdd(&res[0], 1);
    }
    points1[block] = pos;
}
// Fill perm[0..n-1] with a random permutation of 0..n-1, Fisher-Yates style,
// driven by rand(). Used to generate a random assignment of points.
void randperm(int n, int perm[])
{
    for (int k = 0; k < n; k++) {
        perm[k] = k;
    }
    for (int k = 0; k < n; k++) {
        // Pick a random slot in the not-yet-fixed suffix and swap it in.
        int pick  = rand() % (n - k) + k;
        int saved = perm[pick];
        perm[pick] = perm[k];
        perm[k] = saved;
    }
}
//Setting up kernel for generating random numbers: one curand RNG state per
//thread, all seeded with the same seed but distinct subsequence ids.
__global__ void setup_kernel ( curandState * state, unsigned long seed )
{
    // threadIdx.x doubles as the per-thread RNG subsequence id.
    int id = threadIdx.x;
    curand_init ( seed, id, 0, &state[id] );
}
//Generating random numbers in the device using curand's functions.
//curand_uniform returns values in (0, 1]; even-indexed threads store the
//value as-is and odd-indexed threads store its negation, so outputs lie in
//[-1, 1] overall.
__global__ void random(double *N, curandState* globalState)
{
    curandState localState = globalState[threadIdx.x];
    float random = curand_uniform(&localState);
    // Write the advanced state back so later launches continue the stream.
    globalState[threadIdx.x] = localState;
    //Half of the numbers will be negative
    if((threadIdx.x % 2)==0){
        N[threadIdx.x] = random;
    }else{
        N[threadIdx.x] = -random;
    }
}
// closestCircles runs Lloyd-style iterations: assign each point to its
// nearest circle on the GPU (findPoints) and refit the circles on the host
// (fitCircles) until no point changes cluster or maxIter is exhausted.
//   initializeCirclesFirst == 1 : start from random circles (curand on device)
//   otherwise                   : start from a random partition of the points
// circles1 / points1 are the in/out circle parameters and point labels.
void closestCircles(double points[][2], int i, int maxIter, int initializeCirclesFirst, int numofevent, int numofpoints, double circles1[][3], double points1[]){
    int j, k, N, numChanges, u;
    N = numofpoints;
    //In first attempt generate random circles
    if(initializeCirclesFirst==1){
        curandState* devStates;
        double *dev_c1;
        HANDLE_ERROR( cudaMalloc( (void**)&devStates, 3 * 5 * sizeof(curandState) ) );
        setup_kernel<<<1,15>>>(devStates,unsigned(time(NULL)));
        HANDLE_ERROR( cudaMalloc( (void**)&dev_c1, 3 * 5 * sizeof(double) ) );
        random<<<1,15>>>(dev_c1,devStates);
        HANDLE_ERROR(cudaMemcpy(circles1, dev_c1, 3 * 5 * sizeof(double),cudaMemcpyDeviceToHost));
        // Fix: the RNG states and the staging buffer used to leak; free them.
        cudaFree(dev_c1);
        cudaFree(devStates);
        for(j=0; j<numofpoints; j++){
            points1[j]=0;
        }
    }
    //in second attempt generate random points (round-robin random partition)
    else{
        int idx[N];
        randperm(N,idx);
        int cIdx = 0;
        for(k=0; k<N; k++){
            u=idx[k];
            points1[u] = cIdx;
            cIdx = cIdx+1;
            if(cIdx > i-1){
                cIdx = 0;
            }
        }
        fitCircles(points, points1, i, circles1, numofpoints);
    }
    // Device buffers are allocated once and reused across iterations
    // (previously they were cudaMalloc'ed and freed on every loop pass).
    int res[1], *dev_res;
    double *dev_points, *dev_circles1, *dev_points1;
    HANDLE_ERROR( cudaMalloc( (void**)&dev_points, numofpoints * 2 * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_circles1, 5 * 3 * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_points1, numofpoints * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_res, 1 * sizeof(int) ) );
    // The point coordinates never change; upload them once.
    HANDLE_ERROR(cudaMemcpy(dev_points, points, numofpoints * 2 * sizeof(double), cudaMemcpyHostToDevice));
    numChanges = 1;
    while ((numChanges >0) && (maxIter>0)){
        res[0] = 0;
        // circles1 and points1 change on the host every iteration, so they
        // (and the change counter) must be re-uploaded each pass.
        HANDLE_ERROR(cudaMemcpy(dev_circles1, circles1, 5 * 3 * sizeof(double), cudaMemcpyHostToDevice));
        HANDLE_ERROR(cudaMemcpy(dev_points1, points1, numofpoints * sizeof(double), cudaMemcpyHostToDevice));
        HANDLE_ERROR(cudaMemcpy(dev_res, res, 1 *sizeof(int), cudaMemcpyHostToDevice));
        findPoints<<<N,1>>>(dev_points, dev_circles1, dev_points1, N, i, dev_res);
        HANDLE_ERROR(cudaMemcpy(points1, dev_points1, numofpoints * sizeof(double),cudaMemcpyDeviceToHost));
        HANDLE_ERROR(cudaMemcpy(res, dev_res, 1 * sizeof(int),cudaMemcpyDeviceToHost));
        numChanges = res[0];
        maxIter = maxIter - 1;
        fitCircles(points, points1, i, circles1, numofpoints);
    }
    cudaFree(dev_points);
    cudaFree(dev_circles1);
    cudaFree(dev_points1);
    cudaFree(dev_res);
}
// pruneCircles removes circles failing the acceptance criteria (radius at
// least radiusThreshold and at least 4 assigned points) and compacts the
// survivors to the front of circles[]. points[] is rewritten: points of a
// surviving circle keep that circle's ORIGINAL index, everything else gets
// label 0. size[0] receives (number of survivors) + 1, mirroring the
// bookkeeping the callers rely on.
void pruneCircles(double circles[][3], double points[], float radiusThreshold, int i, int numofpoints, int size[]){
    double keptC[5][3];
    double newLabels[numofpoints], memberIdx[numofpoints];
    int kept = 0;
    for (int p = 0; p < numofpoints; p++) {
        newLabels[p] = 0;
    }
    for (int c = 0; c < i; c++) {
        // Collect the indices of the points currently labelled c.
        int count = 0;
        for (int p = 0; p < numofpoints; p++) {
            if (points[p] == c) {
                memberIdx[count] = p;
                count++;
            }
        }
        // Reject circles that are too small or too sparsely supported.
        if (circles[c][2] < radiusThreshold) { continue; }
        if (count < 4) { continue; }
        for (int p = 0; p < count; p++) {
            int orig = memberIdx[p];
            newLabels[orig] = c;
        }
        keptC[kept][0] = circles[c][0];
        keptC[kept][1] = circles[c][1];
        keptC[kept][2] = circles[c][2];
        kept++;
    }
    size[0] = kept + 1;
    for (int c = 0; c < kept; c++) {
        circles[c][0] = keptC[c][0];
        circles[c][1] = keptC[c][1];
        circles[c][2] = keptC[c][2];
    }
    for (int p = 0; p < numofpoints; p++) {
        points[p] = newLabels[p];
    }
}
//Calculates the total fit error: each point (one block per point) atomically
//adds its distance to the nearest circle into err[0]; block 0 additionally
//adds an overfit penalty of overfitPenalty * i * i once.
// NOTE(review): circleDist is asked for `size` distances but the min scan
// runs over `i` entries — if i > size the scan reads uninitialized d[] slots;
// confirm the intended relation between i and size.
__global__ void circleFitError(double *points, double *circles1, float overfitPenalty,int i, int size, int numofpoints, float *err){
    int block=blockIdx.x, j;
    float d[5], min;
    float h1;
    circleDist(points[2*block],points[2*block+1],circles1,size,d);
    // Distance to the nearest circle.
    min = d[0];
    for(j=0; j<i; j++){
        if(d[j]<min){
            min = d[j];
        }
    }
    // Accumulate this point's error into the single global total.
    atomicAdd(&err[0],min);
    if(block==0){
        // Penalize larger circle counts, added once per launch.
        h1 = overfitPenalty * i * i;
        atomicAdd(&err[0],h1);
    }
}
//KCC algorithm: runs the circle-clustering pipeline twice (once seeded with
//random circles, once with a random point partition), scores both candidate
//circle sets on the GPU with circleFitError, and returns the set with the
//lower total error. Outputs: cir (circles), err[0] (error), s[0] (count).
void kcc(double points[][2], int i, int maxIter, float radiusThreshold, float overfitPenalty, int numofevent, int numofpoints, double cir[][3], float err[],int s[]){
    int size1[1], size2[1], j;
    double circles1[5][3], circles2[5][3];
    double points1[numofpoints], points2[numofpoints];
    float err1[1], err2[1];
    double *dev_points, *dev_circles1, *dev_circles2;
    float *dev_err1, *dev_err2;
    err1[0]=err2[0]=0.0;
    // First attempt: random circles are generated first.
    closestCircles(points, i, maxIter, 1, numofevent, numofpoints,circles1,points1);
    pruneCircles(circles1,points1,radiusThreshold,i,numofpoints,size1);
    HANDLE_ERROR( cudaMalloc( (void**)&dev_points, numofpoints * 2 * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_circles1, 5 * 3 * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_err1, 1 * sizeof(float) ) );
    HANDLE_ERROR(cudaMemcpy(dev_points, points, numofpoints * 2 * sizeof(double), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_circles1, circles1, 5 * 3 * sizeof(double), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_err1, err1, 1 * sizeof(float), cudaMemcpyHostToDevice));
    circleFitError<<<numofpoints,1>>>(dev_points,dev_circles1,overfitPenalty,i,size1[0],numofpoints,dev_err1);
    HANDLE_ERROR(cudaMemcpy(err1, dev_err1, 1 * sizeof(float), cudaMemcpyDeviceToHost));
    // Fix: the original freed dev_points1 here, a pointer that was never
    // allocated (undefined behaviour). dev_points stays alive for reuse below.
    cudaFree(dev_circles1);
    cudaFree(dev_err1);
    //Second attempt in which random points are generated first
    closestCircles(points, i, maxIter, 0, numofevent, numofpoints,circles2,points2);
    pruneCircles(circles2,points2,radiusThreshold,i,numofpoints,size2);
    HANDLE_ERROR( cudaMalloc( (void**)&dev_circles2, 5 * 3 * sizeof(double) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_err2, 1 * sizeof(float) ) );
    HANDLE_ERROR(cudaMemcpy(dev_circles2, circles2, 5 * 3 * sizeof(double), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_err2, err2, 1 * sizeof(float), cudaMemcpyHostToDevice));
    circleFitError<<<numofpoints,1>>>(dev_points,dev_circles2,overfitPenalty,i,size2[0],numofpoints,dev_err2);
    HANDLE_ERROR(cudaMemcpy(err2, dev_err2, 1 * sizeof(float), cudaMemcpyDeviceToHost));
    cudaFree(dev_points);
    cudaFree(dev_circles2);
    cudaFree(dev_err2);
    //decide which set is returned by comparing errors
    if(err1[0]<=err2[0]){
        for(j=0; j<size1[0]; j++){
            cir[j][0] = circles1[j][0];
            cir[j][1] = circles1[j][1];
            cir[j][2] = circles1[j][2];
        }
        err[0] = err1[0];
        // Fix: this line was `s[0] - size1[0];` — a no-effect expression
        // that left s[0] uninitialized for the caller.
        s[0] = size1[0];
    }else{
        for(j=0; j<size2[0]; j++){
            cir[j][0] = circles2[j][0];
            cir[j][1] = circles2[j][1];
            cir[j][2] = circles2[j][2];
        }
        err[0] = err2[0];
        s[0] = size2[0];
    }
}
// Reads "batch00.dat" (event count, then per event a point count and the
// points as "x y" lines), runs kcc with K = 2..5 for every event, keeps the
// circle set with the smallest |error|, prints it and the LAD, then the
// total elapsed time.
int main( int argc, char* argv[] ) {
    FILE* file = fopen("batch00.dat", "r");
    // Fix: guard against a missing input file instead of dereferencing NULL.
    if (file == NULL) {
        fprintf(stderr, "Cannot open batch00.dat\n");
        return 1;
    }
    char line[128];
    int numofevents,i,j,k;
    struct timeval tv[2];
    fgets(line,sizeof(line),file);
    numofevents = atoi(line);
    int numofpoints[numofevents];
    gettimeofday (&tv[0], NULL);
    //for loop to read progressively points from file and call kcc 4 times per event
    for (i=0; i<numofevents; i++){
        fgets(line,sizeof(line),file);
        numofpoints[i] = atoi(line);
        double points[numofpoints[i]][2];
        for (j=0; j<numofpoints[i]; j++){
            fgets(line,sizeof(line),file);
            // Parse one whitespace-separated "x y" coordinate pair.
            char* buffer;
            buffer = strtok(line," ");
            int z = 0;
            while(buffer){
                points[j][z] = atof(buffer);
                buffer = strtok(NULL," ");
                z++;
            }
        }
        int maxK = 5;
        int minK = 2;
        int Ks[maxK-minK+1];
        for (k=minK; k<=maxK; k++ ){
            Ks[k-minK] = k;
        }
        float radiusThreshold = 0.1;
        int maxIter = 100;
        float overfitPenalty = 0.001;
        double circles[5][3];
        double error;
        int K,size;
        for (k=0; k<4; k++){
            double cir[5][3];
            float err[1];
            int s[1];
            kcc(points,Ks[k],maxIter, radiusThreshold, overfitPenalty, i, numofpoints[i],cir,err,s);
            // Keep the circle set with the smallest absolute error so far.
            // Fix: use fabs() — plain abs() is the integer overload and
            // silently truncated the float error before comparing.
            if (k==0 || fabs(err[0]) < fabs(error)){
                for(j=0; j<s[0]; j++){
                    circles[j][0] = cir[j][0];
                    circles[j][1] = cir[j][1];
                    circles[j][2] = cir[j][2];
                }
                error = fabs(err[0]);
                size = s[0];
                K = Ks[k];
            }
        }
        //Printing results
        printf("\n\nEvent: %d \n",i);
        for (k=0; k<size; k++){
            printf("%.8f %.8f %.8f \n",circles[k][0],circles[k][1], circles[k][2]);
        }
        float LAD;
        LAD = error - overfitPenalty *(float)K*(float)K;
        printf("LAD: %.12f",LAD);
    }
    //Printing time
    gettimeofday (&tv[1], NULL);
    printf("\n\nTime elapsed: %.0lf microsec\n\n", time_diff(tv[0],tv[1]) );
    // Fix: the input file was never closed.
    fclose(file);
    return 0;
}
|
c410c17cd747ca7e7fd0f78001b1bbe6c3d14dd0.hip | // !!! This is a file automatically generated by hipify!!!
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 0 // Updated for int4
#define checkKernelErrors(expr) \
do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
// apmm_w4a4: bit-serial 4-bit x 4-bit GEMM built on 1-bit BMMA tiles.
// W and X are stored as bit-planes: wb planes for W (M_GLOBAL x K_GLOBAL bits
// each) and xb planes for X (N_GLOBAL x K_GLOBAL bits each), one int4 holding
// 128 K-dimension bits. Each CTA computes one 16x16 tile of D; CTAs walk the
// output tiles persistently via the block_pos loop. The 8 warps split the
// staging: warps 0-3 load one W bit-plane each, warps 4-7 one X bit-plane.
// The 1-bit partial products (bmmaBitOpAND + popcount accumulate) are stored
// to shared memory and then combined with weights 2^(wi+xj) into the final
// integer result.
// NOTE(review): requires K_GLOBAL to be a multiple of 128 and the dynamic
// shared memory size to cover both the staged operands and the 64x64 int
// result scratch — confirm against the host-side launch.
__global__ void apmm_w4a4(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) {
  // GEMM configuration.
  int K_TILES = K_GLOBAL / 128;                 // 128-bit chunks along K
  int W_bit_offset = M_GLOBAL*K_GLOBAL/128;     // int4 stride between W bit-planes
  int X_bit_offset = N_GLOBAL*K_GLOBAL/128;     // int4 stride between X bit-planes
  int ROW_BIT = K_GLOBAL/128;                   // int4 words per matrix row
  extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
  // Warp and lane identification.
  const unsigned int warpId = threadIdx.x / WARP_SIZE;
  const unsigned int laneId = threadIdx.x % WARP_SIZE;
  // Persistent-CTA loop over the 16x16 output tiles.
  for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
    const unsigned int block_tile_i = block_pos / (N_GLOBAL/16) * 16;
    const unsigned int block_tile_j = block_pos % (N_GLOBAL/16) * 16;
    // Stop when there are no more D matrix tiles to compute in this CTA.
    if (block_tile_i >= M_GLOBAL) {
      break;
    }
    // Scratch union kept from the original (currently unused).
    typedef union {
      int4 vec;
      int a[4];
    } U4;
    wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
                                                     [WARP_ROW_TILES];
    for(int i=0; i < WARP_COL_TILES; i++)
      for(int j = 0; j < WARP_ROW_TILES; j++)
        wmma::fill_fragment(c[i][j], 0);
    // Select what warp copies what matrix to shared memory.
    // Warps 0-3 copy the A (W) matrix, warps 4-7 copy the B (X) matrix,
    // each warp handling one bit-plane.
    const int4 *warp_ptr;
    if (warpId < 4) {
      warp_ptr = &W[block_tile_i * ROW_BIT + W_bit_offset*warpId];
    } else {
      warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset*(warpId-4)];
    }
    // Go through the global K dimension by a fixed step at a time.
#pragma unroll
    for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
      // Offset in shared memory from which the B matrix is stored.
      const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
      // Copy slices of the A and B matrices to shared memory: each lane
      // stages one int word; the second store covers rows 8..15.
      int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4;
      int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4;
      *shmem_ptr = *lane_ptr;
      shmem_ptr += 8*4*(CHUNK_K+SKEW);
      lane_ptr += 8*ROW_BIT*4;
      *shmem_ptr = *lane_ptr;
      // All staged operands must be visible before any warp loads fragments.
      __syncthreads();
      // Compute a grid of C matrix tiles in each warp.
#pragma unroll
      for (int k_step = 0; k_step < CHUNK_K; k_step++) {
        wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
        wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
          size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
          const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
          wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
#pragma unroll
          for (int j = 0; j < WARP_ROW_TILES; j++) {
            if (i == 0) {
              // Load the B matrix fragment once, because it is going to be
              // reused against the other A matrix fragments.
              size_t shmem_idx_b = shmem_idx_b_off +
                                   (WARP_ROW_TILES * N) * (warpId % 2) +
                                   (j * N);
              const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
              wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
            }
            // 1-bit MMA with AND as the bit-product.
            wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
          }
        }
      }
      __syncthreads();
    }
    // This pointer is used to access the C and D matrix tiles this warp computes.
    int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
                               (warpId / 2) * 64 * 16 +
                               (warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
    // Store the D fragments to shared memory (64-int row stride).
#pragma unroll
    for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
      for (int j = 0; j < WARP_ROW_TILES; j++) {
        int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8;
        wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT);
      }
    }
    __syncthreads();
    // Combine the 4x4 grid of bit-plane partials: partial (wi, xj) is
    // weighted by 2^(wi + xj) — cur_multiplier doubles across X planes and
    // base_multiplier doubles across W planes.
    size_t idx = threadIdx.x/16 * 64 + threadIdx.x%16;
    int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
    int val = 0;
    int base_multiplier = 1;
#pragma unroll
    for (int i=0; i<4; i++) {
      int cur_multiplier = base_multiplier;
#pragma unroll
      for (int j=0; j<4; j++) {
        int tmp = *(shmem_warp_stream_ptr+16*j);
        val += (cur_multiplier*tmp);
        cur_multiplier *= 2;
      }
      base_multiplier *= 2;
      shmem_warp_stream_ptr += 16*64;
    }
    __syncthreads();
    // Each of the 256 threads writes one element of the 16x16 output tile.
    size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + (threadIdx.x/16)*N_GLOBAL + (threadIdx.x%16);
    int *dst_gmem_warp_stream_ptr = &D[gmem_idx];
    *dst_gmem_warp_stream_ptr = val;
    __syncthreads();
  }
}
void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){
int *W_int = (int*) W;
int *X_int = (int*) X;
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < M_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i;
W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
for(int b = 0; b<X_BIT; b++) {
for(int i = 0; i < N_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j;
X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
}
int popcnt(int i) {
// Java: use int, and use >>> instead of >>
// C or C++: use int
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
ref_C[m*N_GLOBAL+n]= tmp;
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL is a multiplier of 32.
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[M_GLOBAL*N_GLOBAL];
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
C_ref_before_decompose[m*K_GLOBAL+n]= tmp;
}
}
for(int m=0; m<M_GLOBAL; m++) {
for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) val[b] = 0;
for(int n=0; n<32; n++) {
int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n];
tmp = (tmp - 128); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | ((mask<<b) & tmp);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b];
}
}
}
}
void validate_results(int *C, int* ref_C, int M_, int N_) {
// Assume K_GLOBAL and N_GLOBAL is a multiplier of 32.
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i < M_; i++) {
for(int j = 0; j < N_; j++) {
int idx = i*N_+j;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL is a multiplier of 32.
printf("Checking computed result with pack for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int m = 0; m < M_; m++) {
for(int n_tile = 0; n_tile < N_/32; n_tile++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*M_*N_/32 + m*N_/32+n_tile;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
// #define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
int X_BIT = 4;
int W_BIT = 4;
int M_GLOBAL = 64;
// int N_GLOBAL = 64;
// int K_GLOBAL = 128;
for (int N_GLOBAL=128; N_GLOBAL<=1024; N_GLOBAL += 128 ) {
int K_GLOBAL = N_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT));
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
#ifdef verify_output
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT);
X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT);
Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
printf("Preparing validation data for GPU...\n");
init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, hipMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(hipFuncSetAttribute(
apmm_w4a4, hipFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 1000;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
hipEvent_t bmma_start;
hipEvent_t bmma_end;
hipEventCreate(&bmma_start);
hipEventCreate(&bmma_end);
hipEventRecord(bmma_start);
checkKernelErrors(
hipLaunchKernelGGL(( (apmm_w4a4), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK),
SHMEM_SZ, 0, W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT)));
hipEventRecord(bmma_end);
hipEventSynchronize(bmma_end);
hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
hipEventDestroy(bmma_start);
hipEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V83, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
/* Copmpute reference matrix on CPU */
compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
/* validation results */
validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL);
free(W_h);
free(X_h);
free(Output_h);
free(Output_ref);
#endif
checkCudaErrors(hipFree(reinterpret_cast<void *>(W)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(X)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
| c410c17cd747ca7e7fd0f78001b1bbe6c3d14dd0.cu | /*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <cuda.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 0 // Updated for int4
#define checkKernelErrors(expr) \
do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
__global__ void apmm_w4a4(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) {
// GEMM configuration.
int K_TILES = K_GLOBAL / 128;
int W_bit_offset = M_GLOBAL*K_GLOBAL/128;
int X_bit_offset = N_GLOBAL*K_GLOBAL/128;
int ROW_BIT = K_GLOBAL/128;
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int b=0; b<wb; b++) {
// printf("W bit: %d, ", b);
// for(int i=0; i<1; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// // printf("bit: %d, W[%d][%d]: %x\n", b, i, j, *((int*)W+b*X_bit_offset + i*K_GLOBAL/32+j));
// printf("%08x ", *((int*)W+b*W_bit_offset*4 + i*K_GLOBAL/32+j));
// }
// printf("\n");
// }
// }
// }
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int b=0; b<xb; b++) {
// printf("X bit: %d, ", b);
// for(int i=0; i<1; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// // printf("bit: %d, W[%d][%d]: %x\n", b, i, j, *((int*)W+b*X_bit_offset + i*K_GLOBAL/32+j));
// printf("%08x ", *((int*)X+b*X_bit_offset*4 + i*K_GLOBAL/32+j));
// }
// printf("\n");
// }
// }
// }
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = block_pos / (N_GLOBAL/16) * 16;
const unsigned int block_tile_j = block_pos % (N_GLOBAL/16) * 16;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_GLOBAL) {
break;
}
typedef union {
int4 vec;
int a[4];
} U4;
wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
for(int i=0; i < WARP_COL_TILES; i++)
for(int j = 0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const int4 *warp_ptr;
if (warpId < 4) {
warp_ptr = &W[block_tile_i * ROW_BIT + W_bit_offset*warpId];
} else {
warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset*(warpId-4)];
}
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4;
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
// int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) +
// (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) +
// (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit.
int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4;
*shmem_ptr = *lane_ptr;
shmem_ptr += 8*4*(CHUNK_K+SKEW);
lane_ptr += 8*ROW_BIT*4;
*shmem_ptr = *lane_ptr;
// U4 tmp_probe;
// tmp_probe.vec = *lane_ptr;
// printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]);
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<64; i+=16) {
// printf("Load from GL. i: %d, val: %08x %08x %08x %08x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3));
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int t=0; t<a[i].num_elements; t++) {
// printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]);
// }
// printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step);
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 16 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT);
}
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<4; i++) {
// for(int j=0; j<4; j++) {
// printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64*16+j*16));
// }
// }
// }
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
// int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO.
size_t idx = threadIdx.x/16 * 64 + threadIdx.x%16;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
int val = 0;
int base_multiplier = 1;
#pragma unroll
for (int i=0; i<4; i++) {
int cur_multiplier = base_multiplier;
#pragma unroll
for (int j=0; j<4; j++) {
int tmp = *(shmem_warp_stream_ptr+16*j);
val += (cur_multiplier*tmp);
cur_multiplier *= 2;
}
base_multiplier *= 2;
shmem_warp_stream_ptr += 16*64;
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// printf("val: %d\n", val);
// }
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + (threadIdx.x/16)*N_GLOBAL + (threadIdx.x%16);
// Now that shared memory contains all the D tiles, stream them to global memory.
int *dst_gmem_warp_stream_ptr = &D[gmem_idx];
*dst_gmem_warp_stream_ptr = val;
__syncthreads();
}
}
void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){
int *W_int = (int*) W;
int *X_int = (int*) X;
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < M_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i;
W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
for(int b = 0; b<X_BIT; b++) {
for(int i = 0; i < N_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j;
X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
}
int popcnt(int i) {
// Java: use int, and use >>> instead of >>
// C or C++: use int
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
ref_C[m*N_GLOBAL+n]= tmp;
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL is a multiplier of 32.
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[M_GLOBAL*N_GLOBAL];
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
C_ref_before_decompose[m*K_GLOBAL+n]= tmp;
}
}
for(int m=0; m<M_GLOBAL; m++) {
for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) val[b] = 0;
for(int n=0; n<32; n++) {
int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n];
tmp = (tmp - 128); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | ((mask<<b) & tmp);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b];
}
}
}
}
void validate_results(int *C, int* ref_C, int M_, int N_) {
// Assume K_GLOBAL and N_GLOBAL is a multiplier of 32.
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i < M_; i++) {
for(int j = 0; j < N_; j++) {
int idx = i*N_+j;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL is a multiplier of 32.
printf("Checking computed result with pack for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int m = 0; m < M_; m++) {
for(int n_tile = 0; n_tile < N_/32; n_tile++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*M_*N_/32 + m*N_/32+n_tile;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
// #define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
int X_BIT = 4;
int W_BIT = 4;
int M_GLOBAL = 64;
// int N_GLOBAL = 64;
// int K_GLOBAL = 128;
for (int N_GLOBAL=128; N_GLOBAL<=1024; N_GLOBAL += 128 ) {
int K_GLOBAL = N_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT));
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
#ifdef verify_output
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT);
X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT);
Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
printf("Preparing validation data for GPU...\n");
init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, cudaMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(cudaFuncSetAttribute(
apmm_w4a4, cudaFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 1000;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
cudaEvent_t bmma_start;
cudaEvent_t bmma_end;
cudaEventCreate(&bmma_start);
cudaEventCreate(&bmma_end);
cudaEventRecord(bmma_start);
checkKernelErrors(
(apmm_w4a4<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK,
SHMEM_SZ>>>(W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT)));
cudaEventRecord(bmma_end);
cudaEventSynchronize(bmma_end);
cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
cudaEventDestroy(bmma_start);
cudaEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V83, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
/* Copmpute reference matrix on CPU */
compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
/* validation results */
validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL);
free(W_h);
free(X_h);
free(Output_h);
free(Output_ref);
#endif
checkCudaErrors(cudaFree(reinterpret_cast<void *>(W)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(X)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
|
119d5d7331a8163d463b3d644b97f5a9d91e1f2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void print1DThreads() {
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
printf("Overall Thread: %d ... Block: %d, Warp: %d, Thread: %d\n",
thread_idx,
blockIdx.x,
threadIdx.x / warpSize,
threadIdx.x);
}
__global__ void print2DThreads() {
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
// (Number of threads in a row * y position) + x offset from start of row
const unsigned int thread_idx = ((gridDim.x * blockDim.x) * idy) + idx;
printf("Overall Thread: %d ... xGrid: %d, yGrid: %d, xBlock: %d, yBlock: %d, Thread: %d\n",
thread_idx,
gridDim.x,
gridDim.y,
blockIdx.x,
blockIdx.y,
threadIdx.x);
}
int main() {
const int num_blocks = 2;
const int num_threads = 64;
hipLaunchKernelGGL(( print1DThreads), dim3(num_blocks), dim3(num_threads), 0, 0, );
hipDeviceSynchronize();
// Number of blocks in a grid
const dim3 blocks(1, 4);
// Number of threads in a block
const dim3 threads(32, 4);
hipLaunchKernelGGL(( print2DThreads), dim3(blocks), dim3(threads), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 119d5d7331a8163d463b3d644b97f5a9d91e1f2a.cu | #include <stdio.h>
__global__ void print1DThreads() {
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
printf("Overall Thread: %d ... Block: %d, Warp: %d, Thread: %d\n",
thread_idx,
blockIdx.x,
threadIdx.x / warpSize,
threadIdx.x);
}
__global__ void print2DThreads() {
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
// (Number of threads in a row * y position) + x offset from start of row
const unsigned int thread_idx = ((gridDim.x * blockDim.x) * idy) + idx;
printf("Overall Thread: %d ... xGrid: %d, yGrid: %d, xBlock: %d, yBlock: %d, Thread: %d\n",
thread_idx,
gridDim.x,
gridDim.y,
blockIdx.x,
blockIdx.y,
threadIdx.x);
}
int main() {
const int num_blocks = 2;
const int num_threads = 64;
print1DThreads<<<num_blocks, num_threads>>>();
cudaDeviceSynchronize();
// Number of blocks in a grid
const dim3 blocks(1, 4);
// Number of threads in a block
const dim3 threads(32, 4);
print2DThreads<<<blocks, threads>>>();
cudaDeviceSynchronize();
return 0;
}
|
97feeeee17a4a3ea366a9c20ccd20b899d3fd19d.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.cu"
int main (int argc, char *argv[])
{
//set standard seed
srand(217);
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned VecSize;
dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 1000;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
}
else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
A_sz = VecSize;
B_sz = VecSize;
C_sz = VecSize;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMalloc((void **)&A_d,sizeof(float)*A_sz);
hipMalloc((void **)&B_d,sizeof(float)*B_sz);
hipMalloc((void **)&C_d,sizeof(float)*C_sz);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(A_d,A_h,sizeof(float)*A_sz,hipMemcpyHostToDevice);
hipMemcpy(B_d,B_h,sizeof(float)*B_sz,hipMemcpyHostToDevice);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(C_h,C_d,sizeof(float)*C_sz,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, VecSize);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
| 97feeeee17a4a3ea366a9c20ccd20b899d3fd19d.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.cu"
int main (int argc, char *argv[])
{
//set standard seed
srand(217);
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned VecSize;
dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 1000;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
}
else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
A_sz = VecSize;
B_sz = VecSize;
C_sz = VecSize;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMalloc((void **)&A_d,sizeof(float)*A_sz);
cudaMalloc((void **)&B_d,sizeof(float)*B_sz);
cudaMalloc((void **)&C_d,sizeof(float)*C_sz);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(A_d,A_h,sizeof(float)*A_sz,cudaMemcpyHostToDevice);
cudaMemcpy(B_d,B_h,sizeof(float)*B_sz,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(C_h,C_d,sizeof(float)*C_sz,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, VecSize);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
21d5507d94e97f70900f91a2d75fb30abffa1842.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
enum {
Nx = 3,
Ny = 4,
Nz = 5,
};
texture<float, hipTextureType2D, hipReadModeElementType> texture2D;
texture<float, hipTextureType3D, hipReadModeElementType> texture3D;
texture<int2, hipTextureType2D, hipReadModeElementType> texture2D_d;
__global__ void kernel2D() {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid == 0) {
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
const float val = tex2D(texture2D, iy, ix);
const int2 v = tex2D(texture2D_d, iy, ix);
const double val_d = __hiloint2double( v.y, v.x );
printf("%f %f\n", val, val_d);
}
}
}
}
__global__ void kernel3D() {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid == 0) {
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
for(int iz = 0; iz < Nz; iz++) {
const float val = tex3D(texture3D, iz, iy, ix);
printf("%f\n", val);
}
}
}
}
}
int main() {
float host_2D[Nx][Ny]; //H W
float host_3D[Nx][Ny][Nz]; //D H W
double host_2D_d[Nx][Ny]; //H W
int cnt = 0;
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
host_2D[ix][iy] = cnt;
host_2D_d[ix][iy] = cnt;
cnt++;
}
}
cnt = 0;
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
for(int iz = 0; iz < Nz; iz++) {
host_3D[ix][iy][iz] = cnt;
cnt++;
}
}
}
hipArray *cu_2D = nullptr, *cu_3D = nullptr, *cu_2D_d = nullptr;
hipChannelFormatDesc cdesc = hipCreateChannelDesc<float>();
hipChannelFormatDesc cdesc_d = hipCreateChannelDesc<int2>();
hipMallocArray(&cu_2D, &cdesc, Ny, Nx);
hipMalloc3DArray(&cu_3D, &cdesc, make_hipExtent(Nz, Ny, Nx) );
hipMallocArray(&cu_2D_d, &cdesc_d, Ny, Nx);
const size_t size2d = Nx * Ny * sizeof(float);
hipMemcpyToArray(cu_2D, 0, 0, host_2D, size2d, hipMemcpyHostToDevice);
const size_t size2d_d = Nx * Ny * sizeof(double);
hipMemcpyToArray(cu_2D_d, 0, 0, host_2D_d, size2d_d, hipMemcpyHostToDevice);
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(host_3D, Nz * sizeof(float), Nz, Ny);
copyParams.dstArray = cu_3D;
copyParams.extent = make_hipExtent(Nz, Ny, Nx); //width height depth
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
texture2D.normalized = false;
texture3D.normalized = false;
hipBindTextureToArray(texture2D, cu_2D, cdesc);
hipBindTextureToArray(texture3D, cu_3D, cdesc);
hipBindTextureToArray(texture2D_d, cu_2D_d, cdesc_d);
hipLaunchKernelGGL(( kernel2D), dim3(32), dim3(10), 0, 0, );
hipLaunchKernelGGL(( kernel3D), dim3(32), dim3(10), 0, 0, );
hipDeviceSynchronize();
hipFreeArray(cu_2D);
hipFreeArray(cu_3D);
} | 21d5507d94e97f70900f91a2d75fb30abffa1842.cu | #include <stdio.h>
enum {
Nx = 3,
Ny = 4,
Nz = 5,
};
texture<float, cudaTextureType2D, cudaReadModeElementType> texture2D;
texture<float, cudaTextureType3D, cudaReadModeElementType> texture3D;
texture<int2, cudaTextureType2D, cudaReadModeElementType> texture2D_d;
__global__ void kernel2D() {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid == 0) {
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
const float val = tex2D(texture2D, iy, ix);
const int2 v = tex2D(texture2D_d, iy, ix);
const double val_d = __hiloint2double( v.y, v.x );
printf("%f %f\n", val, val_d);
}
}
}
}
__global__ void kernel3D() {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid == 0) {
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
for(int iz = 0; iz < Nz; iz++) {
const float val = tex3D(texture3D, iz, iy, ix);
printf("%f\n", val);
}
}
}
}
}
int main() {
float host_2D[Nx][Ny]; //H W
float host_3D[Nx][Ny][Nz]; //D H W
double host_2D_d[Nx][Ny]; //H W
int cnt = 0;
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
host_2D[ix][iy] = cnt;
host_2D_d[ix][iy] = cnt;
cnt++;
}
}
cnt = 0;
for(int ix = 0; ix < Nx; ix++) {
for(int iy = 0; iy < Ny; iy++) {
for(int iz = 0; iz < Nz; iz++) {
host_3D[ix][iy][iz] = cnt;
cnt++;
}
}
}
cudaArray *cu_2D = nullptr, *cu_3D = nullptr, *cu_2D_d = nullptr;
cudaChannelFormatDesc cdesc = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc cdesc_d = cudaCreateChannelDesc<int2>();
cudaMallocArray(&cu_2D, &cdesc, Ny, Nx);
cudaMalloc3DArray(&cu_3D, &cdesc, make_cudaExtent(Nz, Ny, Nx) );
cudaMallocArray(&cu_2D_d, &cdesc_d, Ny, Nx);
const size_t size2d = Nx * Ny * sizeof(float);
cudaMemcpyToArray(cu_2D, 0, 0, host_2D, size2d, cudaMemcpyHostToDevice);
const size_t size2d_d = Nx * Ny * sizeof(double);
cudaMemcpyToArray(cu_2D_d, 0, 0, host_2D_d, size2d_d, cudaMemcpyHostToDevice);
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(host_3D, Nz * sizeof(float), Nz, Ny);
copyParams.dstArray = cu_3D;
copyParams.extent = make_cudaExtent(Nz, Ny, Nx); //width height depth
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
texture2D.normalized = false;
texture3D.normalized = false;
cudaBindTextureToArray(texture2D, cu_2D, cdesc);
cudaBindTextureToArray(texture3D, cu_3D, cdesc);
cudaBindTextureToArray(texture2D_d, cu_2D_d, cdesc_d);
kernel2D<<<32, 10>>>();
kernel3D<<<32, 10>>>();
cudaDeviceSynchronize();
cudaFreeArray(cu_2D);
cudaFreeArray(cu_3D);
} |
98111648966f16acf280ba9a4536509a5068ed3c.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA Device Query
#include <stdio.h>
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
// printf("Major revision number: %d\n", devProp.major);
// printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
//printf("CUDA Device Query...\n");
//printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
//printf("\nPress any key to exit...");
//char c;
//scanf("%c", &c);
return 0;
}
| 98111648966f16acf280ba9a4536509a5068ed3c.cu | // CUDA Device Query
#include <stdio.h>
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
// printf("Major revision number: %d\n", devProp.major);
// printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
//printf("CUDA Device Query...\n");
//printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
//printf("\nPress any key to exit...");
//char c;
//scanf("%c", &c);
return 0;
}
|
9d5c2a3409f3a78d5cdee03faa3d22a41a2c865c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO )
#include "CUFLU_Shared_FluUtility.cu"
// parallel reduction routine
#define RED_NTHREAD DT_FLU_BLOCK_SIZE
#define RED_MAX
#ifdef DT_FLU_USE_SHUFFLE
# include "../../GPU_Utility/CUUTI_BlockReduction_Shuffle.cu"
#else
# include "../../GPU_Utility/CUUTI_BlockReduction_WarpSync.cu"
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_dtSolver_HydroCFL
// Description : Estimate the evolution time-step (dt) required for the hydro solver
//
// Note : 1. This function should be applied to both physical and comoving coordinates and always
// return the evolution time-step (dt) actually used in various solvers
// --> Physical coordinates : dt = physical time interval
// Comoving coordinates : dt = delta(scale_factor) / ( Hubble_parameter*scale_factor^3 )
// --> We convert dt back to the physical time interval, which equals "delta(scale_factor)"
// in the comoving coordinates, in Mis_GetTimeStep()
// 2. time-step is estimated by the stability criterion from the von Neumann stability analysis
//
// Parameter : g_dt_Array : Global memory array to store the minimum dt in each target patch
// g_Flu_Array : Global memory array storing the prepared fluid data of each target patch
// dh : Grid size
// Safety : dt safety factor
// Gamma : Ratio of specific heats
// MinPres : Minimum allowed pressure
//
// Return : g_dt_Array
//-------------------------------------------------------------------------------------------------------
__global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][NCOMP_FLUID][ CUBE(PS1) ],
const real dh, const real Safety, const real Gamma, const real MinPres )
{
const uint bx = blockIdx.x;
const uint ID = threadIdx.x;
const real Gamma_m1 = Gamma - (real)1.0;
const bool CheckMinPres_Yes = true;
real fluid[NCOMP_FLUID], _Rho, Vx, Vy, Vz, Pres, Cs, MaxV, MaxCFL;
uint t;
// get the maximum CFL speed evaluated by each thread
t = ID;
MaxCFL = (real)0.0;
while ( t < CUBE(PS1) )
{
for (int v=0; v<NCOMP_FLUID; v++) fluid[v] = g_Flu_Array[bx][v][t];
_Rho = (real)1.0 / fluid[DENS];
Vx = FABS( fluid[MOMX] )*_Rho;
Vy = FABS( fluid[MOMY] )*_Rho;
Vz = FABS( fluid[MOMZ] )*_Rho;
Pres = CUFLU_GetPressure( fluid[DENS], fluid[MOMX], fluid[MOMY], fluid[MOMZ], fluid[ENGY],
Gamma_m1, CheckMinPres_Yes, MinPres );
Cs = SQRT( Gamma*Pres*_Rho );
# if ( FLU_SCHEME == RTVD || FLU_SCHEME == CTU || FLU_SCHEME == WAF )
MaxV = FMAX( Vx, Vy );
MaxV = FMAX( Vz, MaxV );
MaxCFL = FMAX( MaxV+Cs, MaxCFL );
# elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
MaxV = Vx + Vy + Vz;
MaxCFL = FMAX( MaxV+(real)3.0*Cs, MaxCFL );
# endif
t += DT_FLU_BLOCK_SIZE;
} // while ( t < CUBE(PS1) )
// perform parallel reduction to get the maximum CFL speed in each thread block
# ifdef DT_FLU_USE_SHUFFLE
MaxCFL = BlockReduction_Shuffle ( MaxCFL );
# else
MaxCFL = BlockReduction_WarpSync( MaxCFL );
# endif
// store the minimum dt in each patch back to the global memory
if ( ID == 0 ) g_dt_Array[bx] = Safety*dh/MaxCFL;
} // FUNCTION : CUFLU_dtSolver_HydroCFL
#endif // #if ( defined GPU && MODEL == HYDRO )
| 9d5c2a3409f3a78d5cdee03faa3d22a41a2c865c.cu | #include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO )
#include "CUFLU_Shared_FluUtility.cu"
// parallel reduction routine
#define RED_NTHREAD DT_FLU_BLOCK_SIZE
#define RED_MAX
#ifdef DT_FLU_USE_SHUFFLE
# include "../../GPU_Utility/CUUTI_BlockReduction_Shuffle.cu"
#else
# include "../../GPU_Utility/CUUTI_BlockReduction_WarpSync.cu"
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_dtSolver_HydroCFL
// Description : Estimate the evolution time-step (dt) required for the hydro solver
//
// Note : 1. This function should be applied to both physical and comoving coordinates and always
// return the evolution time-step (dt) actually used in various solvers
// --> Physical coordinates : dt = physical time interval
// Comoving coordinates : dt = delta(scale_factor) / ( Hubble_parameter*scale_factor^3 )
// --> We convert dt back to the physical time interval, which equals "delta(scale_factor)"
// in the comoving coordinates, in Mis_GetTimeStep()
// 2. time-step is estimated by the stability criterion from the von Neumann stability analysis
//
// Parameter : g_dt_Array : Global memory array to store the minimum dt in each target patch
// g_Flu_Array : Global memory array storing the prepared fluid data of each target patch
// dh : Grid size
// Safety : dt safety factor
// Gamma : Ratio of specific heats
// MinPres : Minimum allowed pressure
//
// Return : g_dt_Array
//-------------------------------------------------------------------------------------------------------
__global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][NCOMP_FLUID][ CUBE(PS1) ],
const real dh, const real Safety, const real Gamma, const real MinPres )
{
const uint bx = blockIdx.x;
const uint ID = threadIdx.x;
const real Gamma_m1 = Gamma - (real)1.0;
const bool CheckMinPres_Yes = true;
real fluid[NCOMP_FLUID], _Rho, Vx, Vy, Vz, Pres, Cs, MaxV, MaxCFL;
uint t;
// get the maximum CFL speed evaluated by each thread
t = ID;
MaxCFL = (real)0.0;
while ( t < CUBE(PS1) )
{
for (int v=0; v<NCOMP_FLUID; v++) fluid[v] = g_Flu_Array[bx][v][t];
_Rho = (real)1.0 / fluid[DENS];
Vx = FABS( fluid[MOMX] )*_Rho;
Vy = FABS( fluid[MOMY] )*_Rho;
Vz = FABS( fluid[MOMZ] )*_Rho;
Pres = CUFLU_GetPressure( fluid[DENS], fluid[MOMX], fluid[MOMY], fluid[MOMZ], fluid[ENGY],
Gamma_m1, CheckMinPres_Yes, MinPres );
Cs = SQRT( Gamma*Pres*_Rho );
# if ( FLU_SCHEME == RTVD || FLU_SCHEME == CTU || FLU_SCHEME == WAF )
MaxV = FMAX( Vx, Vy );
MaxV = FMAX( Vz, MaxV );
MaxCFL = FMAX( MaxV+Cs, MaxCFL );
# elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
MaxV = Vx + Vy + Vz;
MaxCFL = FMAX( MaxV+(real)3.0*Cs, MaxCFL );
# endif
t += DT_FLU_BLOCK_SIZE;
} // while ( t < CUBE(PS1) )
// perform parallel reduction to get the maximum CFL speed in each thread block
# ifdef DT_FLU_USE_SHUFFLE
MaxCFL = BlockReduction_Shuffle ( MaxCFL );
# else
MaxCFL = BlockReduction_WarpSync( MaxCFL );
# endif
// store the minimum dt in each patch back to the global memory
if ( ID == 0 ) g_dt_Array[bx] = Safety*dh/MaxCFL;
} // FUNCTION : CUFLU_dtSolver_HydroCFL
#endif // #if ( defined GPU && MODEL == HYDRO )
|
aaecb98368edd59ceeeb59449f7b8f2cde6ab3cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef LU_INVERSE_PLAIN_KERNELS_CU
#define LU_INVERSE_PLAIN_KERNELS_CU
#include "hamc_common.h"
// to be called with a single thread
__global__ void make_GF2_identity_gpu(HAMC_DATA_TYPE_t *A, int n)
{
//int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int tid = 0; tid < n; tid++) {
for(int j = 0; j < n; j++) {
if(tid == j) {
A[tid*n + j] = 1;
}
else {
A[tid*n + j] = 0;
}
}
}
}
// Forward Substitution to be used after LU Decomposition
// A - input matrix (modified from LU decomposition)
// B - identity matrix of size n
// n - size of matrix A
__global__ void GF2_Forward_substitute(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) { // cols
for (int i = tid - 1; i >= 0; i--) { // rows from bottom to top
B[i*n + tid] = A[i*n + tid];
for (int k = i+1; k < tid; k++) {
B[i*n + tid] ^= B[k*n + tid] & A[i*n + k];
}
}
}
}
// Forward Substitution to be used after LU Decomposition
// A - input matrix (modified from LU decomposition)
// B - identity matrix of size n
// n - size of matrix A
__global__ void GF2_Forward_substitutev2(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) { // cols
for (int i = 0 ; i <= tid - 1; i++) { // rows
int row = tid - 1 - i;
B[row*n + tid] = A[row*n + tid];
for (int k = row+1; k < tid; k++) {
B[row*n + tid] ^= B[k*n + tid] & A[row*n + k];
}
}
}
}
// Backward Substition to be used after Forward Substitution
__global__ void GF2_Backward_substitute(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = n - 1; j >= 0; j--) { // cols from right to left
if (tid < n) { // rows from top to bottom
//IA->data[i*n + j] = A->data[i*n + j];
for (int k = j+1; k < n; k++) {
B[tid*n + j] ^= B[tid*n + k] & A[k*n + j];
}
}
}
}
// Backward Substition to be used after Forward Substitution
__global__ void GF2_Backward_substitutev2(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = 0; j <= n - 1; j++) { // cols from right to left
if (tid < n) { // rows from top to bottom
int col = n - 1 - j;
//IA->data[i*n + j] = A->data[i*n + j];
for (int k = col+1; k < n; k++) {
B[tid*n + col] ^= B[tid*n + k] & A[k*n + col];
}
}
}
}
// This kernel swaps cols given an IPIV
// A - matrix with cols to swap
// IPIV - pivot vector
// n - size of matrix A
__global__ void GF2_swap_cols(HAMC_DATA_TYPE_t *A, int *IPIV, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int k = n - 1; k >= 0; k--) { // cols from right to left
if (tid < n) {
HAMC_DATA_TYPE_t *C1 = &A[k];
HAMC_DATA_TYPE_t *C2 = &A[IPIV[k]];
HAMC_DATA_TYPE_t temp = C1[tid*n];
C1[tid*n] = C2[tid*n];
C2[tid*n] = temp;
}
}
}
// This kernel swaps cols given an IPIV
// A - matrix with cols to swap
// IPIV - pivot vector
// n - size of matrix A
__global__ void GF2_swap_colsv2(HAMC_DATA_TYPE_t *A, int *IPIV, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int k = 0; k <= n - 1; k++) { // cols from right to left
int row = n - 1 - k;
if (tid < n) {
HAMC_DATA_TYPE_t *C1 = &A[row];
HAMC_DATA_TYPE_t *C2 = &A[IPIV[row]];
HAMC_DATA_TYPE_t temp = C1[tid*n];
C1[tid*n] = C2[tid*n];
C2[tid*n] = temp;
}
}
}
__global__ void GF2_LU_decompose_update_trailing_row( HAMC_DATA_TYPE_t *A,
int n, int k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Update trailing matrix C ^= A & B
// where A is A(k+1:n, k), B is A(k, k+1 : n), C is A(k+1: n, k+1:n)
int m = n - k - 1;
if (tid < m) {
for (int j = 0; j < m; j++) { // cols
HAMC_DATA_TYPE_t *Arow = &A[(k + 1) * n + k];
HAMC_DATA_TYPE_t *Brow = &A[k * n + k + 1];
HAMC_DATA_TYPE_t *Crow = &A[(k + 1) * n + (k + 1)];
Crow[tid * n + j] ^= Arow[tid * n] & Brow[j];
}
}
}
// Update trailing matrix rows
// A - matrix to update trailing rows
// n - size of matrix A
// k - row
// update trailing matrix C ^= A & B,
// where A is A(k+1:n, k), B is A(k, k+1 : n), C is A(k+1: n, k+1:n)
//
// This kernel expects you to supply the col to be operated on.
__global__ void GF2_LU_decompose_update_trailing_row_index( HAMC_DATA_TYPE_t *A,
int n, int k, int j)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Update trailing matrix C ^= A & B
// where A is A(k+1:n, k), B is A(k, k+1 : n), C is A(k+1: n, k+1:n)
int m = n - k - 1;
if (tid < m) {
//printf("tid: %d\n", tid);
HAMC_DATA_TYPE_t *Arow = &A[(k + 1) * n + k];
HAMC_DATA_TYPE_t *Brow = &A[k * n + k + 1];
HAMC_DATA_TYPE_t *Crow = &A[(k + 1) * n + (k + 1)];
Crow[tid * n + j] ^= Arow[tid * n] & Brow[j];
}
}
// A is input matrix
// IPIV is integer pivot indeces vector should be sizeof(int)*A->rows,
// n is ld (latent dimension) should be A->rows or A->cols
__global__ void GF2_LU_decompose_find_max_row( HAMC_DATA_TYPE_t *A, int *IPIV,
int n, int k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// find max
if (tid == 0) {
for (int i = 0; i < n - k; i++) {
HAMC_DATA_TYPE_t *Arow = &A[k * n + k];
if (Arow[i*n] == 1) {
IPIV[k] = i + k;
return;
}
}
}
}
// A is input matrix
// IPIV is integer pivot indeces vector should be sizeof(int)*A->rows,
// n is ld (latent dimension) should be A->rows or A->cols
__global__ void GF2_LU_decompose_pivot_row( HAMC_DATA_TYPE_t *A, int *IPIV,
int n, int k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// if pivot row is changed for kth row, swap row k with pivot row
if (k != IPIV[k]) {
// have each thread handle a separate column element
// Make sure you have at least as many threads as n!!!
if (tid < n) {
HAMC_DATA_TYPE_t *R1 = &A[k * n]; // kth row
HAMC_DATA_TYPE_t *R2 = &A[IPIV[k] * n]; // pivot row
HAMC_DATA_TYPE_t temp = R1[tid];
R1[tid] = R2[tid];
R2[tid] = temp;
}
}
}
#endif /* LU_INVERSE_PLAIN_KERNELS_CU */ | aaecb98368edd59ceeeb59449f7b8f2cde6ab3cd.cu |
#ifndef LU_INVERSE_PLAIN_KERNELS_CU
#define LU_INVERSE_PLAIN_KERNELS_CU
#include "hamc_common.h"
// to be called with a single thread
__global__ void make_GF2_identity_gpu(HAMC_DATA_TYPE_t *A, int n)
{
//int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int tid = 0; tid < n; tid++) {
for(int j = 0; j < n; j++) {
if(tid == j) {
A[tid*n + j] = 1;
}
else {
A[tid*n + j] = 0;
}
}
}
}
// Forward Substitution to be used after LU Decomposition
// A - input matrix (modified from LU decomposition)
// B - identity matrix of size n
// n - size of matrix A
__global__ void GF2_Forward_substitute(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) { // cols
for (int i = tid - 1; i >= 0; i--) { // rows from bottom to top
B[i*n + tid] = A[i*n + tid];
for (int k = i+1; k < tid; k++) {
B[i*n + tid] ^= B[k*n + tid] & A[i*n + k];
}
}
}
}
// Forward Substitution to be used after LU Decomposition
// A - input matrix (modified from LU decomposition)
// B - identity matrix of size n
// n - size of matrix A
__global__ void GF2_Forward_substitutev2(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) { // cols
for (int i = 0 ; i <= tid - 1; i++) { // rows
int row = tid - 1 - i;
B[row*n + tid] = A[row*n + tid];
for (int k = row+1; k < tid; k++) {
B[row*n + tid] ^= B[k*n + tid] & A[row*n + k];
}
}
}
}
// Backward Substition to be used after Forward Substitution
__global__ void GF2_Backward_substitute(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = n - 1; j >= 0; j--) { // cols from right to left
if (tid < n) { // rows from top to bottom
//IA->data[i*n + j] = A->data[i*n + j];
for (int k = j+1; k < n; k++) {
B[tid*n + j] ^= B[tid*n + k] & A[k*n + j];
}
}
}
}
// Backward Substition to be used after Forward Substitution
__global__ void GF2_Backward_substitutev2(HAMC_DATA_TYPE_t *A,
HAMC_DATA_TYPE_t *B, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = 0; j <= n - 1; j++) { // cols from right to left
if (tid < n) { // rows from top to bottom
int col = n - 1 - j;
//IA->data[i*n + j] = A->data[i*n + j];
for (int k = col+1; k < n; k++) {
B[tid*n + col] ^= B[tid*n + k] & A[k*n + col];
}
}
}
}
// This kernel swaps cols given an IPIV
// A - matrix with cols to swap
// IPIV - pivot vector
// n - size of matrix A
__global__ void GF2_swap_cols(HAMC_DATA_TYPE_t *A, int *IPIV, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int k = n - 1; k >= 0; k--) { // cols from right to left
if (tid < n) {
HAMC_DATA_TYPE_t *C1 = &A[k];
HAMC_DATA_TYPE_t *C2 = &A[IPIV[k]];
HAMC_DATA_TYPE_t temp = C1[tid*n];
C1[tid*n] = C2[tid*n];
C2[tid*n] = temp;
}
}
}
// This kernel swaps cols given an IPIV
// A - matrix with cols to swap
// IPIV - pivot vector
// n - size of matrix A
__global__ void GF2_swap_colsv2(HAMC_DATA_TYPE_t *A, int *IPIV, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int k = 0; k <= n - 1; k++) { // cols from right to left
int row = n - 1 - k;
if (tid < n) {
HAMC_DATA_TYPE_t *C1 = &A[row];
HAMC_DATA_TYPE_t *C2 = &A[IPIV[row]];
HAMC_DATA_TYPE_t temp = C1[tid*n];
C1[tid*n] = C2[tid*n];
C2[tid*n] = temp;
}
}
}
__global__ void GF2_LU_decompose_update_trailing_row( HAMC_DATA_TYPE_t *A,
int n, int k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Update trailing matrix C ^= A & B
// where A is A(k+1:n, k), B is A(k, k+1 : n), C is A(k+1: n, k+1:n)
int m = n - k - 1;
if (tid < m) {
for (int j = 0; j < m; j++) { // cols
HAMC_DATA_TYPE_t *Arow = &A[(k + 1) * n + k];
HAMC_DATA_TYPE_t *Brow = &A[k * n + k + 1];
HAMC_DATA_TYPE_t *Crow = &A[(k + 1) * n + (k + 1)];
Crow[tid * n + j] ^= Arow[tid * n] & Brow[j];
}
}
}
// Update trailing matrix rows
// A - matrix to update trailing rows
// n - size of matrix A
// k - row
// update trailing matrix C ^= A & B,
// where A is A(k+1:n, k), B is A(k, k+1 : n), C is A(k+1: n, k+1:n)
//
// This kernel expects you to supply the col to be operated on.
__global__ void GF2_LU_decompose_update_trailing_row_index( HAMC_DATA_TYPE_t *A,
int n, int k, int j)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Update trailing matrix C ^= A & B
// where A is A(k+1:n, k), B is A(k, k+1 : n), C is A(k+1: n, k+1:n)
int m = n - k - 1;
if (tid < m) {
//printf("tid: %d\n", tid);
HAMC_DATA_TYPE_t *Arow = &A[(k + 1) * n + k];
HAMC_DATA_TYPE_t *Brow = &A[k * n + k + 1];
HAMC_DATA_TYPE_t *Crow = &A[(k + 1) * n + (k + 1)];
Crow[tid * n + j] ^= Arow[tid * n] & Brow[j];
}
}
// A is input matrix
// IPIV is integer pivot indeces vector should be sizeof(int)*A->rows,
// n is ld (latent dimension) should be A->rows or A->cols
__global__ void GF2_LU_decompose_find_max_row( HAMC_DATA_TYPE_t *A, int *IPIV,
int n, int k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// find max
if (tid == 0) {
for (int i = 0; i < n - k; i++) {
HAMC_DATA_TYPE_t *Arow = &A[k * n + k];
if (Arow[i*n] == 1) {
IPIV[k] = i + k;
return;
}
}
}
}
// A is input matrix
// IPIV is integer pivot indeces vector should be sizeof(int)*A->rows,
// n is ld (latent dimension) should be A->rows or A->cols
__global__ void GF2_LU_decompose_pivot_row( HAMC_DATA_TYPE_t *A, int *IPIV,
int n, int k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// if pivot row is changed for kth row, swap row k with pivot row
if (k != IPIV[k]) {
// have each thread handle a separate column element
// Make sure you have at least as many threads as n!!!
if (tid < n) {
HAMC_DATA_TYPE_t *R1 = &A[k * n]; // kth row
HAMC_DATA_TYPE_t *R2 = &A[IPIV[k] * n]; // pivot row
HAMC_DATA_TYPE_t temp = R1[tid];
R1[tid] = R2[tid];
R2[tid] = temp;
}
}
}
#endif /* LU_INVERSE_PLAIN_KERNELS_CU */ |
8074a15ef71aed1c612a10ac01c57251887186db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/conv_dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ConvDropoutForward(const int n, const int feature_map_size, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* in_data = in + index*feature_map_size;
Dtype* out_data = out + index*feature_map_size;
const bool judge = (mask[index] > threshold);
for (int i = 0; i < feature_map_size; i++){
out_data[i] = in_data[i] * judge * scale;
}
// caffe_gpu_scale(feature_map_size, Dtype((mask[index] > threshold) * scale), in_data, out_data);
// out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void ConvDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int mask_count = rand_vec_.count();
const int feature_map_size = bottom[0]->height() * bottom[0]->width();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(mask_count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ConvDropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(mask_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
mask_count, feature_map_size, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void ConvDropoutBackward(const int n, const int feature_map_size, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* in_data = in_diff + index*feature_map_size;
Dtype* out_data = out_diff + index*feature_map_size;
const bool judge = (mask[index] > threshold);
for (int i = 0; i < feature_map_size; i++){
out_data[i] = in_data[i] * judge * scale;
}
// caffe_gpu_scale(feature_map_size, Dtype((mask[index] > threshold) * scale), in_data, out_data);
// out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
// Backward pass of per-feature-map dropout (HIP port).
// TRAIN: reuses the mask drawn in Forward_gpu to gate and scale the
// gradient chunk-by-chunk.  TEST: the gradient passes through unchanged.
template <typename Dtype>
void ConvDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
// `count` is unused in this branch -- kept for symmetry with Forward_gpu.
const int count = bottom[0]->count();
const int mask_count = rand_vec_.count();
const int feature_map_size = bottom[0]->height() * bottom[0]->width();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ConvDropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(mask_count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
mask_count, feature_map_size, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvDropoutLayer);
} // namespace caffe
| 8074a15ef71aed1c612a10ac01c57251887186db.cu | #include <vector>
#include "caffe/layers/conv_dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward dropout kernel: each of the n indices owns one
// feature_map_size-long chunk of the input.  Chunks whose mask entry is
// <= threshold are zeroed; surviving chunks are scaled by `scale`
// (inverted dropout).
template <typename Dtype>
__global__ void ConvDropoutForward(const int n, const int feature_map_size, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* in_data = in + index*feature_map_size;
Dtype* out_data = out + index*feature_map_size;
// bool promotes to 0/1, gating the whole feature map in one multiply.
const bool judge = (mask[index] > threshold);
for (int i = 0; i < feature_map_size; i++){
out_data[i] = in_data[i] * judge * scale;
}
// caffe_gpu_scale(feature_map_size, Dtype((mask[index] > threshold) * scale), in_data, out_data);
// out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
// Forward pass of per-feature-map dropout.
// TRAIN: draws one random uint per feature-map-sized chunk into rand_vec_,
// then ConvDropoutForward zeroes chunks whose draw is <= uint_thres_ and
// scales survivors by scale_ (inverted dropout, so TEST needs no rescale).
// TEST: the input is copied through unchanged.
template <typename Dtype>
void ConvDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// One mask entry gates an entire H*W feature map, not a single element.
const int mask_count = rand_vec_.count();
const int feature_map_size = bottom[0]->height() * bottom[0]->width();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
// Fill the mask with uniformly distributed unsigned ints.
caffe_gpu_rng_uniform(mask_count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
ConvDropoutForward<Dtype><<<CAFFE_GET_BLOCKS(mask_count), CAFFE_CUDA_NUM_THREADS>>>(
mask_count, feature_map_size, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
// Backward dropout kernel: re-applies the forward mask to the top
// gradient.  Each of the n indices owns one feature_map_size-long chunk;
// chunks whose mask entry is <= threshold receive zero gradient, the rest
// are scaled by `scale` to match the forward pass.
template <typename Dtype>
__global__ void ConvDropoutBackward(const int n, const int feature_map_size, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* in_data = in_diff + index*feature_map_size;
Dtype* out_data = out_diff + index*feature_map_size;
// bool promotes to 0/1, gating the whole chunk in one multiply.
const bool judge = (mask[index] > threshold);
for (int i = 0; i < feature_map_size; i++){
out_data[i] = in_data[i] * judge * scale;
}
// caffe_gpu_scale(feature_map_size, Dtype((mask[index] > threshold) * scale), in_data, out_data);
// out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
// Backward pass of per-feature-map dropout.
// TRAIN: reuses the mask drawn in Forward_gpu to gate and scale the
// gradient chunk-by-chunk.  TEST: the gradient passes through unchanged.
template <typename Dtype>
void ConvDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
// `count` is unused in this branch -- kept for symmetry with Forward_gpu.
const int count = bottom[0]->count();
const int mask_count = rand_vec_.count();
const int feature_map_size = bottom[0]->height() * bottom[0]->width();
// NOLINT_NEXT_LINE(whitespace/operators)
ConvDropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(mask_count),
CAFFE_CUDA_NUM_THREADS>>>(
mask_count, feature_map_size, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvDropoutLayer);
} // namespace caffe
|
4a2ccd509c79ba2d142a114aa16410feaaa483ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Refactor firstParallel so that it can run on the GPU.
*/
// Teaching kernel: every launched thread prints one line, demonstrating
// that the body executes once per thread rather than once overall.
__global__ void firstParallel()
{
printf("This should be running in parallel.\n");
}
// Entry point: launches the demo kernel over 5 blocks of 5 threads each
// (25 prints total) and then waits for the device to finish so the
// kernel's printf output is flushed before the process exits.
int main()
{
  hipLaunchKernelGGL(firstParallel, dim3(5), dim3(5), 0, 0);
  // Host/device sync: without this the program could terminate before
  // the asynchronous kernel completes.
  hipDeviceSynchronize();
  return 0;
}
| 4a2ccd509c79ba2d142a114aa16410feaaa483ed.cu | #include <stdio.h>
/*
* Refactor firstParallel so that it can run on the GPU.
*/
// Teaching kernel: every launched thread prints one line, demonstrating
// that the body executes once per thread rather than once overall.
__global__ void firstParallel()
{
printf("This should be running in parallel.\n");
}
// Entry point: launches the demo kernel over 5 blocks of 5 threads each
// (25 prints total) and then waits for the device to finish so the
// kernel's printf output is flushed before the process exits.
int main()
{
  firstParallel<<<5, 5>>>();
  // Host/device sync: without this the program could terminate before
  // the asynchronous kernel completes.
  cudaDeviceSynchronize();
  return 0;
}
|
444475781dab26f05fbb24e8ffc403e1a976f5f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "arquivo1.h"
#include "comm/comm.h"
// Doubles each covered element of d_buffer in place (x += x).
// NOTE(review): there is no bounds check on ix, so callers must ensure
// the launch never exceeds the buffer length.  funcao1 allocates N*N
// elements but launches only ~N threads, so only a prefix is touched.
__global__ void calculate(int *d_buffer){
int ix = blockIdx.x*blockDim.x + threadIdx.x;
d_buffer[ix] += d_buffer[ix];
}
/*
 * funcao1 (HIP port): allocates an N*N int buffer, fills it with 1..N*N,
 * uploads it to the device, then for 11 iterations launches the
 * `calculate` doubling kernel 80000 times and hands the device buffer to
 * "funcao2" via sendMessage().
 *
 * NOTE(review): the launch covers only grid.x * 1024 threads (N rounded
 * up to one block of 1024), so only the first 1024 of the N*N elements
 * are ever touched by the kernel -- preserved as-is; confirm intent.
 */
extern "C" void funcao1(){
    int N = 500;
    int *buffer, *d_buffer;
    int i, j;
    dim3 grid, block;
    block.x = 1024;
    grid.x = (N + block.x - 1) / block.x;
    buffer = (int*) malloc(sizeof(int)*N*N);
    hipMalloc(&d_buffer, sizeof(int)*N*N);
    /* Host-side test pattern: 1, 2, ..., N*N, pushed to the device once. */
    for(i = 0; i < N*N; i++){
        buffer[i] = i + 1;
    }
    hipMemcpy(d_buffer, buffer, N*N*sizeof(int), hipMemcpyHostToDevice);
    for(i = 0; i < 11; i++){
        /* Compute phase: repeated in-place doubling on the device. */
        for(j = 0; j < 80000; j++){
            hipLaunchKernelGGL(( calculate), dim3(grid), dim3(block), 0, 0, d_buffer);
        }
        /* Ship the (device-resident) result to the consumer task. */
        sendMessage("funcao1", "funcao2", INT, (void*)d_buffer, N*N);
    }
    /* Bug fix: the host staging buffer was previously leaked.  d_buffer is
     * intentionally left allocated: sendMessage() may hand the device
     * pointer to the consumer asynchronously -- TODO confirm its ownership
     * contract before adding hipFree(d_buffer) here. */
    free(buffer);
}
| 444475781dab26f05fbb24e8ffc403e1a976f5f9.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "arquivo1.h"
#include "comm/comm.h"
// Doubles each covered element of d_buffer in place (x += x).
// NOTE(review): there is no bounds check on ix, so callers must ensure
// the launch never exceeds the buffer length.  funcao1 allocates N*N
// elements but launches only ~N threads, so only a prefix is touched.
__global__ void calculate(int *d_buffer){
int ix = blockIdx.x*blockDim.x + threadIdx.x;
d_buffer[ix] += d_buffer[ix];
}
/*
 * funcao1: allocates an N*N int buffer, fills it with 1..N*N, uploads it
 * to the device, then for 11 iterations launches the `calculate` doubling
 * kernel 80000 times and hands the device buffer to "funcao2" via
 * sendMessage().
 *
 * NOTE(review): the launch covers only grid.x * 1024 threads (N rounded
 * up to one block of 1024), so only the first 1024 of the N*N elements
 * are ever touched by the kernel -- preserved as-is; confirm intent.
 */
extern "C" void funcao1(){
    int N = 500;
    int *buffer, *d_buffer;
    int i, j;
    dim3 grid, block;
    block.x = 1024;
    grid.x = (N + block.x - 1) / block.x;
    buffer = (int*) malloc(sizeof(int)*N*N);
    cudaMalloc(&d_buffer, sizeof(int)*N*N);
    /* Host-side test pattern: 1, 2, ..., N*N, pushed to the device once. */
    for(i = 0; i < N*N; i++){
        buffer[i] = i + 1;
    }
    cudaMemcpy(d_buffer, buffer, N*N*sizeof(int), cudaMemcpyHostToDevice);
    for(i = 0; i < 11; i++){
        /* Compute phase: repeated in-place doubling on the device. */
        for(j = 0; j < 80000; j++){
            calculate<<<grid, block>>>(d_buffer);
        }
        /* Ship the (device-resident) result to the consumer task. */
        sendMessage("funcao1", "funcao2", INT, (void*)d_buffer, N*N);
    }
    /* Bug fix: the host staging buffer was previously leaked.  d_buffer is
     * intentionally left allocated: sendMessage() may hand the device
     * pointer to the consumer asynchronously -- TODO confirm its ownership
     * contract before adding cudaFree(d_buffer) here. */
    free(buffer);
}
|
9375b75dc5fb6b3d55a792c64d8d0678b03258d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "../../../gemtc.cu"
#include<stdio.h>
#include<stdlib.h>
#define BIN_COUNT 256
#define NUM_RUNS 6
#define AVG_RUNS 5
#include <helper_functions.h>
#include <hip/hip_runtime.h>
/*
 * GeMTC throughput benchmark (HIP port).  For NUM_RUNS rounds, with the
 * payload (byteCount) growing 10x per round from 1 KB, it pushes
 * NUM_TASKS type-34 tasks in batches of LOOP_SIZE, polls each batch's
 * results back, and prints: payload bytes, estimated MB/s, and average
 * seconds per batch.  h_size reserves BIN_COUNT ints of output space in
 * the shared parameter buffer -- presumably task 34 is a 256-bin
 * histogram; confirm against the gemtc kernel table.
 */
int main(int argc, char **argv){
int NUM_TASKS, LOOP_SIZE;
uint byteCount = 1024;
int Overfill =1;
if(argc>2){
NUM_TASKS = atoi(argv[1]);
LOOP_SIZE = atoi(argv[2]);
}else{
printf("This test requires four parameters:\n");
printf(" int NUM_TASKS, int LOOP_SIZE, int MATRIX_SIZE, int STATIC_VALUE\n");
printf("where NUM_TASKS is the total numer of vector add tasks to be sent to gemtc\n");
printf(" LOOP_SIZE is the number of tasks should be sent to gemtc before waiting for results\n");
exit(1);
}
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
StopWatchInterface *hTimer = NULL;
int iter,warps;
// NOTE(review): `blocks` is only reassigned in the Overfill==2 branch and
// never used afterwards.
int blocks = devProp.multiProcessorCount;
sdkCreateTimer(&hTimer);
for(iter=0; iter < NUM_RUNS; iter++) {
gemtcSetup(25600, Overfill);
// h_params layout: [0] = payload length, [1..byteCount] = random bytes,
// followed by h_size bytes of task output space.
int d_size = sizeof(unsigned int) * byteCount;
int h_size = sizeof(int) * BIN_COUNT;
int size = 1 + d_size + h_size;
int j;
int k;
uint *h_params = (uint *) malloc(size);
double dAvgSecs;
// `warps` feeds the bandwidth estimate below; it mirrors how
// gemtcSetup's Overfill mode packs workers onto the device.
if(Overfill==1){
warps = devProp.maxThreadsPerBlock/32;
}
if(Overfill==0){
int coresPerSM = _ConvertSMVer2Cores(devProp.major, devProp.minor);
warps = coresPerSM/16; //A warp runs on 16 cores
}
if(Overfill==2){
warps =1;
blocks = 1;
}
// Fixed seed for reproducible payload contents across runs.
srand(2009);
h_params[0] = byteCount;
for (uint i = 1; i <= byteCount; i++)
{
h_params[i] = rand() % 256;
}
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(k=0; k < AVG_RUNS ; k++) {
for(j=0; j<NUM_TASKS/LOOP_SIZE; j++){
int i;
// Push a batch of LOOP_SIZE tasks before waiting on any result.
for(i=0; i<LOOP_SIZE; i++){
uint *d_params = (uint *) gemtcGPUMalloc(size);
gemtcMemcpyHostToDevice(d_params, h_params, size);
gemtcPush(34, 32, i+j*LOOP_SIZE, d_params);
}
// Drain the batch: poll until each task returns a result pointer.
for(i=0; i<LOOP_SIZE; i++){
void *ret=NULL;
int id;
while(ret==NULL){
gemtcPoll(&id, &ret);
}
// Copy back the results
gemtcMemcpyDeviceToHost(h_params, ret, size);
// Free the device pointer
gemtcGPUFree(ret);
}
}
}
free(h_params);
sdkStopTimer(&hTimer);
dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer) / (double) AVG_RUNS;
dAvgSecs = dAvgSecs/(NUM_TASKS/LOOP_SIZE);
printf("%u\t%.4f\t%.5f\n",
byteCount,(1.0e-6 * warps * (double)byteCount / dAvgSecs), dAvgSecs);
byteCount *= 10;
gemtcCleanup();
}
//printf("Completed\n");
sdkDeleteTimer(&hTimer);
return 0;
}
| 9375b75dc5fb6b3d55a792c64d8d0678b03258d3.cu | #include "../../../gemtc.cu"
#include<stdio.h>
#include<stdlib.h>
#define BIN_COUNT 256
#define NUM_RUNS 6
#define AVG_RUNS 5
#include <helper_functions.h>
#include <cuda_runtime.h>
/*
 * GeMTC throughput benchmark.  For NUM_RUNS rounds, with the payload
 * (byteCount) growing 10x per round from 1 KB, it pushes NUM_TASKS
 * type-34 tasks in batches of LOOP_SIZE, polls each batch's results
 * back, and prints: payload bytes, estimated MB/s, and average seconds
 * per batch.  h_size reserves BIN_COUNT ints of output space in the
 * shared parameter buffer -- presumably task 34 is a 256-bin histogram;
 * confirm against the gemtc kernel table.
 */
int main(int argc, char **argv){
int NUM_TASKS, LOOP_SIZE;
uint byteCount = 1024;
int Overfill =1;
if(argc>2){
NUM_TASKS = atoi(argv[1]);
LOOP_SIZE = atoi(argv[2]);
}else{
printf("This test requires four parameters:\n");
printf(" int NUM_TASKS, int LOOP_SIZE, int MATRIX_SIZE, int STATIC_VALUE\n");
printf("where NUM_TASKS is the total numer of vector add tasks to be sent to gemtc\n");
printf(" LOOP_SIZE is the number of tasks should be sent to gemtc before waiting for results\n");
exit(1);
}
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
StopWatchInterface *hTimer = NULL;
int iter,warps;
// NOTE(review): `blocks` is only reassigned in the Overfill==2 branch and
// never used afterwards.
int blocks = devProp.multiProcessorCount;
sdkCreateTimer(&hTimer);
for(iter=0; iter < NUM_RUNS; iter++) {
gemtcSetup(25600, Overfill);
// h_params layout: [0] = payload length, [1..byteCount] = random bytes,
// followed by h_size bytes of task output space.
int d_size = sizeof(unsigned int) * byteCount;
int h_size = sizeof(int) * BIN_COUNT;
int size = 1 + d_size + h_size;
int j;
int k;
uint *h_params = (uint *) malloc(size);
double dAvgSecs;
// `warps` feeds the bandwidth estimate below; it mirrors how
// gemtcSetup's Overfill mode packs workers onto the device.
if(Overfill==1){
warps = devProp.maxThreadsPerBlock/32;
}
if(Overfill==0){
int coresPerSM = _ConvertSMVer2Cores(devProp.major, devProp.minor);
warps = coresPerSM/16; //A warp runs on 16 cores
}
if(Overfill==2){
warps =1;
blocks = 1;
}
// Fixed seed for reproducible payload contents across runs.
srand(2009);
h_params[0] = byteCount;
for (uint i = 1; i <= byteCount; i++)
{
h_params[i] = rand() % 256;
}
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(k=0; k < AVG_RUNS ; k++) {
for(j=0; j<NUM_TASKS/LOOP_SIZE; j++){
int i;
// Push a batch of LOOP_SIZE tasks before waiting on any result.
for(i=0; i<LOOP_SIZE; i++){
uint *d_params = (uint *) gemtcGPUMalloc(size);
gemtcMemcpyHostToDevice(d_params, h_params, size);
gemtcPush(34, 32, i+j*LOOP_SIZE, d_params);
}
// Drain the batch: poll until each task returns a result pointer.
for(i=0; i<LOOP_SIZE; i++){
void *ret=NULL;
int id;
while(ret==NULL){
gemtcPoll(&id, &ret);
}
// Copy back the results
gemtcMemcpyDeviceToHost(h_params, ret, size);
// Free the device pointer
gemtcGPUFree(ret);
}
}
}
free(h_params);
sdkStopTimer(&hTimer);
dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer) / (double) AVG_RUNS;
dAvgSecs = dAvgSecs/(NUM_TASKS/LOOP_SIZE);
printf("%u\t%.4f\t%.5f\n",
byteCount,(1.0e-6 * warps * (double)byteCount / dAvgSecs), dAvgSecs);
byteCount *= 10;
gemtcCleanup();
}
//printf("Completed\n");
sdkDeleteTimer(&hTimer);
return 0;
}
|
016c1a1c0e7dc14f6d5f9217613fd4dcc5994c66.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipfft.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
using namespace std;
#define CHANNEL_NUM 10 //FFT
const int dataH = 512; //
const int dataW = 512; //
hipfftHandle fftplanfwd; //
// Builds the FFT input batch directly on the device: for batch image
// k (0..Nb-1), the top-left (10+k)x(10+k) square gets imaginary part 1.
// Real parts are whatever the buffer holds (main memsets it to zero
// first).  One thread per batch row: i = k*H + row.
__global__ void SetFFTInput(hipfftComplex* input, int H, int W, int Nb)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int ix = 0, ib = 0;
if (i < H * Nb){
ix = i % H; // row within this batch image
ib = i / H + 10; // square size for this image: 10 + image index
if(ix < ib){
for(int j = 0; j < ib; j++){
input[i * W + j].y = 1;
}
}
}
}
/*
 * Batched 2D complex-to-complex FFT benchmark (HIP port): CHANNEL_NUM
 * images of dataH x dataW.  The input is generated on the device by
 * SetFFTInput, a single batched hipfftExecC2C is timed with events, the
 * power spectrum is dumped to "test.txt", and the elapsed time printed.
 *
 * Bug fix: hipEventElapsedTime previously read into the undeclared
 * identifier `time_eaAlapsed` (a typo for `time_elapsed`), which did
 * not compile.
 */
int main(void){
    printf("planmany_cuda31.cu...\n");
    printf("CPU...\n");
    /* Host-side buffers (plain malloc; hipHostMalloc would enable async copies). */
    hipfftComplex *h_Data = (hipfftComplex*)malloc(dataH*CHANNEL_NUM*dataW * sizeof(hipfftComplex));
    hipfftComplex *h_resultFFT = (hipfftComplex*)malloc(dataH*CHANNEL_NUM*dataW * sizeof(hipfftComplex));
    printf("GPU...\n");
    /* Device buffers: d_Data holds the input batch, fd_Data the transform. */
    hipfftComplex *d_Data;
    hipfftComplex *fd_Data;
    checkCudaErrors(hipMalloc((void**)&d_Data, dataH*CHANNEL_NUM*dataW * sizeof(hipfftComplex)));
    checkCudaErrors(hipMemset(d_Data, 0, dataH*CHANNEL_NUM*dataW * sizeof(hipfftComplex)));
    checkCudaErrors(hipMalloc((void**)&fd_Data, dataH*CHANNEL_NUM*dataW * sizeof(hipfftComplex)));
    checkCudaErrors(hipMemset(fd_Data, 0, dataH*CHANNEL_NUM*dataW * sizeof(hipfftComplex)));
    printf("...\n");
    /* Host-side reference pattern: image k has a (4+k)x(4+k) block of ones.
     * NOTE(review): h_Data is never copied to the device (that copy is
     * commented out in the original); the device input actually comes from
     * SetFFTInput.  Kept as-is. */
    for (int k = 0; k < CHANNEL_NUM; k++){
        for (int i = 0; i < dataH; i++){
            for (int j = 0; j < dataW; j++){
                h_Data[(i + k * dataH) * dataW + j].x = 0;
                h_Data[(i + k * dataH) * dataW + j].y = 0;
                if(i < (4 + k) && j < (4 + k)){
                    h_Data[(i + k * dataH)*dataW + j].x = 1;
                }
            }
        }
    }
    /* One thread per batch row; SetFFTInput guards against overrun. */
    int threadsPerBlock = 256;
    int blocksPerGrid = (dataH * CHANNEL_NUM + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(SetFFTInput, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
                       d_Data, dataH, dataW, CHANNEL_NUM);
    hipDeviceSynchronize();
    /* Event-based timing of the batched FFT. */
    float time_elapsed = 0;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    /* PlanMany batched layout: CHANNEL_NUM contiguous dataH x dataW
     * transforms, unit stride within each transform. */
    const int rank = 2;
    int n[rank] = { dataH, dataW };
    int *inembed = n;
    int istride = 1;
    int idist = n[0] * n[1];
    int *onembed = n;
    int ostride = 1;
    int odist = n[0] * n[1];
    int batch = CHANNEL_NUM;
    checkCudaErrors(
        hipfftPlanMany(&fftplanfwd, rank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, batch));
    printf("CPUGPU...\n");
    printf("...\n");
    hipEventRecord(start, 0);
    checkCudaErrors(
        hipfftExecC2C(fftplanfwd, d_Data, fd_Data, HIPFFT_FORWARD));
    hipEventRecord(stop, 0);
    hipEventSynchronize(start);
    hipEventSynchronize(stop);
    /* Fixed: was `time_eaAlapsed`, an undeclared identifier. */
    hipEventElapsedTime(&time_elapsed, start, stop);
    hipDeviceSynchronize();
    printf("GPUCPU...\n");
    checkCudaErrors(
        hipMemcpy(h_resultFFT, fd_Data, dataW*dataH*CHANNEL_NUM * sizeof(hipfftComplex), hipMemcpyDeviceToHost));
    printf("CPU...\n");
    /* Dump |X|^2 for every output sample; guard against fopen failure. */
    FILE *fp = fopen("test.txt", "w");
    if (fp) {
        for (int i = 0; i < dataH*CHANNEL_NUM*dataW; i++){
            fprintf(fp, "%.10f\n", h_resultFFT[i].x*h_resultFFT[i].x + h_resultFFT[i].y*h_resultFFT[i].y);
        }
        fclose(fp);
    }
    hipEventDestroy(start);
    hipEventDestroy(stop);
    printf("%f(ms)\n", time_elapsed);
    checkCudaErrors(hipfftDestroy(fftplanfwd));
    checkCudaErrors(hipFree(d_Data));
    checkCudaErrors(hipFree(fd_Data));
    free(h_Data);
    free(h_resultFFT);
    return 0;
} | 016c1a1c0e7dc14f6d5f9217613fd4dcc5994c66.cu | #include <cufft.h>
#include <iostream>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
using namespace std;
#define CHANNEL_NUM 10 //通道数、FFT次数
const int dataH = 512; //图像高度
const int dataW = 512; //图像宽度
cufftHandle fftplanfwd; //创建句柄
// Builds the FFT input batch directly on the device: for batch image
// k (0..Nb-1), the top-left (10+k)x(10+k) square gets imaginary part 1.
// Real parts are whatever the buffer holds (main memsets it to zero
// first).  One thread per batch row: i = k*H + row.
__global__ void SetFFTInput(cufftComplex* input, int H, int W, int Nb)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int ix = 0, ib = 0;
if (i < H * Nb){
ix = i % H; // row within this batch image
ib = i / H + 10; // square size for this image: 10 + image index
if(ix < ib){
for(int j = 0; j < ib; j++){
input[i * W + j].y = 1;
}
}
}
}
/*
 * Batched 2D complex-to-complex FFT benchmark: CHANNEL_NUM images of
 * dataH x dataW.  The input is generated on the device by SetFFTInput,
 * a single batched cufftExecC2C is timed with events, the power
 * spectrum is dumped to "test.txt", and the elapsed time is printed.
 *
 * Bug fix: cudaEventElapsedTime previously read into the undeclared
 * identifier `time_eaAlapsed` (a typo for `time_elapsed`), which did
 * not compile.
 */
int main(void){
    printf("文件名planmany_cuda31.cu...\n");
    printf("分配CPU内存空间...\n");
    /* Host-side buffers (plain malloc; cudaMallocHost would enable async copies). */
    cufftComplex *h_Data = (cufftComplex*)malloc(dataH*CHANNEL_NUM*dataW * sizeof(cufftComplex));
    cufftComplex *h_resultFFT = (cufftComplex*)malloc(dataH*CHANNEL_NUM*dataW * sizeof(cufftComplex));
    printf("分配GPU内存空间...\n");
    /* Device buffers: d_Data holds the input batch, fd_Data the transform. */
    cufftComplex *d_Data;
    cufftComplex *fd_Data;
    checkCudaErrors(cudaMalloc((void**)&d_Data, dataH*CHANNEL_NUM*dataW * sizeof(cufftComplex)));
    checkCudaErrors(cudaMemset(d_Data, 0, dataH*CHANNEL_NUM*dataW * sizeof(cufftComplex)));
    checkCudaErrors(cudaMalloc((void**)&fd_Data, dataH*CHANNEL_NUM*dataW * sizeof(cufftComplex)));
    checkCudaErrors(cudaMemset(fd_Data, 0, dataH*CHANNEL_NUM*dataW * sizeof(cufftComplex)));
    printf("初始化测试数据...\n");
    /* Host-side reference pattern: image k has a (4+k)x(4+k) block of ones.
     * NOTE(review): h_Data is never copied to the device (that copy is
     * commented out in the original); the device input actually comes from
     * SetFFTInput.  Kept as-is. */
    for (int k = 0; k < CHANNEL_NUM; k++){
        for (int i = 0; i < dataH; i++){
            for (int j = 0; j < dataW; j++){
                h_Data[(i + k * dataH) * dataW + j].x = 0;
                h_Data[(i + k * dataH) * dataW + j].y = 0;
                if(i < (4 + k) && j < (4 + k)){
                    h_Data[(i + k * dataH)*dataW + j].x = 1;
                }
            }
        }
    }
    /* One thread per batch row; SetFFTInput guards against overrun. */
    int threadsPerBlock = 256;
    int blocksPerGrid = (dataH * CHANNEL_NUM + threadsPerBlock - 1) / threadsPerBlock;
    SetFFTInput<<<blocksPerGrid, threadsPerBlock>>>(d_Data, dataH, dataW, CHANNEL_NUM);
    cudaDeviceSynchronize();
    /* Event-based timing of the batched FFT. */
    float time_elapsed = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /* cufftPlanMany batched layout: CHANNEL_NUM contiguous dataH x dataW
     * transforms, unit stride within each transform. */
    const int rank = 2;
    int n[rank] = { dataH, dataW };
    int *inembed = n;
    int istride = 1;
    int idist = n[0] * n[1];
    int *onembed = n;
    int ostride = 1;
    int odist = n[0] * n[1];
    int batch = CHANNEL_NUM;
    checkCudaErrors(
        cufftPlanMany(&fftplanfwd, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, batch));
    printf("拷贝CPU数据到GPU中...\n");
    printf("开始计时...\n");
    cudaEventRecord(start, 0);
    checkCudaErrors(
        cufftExecC2C(fftplanfwd, d_Data, fd_Data, CUFFT_FORWARD));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);
    /* Fixed: was `time_eaAlapsed`, an undeclared identifier. */
    cudaEventElapsedTime(&time_elapsed, start, stop);
    cudaDeviceSynchronize();
    printf("拷贝GPU数据返回到CPU中...\n");
    checkCudaErrors(
        cudaMemcpy(h_resultFFT, fd_Data, dataW*dataH*CHANNEL_NUM * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
    printf("显示返回到CPU中的数据...\n");
    /* Dump |X|^2 for every output sample; guard against fopen failure. */
    FILE *fp = fopen("test.txt", "w");
    if (fp) {
        for (int i = 0; i < dataH*CHANNEL_NUM*dataW; i++){
            fprintf(fp, "%.10f\n", h_resultFFT[i].x*h_resultFFT[i].x + h_resultFFT[i].y*h_resultFFT[i].y);
        }
        fclose(fp);
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("执行时间:%f(ms)\n", time_elapsed);
    checkCudaErrors(cufftDestroy(fftplanfwd));
    checkCudaErrors(cudaFree(d_Data));
    checkCudaErrors(cudaFree(fd_Data));
    free(h_Data);
    free(h_resultFFT);
    return 0;
} | d7fba32258967717557cc68b65337fe65acfefa4.hip |
d7fba32258967717557cc68b65337fe65acfefa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <random>
#include <vector>
#include <ctime>
#include <iostream>
// One network timestep for all neurons, integrated as four forward-Euler
// substeps (factor 0.25 per substep) of an Izhikevich-style two-variable
// model.  Threads [0, numExcit) use excitatory parameters (spike when
// v > 35, reset v = -50, u += 100); threads [numExcit, numNeur) use
// inhibitory parameters (spike when v > 50, reset v = -56, u += 130).
// d_cf[i] records whether neuron i fired this step; d_I is the synaptic
// input accumulated by CommunicationPhase and is cleared here; d_driven
// is a constant external drive.
// NOTE(review): the excitatory branch clears d_I *inside* the substep
// loop (so synaptic input acts for the first substep only), while the
// inhibitory branch clears it *after* the loop.  This asymmetry looks
// unintentional -- confirm before changing.
__global__ void NeuronTimestep(
int numNeur,
int numExcit,
float *d_v,
float *d_u,
float *d_I,
bool *d_cf,
float *d_driven)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < numExcit)
{
d_cf[i] = false;
for (int dt = 0; dt < 4; dt++)
{
float dv = (0.7 * (d_v[i] + 60)*(d_v[i] + 40) - d_u[i] + d_I[i] + d_driven[i]) / 100;
float du = (0.03 * (-2*(d_v[i] + 60) - d_u[i]));
d_v[i] += 0.25*dv;
d_u[i] += 0.25*du;
if (d_v[i] > 35)
{
// Spike: flag, reset membrane potential, bump recovery variable.
d_cf[i] = true;
d_v[i] = -50;
d_u[i] += 100;
break;
}
d_I[i] = 0;
}
}
else if (i < numNeur)
{
d_cf[i] = false;
for (int dt = 0; dt < 4; dt++)
{
float dv = (1.2 * (d_v[i] + 75)*(d_v[i] + 45) - d_u[i] + d_I[i] + d_driven[i]) / 150;
float du = (0.01 * (5 * (d_v[i] + 75) - d_u[i]));
d_v[i] += 0.25*dv;
d_u[i] += 0.25*du;
if (d_v[i] > 50)
{
// Spike: flag, reset membrane potential, bump recovery variable.
d_cf[i] = true;
d_v[i] = -56;
d_u[i] += 130;
break;
}
}
d_I[i] = 0;
}
}
// Spike propagation: one thread per synapse (edge).  If the edge's source
// neuron fired this timestep (d_cf), its weight is accumulated into the
// target neuron's input current.  atomicAdd is required because many
// edges can share the same target.
__global__ void CommunicationPhase(
int numEdge,
bool *d_cf,
int *d_source,
int *d_target,
float *d_weight,
float *d_I)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < numEdge)
{
if (d_cf[d_source[i]])
{
atomicAdd(&d_I[d_target[i]], d_weight[i]);
}
}
}
/*
 * Pulse-coupled network driver (HIP port): 1000 neurons (first 800
 * excitatory, last 200 inhibitory), random directed graph with 20%
 * connection density, excitatory weights ~ U(0, 300), inhibitory
 * weights ~ U(0, -400).  The first 100 neurons get a constant driving
 * current of 75.  The simulation runs in three phases: 100 steps with
 * no coupling (equilibration), 300 coupled steps discarded as transient,
 * then T = 2000 recorded steps whose spike flags and voltages are copied
 * back each step.  Finally the total number of firings is printed.
 */
int main()
{
int numNeurons = 1000;
int numExcit = 800;
int T = 2000;
int equilizationTime = 100;
int transientTime = 300;
/* CUDA Parameters */
int numThreads = 512;
/* Neurons */
float *h_v, *d_v, *h_u, *d_u, *h_I, *d_I, *h_driven, *d_driven;
bool *d_cf, *h_cf;
h_v = new float[numNeurons];
h_u = new float[numNeurons];
h_I = new float[numNeurons];
h_cf = new bool[numNeurons];
h_driven = new float[numNeurons];
// Per-timestep records of spike flags and membrane voltages.
bool **SpikeTrainYard = new bool*[T];
float **VoltageTrace = new float *[T];
// NOTE(review): h_cf is initialized below but never copied to the device
// or read afterwards -- appears vestigial.
for (int i = 0; i < numNeurons; i++)
{
h_v[i] = -60;
h_u[i] = 0;
h_I[i] = 0;
h_cf[i] = false;
if (i < 100)
{
h_driven[i] = 75;
}
else
{
h_driven[i] = 0;
}
}
for (int t = 0; t < T; t++)
{
SpikeTrainYard[t] = new bool[numNeurons];
VoltageTrace[t] = new float[numNeurons];
}
/* Edges */
std::vector<int> h_source; int *d_source;
std::vector<int> h_target; int *d_target;
std::vector<float> h_weight; float *d_weight;
// Time-seeded RNG: each run builds a different random graph.
std::mt19937 rd(time(NULL));
std::uniform_real_distribution<float> dist(0.0, 1.0);
for (int n = 0; n < numNeurons; n++)
{
for (int m = 0; m < numNeurons; m++)
{
if (n != m)
{
if (dist(rd) < .2)
{
h_source.push_back(n);
h_target.push_back(m);
if (n < numExcit)
{
h_weight.push_back(dist(rd) * 300);
}
else
{
h_weight.push_back(dist(rd) * -400);
}
}
}
}
}
int numEdges = h_source.size();
/* CUDA Memory Functions */
hipMalloc((void**)&d_v, numNeurons * sizeof(float));
hipMalloc((void**)&d_u, numNeurons * sizeof(float));
hipMalloc((void**)&d_I, numNeurons * sizeof(float));
hipMalloc((void**)&d_driven, numNeurons * sizeof(float));
hipMalloc((void**)&d_cf, numNeurons * sizeof(bool));
hipMalloc((void**)&d_source, numEdges * sizeof(int));
hipMalloc((void**)&d_target, numEdges * sizeof(int));
hipMalloc((void**)&d_weight, numEdges * sizeof(float));
hipMemcpy(d_v, h_v, numNeurons * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_u, h_u, numNeurons * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_I, h_I, numNeurons * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_driven, h_driven, numNeurons * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_source, h_source.data(), numEdges * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_target, h_target.data(), numEdges * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_weight, h_weight.data(), numEdges * sizeof(float), hipMemcpyHostToDevice);
/* Run Simulation */
for (int t = 0; t < equilizationTime; t++)
{
/* Run Timesteps, No Communication */
hipLaunchKernelGGL(( NeuronTimestep) , dim3((numNeurons + numThreads - 1) / numThreads) , dim3(numThreads) , 0, 0,
numNeurons,
numExcit,
d_v,
d_u,
d_I,
d_cf,
d_driven);
}
for (int t = 0; t < transientTime; t++)
{
/* Run Timesteps, Communication, No Writing */
NeuronTimestep << <(numNeurons + numThreads - 1) / numThreads, numThreads >> >(
numNeurons,
numExcit,
d_v,
d_u,
d_I,
d_cf,
d_driven);
CommunicationPhase << <(numEdges + numThreads - 1) / numThreads, numThreads >> >(
numEdges,
d_cf,
d_source,
d_target,
d_weight,
d_I);
}
for (int t = 0; t < T; t++)
{
/* Run Timesteps, Communication, Write Results*/
NeuronTimestep << <(numNeurons + numThreads - 1) / numThreads, numThreads >> >(
numNeurons,
numExcit,
d_v,
d_u,
d_I,
d_cf,
d_driven);
hipLaunchKernelGGL(( CommunicationPhase), dim3((numEdges + numThreads - 1) / numThreads), dim3(numThreads), 0, 0,
numEdges,
d_cf,
d_source,
d_target,
d_weight,
d_I);
// Blocking copies double as per-step synchronization points.
hipMemcpy(SpikeTrainYard[t], d_cf, numNeurons * sizeof(bool), hipMemcpyDeviceToHost);
hipMemcpy(VoltageTrace[t], d_v, numNeurons * sizeof(float), hipMemcpyDeviceToHost);
}
/* Analyzing Run */
std::vector<std::vector<int>> Firings;
for (int t = 0; t < T; t++)
{
for (int n = 0; n < numNeurons; n++)
{
if (SpikeTrainYard[t][n] == true)
{
std::vector<int> v;
v.push_back(t);
v.push_back(n);
Firings.push_back(v);
}
}
}
std::cout << "There were " << Firings.size() << " firings." << std::endl;
/* Clean Up Code */
// hipDeviceReset releases all device allocations (no explicit hipFree).
hipDeviceReset();
for (int t = 0; t < T; t++)
{
delete[] SpikeTrainYard[t];
delete[] VoltageTrace[t];
}
delete[] h_v; delete[] h_u; delete[] h_I; delete[] h_cf; delete[] SpikeTrainYard; delete[] h_driven;
delete[] VoltageTrace;
return 0;
}
| d7fba32258967717557cc68b65337fe65acfefa4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <random>
#include <vector>
#include <ctime>
#include <iostream>
// One network timestep for all neurons, integrated as four forward-Euler
// substeps (factor 0.25 per substep) of an Izhikevich-style two-variable
// model.  Threads [0, numExcit) use excitatory parameters (spike when
// v > 35, reset v = -50, u += 100); threads [numExcit, numNeur) use
// inhibitory parameters (spike when v > 50, reset v = -56, u += 130).
// d_cf[i] records whether neuron i fired this step; d_I is the synaptic
// input accumulated by CommunicationPhase and is cleared here; d_driven
// is a constant external drive.
// NOTE(review): the excitatory branch clears d_I *inside* the substep
// loop (so synaptic input acts for the first substep only), while the
// inhibitory branch clears it *after* the loop.  This asymmetry looks
// unintentional -- confirm before changing.
__global__ void NeuronTimestep(
int numNeur,
int numExcit,
float *d_v,
float *d_u,
float *d_I,
bool *d_cf,
float *d_driven)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < numExcit)
{
d_cf[i] = false;
for (int dt = 0; dt < 4; dt++)
{
float dv = (0.7 * (d_v[i] + 60)*(d_v[i] + 40) - d_u[i] + d_I[i] + d_driven[i]) / 100;
float du = (0.03 * (-2*(d_v[i] + 60) - d_u[i]));
d_v[i] += 0.25*dv;
d_u[i] += 0.25*du;
if (d_v[i] > 35)
{
// Spike: flag, reset membrane potential, bump recovery variable.
d_cf[i] = true;
d_v[i] = -50;
d_u[i] += 100;
break;
}
d_I[i] = 0;
}
}
else if (i < numNeur)
{
d_cf[i] = false;
for (int dt = 0; dt < 4; dt++)
{
float dv = (1.2 * (d_v[i] + 75)*(d_v[i] + 45) - d_u[i] + d_I[i] + d_driven[i]) / 150;
float du = (0.01 * (5 * (d_v[i] + 75) - d_u[i]));
d_v[i] += 0.25*dv;
d_u[i] += 0.25*du;
if (d_v[i] > 50)
{
// Spike: flag, reset membrane potential, bump recovery variable.
d_cf[i] = true;
d_v[i] = -56;
d_u[i] += 130;
break;
}
}
d_I[i] = 0;
}
}
// Spike propagation: one thread per synapse (edge).  If the edge's source
// neuron fired this timestep (d_cf), its weight is accumulated into the
// target neuron's input current.  atomicAdd is required because many
// edges can share the same target.
__global__ void CommunicationPhase(
int numEdge,
bool *d_cf,
int *d_source,
int *d_target,
float *d_weight,
float *d_I)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < numEdge)
{
if (d_cf[d_source[i]])
{
atomicAdd(&d_I[d_target[i]], d_weight[i]);
}
}
}
/*
 * Pulse-coupled network driver: 1000 neurons (first 800 excitatory, last
 * 200 inhibitory), random directed graph with 20% connection density,
 * excitatory weights ~ U(0, 300), inhibitory weights ~ U(0, -400).  The
 * first 100 neurons get a constant driving current of 75.  The
 * simulation runs in three phases: 100 steps with no coupling
 * (equilibration), 300 coupled steps discarded as transient, then
 * T = 2000 recorded steps whose spike flags and voltages are copied back
 * each step.  Finally the total number of firings is printed.
 */
int main()
{
int numNeurons = 1000;
int numExcit = 800;
int T = 2000;
int equilizationTime = 100;
int transientTime = 300;
/* CUDA Parameters */
int numThreads = 512;
/* Neurons */
float *h_v, *d_v, *h_u, *d_u, *h_I, *d_I, *h_driven, *d_driven;
bool *d_cf, *h_cf;
h_v = new float[numNeurons];
h_u = new float[numNeurons];
h_I = new float[numNeurons];
h_cf = new bool[numNeurons];
h_driven = new float[numNeurons];
// Per-timestep records of spike flags and membrane voltages.
bool **SpikeTrainYard = new bool*[T];
float **VoltageTrace = new float *[T];
// NOTE(review): h_cf is initialized below but never copied to the device
// or read afterwards -- appears vestigial.
for (int i = 0; i < numNeurons; i++)
{
h_v[i] = -60;
h_u[i] = 0;
h_I[i] = 0;
h_cf[i] = false;
if (i < 100)
{
h_driven[i] = 75;
}
else
{
h_driven[i] = 0;
}
}
for (int t = 0; t < T; t++)
{
SpikeTrainYard[t] = new bool[numNeurons];
VoltageTrace[t] = new float[numNeurons];
}
/* Edges */
std::vector<int> h_source; int *d_source;
std::vector<int> h_target; int *d_target;
std::vector<float> h_weight; float *d_weight;
// Time-seeded RNG: each run builds a different random graph.
std::mt19937 rd(time(NULL));
std::uniform_real_distribution<float> dist(0.0, 1.0);
for (int n = 0; n < numNeurons; n++)
{
for (int m = 0; m < numNeurons; m++)
{
if (n != m)
{
if (dist(rd) < .2)
{
h_source.push_back(n);
h_target.push_back(m);
if (n < numExcit)
{
h_weight.push_back(dist(rd) * 300);
}
else
{
h_weight.push_back(dist(rd) * -400);
}
}
}
}
}
int numEdges = h_source.size();
/* CUDA Memory Functions */
cudaMalloc((void**)&d_v, numNeurons * sizeof(float));
cudaMalloc((void**)&d_u, numNeurons * sizeof(float));
cudaMalloc((void**)&d_I, numNeurons * sizeof(float));
cudaMalloc((void**)&d_driven, numNeurons * sizeof(float));
cudaMalloc((void**)&d_cf, numNeurons * sizeof(bool));
cudaMalloc((void**)&d_source, numEdges * sizeof(int));
cudaMalloc((void**)&d_target, numEdges * sizeof(int));
cudaMalloc((void**)&d_weight, numEdges * sizeof(float));
cudaMemcpy(d_v, h_v, numNeurons * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_u, h_u, numNeurons * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_I, h_I, numNeurons * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_driven, h_driven, numNeurons * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_source, h_source.data(), numEdges * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_target, h_target.data(), numEdges * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_weight, h_weight.data(), numEdges * sizeof(float), cudaMemcpyHostToDevice);
/* Run Simulation */
for (int t = 0; t < equilizationTime; t++)
{
/* Run Timesteps, No Communication */
NeuronTimestep <<<(numNeurons + numThreads - 1) / numThreads , numThreads >>>(
numNeurons,
numExcit,
d_v,
d_u,
d_I,
d_cf,
d_driven);
}
for (int t = 0; t < transientTime; t++)
{
/* Run Timesteps, Communication, No Writing */
NeuronTimestep << <(numNeurons + numThreads - 1) / numThreads, numThreads >> >(
numNeurons,
numExcit,
d_v,
d_u,
d_I,
d_cf,
d_driven);
CommunicationPhase << <(numEdges + numThreads - 1) / numThreads, numThreads >> >(
numEdges,
d_cf,
d_source,
d_target,
d_weight,
d_I);
}
for (int t = 0; t < T; t++)
{
/* Run Timesteps, Communication, Write Results*/
NeuronTimestep << <(numNeurons + numThreads - 1) / numThreads, numThreads >> >(
numNeurons,
numExcit,
d_v,
d_u,
d_I,
d_cf,
d_driven);
CommunicationPhase<<<(numEdges + numThreads - 1) / numThreads, numThreads>>>(
numEdges,
d_cf,
d_source,
d_target,
d_weight,
d_I);
// Blocking copies double as per-step synchronization points.
cudaMemcpy(SpikeTrainYard[t], d_cf, numNeurons * sizeof(bool), cudaMemcpyDeviceToHost);
cudaMemcpy(VoltageTrace[t], d_v, numNeurons * sizeof(float), cudaMemcpyDeviceToHost);
}
/* Analyzing Run */
std::vector<std::vector<int>> Firings;
for (int t = 0; t < T; t++)
{
for (int n = 0; n < numNeurons; n++)
{
if (SpikeTrainYard[t][n] == true)
{
std::vector<int> v;
v.push_back(t);
v.push_back(n);
Firings.push_back(v);
}
}
}
std::cout << "There were " << Firings.size() << " firings." << std::endl;
/* Clean Up Code */
// cudaDeviceReset releases all device allocations (no explicit cudaFree).
cudaDeviceReset();
for (int t = 0; t < T; t++)
{
delete[] SpikeTrainYard[t];
delete[] VoltageTrace[t];
}
delete[] h_v; delete[] h_u; delete[] h_I; delete[] h_cf; delete[] SpikeTrainYard; delete[] h_driven;
delete[] VoltageTrace;
return 0;
}
|
4779506cb73024574eac1949b55c39bfd0e7b486.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
// Scan, limited to 1 block, upto 1024 threads;
// Work-inefficient (Hillis-Steele) exclusive scan over a single block,
// using a double-buffered dynamic shared-memory array of 2*n unsigned
// ints (the caller must pass 2*n*sizeof(unsigned int) at launch).
// Limited to one block, so n is bounded by the per-block thread limit.
// NOTE(review): threads with thid >= n return before the __syncthreads()
// calls below; that is only safe because scan_small launches exactly n
// threads -- with blockDim.x > n the barriers would diverge.
__global__
void scan(unsigned int *g_odata, unsigned int *g_idata, int n) {
extern __shared__ unsigned int temp[]; // allocated on invocation
int thid = threadIdx.x;
// pout/pin select which half of temp is the current write/read buffer.
int pout = 0, pin = 1;
int Ndim=n;
// Load input into shared memory.
// This is exclusive scan, so shift right by one
// and set first element to 0
if(thid>=n)
return;
temp[pout*Ndim + thid] = (thid > 0) ? g_idata[thid-1] : 0; // Exclusive scan
// temp[pout*n + thid]=g_idata[thid]; // Inclusive
__syncthreads();
// log2(n) doubling passes: each adds the partial sum `offset` to the left.
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (thid >= offset)
temp[pout*Ndim+thid] = temp[pin*Ndim+thid] + temp[pin*Ndim+thid - offset]; // Code on CUDA tutorial page is Wrong!
else
temp[pout*Ndim+thid] = temp[pin*Ndim+thid];
__syncthreads();
}
g_odata[thid] = temp[pout*Ndim+thid]; // write output
}
// Host-side wrapper: runs the single-block exclusive scan kernel over N
// elements. Uses one block of N threads (so N must be <= 1024) and reserves
// two N-element shared-memory buffers for the kernel's double buffering.
void scan_small(unsigned int * d_cdf, unsigned int * d_input, int N){
const unsigned int bufBytes = N * sizeof(unsigned int);
const unsigned int smemBytes = 2 * bufBytes; // double buffer
hipLaunchKernelGGL(( scan), dim3(1), dim3(N), smemBytes, 0, d_cdf, d_input, N);
}
int main(){
const int N=100;
unsigned int sizeN=N*sizeof(unsigned int);
unsigned int *h_input=new unsigned int[N];
unsigned int *h_cdf=new unsigned int[N]();
for(int i=0;i<N;++i){
h_input[i]=1;
}
unsigned int * d_input, *d_cdf;
hipMalloc(&d_input, sizeN);
hipMalloc(&d_cdf, sizeN);
hipMemcpy(d_input,h_input,sizeN,hipMemcpyHostToDevice);
scan_small(d_cdf,d_input,N);
hipMemcpy(h_cdf,d_cdf,sizeN,hipMemcpyDeviceToHost);
unsigned int acc=0;
for(int i=0;i<N;++i){
printf("%u ", acc);
acc += h_input[i];
}
printf("\n");
for(int i=0;i<N;++i){
printf("%u ", h_cdf[i]);
}
hipFree(d_input); hipFree(d_cdf);
delete[] h_input; delete[] h_cdf;
} | 4779506cb73024574eac1949b55c39bfd0e7b486.cu | #include <iostream>
using namespace std;
// Scan, limited to 1 block, upto 1024 threads;
// Hillis–Steele exclusive prefix sum over a single block.
// Double-buffered in dynamic shared memory: the launch must supply
// 2*n*sizeof(unsigned int) bytes (two n-element buffers), as scan_small does.
// Single-block only, so n <= blockDim.x <= 1024.
__global__
void scan(unsigned int *g_odata, unsigned int *g_idata, int n) {
extern __shared__ unsigned int temp[]; // allocated on invocation
int thid = threadIdx.x;
int pout = 0, pin = 1; // which half of the double buffer is output/input
int Ndim=n;
// Load input into shared memory.
// This is exclusive scan, so shift right by one
// and set first element to 0
if(thid>=n)
return;
// NOTE(review): threads with thid >= n exit above, so when n < blockDim.x
// the __syncthreads() calls below run with a subset of the block —
// technically undefined; scan_small launches exactly n threads, so it
// does not hit this case. Confirm no other launch site exists.
temp[pout*Ndim + thid] = (thid > 0) ? g_idata[thid-1] : 0; // Exclusive scan
// temp[pout*n + thid]=g_idata[thid]; // Inclusive
__syncthreads();
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (thid >= offset)
temp[pout*Ndim+thid] = temp[pin*Ndim+thid] + temp[pin*Ndim+thid - offset]; // Code on CUDA tutorial page is Wrong!
else
temp[pout*Ndim+thid] = temp[pin*Ndim+thid];
__syncthreads();
}
g_odata[thid] = temp[pout*Ndim+thid]; // write output
}
// Host-side wrapper: runs the single-block exclusive scan kernel over N
// elements. Uses one block of N threads (so N must be <= 1024) and reserves
// two N-element shared-memory buffers for the kernel's double buffering.
void scan_small(unsigned int * d_cdf, unsigned int * d_input, int N){
const unsigned int bufBytes = N * sizeof(unsigned int);
const unsigned int smemBytes = 2 * bufBytes; // double buffer
scan<<<1, N, smemBytes>>>(d_cdf, d_input, N);
}
// Demo driver: exclusive scan of 100 ones, so the expected result is
// 0,1,2,...,99. Prints a CPU-computed reference line followed by the GPU
// result. NOTE(review): no CUDA error checking is performed anywhere.
int main(){
const int N=100;
unsigned int sizeN=N*sizeof(unsigned int);
unsigned int *h_input=new unsigned int[N];
unsigned int *h_cdf=new unsigned int[N](); // zero-initialized result buffer
for(int i=0;i<N;++i){
h_input[i]=1;
}
unsigned int * d_input, *d_cdf;
cudaMalloc(&d_input, sizeN);
cudaMalloc(&d_cdf, sizeN);
cudaMemcpy(d_input,h_input,sizeN,cudaMemcpyHostToDevice);
scan_small(d_cdf,d_input,N);
// Blocking cudaMemcpy also synchronizes with the kernel launched above.
cudaMemcpy(h_cdf,d_cdf,sizeN,cudaMemcpyDeviceToHost);
// CPU reference: print the exclusive prefix sum before each accumulate.
unsigned int acc=0;
for(int i=0;i<N;++i){
printf("%u ", acc);
acc += h_input[i];
}
printf("\n");
// GPU result, printed in the same format for visual comparison.
for(int i=0;i<N;++i){
printf("%u ", h_cdf[i]);
}
cudaFree(d_input); cudaFree(d_cdf);
delete[] h_input; delete[] h_cdf;
}
1b6562a60781c56a6d87a070d8ce5815239cdf74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Part3: implement the kernel
// Reverse an integer array in global memory.
// Block b reads its tile from the front of d_in and writes it, with the
// element order inside the tile mirrored, into the tile slot of block
// (gridDim.x - 1 - b) of d_out — so the whole array comes out reversed.
// No bounds check: assumes the array length is exactly
// gridDim.x * blockDim.x — TODO confirm at the launch site.
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
int inOffset = blockDim.x * blockIdx.x;
int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int in = inOffset + threadIdx.x;
int out = outOffset + (blockDim.x - 1 - threadIdx.x);
d_out[out] = d_in[in];
}
| 1b6562a60781c56a6d87a070d8ce5815239cdf74.cu |
// Part3: implement the kernel
// Reverse an integer array in global memory.
// Block b reads its tile from the front of d_in and writes it, with the
// element order inside the tile mirrored, into the tile slot of block
// (gridDim.x - 1 - b) of d_out — so the whole array comes out reversed.
// No bounds check: assumes the array length is exactly
// gridDim.x * blockDim.x — TODO confirm at the launch site.
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
int inOffset = blockDim.x * blockIdx.x;
int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int in = inOffset + threadIdx.x;
int out = outOffset + (blockDim.x - 1 - threadIdx.x);
d_out[out] = d_in[in];
}
|
778289719b293ac6f7582fc89f8006f0e0c9c135.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include "string.h"
#include "sobel.h"
#define DEFAULT_THRESHOLD 12000
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
// Read a binary (P6) PPM image.
// On success returns a malloc'd array of width*height*3 unsigned ints
// (R,G,B interleaved, one channel value per int) and fills xsize/ysize/
// maxval; returns NULL on any error. Caller frees the returned array.
unsigned int *read_ppm( char *filename, int & xsize, int & ysize, int & maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
int fd = open( filename, O_RDONLY);
if (fd == -1){
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
int num = read(fd, chars, 1000);
if (num < 0) {
close(fd);
return NULL; // read error
}
chars[num] = '\0'; // BUG FIX: strstr below requires a terminated buffer
if (chars[0] != 'P' || chars[1] != '6'){
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
close(fd); // BUG FIX: descriptor leaked on this path
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // skip "P6\n"
if (*ptr == '#'){ // comment line!
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
xsize = width;
ysize = height;
maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int) * 3);
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
close(fd); // BUG FIX: descriptor leaked on this path
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if (maxval > 255) bufsize *= 2; // 16-bit samples take two bytes each
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
free(pic); // BUG FIX: pic and fd leaked on this path
close(fd);
return NULL; // fail but return
}
// Locate the start of the pixel data by walking past the three header
// numbers (width, height, maxval) in the raw header bytes.
char duh[80];
char *line = chars;
sprintf(duh, "%d", xsize); // was "%d\0": the explicit \0 was redundant
line = strstr(line, duh);
line += strlen(duh) + 1;
sprintf(duh, "%d", ysize);
line = strstr(line, duh);
line += strlen(duh) + 1;
sprintf(duh, "%d", maxval);
line = strstr(line, duh);
line += strlen(duh) + 1;
long offset = line - chars;
lseek(fd, offset, SEEK_SET); // move to the correct offset
long numread = read(fd, buf, bufsize);
(void)numread; // short reads leave trailing pixels uninitialized (as before)
close(fd);
// Widen each byte into its own unsigned int slot.
int pixels = xsize * ysize * 3;
for (int i=0; i<pixels; i++) pic[i] = (int) buf[i];
free(buf); // BUG FIX: read buffer was leaked
return pic; // success
}
// Write an RGB image as a binary (P6) PPM file.
// `pic` holds one int per channel (R,G,B interleaved); only the low byte
// of each value is emitted. Exits the process if the file can't be opened.
// NOTE(review): mode is "w", not "wb" — fine on POSIX, would corrupt
// output on Windows; left as-is.
void write_ppm( const char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
fp = fopen(filename, "w");
if (!fp) {
// BUG FIX: the format string referenced %s but no argument was passed
// (undefined behavior); supply the filename.
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize * 3;
for (int i=0; i<numpix; i+=3) {
fprintf(fp, "%c%c%c", (unsigned char) pic[i], (unsigned char) pic[i+1], (unsigned char) pic[i+2]);
}
fclose(fp);
}
// Write a boolean mask as a binary (P6) PPM file: true -> white pixel,
// false -> black pixel. Exits the process if the file can't be opened.
void write_ppm_from_bools( const char *filename, int xsize, int ysize, int maxval, bool *pic)
{
FILE *fp;
fp = fopen(filename, "w");
if (!fp) {
// BUG FIX: the format string referenced %s but no argument was passed
// (undefined behavior); supply the filename.
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
int val = 0;
if(pic[i]){ val = 255; }
fprintf(fp, "%c%c%c", (unsigned char) val, (unsigned char) val, (unsigned char) val);
}
fclose(fp);
}
// Sobel edge detector over the green channel of an interleaved RGB image.
// One thread per pixel (2D grid); j is the byte column of the pixel's
// green sample (3*px + 1), so the +/-3 offsets step to the same channel of
// horizontally adjacent pixels. sum1/sum2 are the x and y gradients; a
// pixel is marked as an edge when the squared magnitude exceeds
// DEFAULT_THRESHOLD.
// NOTE(review): the 1-pixel border is skipped, leaving those isEdge
// entries unwritten (uninitialized) — confirm consumers tolerate this.
__global__ void sobel(unsigned int *pic, bool *isEdge, int xsize, int ysize){
int j = 3*(blockIdx.x*blockDim.x + threadIdx.x) + 1; // col
int i = blockIdx.y*blockDim.y + threadIdx.y; // row
int sum1, sum2, magnitude;
int thresh = DEFAULT_THRESHOLD;
if(i >= 1 && i < ysize-1 && j >= 1 && j < xsize*3-1){
sum1 = pic[ (i-1)*xsize*3 + j+3 ] - pic[ (i-1)*xsize*3 + j-3 ]
+ 2 * pic[ (i)*xsize*3 + j+3 ] - 2 * pic[ (i)*xsize*3 + j-3 ]
+ pic[ (i+1)*xsize*3 + j+3 ] - pic[ (i+1)*xsize*3 + j-3 ];
sum2 = pic[ (i-1)*xsize*3 + j-3 ] + 2 * pic[ (i-1)*xsize*3 + j] + pic[ (i-1)*xsize*3 + j+3 ]
- pic[ (i+1)*xsize*3 + j-3 ] - 2 * pic[ (i+1)*xsize*3 + j ] - pic[ (i+1)*xsize*3 + j+3 ];
magnitude = sum1*sum1 + sum2*sum2;
int offset = i*xsize + blockIdx.x*blockDim.x + threadIdx.x;
if (magnitude > thresh){
isEdge[offset] = true;
}
else {
isEdge[offset] = false;
}
}
}
// Per-pixel pass for one 16x16 tile, launched from gcount() via dynamic
// parallelism when the tile contains enough edge pixels to need
// pixel-level treatment. (startX, startY) is the tile origin; startX is
// already a channel column in the interleaved RGB buffer.
// Counts green-dominant pixels into *count, paints edge pixels white and
// green-dominant pixels dark red (r=140, g=b=0) in `result`; all other
// pixels keep their original color.
__global__ void gcount_perpixel(unsigned int *pic, bool *isEdge, int *result, int xsize, int ysize, int *count, int startX, int startY){
int col = startX + threadIdx.x*3;
int row = startY + threadIdx.y;
int offset = row*xsize*3 + col; // location of the red value (R,G,B interleaved)
if( col < xsize*3 && row < ysize){
int r = pic[offset];
int g = pic[offset+1];
int b = pic[offset+2];
int thresh = 10; // how much greener than red AND blue a pixel must be
if(g-thresh > r && g-thresh > b){
atomicAdd(count, 1);
r = 140;
b=g=0;
}
if(isEdge[row*xsize + col/3]){
result[offset] = 255;
result[offset+1] = 255;
result[offset+2] = 255;
}
else {
result[offset] = r;
result[offset+1] = g;
result[offset+2] = b;
}
}
}
__global__ void gcount(unsigned int *pic, bool *isEdge, int *result, int xsize, int ysize, int *count){
int cols = 16;
int rows = 16;
//TODO: I could speed this up by transposing the matrix and skipping unneeded rows
int col = cols*3*(blockIdx.x*blockDim.x + threadIdx.x); // col
int row = rows*(blockIdx.y*blockDim.y + threadIdx.y); // row
// get average color
float r = 0;
float g = 0;
float b = 0;
int edgeCount = 0;
for(int i = 0; i < cols*3; i+=3){
for(int j = 0; j < rows; j++){
int offset = (row+j)*xsize*3 + (col+i); // location of red value
if( col < xsize*3 && row < ysize){
r += pic[offset];
g += pic[offset+1];
b += pic[offset+2];
edgeCount += (int)isEdge[(row+j)*xsize + (col/3+i)];
}
}
}
if(edgeCount > 25){
dim3 grid(1, 1);
dim3 block(16, 16);
hipLaunchKernelGGL(( gcount_perpixel), dim3(grid), dim3(block), 0, 0, pic, isEdge, result, xsize, ysize, count, col, row);
} else {
r = r / (float)(cols*rows);
g = g / (float)(cols*rows);
b = b / (float)(cols*rows);
int thresh = 10;
if(g-thresh > r && g-thresh > b){
atomicAdd(count, cols*rows);
r=255;
b=g=0;
}
for(int i = 0; i < cols; i++){
for(int j = 0; j < rows; j++){
int offset = (row+j)*xsize*3 + (col+i*3); // location of red value
if( col < xsize*3 && row < ysize){
if(isEdge[(row+j)*xsize + (col/3+i)]){
result[offset] = 255;
result[offset+1] = 255;
result[offset+2] = 255;
}
else {
result[offset] = (int)r;
result[offset+1] = (int)g;
result[offset+2] = (int)b;
}
}
}
}
}
}
// Abort the program if the HIP runtime has a pending error; `task` is a
// human-readable label for the step being checked. Note that
// hipGetLastError() also clears the pending error state.
void checkCudaError(const char* task){
hipError_t err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Oops! (error code %s happened at \"%s\")!\n", hipGetErrorString(err), task);
exit(EXIT_FAILURE);
}
// fprintf(stderr, "Success! Completed \"%s\"!\n", task);
}
int main(int argc, char *argv[]){
char* filename = strdup( argv[1] );
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, xsize, ysize, maxval); // define variables and read in image file
// LOADING AND SETUP CODE ===================================================
int numbytes = xsize * ysize * 3 * sizeof( int ); // 3x because 3 floats for R, G, B channels
int numbools = xsize * ysize * sizeof( bool ); // edge detection boolean size
hipEvent_t start_event, stop_event; //
float elapsed_time_par;
unsigned int *d_pic = NULL; // pointer for device picture array
bool *isEdge = (bool *) malloc( numbools ); // host and device edge boolean array
bool *d_isEdge = NULL;
int *result = (int *) malloc( numbytes ); // host and device result image array
int *d_result = NULL;
// SEQUENTIAL SOBEL ===================================================
bool *seqIsEdge = (bool *) malloc( numbools );
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
hipEventRecord(start_event, 0);
for(int i = 0; i < ysize; i++){
for (int j = 0; j < xsize; j++){
int col = j*3 + 1;
int sum1, sum2, magnitude;
int thresh = DEFAULT_THRESHOLD;
if(i >= 1 && i < ysize-1 && col >= 1 && col < xsize*3-1){
sum1 = pic[ (i-1)*xsize*3 + col+3 ] - pic[ (i-1)*xsize*3 + col-3 ]
+ 2 * pic[ (i)*xsize*3 + col+3 ] - 2 * pic[ (i)*xsize*3 + col-3 ]
+ pic[ (i+1)*xsize*3 + col+3 ] - pic[ (i+1)*xsize*3 + col-3 ];
sum2 = pic[ (i-1)*xsize*3 + col-3 ] + 2 * pic[ (i-1)*xsize*3 + col] + pic[ (i-1)*xsize*3 + col+3 ]
- pic[ (i+1)*xsize*3 + col-3 ] - 2 * pic[ (i+1)*xsize*3 + col ] - pic[ (i+1)*xsize*3 + col+3 ];
magnitude = sum1*sum1 + sum2*sum2;
int offset = i*xsize + j;
if (magnitude > thresh){
seqIsEdge[offset] = true;
}
else {
seqIsEdge[offset] = false;
}
}
}
}
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Edge Detection Sequential Runtime: %f ms\n", elapsed_time_par);
hipMalloc((void **) &d_pic, numbytes); // allocate input image space on device
checkCudaError("allocate d_pic");
hipMemcpy(d_pic, pic, xsize * ysize * sizeof(unsigned int) * 3 , hipMemcpyHostToDevice); // copy input image to device
checkCudaError("copy d_pic");
hipMalloc((void **) &d_isEdge, numbools); // allocate isEdge space on device
checkCudaError("allocate d_isEdge");
dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grid(ceil(xsize/ (float)BLOCK_SIZE_X ), ceil(ysize/ (float)BLOCK_SIZE_Y ));
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
hipEventRecord(start_event, 0);
// Launch edge detection kernel function
// takes in pic array, returns boolean isEdge
hipLaunchKernelGGL(( sobel), dim3(grid), dim3(block), 0, 0, d_pic, d_isEdge, xsize, ysize);
checkCudaError("kernel launch");
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Edge Detection Parallel Runtime: %f ms\n", elapsed_time_par);
// GREEN PIXEL COUNTING CODE ================================================
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
hipEventRecord(start_event, 0);
int count = 0;
for(int i = 0; i < ysize; i++){
for (int j = 0; j < xsize; j++){
//TODO: I could speed this up by transposing the matrix and skipping unneeded rows
int col = j*3; // col
int row = i; // row
// get average color
float r = 0;
float g = 0;
float b = 0;
for(int i = 0; i < 3; i+=3){
for(int j = 0; j < 1; j++){
int offset = (row+j)*xsize*3 + (col+i); // location of red value
if( col < xsize*3 && row < ysize){
r += pic[offset];
g += pic[offset+1];
b += pic[offset+2];
}
}
}
int thresh = 10;
if(g-thresh > r && g-thresh > b){
count++;
r=255;
b=g=0;
}
for(int i = 0; i < 1; i++){
for(int j = 0; j < 1; j++){
int offset = (row+j)*xsize*3 + (col+i*3); // location of red value
if( col < xsize*3 && row < ysize){
if(isEdge[(row+j)*xsize + (col/3+i)]){
result[offset] = 255;
result[offset+1] = 255;
result[offset+2] = 255;
}
else {
result[offset] = (int)r;
result[offset+1] = (int)g;
result[offset+2] = (int)b;
}
}
}
}
}
}
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Pixel Counting Sequential Runtime: %f ms\n", elapsed_time_par);
hipMalloc((void **) &d_result, numbytes); // allocate result image space on device
checkCudaError("allocate d_result");
dim3 grid2(ceil((xsize/16)/ (float)BLOCK_SIZE_X ), ceil((ysize/16)/ (float)BLOCK_SIZE_Y ));
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
hipEventRecord(start_event, 0);
int *num_pix_found;
hipMallocManaged(&num_pix_found, 4); // allocate space for num_pix_found on device
*num_pix_found = 0;
// Launch pixel count kernel function
// takes in input pic array and boolean isEdge, returns num_pix_found and result image array
hipLaunchKernelGGL(( gcount), dim3(grid2), dim3(block), 0, 0, d_pic, d_isEdge, d_result, xsize, ysize, num_pix_found);
checkCudaError("kernel launch");
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Pixel Counting Parallel Runtime: %f ms\n", elapsed_time_par);
// fprintf(stderr, "Count Runtime: %f ms\n", elapsed_time_par);
hipMemcpy(result, d_result, numbytes, hipMemcpyDeviceToHost); // copy result image to host
checkCudaError("copy d_result");
fprintf(stderr, " file: %s, num_pix_found: %d, cm^2: %d\n",filename, *num_pix_found, *num_pix_found / 467); // there are 466.667 pixels per cm^2
hipFree(d_pic);
hipFree(d_isEdge);
hipFree(d_result);
write_ppm("sobel.ppm", xsize, ysize, 255, result); // write result image file
}
| 778289719b293ac6f7582fc89f8006f0e0c9c135.cu | #include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include "string.h"
#include "sobel.h"
#define DEFAULT_THRESHOLD 12000
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
// Read a binary (P6) PPM image.
// On success returns a malloc'd array of width*height*3 unsigned ints
// (R,G,B interleaved, one channel value per int) and fills xsize/ysize/
// maxval; returns NULL on any error. Caller frees the returned array.
unsigned int *read_ppm( char *filename, int & xsize, int & ysize, int & maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
int fd = open( filename, O_RDONLY);
if (fd == -1){
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
int num = read(fd, chars, 1000);
if (num < 0) {
close(fd);
return NULL; // read error
}
chars[num] = '\0'; // BUG FIX: strstr below requires a terminated buffer
if (chars[0] != 'P' || chars[1] != '6'){
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
close(fd); // BUG FIX: descriptor leaked on this path
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // skip "P6\n"
if (*ptr == '#'){ // comment line!
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
xsize = width;
ysize = height;
maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int) * 3);
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
close(fd); // BUG FIX: descriptor leaked on this path
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if (maxval > 255) bufsize *= 2; // 16-bit samples take two bytes each
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
free(pic); // BUG FIX: pic and fd leaked on this path
close(fd);
return NULL; // fail but return
}
// Locate the start of the pixel data by walking past the three header
// numbers (width, height, maxval) in the raw header bytes.
char duh[80];
char *line = chars;
sprintf(duh, "%d", xsize); // was "%d\0": the explicit \0 was redundant
line = strstr(line, duh);
line += strlen(duh) + 1;
sprintf(duh, "%d", ysize);
line = strstr(line, duh);
line += strlen(duh) + 1;
sprintf(duh, "%d", maxval);
line = strstr(line, duh);
line += strlen(duh) + 1;
long offset = line - chars;
lseek(fd, offset, SEEK_SET); // move to the correct offset
long numread = read(fd, buf, bufsize);
(void)numread; // short reads leave trailing pixels uninitialized (as before)
close(fd);
// Widen each byte into its own unsigned int slot.
int pixels = xsize * ysize * 3;
for (int i=0; i<pixels; i++) pic[i] = (int) buf[i];
free(buf); // BUG FIX: read buffer was leaked
return pic; // success
}
// Write an RGB image as a binary (P6) PPM file.
// `pic` holds one int per channel (R,G,B interleaved); only the low byte
// of each value is emitted. Exits the process if the file can't be opened.
// NOTE(review): mode is "w", not "wb" — fine on POSIX, would corrupt
// output on Windows; left as-is.
void write_ppm( const char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
fp = fopen(filename, "w");
if (!fp) {
// BUG FIX: the format string referenced %s but no argument was passed
// (undefined behavior); supply the filename.
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize * 3;
for (int i=0; i<numpix; i+=3) {
fprintf(fp, "%c%c%c", (unsigned char) pic[i], (unsigned char) pic[i+1], (unsigned char) pic[i+2]);
}
fclose(fp);
}
// Write a boolean mask as a binary (P6) PPM file: true -> white pixel,
// false -> black pixel. Exits the process if the file can't be opened.
void write_ppm_from_bools( const char *filename, int xsize, int ysize, int maxval, bool *pic)
{
FILE *fp;
fp = fopen(filename, "w");
if (!fp) {
// BUG FIX: the format string referenced %s but no argument was passed
// (undefined behavior); supply the filename.
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
int val = 0;
if(pic[i]){ val = 255; }
fprintf(fp, "%c%c%c", (unsigned char) val, (unsigned char) val, (unsigned char) val);
}
fclose(fp);
}
// Sobel edge detector over the green channel of an interleaved RGB image.
// One thread per pixel (2D grid); j is the byte column of the pixel's
// green sample (3*px + 1), so the +/-3 offsets step to the same channel of
// horizontally adjacent pixels. sum1/sum2 are the x and y gradients; a
// pixel is marked as an edge when the squared magnitude exceeds
// DEFAULT_THRESHOLD.
// NOTE(review): the 1-pixel border is skipped, leaving those isEdge
// entries unwritten (uninitialized) — confirm consumers tolerate this.
__global__ void sobel(unsigned int *pic, bool *isEdge, int xsize, int ysize){
int j = 3*(blockIdx.x*blockDim.x + threadIdx.x) + 1; // col
int i = blockIdx.y*blockDim.y + threadIdx.y; // row
int sum1, sum2, magnitude;
int thresh = DEFAULT_THRESHOLD;
if(i >= 1 && i < ysize-1 && j >= 1 && j < xsize*3-1){
sum1 = pic[ (i-1)*xsize*3 + j+3 ] - pic[ (i-1)*xsize*3 + j-3 ]
+ 2 * pic[ (i)*xsize*3 + j+3 ] - 2 * pic[ (i)*xsize*3 + j-3 ]
+ pic[ (i+1)*xsize*3 + j+3 ] - pic[ (i+1)*xsize*3 + j-3 ];
sum2 = pic[ (i-1)*xsize*3 + j-3 ] + 2 * pic[ (i-1)*xsize*3 + j] + pic[ (i-1)*xsize*3 + j+3 ]
- pic[ (i+1)*xsize*3 + j-3 ] - 2 * pic[ (i+1)*xsize*3 + j ] - pic[ (i+1)*xsize*3 + j+3 ];
magnitude = sum1*sum1 + sum2*sum2;
int offset = i*xsize + blockIdx.x*blockDim.x + threadIdx.x;
if (magnitude > thresh){
isEdge[offset] = true;
}
else {
isEdge[offset] = false;
}
}
}
// Per-pixel pass for one 16x16 tile, launched from gcount() via dynamic
// parallelism when the tile contains enough edge pixels to need
// pixel-level treatment. (startX, startY) is the tile origin; startX is
// already a channel column in the interleaved RGB buffer.
// Counts green-dominant pixels into *count, paints edge pixels white and
// green-dominant pixels dark red (r=140, g=b=0) in `result`; all other
// pixels keep their original color.
__global__ void gcount_perpixel(unsigned int *pic, bool *isEdge, int *result, int xsize, int ysize, int *count, int startX, int startY){
int col = startX + threadIdx.x*3;
int row = startY + threadIdx.y;
int offset = row*xsize*3 + col; // location of the red value (R,G,B interleaved)
if( col < xsize*3 && row < ysize){
int r = pic[offset];
int g = pic[offset+1];
int b = pic[offset+2];
int thresh = 10; // how much greener than red AND blue a pixel must be
if(g-thresh > r && g-thresh > b){
atomicAdd(count, 1);
r = 140;
b=g=0;
}
if(isEdge[row*xsize + col/3]){
result[offset] = 255;
result[offset+1] = 255;
result[offset+2] = 255;
}
else {
result[offset] = r;
result[offset+1] = g;
result[offset+2] = b;
}
}
}
__global__ void gcount(unsigned int *pic, bool *isEdge, int *result, int xsize, int ysize, int *count){
int cols = 16;
int rows = 16;
//TODO: I could speed this up by transposing the matrix and skipping unneeded rows
int col = cols*3*(blockIdx.x*blockDim.x + threadIdx.x); // col
int row = rows*(blockIdx.y*blockDim.y + threadIdx.y); // row
// get average color
float r = 0;
float g = 0;
float b = 0;
int edgeCount = 0;
for(int i = 0; i < cols*3; i+=3){
for(int j = 0; j < rows; j++){
int offset = (row+j)*xsize*3 + (col+i); // location of red value
if( col < xsize*3 && row < ysize){
r += pic[offset];
g += pic[offset+1];
b += pic[offset+2];
edgeCount += (int)isEdge[(row+j)*xsize + (col/3+i)];
}
}
}
if(edgeCount > 25){
dim3 grid(1, 1);
dim3 block(16, 16);
gcount_perpixel<<<grid, block>>>(pic, isEdge, result, xsize, ysize, count, col, row);
} else {
r = r / (float)(cols*rows);
g = g / (float)(cols*rows);
b = b / (float)(cols*rows);
int thresh = 10;
if(g-thresh > r && g-thresh > b){
atomicAdd(count, cols*rows);
r=255;
b=g=0;
}
for(int i = 0; i < cols; i++){
for(int j = 0; j < rows; j++){
int offset = (row+j)*xsize*3 + (col+i*3); // location of red value
if( col < xsize*3 && row < ysize){
if(isEdge[(row+j)*xsize + (col/3+i)]){
result[offset] = 255;
result[offset+1] = 255;
result[offset+2] = 255;
}
else {
result[offset] = (int)r;
result[offset+1] = (int)g;
result[offset+2] = (int)b;
}
}
}
}
}
}
// Abort the program if the CUDA runtime has recorded an error.
// `task` is a human-readable label included in the diagnostic.
// Note: cudaGetLastError() also clears the pending error state.
void checkCudaError(const char* task){
const cudaError_t status = cudaGetLastError();
if (status == cudaSuccess)
return; // nothing pending
fprintf(stderr, "Oops! (error code %s happened at \"%s\")!\n", cudaGetErrorString(status), task);
exit(EXIT_FAILURE);
}
int main(int argc, char *argv[]){
char* filename = strdup( argv[1] );
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, xsize, ysize, maxval); // define variables and read in image file
// LOADING AND SETUP CODE ===================================================
int numbytes = xsize * ysize * 3 * sizeof( int ); // 3x because 3 floats for R, G, B channels
int numbools = xsize * ysize * sizeof( bool ); // edge detection boolean size
cudaEvent_t start_event, stop_event; //
float elapsed_time_par;
unsigned int *d_pic = NULL; // pointer for device picture array
bool *isEdge = (bool *) malloc( numbools ); // host and device edge boolean array
bool *d_isEdge = NULL;
int *result = (int *) malloc( numbytes ); // host and device result image array
int *d_result = NULL;
// SEQUENTIAL SOBEL ===================================================
bool *seqIsEdge = (bool *) malloc( numbools );
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
for(int i = 0; i < ysize; i++){
for (int j = 0; j < xsize; j++){
int col = j*3 + 1;
int sum1, sum2, magnitude;
int thresh = DEFAULT_THRESHOLD;
if(i >= 1 && i < ysize-1 && col >= 1 && col < xsize*3-1){
sum1 = pic[ (i-1)*xsize*3 + col+3 ] - pic[ (i-1)*xsize*3 + col-3 ]
+ 2 * pic[ (i)*xsize*3 + col+3 ] - 2 * pic[ (i)*xsize*3 + col-3 ]
+ pic[ (i+1)*xsize*3 + col+3 ] - pic[ (i+1)*xsize*3 + col-3 ];
sum2 = pic[ (i-1)*xsize*3 + col-3 ] + 2 * pic[ (i-1)*xsize*3 + col] + pic[ (i-1)*xsize*3 + col+3 ]
- pic[ (i+1)*xsize*3 + col-3 ] - 2 * pic[ (i+1)*xsize*3 + col ] - pic[ (i+1)*xsize*3 + col+3 ];
magnitude = sum1*sum1 + sum2*sum2;
int offset = i*xsize + j;
if (magnitude > thresh){
seqIsEdge[offset] = true;
}
else {
seqIsEdge[offset] = false;
}
}
}
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Edge Detection Sequential Runtime: %f ms\n", elapsed_time_par);
cudaMalloc((void **) &d_pic, numbytes); // allocate input image space on device
checkCudaError("allocate d_pic");
cudaMemcpy(d_pic, pic, xsize * ysize * sizeof(unsigned int) * 3 , cudaMemcpyHostToDevice); // copy input image to device
checkCudaError("copy d_pic");
cudaMalloc((void **) &d_isEdge, numbools); // allocate isEdge space on device
checkCudaError("allocate d_isEdge");
dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grid(ceil(xsize/ (float)BLOCK_SIZE_X ), ceil(ysize/ (float)BLOCK_SIZE_Y ));
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
// Launch edge detection kernel function
// takes in pic array, returns boolean isEdge
sobel<<<grid, block>>>(d_pic, d_isEdge, xsize, ysize);
checkCudaError("kernel launch");
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Edge Detection Parallel Runtime: %f ms\n", elapsed_time_par);
// GREEN PIXEL COUNTING CODE ================================================
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
int count = 0;
for(int i = 0; i < ysize; i++){
for (int j = 0; j < xsize; j++){
//TODO: I could speed this up by transposing the matrix and skipping unneeded rows
int col = j*3; // col
int row = i; // row
// get average color
float r = 0;
float g = 0;
float b = 0;
for(int i = 0; i < 3; i+=3){
for(int j = 0; j < 1; j++){
int offset = (row+j)*xsize*3 + (col+i); // location of red value
if( col < xsize*3 && row < ysize){
r += pic[offset];
g += pic[offset+1];
b += pic[offset+2];
}
}
}
int thresh = 10;
if(g-thresh > r && g-thresh > b){
count++;
r=255;
b=g=0;
}
for(int i = 0; i < 1; i++){
for(int j = 0; j < 1; j++){
int offset = (row+j)*xsize*3 + (col+i*3); // location of red value
if( col < xsize*3 && row < ysize){
if(isEdge[(row+j)*xsize + (col/3+i)]){
result[offset] = 255;
result[offset+1] = 255;
result[offset+2] = 255;
}
else {
result[offset] = (int)r;
result[offset+1] = (int)g;
result[offset+2] = (int)b;
}
}
}
}
}
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Pixel Counting Sequential Runtime: %f ms\n", elapsed_time_par);
cudaMalloc((void **) &d_result, numbytes); // allocate result image space on device
checkCudaError("allocate d_result");
dim3 grid2(ceil((xsize/16)/ (float)BLOCK_SIZE_X ), ceil((ysize/16)/ (float)BLOCK_SIZE_Y ));
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
int *num_pix_found;
cudaMallocManaged(&num_pix_found, 4); // allocate space for num_pix_found on device
*num_pix_found = 0;
// Launch pixel count kernel function
// takes in input pic array and boolean isEdge, returns num_pix_found and result image array
gcount<<<grid2, block>>>(d_pic, d_isEdge, d_result, xsize, ysize, num_pix_found);
checkCudaError("kernel launch");
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_par, start_event, stop_event);
fprintf(stderr, " Pixel Counting Parallel Runtime: %f ms\n", elapsed_time_par);
// fprintf(stderr, "Count Runtime: %f ms\n", elapsed_time_par);
cudaMemcpy(result, d_result, numbytes, cudaMemcpyDeviceToHost); // copy result image to host
checkCudaError("copy d_result");
fprintf(stderr, " file: %s, num_pix_found: %d, cm^2: %d\n",filename, *num_pix_found, *num_pix_found / 467); // there are 466.667 pixels per cm^2
cudaFree(d_pic);
cudaFree(d_isEdge);
cudaFree(d_result);
write_ppm("sobel.ppm", xsize, ysize, 255, result); // write result image file
}
|
d0507a8605da24ce4cccf2fed8610c9c40c87bca.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) 2020 ByteDance Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <cmath>
#include <nestedtensor/csrc/cuda/attention.h>
#include <c10/util/Half.h>
namespace nteffectivetransformer {
namespace cuda {
// Reduce code comes from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/open_attention.cu#L29-L101
/**
* Multi-head attetion open sourced
*/
#define FINAL_MASK 0xffffffff
// Butterfly (XOR-shuffle) sum across the 32 lanes of a warp.
// After the loop every lane holds the full warp-wide sum.
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
// Two-stage reduction: each warp reduces via shuffles, lane 0 of every
// warp parks its partial sum in shared memory, then warp 0 reduces those
// partials. Requires blockDim.x <= 1024 (at most 32 warps).
// NOTE(review): `blockDim.x >> 5` truncates, so a trailing partial warp's
// contribution is dropped unless blockDim.x is a multiple of 32 — confirm
// all call sites launch multiple-of-32 block sizes.
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32]; // one partial per warp
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
// Lanes beyond the warp count contribute the additive identity 0.
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
// Butterfly (XOR-shuffle) max across the 32 lanes of a warp; every lane
// ends up holding the warp-wide maximum.
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
// Same two-stage scheme as blockReduceSum, with max instead of +.
// NOTE(review): lanes beyond the warp count inject 0 rather than the
// type's lowest value, so the result is clamped to >= 0 — correct only
// if inputs are known non-negative at every call site; verify.
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32]; // one partial max per warp
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get maxx in each warp
if(lane == 0) // record in-warp maxx by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : 0;
val = warpReduceMax(val);
return val;
}
// Flatten the 4-D index (id1, id2, id3, id4) into the TRANSPOSED layout
// [dim_1, dim_3, dim_2, dim_4] (axes 2 and 3 swapped) — used below to
// scatter [batch, seq, head, size] elements into [batch, head, seq, size]
// buffers. dim_1 is unused (it is the outermost extent).
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4,
int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) +
id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
/// ***************************** add bias & pad *****************************
// Fused bias-add + padding scatter for Q, K and V.
// Each block handles one valid (non-padded) word: blockIdx.x indexes the
// compacted word list, and batch_idx/word_idx map it back to its
// (batch, sequence-position) coordinates. Each thread handles one of the
// head_num*size_per_head elements of that word: it adds the per-column
// bias and writes the result into the padded, head-major
// [batch, head, seq, size] buffers q_buf_/k_buf_/v_buf_ via target_index.
// Launched by add_QKV_bias_padding_kernelLauncher with
// blockDim.x == head_num * size_per_head.
template<typename T>
__global__
void add_QKV_bias_padding(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = batch_idx[blockIdx.x]; // batch of this valid word
int seq_id = word_idx[blockIdx.x]; // position within its sequence
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id,
batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x; // bias is indexed by hidden column
T* src_ptr = (T*)Q;
T* dst_ptr = (T*)q_buf_;
const T* bias_ptr = (const T*)bias_Q;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
// Same element, K projection.
src_ptr = (T*)K;
dst_ptr = (T*)k_buf_;
bias_ptr = (const T*)bias_K;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
// Same element, V projection.
src_ptr = (T*)V;
dst_ptr = (T*)v_buf_;
bias_ptr = (const T*)bias_V;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
}
template<typename T>
void add_QKV_bias_padding_kernelLauncher(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream)
{
dim3 grid;
dim3 block;
grid.x = valid_word_num;
block.x = head_num * size_per_head;
hipLaunchKernelGGL(( add_QKV_bias_padding<float>), dim3(grid), dim3(block), 0, stream,
Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, batch_idx, word_idx);
}
template void add_QKV_bias_padding_kernelLauncher<float>(
float* Q, const float* bias_Q,
float* K, const float* bias_K,
float* V, const float* bias_V,
float* q_buf_, float* k_buf_, float* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream);
/// *********************************** fin ***********************************
/// ************************** softmax for attention **************************
// softmax kernel code is copied from
// https://raw.githubusercontent.com/NVIDIA/FasterTransformer/main/fastertransformer/cuda/attention_kernels.cu
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scalar)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len + 31)/32*32)
template <typename T>
__global__
void softmax_kernel_v3(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
float tmp = -1e20f;
int qk_offset;
__shared__ float s_mean, s_max;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = qk * static_cast<float>(scalar) + mask_val;
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(qk_tmp * s_mean);
}
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len/2 + 31)/32*32)
//seq_len % 2 == 0
template <>
__global__
void softmax_kernel_v3(half* qk_buf_, const half* attr_mask,
const int batch_size, const int head_num,
const int seq_len, const half scalar)
{
int threadIdx2 = threadIdx.x << 1;
bool qual = threadIdx2 < seq_len;
half2* qk_buf_half2Ptr = (half2*) qk_buf_;
const half2* attr_mask_half2Ptr = (const half2*) attr_mask;
__shared__ float s_mean, s_max;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
half2 tmp = __float2half2_rn(0.0f);
float max_val = -1e20f;
half2 qk;
if (qual){
qk_offset = ((((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len) >> 1) + threadIdx.x;
int mask_offset = (((blockIdx.y * seq_len + seq_id) * seq_len) >> 1) + threadIdx.x;
qk = qk_buf_half2Ptr[qk_offset];
half2 mask_val = __ldg(&attr_mask_half2Ptr[mask_offset]);
half2 mask_val_tmp = __hmul2(__hsub2(__float2half2_rn(1.0f), mask_val), __float2half2_rn(-10000.0f));
tmp = __hadd2(__hmul2(__half2half2(scalar), qk), mask_val_tmp);
max_val = fmax((float)c10::Half(tmp.x), (float)c10::Half(tmp.y));
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
if (qual){
tmp = h2exp(__hsub2(tmp, __float2half2_rn(s_max)));
}
float sum_val = blockDim.x <= 32 ? warpReduceSum((float)(c10::Half(tmp.x) + c10::Half(tmp.y))) : blockReduceSum<float>((float)(c10::Half(tmp.x) + c10::Half(tmp.y)));
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual){
qk = __hmul2(tmp, __float2half2_rn(s_mean));
qk_buf_half2Ptr[qk_offset] = qk;
}
}
}
template <typename T>
__global__
void softmax_kernel_v3_LE32(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
__shared__ float s_mean, s_max;
float tmp = -1e20f;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = static_cast<float>(qk) * static_cast<float>(scalar) + mask_val;
}
float max_val = warpReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = warpReduceSum<float>(tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(tmp * s_mean);
}
}
// Changed this align with prior API
// Renamed and switched head_num with seq_len
template<typename T>
void softmax_kernel_kernelLauncher(
T* buffer,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const T scalar,
hipStream_t stream)
{
dim3 grid, block;
//deal with odd seq_len
if (seq_len % 2 != 0){
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
hipLaunchKernelGGL(( softmax_kernel_v2<T>), dim3(grid), dim3(block), 0, stream, buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
else
{
grid.x = batch_size * head_num;
hipLaunchKernelGGL(( softmax_kernel<T>), dim3(grid), dim3(block), 0, stream, buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
//deal with even seq_len
else{
grid.x = seq_len;
if (batch_size * head_num > 360)
grid.x = ceil(float(seq_len)/32.0f);
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32){
block.x = 32;
hipLaunchKernelGGL(( softmax_kernel_v3_LE32<T>), dim3(grid), dim3(block), 0, stream, buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
if (sizeof(T) == 2){
// We should be able to only need have the blocks
// but there is a bug that is triggered if we use less.
// This requires a closer auditing of the kernel.
// block.x = (seq_len/2 + 31)/32*32;
block.x = (seq_len + 31)/32*32;
hipLaunchKernelGGL(( softmax_kernel_v3), dim3(grid), dim3(block), 0, stream, buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
block.x = (seq_len + 31)/32*32;
hipLaunchKernelGGL(( softmax_kernel_v3<T>), dim3(grid), dim3(block), 0, stream, buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
grid.x = grid.y = grid.z = 1;
}
}
template void softmax_kernel_kernelLauncher<float>(
float* qk_buf_, const float* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const float scaler,
const hipStream_t stream);
template void softmax_kernel_kernelLauncher<c10::Half>(
c10::Half* qk_buf_, const c10::Half* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const c10::Half scaler,
const hipStream_t stream);
/// *********************************** fin ***********************************
/// ****************** transpose & rm padding for attention *******************
template<typename T>
__global__
void transpose_rm_padding(
T* src, T* dst,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int head_id = threadIdx.y;
int tid = threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int word_id = word_idx[blockIdx.x];
int src_offset = batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head +
word_id * size_per_head +
tid;
int dst_offset = blockIdx.x * head_num * size_per_head +
head_id * size_per_head +
tid;
T* src_ptr = (T*)src;
T* dst_ptr = (T*)dst;
dst_ptr[dst_offset] = src_ptr[src_offset];
}
template <typename T>
void transpose_rm_padding_kernelLauncher(
T* src, T* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(size_per_head, head_num);
hipLaunchKernelGGL(( transpose_rm_padding<float>), dim3(grid), dim3(block), 0, stream,
src, dst,
batch_size, seq_len, head_num, size_per_head,
batch_idx, word_idx);
}
template void transpose_rm_padding_kernelLauncher<float>(
float* src, float* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream);
/// *********************************** fin ***********************************
}//namespace cuda
}//namespace nteffectivetransformer
| d0507a8605da24ce4cccf2fed8610c9c40c87bca.cu | /*
* Copyright (C) 2020 ByteDance Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cmath>
#include <nestedtensor/csrc/cuda/attention.h>
#include <c10/util/Half.h>
namespace nteffectivetransformer {
namespace cuda {
// Reduce code comes from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/open_attention.cu#L29-L101
/**
* Multi-head attetion open sourced
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get maxx in each warp
if(lane == 0) // record in-warp maxx by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : 0;
val = warpReduceMax(val);
return val;
}
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4,
int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) +
id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
/// ***************************** add bias & pad *****************************
template<typename T>
__global__
void add_QKV_bias_padding(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int seq_id = word_idx[blockIdx.x];
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id,
batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
T* src_ptr = (T*)Q;
T* dst_ptr = (T*)q_buf_;
const T* bias_ptr = (const T*)bias_Q;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
src_ptr = (T*)K;
dst_ptr = (T*)k_buf_;
bias_ptr = (const T*)bias_K;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
src_ptr = (T*)V;
dst_ptr = (T*)v_buf_;
bias_ptr = (const T*)bias_V;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
}
template<typename T>
void add_QKV_bias_padding_kernelLauncher(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream)
{
dim3 grid;
dim3 block;
grid.x = valid_word_num;
block.x = head_num * size_per_head;
add_QKV_bias_padding<float><<<grid, block, 0, stream>>>(
Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, batch_idx, word_idx);
}
template void add_QKV_bias_padding_kernelLauncher<float>(
float* Q, const float* bias_Q,
float* K, const float* bias_K,
float* V, const float* bias_V,
float* q_buf_, float* k_buf_, float* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream);
/// *********************************** fin ***********************************
/// ************************** softmax for attention **************************
// softmax kernel code is copied from
// https://raw.githubusercontent.com/NVIDIA/FasterTransformer/main/fastertransformer/cuda/attention_kernels.cu
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scalar)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len + 31)/32*32)
template <typename T>
__global__
void softmax_kernel_v3(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
float tmp = -1e20f;
int qk_offset;
__shared__ float s_mean, s_max;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = qk * static_cast<float>(scalar) + mask_val;
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(qk_tmp * s_mean);
}
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len/2 + 31)/32*32)
//seq_len % 2 == 0
template <>
__global__
void softmax_kernel_v3(half* qk_buf_, const half* attr_mask,
const int batch_size, const int head_num,
const int seq_len, const half scalar)
{
int threadIdx2 = threadIdx.x << 1;
bool qual = threadIdx2 < seq_len;
half2* qk_buf_half2Ptr = (half2*) qk_buf_;
const half2* attr_mask_half2Ptr = (const half2*) attr_mask;
__shared__ float s_mean, s_max;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
half2 tmp = __float2half2_rn(0.0f);
float max_val = -1e20f;
half2 qk;
if (qual){
qk_offset = ((((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len) >> 1) + threadIdx.x;
int mask_offset = (((blockIdx.y * seq_len + seq_id) * seq_len) >> 1) + threadIdx.x;
qk = qk_buf_half2Ptr[qk_offset];
half2 mask_val = __ldg(&attr_mask_half2Ptr[mask_offset]);
half2 mask_val_tmp = __hmul2(__hsub2(__float2half2_rn(1.0f), mask_val), __float2half2_rn(-10000.0f));
tmp = __hadd2(__hmul2(__half2half2(scalar), qk), mask_val_tmp);
max_val = fmax((float)c10::Half(tmp.x), (float)c10::Half(tmp.y));
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
if (qual){
tmp = h2exp(__hsub2(tmp, __float2half2_rn(s_max)));
}
float sum_val = blockDim.x <= 32 ? warpReduceSum((float)(c10::Half(tmp.x) + c10::Half(tmp.y))) : blockReduceSum<float>((float)(c10::Half(tmp.x) + c10::Half(tmp.y)));
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual){
qk = __hmul2(tmp, __float2half2_rn(s_mean));
qk_buf_half2Ptr[qk_offset] = qk;
}
}
}
template <typename T>
__global__
void softmax_kernel_v3_LE32(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
__shared__ float s_mean, s_max;
float tmp = -1e20f;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = static_cast<float>(qk) * static_cast<float>(scalar) + mask_val;
}
float max_val = warpReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = warpReduceSum<float>(tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(tmp * s_mean);
}
}
// Changed this align with prior API
// Renamed and switched head_num with seq_len
template<typename T>
void softmax_kernel_kernelLauncher(
T* buffer,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const T scalar,
cudaStream_t stream)
{
dim3 grid, block;
//deal with odd seq_len
if (seq_len % 2 != 0){
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
softmax_kernel_v2<T><<<grid, block, 0, stream>>>(buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
else
{
grid.x = batch_size * head_num;
softmax_kernel<T><<<grid, block, 0, stream>>>(buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
//deal with even seq_len
else{
grid.x = seq_len;
if (batch_size * head_num > 360)
grid.x = ceil(float(seq_len)/32.0f);
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32){
block.x = 32;
softmax_kernel_v3_LE32<T><<<grid, block, 0, stream>>>(buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
if (sizeof(T) == 2){
// We should be able to only need have the blocks
// but there is a bug that is triggered if we use less.
// This requires a closer auditing of the kernel.
// block.x = (seq_len/2 + 31)/32*32;
block.x = (seq_len + 31)/32*32;
softmax_kernel_v3<<<grid, block, 0, stream>>>(buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
block.x = (seq_len + 31)/32*32;
softmax_kernel_v3<T><<<grid, block, 0, stream>>>(buffer, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
grid.x = grid.y = grid.z = 1;
}
}
template void softmax_kernel_kernelLauncher<float>(
float* qk_buf_, const float* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const float scaler,
const cudaStream_t stream);
template void softmax_kernel_kernelLauncher<c10::Half>(
c10::Half* qk_buf_, const c10::Half* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const c10::Half scaler,
const cudaStream_t stream);
/// *********************************** fin ***********************************
/// ****************** transpose & rm padding for attention *******************
template<typename T>
__global__
void transpose_rm_padding(
T* src, T* dst,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int head_id = threadIdx.y;
int tid = threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int word_id = word_idx[blockIdx.x];
int src_offset = batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head +
word_id * size_per_head +
tid;
int dst_offset = blockIdx.x * head_num * size_per_head +
head_id * size_per_head +
tid;
T* src_ptr = (T*)src;
T* dst_ptr = (T*)dst;
dst_ptr[dst_offset] = src_ptr[src_offset];
}
template <typename T>
void transpose_rm_padding_kernelLauncher(
T* src, T* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(size_per_head, head_num);
transpose_rm_padding<float><<<grid, block, 0, stream>>>(
src, dst,
batch_size, seq_len, head_num, size_per_head,
batch_idx, word_idx);
}
template void transpose_rm_padding_kernelLauncher<float>(
float* src, float* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream);
/// *********************************** fin ***********************************
}//namespace cuda
}//namespace nteffectivetransformer
|
c0dc73a31d804315378cc60115bf661c79af9844.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void fp_maxpool(float* output, float* input, const int kernel_size, const int size, const int n_size, const int in_channel, bool SAME)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int totalPos = blockDim.x * gridDim.x;
const int N = kernel_size * kernel_size * n_size * n_size * in_channel; // total number of connections in this convolution
const int padding = (kernel_size - 1) / 2; // number of padding for both ends
int input_row, input_col;
// distribute certain number of connections to each thread regardless of detailed position and shape
for(int n = N * pos / totalPos; n < N * (pos+1) / totalPos; n++){
int idx = n;
const int i_kernel_row = ((idx /= 1 ) % kernel_size);
const int i_kernel_col = ((idx /= kernel_size ) % kernel_size);
const int i_channel = ((idx /= kernel_size ) % in_channel);
const int i_row = ((idx /= in_channel ) % n_size);
const int i_col = ((idx /= n_size ) % n_size);
float maxidx = (float)-100;
// corresponding position of the input matrix and size of output matrix
if (SAME){ // SAME padding scheme implemented
input_row = i_kernel_row + i_row - padding;
input_col = i_kernel_col + i_col - padding;
}
else{
input_row = i_kernel_row + i_row;
input_col = i_kernel_col + i_col;
}
if(input_row >= 0 && input_row < size && input_col >=0 && input_col < size){
if (input[((i_channel % in_channel) * size + input_col) * size + input_row] > maxidx)
output[((i_channel % in_channel) * n_size + i_col) * n_size + i_row] = input[((i_channel % in_channel) * size + input_col) * size + input_row];
}
}
} | c0dc73a31d804315378cc60115bf661c79af9844.cu | #include "includes.h"
__global__ void fp_maxpool(float* output, float* input, const int kernel_size, const int size, const int n_size, const int in_channel, bool SAME)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int totalPos = blockDim.x * gridDim.x;
const int N = kernel_size * kernel_size * n_size * n_size * in_channel; // total number of connections in this convolution
const int padding = (kernel_size - 1) / 2; // number of padding for both ends
int input_row, input_col;
// distribute certain number of connections to each thread regardless of detailed position and shape
for(int n = N * pos / totalPos; n < N * (pos+1) / totalPos; n++){
int idx = n;
const int i_kernel_row = ((idx /= 1 ) % kernel_size);
const int i_kernel_col = ((idx /= kernel_size ) % kernel_size);
const int i_channel = ((idx /= kernel_size ) % in_channel);
const int i_row = ((idx /= in_channel ) % n_size);
const int i_col = ((idx /= n_size ) % n_size);
float maxidx = (float)-100;
// corresponding position of the input matrix and size of output matrix
if (SAME){ // SAME padding scheme implemented
input_row = i_kernel_row + i_row - padding;
input_col = i_kernel_col + i_col - padding;
}
else{
input_row = i_kernel_row + i_row;
input_col = i_kernel_col + i_col;
}
if(input_row >= 0 && input_row < size && input_col >=0 && input_col < size){
if (input[((i_channel % in_channel) * size + input_col) * size + input_row] > maxidx)
output[((i_channel % in_channel) * n_size + i_col) * n_size + i_row] = input[((i_channel % in_channel) * size + input_col) * size + input_row];
}
}
} |
66a0f4359a7b9e49efcb3f2f6a4de00b4b46530e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inference/embedding_feature_combiner.hpp>
#include <utils.cuh>
#include <utils.hpp>
#include <algorithm>
#include <functional>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
template <typename TypeEmbedding>
// Combines the embedding vectors of each (sample, slot) pair into a single
// output vector, using Sum or Mean as selected by combiner_type.
// Launch geometry: one 1-D block per sample, one thread per vector element.
// row_ptrs is a CSR-style offset array with batch_size*slot_num+1 entries.
__global__ void embedding_feature_combine_kernel(const float* input, TypeEmbedding* output, const int* row_ptrs,
                                                 int batch_size, int slot_num, int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto &blk = cooperative_groups::this_thread_block();
  const int sample = blk.group_index().x;   // one block partition per sample
  const int elem = blk.thread_rank();       // one thread per vector element
  if (sample >= batch_size || elem >= embedding_vec_size) return;
  for (int slot = 0; slot < slot_num; ++slot) {
    const int row = sample * slot_num + slot;
    const int begin = row_ptrs[row];                 // row offset within input
    const int count = row_ptrs[row + 1] - begin;     // feature vectors in this slot
    float acc = 0.0f;
    // reduce all feature vectors belonging to this slot
    for (int f = 0; f < count; ++f) {
      acc += input[(begin + f) * embedding_vec_size + elem];
    }
    if (combiner_type == EmbeddingFeatureCombiner_t::Mean && count > 1) {
      acc /= count;
    }
    output[row * embedding_vec_size + elem] = acc;
  }
}
template <>
// __half specialization: accumulates in fp32 for precision and converts the
// combined value to half exactly once on store.
// Launch geometry: one 1-D block per sample, one thread per vector element.
__global__ void embedding_feature_combine_kernel(const float* input, __half* output, const int* row_ptrs,
                                                 int batch_size, int slot_num, int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto &blk = cooperative_groups::this_thread_block();
  const int sample = blk.group_index().x;   // one block partition per sample
  const int elem = blk.thread_rank();       // one thread per vector element
  if (sample >= batch_size || elem >= embedding_vec_size) return;
  for (int slot = 0; slot < slot_num; ++slot) {
    const int row = sample * slot_num + slot;
    const int begin = row_ptrs[row];                 // row offset within input
    const int count = row_ptrs[row + 1] - begin;     // feature vectors in this slot
    float acc = 0.0f;
    // reduce all feature vectors belonging to this slot
    for (int f = 0; f < count; ++f) {
      acc += input[(begin + f) * embedding_vec_size + elem];
    }
    if (combiner_type == EmbeddingFeatureCombiner_t::Mean && count > 1) {
      acc /= count;
    }
    output[row * embedding_vec_size + elem] = __float2half(acc);
  }
}
// Tiled variant for small embedding vectors: the block is partitioned into
// TileSize-wide tiles and each tile handles one sample, so several samples
// share one block. The grid size chosen by the launcher must satisfy
// gridDim.x * (blockDim.x / TileSize) >= batch_size.
template <typename TypeEmbedding, int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, TypeEmbedding* output, const int* row_ptrs,
                                                       int batch_size, int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto &block = cooperative_groups::this_thread_block();
  const auto &tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  // (global sample index = block index * tiles-per-block + tile rank in block)
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index]; // row offset within input
      int feature_num = row_ptrs[feature_row_index+1] - row_offset; // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j)*embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index*embedding_vec_size + tid] = tmp;
    } // end for
  } // end if
}
// __half specialization of the tiled kernel: identical tiling scheme, but
// accumulates in fp32 and converts to half once on the final store.
template <int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, __half* output, const int* row_ptrs,
                                                       int batch_size, int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto &block = cooperative_groups::this_thread_block();
  const auto &tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index]; // row offset within input
      int feature_num = row_ptrs[feature_row_index+1] - row_offset; // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j)*embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index*embedding_vec_size + tid] = __float2half(tmp);
    } // end for
  } // end if
}
// Picks a launch configuration based on embedding_vec_size. Small vectors
// (<= 32) use the tiled kernel with tile width = smallest power of two that
// covers the vector, packing 64/TileSize samples per block; the grid divisor
// (32, 16, 8, 4, 2) matches that tiles-per-block count. Larger vectors get
// one block per sample with one thread per element.
// NOTE(review): the final branch assumes embedding_vec_size does not exceed
// the device's max threads per block — confirm upstream validation.
template <typename TypeEmbedding>
void launch_embedding_feature_combine_kernel(const float* input, TypeEmbedding* output, const int* row_ptrs,
                                             int batch_size, int slot_num, int embedding_vec_size,
                                             EmbeddingFeatureCombiner_t combiner_type, hipStream_t stream) {
  if (embedding_vec_size <= 2) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 2>)
        , dim3((batch_size - 1) / 32 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size,
        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 4) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 4>)
        , dim3((batch_size - 1) / 16 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size,
        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 8) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 8>)
        , dim3((batch_size - 1) / 8 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size,
        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 16) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 16>)
        , dim3((batch_size - 1) / 4 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size,
        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 32) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 32>)
        , dim3((batch_size - 1) / 2 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size,
        slot_num, embedding_vec_size, combiner_type);
  } else {
    // each thread corresponds to one element in an embedding vector
    hipLaunchKernelGGL(( embedding_feature_combine_kernel), dim3(batch_size), dim3(embedding_vec_size), 0, stream, input, output, row_ptrs, batch_size, slot_num, embedding_vec_size, combiner_type);
  }
}
} // end of namespace
/**
 * Constructs the combiner layer: validates the input/row-pointer tensor
 * shapes, reserves the (batch_size, slot_num, embedding_vec_size) output
 * tensor on blobs_buff, and records the tensors used later by fprop().
 *
 * Throws Error_t::WrongInput when the 2-D input or the 1-D CSR-style row
 * pointer tensor (batch_size * slot_num + 1 entries) has the wrong shape.
 */
template <typename TypeEmbedding>
EmbeddingFeatureCombiner<TypeEmbedding>::EmbeddingFeatureCombiner(const std::shared_ptr<Tensor2<float>>& in_tensor,
               const std::shared_ptr<Tensor2<int>>& row_ptrs_tensor,
               Tensor2<TypeEmbedding>& out_tensor, int batch_size, int slot_num, EmbeddingFeatureCombiner_t combiner_type,
               const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
               const std::shared_ptr<GPUResource>& gpu_resource)
    : Layer(gpu_resource), slot_num_(slot_num), batch_size_(batch_size), combiner_type_(combiner_type) {
  try {
    // error input checking
    const auto& in_dims = in_tensor->get_dimensions();
    const auto& row_ptrs_dims = row_ptrs_tensor->get_dimensions();
    if ((int)in_dims.size() != 2)
      CK_THROW_(Error_t::WrongInput, "The input tensor must be 2D");
    for (auto i : in_dims) {
      if (i == 0) {
        CK_THROW_(Error_t::WrongInput, "The input dims can not be 0");
      }
    }
    if ((int)row_ptrs_dims.size() != 1)
      CK_THROW_(Error_t::WrongInput, "The row pointers tensor must be 1D");
    if ((int)row_ptrs_dims[0] != batch_size * slot_num + 1)
      CK_THROW_(Error_t::WrongInput, "The dimension of row pointers tensor mismatch number of samples");
    // second input dimension is the embedding vector width
    embedding_vec_size_ = in_dims[1];
    std::vector<size_t> out_dims {static_cast<size_t>(batch_size_), static_cast<size_t>(slot_num_), static_cast<size_t>(embedding_vec_size_)};
    blobs_buff->reserve(out_dims, &out_tensor);
    out_tensors_.push_back(out_tensor);
    in_tensors_.push_back(in_tensor);
    row_ptrs_tensors_.push_back(row_ptrs_tensor);
    // removed unused debug locals iptr1/iptr2 (dead leftovers)
  } catch (const std::runtime_error& rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
/**
 * Inference-only forward pass: combines per-slot embedding features on this
 * layer's GPU stream. Throws Error_t::IllegalCall when invoked in training
 * mode. In debug builds (!NDEBUG) synchronizes and surfaces kernel errors.
 */
template <typename TypeEmbedding>
void EmbeddingFeatureCombiner<TypeEmbedding>::fprop(bool is_train) {
  if (is_train)
    CK_THROW_(Error_t::IllegalCall, "The fprop() of EmbeddingFeatureCombiner should only be used for inference");
  CudaDeviceContext context(get_device_id());
  float* input = in_tensors_[0]->get_ptr();
  TypeEmbedding* output = out_tensors_[0].get_ptr();
  int* row_ptrs = row_ptrs_tensors_[0]->get_ptr();
  // removed unused locals in_dims/out_dims (never read)
  launch_embedding_feature_combine_kernel(input, output, row_ptrs, batch_size_, slot_num_, embedding_vec_size_, combiner_type_, get_gpu().get_stream());
#ifndef NDEBUG
  hipDeviceSynchronize();
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
template class EmbeddingFeatureCombiner<float>;
template class EmbeddingFeatureCombiner<__half>;
} // namespace HugeCTR
| 66a0f4359a7b9e49efcb3f2f6a4de00b4b46530e.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inference/embedding_feature_combiner.hpp>
#include <utils.cuh>
#include <utils.hpp>
#include <algorithm>
#include <functional>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
template <typename TypeEmbedding>
// Combines the embedding vectors of each (sample, slot) pair into a single
// output vector, using Sum or Mean as selected by combiner_type.
// Launch geometry: one 1-D block per sample, one thread per vector element.
// row_ptrs is a CSR-style offset array with batch_size*slot_num+1 entries.
__global__ void embedding_feature_combine_kernel(const float* input, TypeEmbedding* output, const int* row_ptrs,
                                                 int batch_size, int slot_num, int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto &blk = cooperative_groups::this_thread_block();
  const int sample = blk.group_index().x;   // one block partition per sample
  const int elem = blk.thread_rank();       // one thread per vector element
  if (sample >= batch_size || elem >= embedding_vec_size) return;
  for (int slot = 0; slot < slot_num; ++slot) {
    const int row = sample * slot_num + slot;
    const int begin = row_ptrs[row];                 // row offset within input
    const int count = row_ptrs[row + 1] - begin;     // feature vectors in this slot
    float acc = 0.0f;
    // reduce all feature vectors belonging to this slot
    for (int f = 0; f < count; ++f) {
      acc += input[(begin + f) * embedding_vec_size + elem];
    }
    if (combiner_type == EmbeddingFeatureCombiner_t::Mean && count > 1) {
      acc /= count;
    }
    output[row * embedding_vec_size + elem] = acc;
  }
}
template <>
// __half specialization: accumulates in fp32 for precision and converts the
// combined value to half exactly once on store.
// Launch geometry: one 1-D block per sample, one thread per vector element.
__global__ void embedding_feature_combine_kernel(const float* input, __half* output, const int* row_ptrs,
                                                 int batch_size, int slot_num, int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto &blk = cooperative_groups::this_thread_block();
  const int sample = blk.group_index().x;   // one block partition per sample
  const int elem = blk.thread_rank();       // one thread per vector element
  if (sample >= batch_size || elem >= embedding_vec_size) return;
  for (int slot = 0; slot < slot_num; ++slot) {
    const int row = sample * slot_num + slot;
    const int begin = row_ptrs[row];                 // row offset within input
    const int count = row_ptrs[row + 1] - begin;     // feature vectors in this slot
    float acc = 0.0f;
    // reduce all feature vectors belonging to this slot
    for (int f = 0; f < count; ++f) {
      acc += input[(begin + f) * embedding_vec_size + elem];
    }
    if (combiner_type == EmbeddingFeatureCombiner_t::Mean && count > 1) {
      acc /= count;
    }
    output[row * embedding_vec_size + elem] = __float2half(acc);
  }
}
// Tiled variant for small embedding vectors: the block is partitioned into
// TileSize-wide tiles and each tile handles one sample, so several samples
// share one block. The grid size chosen by the launcher must satisfy
// gridDim.x * (blockDim.x / TileSize) >= batch_size.
template <typename TypeEmbedding, int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, TypeEmbedding* output, const int* row_ptrs,
                                                       int batch_size, int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto &block = cooperative_groups::this_thread_block();
  const auto &tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  // (global sample index = block index * tiles-per-block + tile rank in block)
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index]; // row offset within input
      int feature_num = row_ptrs[feature_row_index+1] - row_offset; // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j)*embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index*embedding_vec_size + tid] = tmp;
    } // end for
  } // end if
}
// __half specialization of the tiled kernel: identical tiling scheme, but
// accumulates in fp32 and converts to half once on the final store.
template <int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, __half* output, const int* row_ptrs,
                                                       int batch_size, int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto &block = cooperative_groups::this_thread_block();
  const auto &tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index]; // row offset within input
      int feature_num = row_ptrs[feature_row_index+1] - row_offset; // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j)*embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index*embedding_vec_size + tid] = __float2half(tmp);
    } // end for
  } // end if
}
// Picks a launch configuration based on embedding_vec_size. Small vectors
// (<= 32) use the tiled kernel with tile width = smallest power of two that
// covers the vector, packing 64/TileSize samples per block; the grid divisor
// (32, 16, 8, 4, 2) matches that tiles-per-block count. Larger vectors get
// one block per sample with one thread per element.
// NOTE(review): the final branch assumes embedding_vec_size does not exceed
// the device's max threads per block — confirm upstream validation.
template <typename TypeEmbedding>
void launch_embedding_feature_combine_kernel(const float* input, TypeEmbedding* output, const int* row_ptrs,
                                             int batch_size, int slot_num, int embedding_vec_size,
                                             EmbeddingFeatureCombiner_t combiner_type, cudaStream_t stream) {
  if (embedding_vec_size <= 2) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 2>
        <<< (batch_size - 1) / 32 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size,
                                                        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 4) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 4>
        <<< (batch_size - 1) / 16 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size,
                                                        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 8) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 8>
        <<< (batch_size - 1) / 8 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size,
                                                       slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 16) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 16>
        <<< (batch_size - 1) / 4 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size,
                                                       slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 32) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 32>
        <<< (batch_size - 1) / 2 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size,
                                                       slot_num, embedding_vec_size, combiner_type);
  } else {
    // each thread corresponds to one element in an embedding vector
    embedding_feature_combine_kernel<<<batch_size, embedding_vec_size, 0, stream>>>(input, output, row_ptrs, batch_size, slot_num, embedding_vec_size, combiner_type);
  }
}
} // end of namespace
/**
 * Constructs the combiner layer: validates the input/row-pointer tensor
 * shapes, reserves the (batch_size, slot_num, embedding_vec_size) output
 * tensor on blobs_buff, and records the tensors used later by fprop().
 *
 * Throws Error_t::WrongInput when the 2-D input or the 1-D CSR-style row
 * pointer tensor (batch_size * slot_num + 1 entries) has the wrong shape.
 */
template <typename TypeEmbedding>
EmbeddingFeatureCombiner<TypeEmbedding>::EmbeddingFeatureCombiner(const std::shared_ptr<Tensor2<float>>& in_tensor,
               const std::shared_ptr<Tensor2<int>>& row_ptrs_tensor,
               Tensor2<TypeEmbedding>& out_tensor, int batch_size, int slot_num, EmbeddingFeatureCombiner_t combiner_type,
               const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
               const std::shared_ptr<GPUResource>& gpu_resource)
    : Layer(gpu_resource), slot_num_(slot_num), batch_size_(batch_size), combiner_type_(combiner_type) {
  try {
    // error input checking
    const auto& in_dims = in_tensor->get_dimensions();
    const auto& row_ptrs_dims = row_ptrs_tensor->get_dimensions();
    if ((int)in_dims.size() != 2)
      CK_THROW_(Error_t::WrongInput, "The input tensor must be 2D");
    for (auto i : in_dims) {
      if (i == 0) {
        CK_THROW_(Error_t::WrongInput, "The input dims can not be 0");
      }
    }
    if ((int)row_ptrs_dims.size() != 1)
      CK_THROW_(Error_t::WrongInput, "The row pointers tensor must be 1D");
    if ((int)row_ptrs_dims[0] != batch_size * slot_num + 1)
      CK_THROW_(Error_t::WrongInput, "The dimension of row pointers tensor mismatch number of samples");
    // second input dimension is the embedding vector width
    embedding_vec_size_ = in_dims[1];
    std::vector<size_t> out_dims {static_cast<size_t>(batch_size_), static_cast<size_t>(slot_num_), static_cast<size_t>(embedding_vec_size_)};
    blobs_buff->reserve(out_dims, &out_tensor);
    out_tensors_.push_back(out_tensor);
    in_tensors_.push_back(in_tensor);
    row_ptrs_tensors_.push_back(row_ptrs_tensor);
    // removed unused debug locals iptr1/iptr2 (dead leftovers)
  } catch (const std::runtime_error& rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
/**
 * Inference-only forward pass: combines per-slot embedding features on this
 * layer's GPU stream. Throws Error_t::IllegalCall when invoked in training
 * mode. In debug builds (!NDEBUG) synchronizes and surfaces kernel errors.
 */
template <typename TypeEmbedding>
void EmbeddingFeatureCombiner<TypeEmbedding>::fprop(bool is_train) {
  if (is_train)
    CK_THROW_(Error_t::IllegalCall, "The fprop() of EmbeddingFeatureCombiner should only be used for inference");
  CudaDeviceContext context(get_device_id());
  float* input = in_tensors_[0]->get_ptr();
  TypeEmbedding* output = out_tensors_[0].get_ptr();
  int* row_ptrs = row_ptrs_tensors_[0]->get_ptr();
  // removed unused locals in_dims/out_dims (never read)
  launch_embedding_feature_combine_kernel(input, output, row_ptrs, batch_size_, slot_num_, embedding_vec_size_, combiner_type_, get_gpu().get_stream());
#ifndef NDEBUG
  cudaDeviceSynchronize();
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template class EmbeddingFeatureCombiner<float>;
template class EmbeddingFeatureCombiner<__half>;
} // namespace HugeCTR
|
ce3560c0203a2c250e6cec72ec790ab53d471d67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "linalg/batched/gemv.cuh"
#include "random/rng.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchGemvInputs {
T tolerance;
int m, n, batchSize;
unsigned long long int seed;
};
// Streams the test parameters so gtest failure messages identify which
// parameterized case failed (previously this printed nothing, defeating
// the purpose of providing a printer).
template <typename T, typename IdxType = int>
::std::ostream &operator<<(::std::ostream &os, const BatchGemvInputs<T> &dims) {
  os << "{tolerance=" << dims.tolerance << ", m=" << dims.m << ", n=" << dims.n
     << ", batchSize=" << dims.batchSize << ", seed=" << dims.seed << "}";
  return os;
}
// Naive batched GEMV reference: accumulates A[batch](row,col) * x[batch](col)
// into y[batch*m + row] via atomicAdd.
// Expected launch: grid = (m, batchSize), 1-D block with >= n threads;
// y must be zero-initialized before launch (caller memsets it).
template <typename Type>
__global__ void naiveBatchGemvKernel(Type *y, const Type *A, const Type *x,
                                     int m, int n) {
  int batch = blockIdx.y;
  int row = blockIdx.x;
  int col = threadIdx.x;
  if (row < m && col < n) {
    auto prod = A[batch * m * n + row * n + col] * x[batch * n + col];
    atomicAdd(y + batch * m + row, prod);
  }
}
// Host launcher for the naive batched GEMV reference: one block per
// (row, batch) pair, block sized to the next multiple of WarpSize
// covering n columns.
template <typename Type>
void naiveBatchGemv(Type *y, const Type *A, const Type *x, int m, int n,
                    int batchSize, hipStream_t stream) {
  // BUGFIX: TPB was declared 'static', so it was computed from the first
  // call's n only; a later call with a larger n would launch too few
  // threads and silently leave the remaining columns unaccumulated.
  int TPB = ceildiv(n, WarpSize) * WarpSize;
  dim3 nblks(m, batchSize);
  hipLaunchKernelGGL(( naiveBatchGemvKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, y, A, x, m, n);
  CUDA_CHECK(hipPeekAtLastError());
}
// Parameterized fixture comparing the batched gemv under test against the
// naive atomicAdd-based reference.
template <typename T>
class BatchGemvTest : public ::testing::TestWithParam<BatchGemvInputs<T>> {
 protected:
  // Allocates device buffers, fills A and x with uniform random values in
  // [-1, 1], zeroes out_ref (required by the atomicAdd reference kernel),
  // then computes both the reference (out_ref) and the tested (out) results.
  void SetUp() override {
    params = ::testing::TestWithParam<BatchGemvInputs<T>>::GetParam();
    Random::Rng r(params.seed);
    int len = params.batchSize * params.m * params.n;
    int vecleny = params.batchSize * params.m;
    int veclenx = params.batchSize * params.n;
    CUDA_CHECK(hipStreamCreate(&stream));
    allocate(A, len);
    allocate(x, veclenx);
    allocate(out_ref, vecleny);
    allocate(out, vecleny);
    r.uniform(A, len, T(-1.0), T(1.0), stream);
    r.uniform(x, veclenx, T(-1.0), T(1.0), stream);
    CUDA_CHECK(hipMemsetAsync(out_ref, 0, sizeof(T) * vecleny, stream));
    naiveBatchGemv(out_ref, A, x, params.m, params.n, params.batchSize, stream);
    gemv<T, int>(out, A, x, nullptr, T(1.0), T(0.0), params.m, params.n,
                 params.batchSize, stream);
    CUDA_CHECK(hipStreamSynchronize(stream));
  }
  // Releases all device buffers and the stream created in SetUp.
  void TearDown() override {
    CUDA_CHECK(hipFree(A));
    CUDA_CHECK(hipFree(x));
    CUDA_CHECK(hipFree(out_ref));
    CUDA_CHECK(hipFree(out));
    CUDA_CHECK(hipStreamSynchronize(stream));
    CUDA_CHECK(hipStreamDestroy(stream));
  }
 protected:
  hipStream_t stream;
  BatchGemvInputs<T> params;   // current test-case parameters
  T *A, *x, *out_ref, *out;    // device buffers (matrices, input, results)
};
// Float cases sweep m and n around the warp boundary (125/126/128) to
// exercise partial-warp tails; batchSize is 32 throughout.
const std::vector<BatchGemvInputs<float>> inputsf = {
  {0.005f, 128, 128, 32, 1234ULL}, {0.005f, 128, 126, 32, 1234ULL},
  {0.005f, 128, 125, 32, 1234ULL}, {0.005f, 126, 128, 32, 1234ULL},
  {0.005f, 126, 126, 32, 1234ULL}, {0.005f, 126, 125, 32, 1234ULL},
  {0.005f, 125, 128, 32, 1234ULL}, {0.005f, 125, 126, 32, 1234ULL},
  {0.005f, 125, 125, 32, 1234ULL},
};
typedef BatchGemvTest<float> BatchGemvTestF;
// Element-wise approximate comparison of tested vs reference output.
TEST_P(BatchGemvTestF, Result) {
  int vecleny = params.batchSize * params.m;
  ASSERT_TRUE(
    devArrMatch(out_ref, out, vecleny, CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestF,
                        ::testing::ValuesIn(inputsf));
typedef BatchGemvTest<double> BatchGemvTestD;
// Double cases mirror the float sweep with a tighter tolerance.
const std::vector<BatchGemvInputs<double>> inputsd = {
  {0.0000001, 128, 128, 32, 1234ULL}, {0.0000001, 128, 126, 32, 1234ULL},
  {0.0000001, 128, 125, 32, 1234ULL}, {0.0000001, 126, 128, 32, 1234ULL},
  {0.0000001, 126, 126, 32, 1234ULL}, {0.0000001, 126, 125, 32, 1234ULL},
  {0.0000001, 125, 128, 32, 1234ULL}, {0.0000001, 125, 126, 32, 1234ULL},
  {0.0000001, 125, 125, 32, 1234ULL},
};
// Element-wise approximate comparison of tested vs reference output.
TEST_P(BatchGemvTestD, Result) {
  int vecleny = params.batchSize * params.m;
  ASSERT_TRUE(devArrMatch(out_ref, out, vecleny,
                          CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestD,
                        ::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
| ce3560c0203a2c250e6cec72ec790ab53d471d67.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "linalg/batched/gemv.cuh"
#include "random/rng.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchGemvInputs {
T tolerance;
int m, n, batchSize;
unsigned long long int seed;
};
// Streams the test parameters so gtest failure messages identify which
// parameterized case failed (previously this printed nothing, defeating
// the purpose of providing a printer).
template <typename T, typename IdxType = int>
::std::ostream &operator<<(::std::ostream &os, const BatchGemvInputs<T> &dims) {
  os << "{tolerance=" << dims.tolerance << ", m=" << dims.m << ", n=" << dims.n
     << ", batchSize=" << dims.batchSize << ", seed=" << dims.seed << "}";
  return os;
}
// Naive batched GEMV reference: accumulates A[batch](row,col) * x[batch](col)
// into y[batch*m + row] via atomicAdd.
// Expected launch: grid = (m, batchSize), 1-D block with >= n threads;
// y must be zero-initialized before launch (caller memsets it).
template <typename Type>
__global__ void naiveBatchGemvKernel(Type *y, const Type *A, const Type *x,
                                     int m, int n) {
  int batch = blockIdx.y;
  int row = blockIdx.x;
  int col = threadIdx.x;
  if (row < m && col < n) {
    auto prod = A[batch * m * n + row * n + col] * x[batch * n + col];
    atomicAdd(y + batch * m + row, prod);
  }
}
// Host launcher for the naive batched GEMV reference: one block per
// (row, batch) pair, block sized to the next multiple of WarpSize
// covering n columns.
template <typename Type>
void naiveBatchGemv(Type *y, const Type *A, const Type *x, int m, int n,
                    int batchSize, cudaStream_t stream) {
  // BUGFIX: TPB was declared 'static', so it was computed from the first
  // call's n only; a later call with a larger n would launch too few
  // threads and silently leave the remaining columns unaccumulated.
  int TPB = ceildiv(n, WarpSize) * WarpSize;
  dim3 nblks(m, batchSize);
  naiveBatchGemvKernel<Type><<<nblks, TPB, 0, stream>>>(y, A, x, m, n);
  CUDA_CHECK(cudaPeekAtLastError());
}
// Parameterized fixture comparing the batched gemv under test against the
// naive atomicAdd-based reference.
template <typename T>
class BatchGemvTest : public ::testing::TestWithParam<BatchGemvInputs<T>> {
 protected:
  // Allocates device buffers, fills A and x with uniform random values in
  // [-1, 1], zeroes out_ref (required by the atomicAdd reference kernel),
  // then computes both the reference (out_ref) and the tested (out) results.
  void SetUp() override {
    params = ::testing::TestWithParam<BatchGemvInputs<T>>::GetParam();
    Random::Rng r(params.seed);
    int len = params.batchSize * params.m * params.n;
    int vecleny = params.batchSize * params.m;
    int veclenx = params.batchSize * params.n;
    CUDA_CHECK(cudaStreamCreate(&stream));
    allocate(A, len);
    allocate(x, veclenx);
    allocate(out_ref, vecleny);
    allocate(out, vecleny);
    r.uniform(A, len, T(-1.0), T(1.0), stream);
    r.uniform(x, veclenx, T(-1.0), T(1.0), stream);
    CUDA_CHECK(cudaMemsetAsync(out_ref, 0, sizeof(T) * vecleny, stream));
    naiveBatchGemv(out_ref, A, x, params.m, params.n, params.batchSize, stream);
    gemv<T, int>(out, A, x, nullptr, T(1.0), T(0.0), params.m, params.n,
                 params.batchSize, stream);
    CUDA_CHECK(cudaStreamSynchronize(stream));
  }
  // Releases all device buffers and the stream created in SetUp.
  void TearDown() override {
    CUDA_CHECK(cudaFree(A));
    CUDA_CHECK(cudaFree(x));
    CUDA_CHECK(cudaFree(out_ref));
    CUDA_CHECK(cudaFree(out));
    CUDA_CHECK(cudaStreamSynchronize(stream));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }
 protected:
  cudaStream_t stream;
  BatchGemvInputs<T> params;   // current test-case parameters
  T *A, *x, *out_ref, *out;    // device buffers (matrices, input, results)
};
// Float cases sweep m and n around the warp boundary (125/126/128) to
// exercise partial-warp tails; batchSize is 32 throughout.
const std::vector<BatchGemvInputs<float>> inputsf = {
  {0.005f, 128, 128, 32, 1234ULL}, {0.005f, 128, 126, 32, 1234ULL},
  {0.005f, 128, 125, 32, 1234ULL}, {0.005f, 126, 128, 32, 1234ULL},
  {0.005f, 126, 126, 32, 1234ULL}, {0.005f, 126, 125, 32, 1234ULL},
  {0.005f, 125, 128, 32, 1234ULL}, {0.005f, 125, 126, 32, 1234ULL},
  {0.005f, 125, 125, 32, 1234ULL},
};
typedef BatchGemvTest<float> BatchGemvTestF;
// Element-wise approximate comparison of tested vs reference output.
TEST_P(BatchGemvTestF, Result) {
  int vecleny = params.batchSize * params.m;
  ASSERT_TRUE(
    devArrMatch(out_ref, out, vecleny, CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestF,
                        ::testing::ValuesIn(inputsf));
typedef BatchGemvTest<double> BatchGemvTestD;
// Double cases mirror the float sweep with a tighter tolerance.
const std::vector<BatchGemvInputs<double>> inputsd = {
  {0.0000001, 128, 128, 32, 1234ULL}, {0.0000001, 128, 126, 32, 1234ULL},
  {0.0000001, 128, 125, 32, 1234ULL}, {0.0000001, 126, 128, 32, 1234ULL},
  {0.0000001, 126, 126, 32, 1234ULL}, {0.0000001, 126, 125, 32, 1234ULL},
  {0.0000001, 125, 128, 32, 1234ULL}, {0.0000001, 125, 126, 32, 1234ULL},
  {0.0000001, 125, 125, 32, 1234ULL},
};
// Element-wise approximate comparison of tested vs reference output.
TEST_P(BatchGemvTestD, Result) {
  int vecleny = params.batchSize * params.m;
  ASSERT_TRUE(devArrMatch(out_ref, out, vecleny,
                          CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestD,
                        ::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
|
c9e4246a2eb46abf2ffd1505a0c943dbf4b4bc67.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************************************
*
* GPU memory allocation, initialization, and transfer (Host <--> GPU)
*
****************************************************************************/
/*
* This file is part of GPUMCML.
*
* GPUMCML is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GPUMCML is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GPUMCML. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include "cudamcml_kernel.h"
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Initialize Device Constant Memory with read-only data
//////////////////////////////////////////////////////////////////////////////
// Copies the per-simulation parameters (d_simparam) and per-layer optical
// properties (d_layerspecs) into device constant memory.
// Returns 0 on success, 1 if the layer count (including the two ambient
// media above and below the tissue stack) exceeds MAX_LAYERS.
int InitDCMem(SimulationStruct *sim)
{
  // Make sure that the number of layers is within the limit.
  // (+2 accounts for the ambient layers bounding the tissue stack)
  UINT32 n_layers = sim->n_layers + 2;
  if (n_layers > MAX_LAYERS) return 1;
  SimParamGPU h_simparam;
  h_simparam.num_layers = sim->n_layers; // not plus 2 here
  h_simparam.init_photon_w = sim->start_weight;
  h_simparam.dz = sim->det.dz;           // detection grid resolution (z, r)
  h_simparam.dr = sim->det.dr;
  h_simparam.na = sim->det.na;           // detection grid extents (angle, z, r)
  h_simparam.nz = sim->det.nz;
  h_simparam.nr = sim->det.nr;
  CUDA_SAFE_CALL( hipMemcpyToSymbol(d_simparam,
    &h_simparam, sizeof(SimParamGPU)) );
  LayerStructGPU h_layerspecs[MAX_LAYERS];
  for (UINT32 i = 0; i < n_layers; ++i)
  {
    h_layerspecs[i].z0 = sim->layers[i].z_min;
    h_layerspecs[i].z1 = sim->layers[i].z_max;
    FLOAT n1 = sim->layers[i].n;          // refractive index of this layer
    h_layerspecs[i].n = n1;
    // TODO: sim->layer should not do any pre-computation.
    FLOAT rmuas = sim->layers[i].mutr;    // 1 / (mua + mus)
    h_layerspecs[i].muas = FP_ONE / rmuas;
    h_layerspecs[i].rmuas = rmuas;
    h_layerspecs[i].mua_muas = sim->layers[i].mua * rmuas;
    h_layerspecs[i].g = sim->layers[i].g;
    if (i == 0 || i == n_layers-1)
    {
      // ambient layers: no critical-angle tracking
      h_layerspecs[i].cos_crit0 = MCML_FP_ZERO;
      h_layerspecs[i].cos_crit1 = MCML_FP_ZERO;
    }
    else
    {
      // cos_crit0/1 = cosine of the critical angle at the upper/lower
      // boundary (sqrt(1 - (n2/n1)^2)); zero when no total internal
      // reflection is possible (n1 <= n2).
      FLOAT n2 = sim->layers[i-1].n;
      h_layerspecs[i].cos_crit0 = (n1 > n2) ?
        sqrtf(FP_ONE - n2*n2/(n1*n1)) : MCML_FP_ZERO;
      n2 = sim->layers[i+1].n;
      h_layerspecs[i].cos_crit1 = (n1 > n2) ?
        sqrtf(FP_ONE - n2*n2/(n1*n1)) : MCML_FP_ZERO;
    }
  }
  // Copy layer data to constant device memory
  CUDA_SAFE_CALL( hipMemcpyToSymbol(d_layerspecs,
    &h_layerspecs, n_layers*sizeof(LayerStructGPU)) );
  return 0;
}
//////////////////////////////////////////////////////////////////////////////
// Initialize Device Memory (global) for read/write data
//////////////////////////////////////////////////////////////////////////////
// DAVID
// Allocates the simulation state: host-side detector result arrays plus the
// device-side photon counter, RNG state, detector accumulators, and GPU
// thread (photon) state arrays. RNG seeds and the photon counter are copied
// host-to-device; detector arrays are zeroed on the device.
// Returns 1 on success; exits the process if a host allocation fails.
// NOTE(review): device allocations are not released on the early-exit host
// allocation-failure paths — acceptable since the process exits, but worth
// confirming.
int InitSimStates(SimState* HostMem, SimState* DeviceMem,
    GPUThreadStates *tstates, SimulationStruct* sim)
{
  int rz_size = sim->det.nr * sim->det.nz;   // depth-resolved absorption grid
  int ra_size = sim->det.nr * sim->det.na;   // angle-resolved R/T grids
  unsigned int size;
  // Allocate n_photons_left (on device only)
  size = sizeof(UINT32);
  CUDA_SAFE_CALL( hipMalloc((void**)&DeviceMem->n_photons_left, size) );
  CUDA_SAFE_CALL( hipMemcpy(DeviceMem->n_photons_left,
    HostMem->n_photons_left, size, hipMemcpyHostToDevice) );
  // random number generation (on device only): per-thread multiplier 'a'
  // and 64-bit state 'x', seeded on the host
  size = NUM_THREADS * sizeof(UINT32);
  CUDA_SAFE_CALL( hipMalloc((void**)&DeviceMem->a, size) );
  CUDA_SAFE_CALL( hipMemcpy(DeviceMem->a, HostMem->a, size,
    hipMemcpyHostToDevice) );
  size = NUM_THREADS * sizeof(UINT64);
  CUDA_SAFE_CALL( hipMalloc((void**)&DeviceMem->x, size) );
  CUDA_SAFE_CALL( hipMemcpy(DeviceMem->x, HostMem->x, size,
    hipMemcpyHostToDevice) );
  // Allocate A_rz on host and device
  size = rz_size * sizeof(UINT64);
  HostMem->A_rz = (UINT64*)malloc(size);
  if (HostMem->A_rz == NULL)
  {
    fprintf(stderr, "Error allocating HostMem->A_rz");
    exit(1);
  }
  // On the device, we allocate multiple copies for less access contention.
  //size *= N_A_RZ_COPIES;
  CUDA_SAFE_CALL( hipMalloc((void**)&DeviceMem->A_rz, size) );
  CUDA_SAFE_CALL( hipMemset(DeviceMem->A_rz, 0, size) );
  // Allocate Rd_ra on host and device
  size = ra_size * sizeof(UINT64);
  HostMem->Rd_ra = (UINT64*)malloc(size);
  if(HostMem->Rd_ra==NULL){printf("Error allocating HostMem->Rd_ra"); exit (1);}
  CUDA_SAFE_CALL( hipMalloc((void**)&DeviceMem->Rd_ra, size) );
  CUDA_SAFE_CALL( hipMemset(DeviceMem->Rd_ra, 0, size) );
  // Allocate Tt_ra on host and device
  size = ra_size * sizeof(UINT64);
  HostMem->Tt_ra = (UINT64*)malloc(size);
  if(HostMem->Tt_ra==NULL){printf("Error allocating HostMem->Tt_ra"); exit (1);}
  CUDA_SAFE_CALL( hipMalloc((void**)&DeviceMem->Tt_ra, size) );
  CUDA_SAFE_CALL( hipMemset(DeviceMem->Tt_ra, 0, size) );
  /* Allocate and initialize GPU thread states on the device.
   *
   * We only initialize rnd_a and rnd_x here. For all other fields, whose
   * initial value is a known constant, we use a kernel to do the
   * initialization.
   */
  // photon structure: one entry per GPU thread (position, direction,
  // weight, remaining step length)
  size = NUM_THREADS * sizeof(FLOAT);
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_x, size) );
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_y, size) );
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_z, size) );
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_ux, size) );
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_uy, size) );
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_uz, size) );
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_w, size) );
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_sleft, size) );
  size = NUM_THREADS * sizeof(UINT32);
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->photon_layer, size) );
  // thread active
  CUDA_SAFE_CALL( hipMalloc((void**)&tstates->is_active, size) );
  return 1;
}
//////////////////////////////////////////////////////////////////////////////
// Transfer data from Device to Host memory after simulation
//////////////////////////////////////////////////////////////////////////////
/*
 * Copy simulation results from device to host after the kernels finish.
 *
 * Transfers the three detector accumulation arrays (A_rz, Rd_ra, Tt_ra)
 * and the per-thread RNG state x (so a subsequent run could continue the
 * random sequence — see the comment below).  The blocking hipMemcpy calls
 * also serve as a synchronization point with prior GPU work.
 * Always returns 0.
 */
int CopyDeviceToHostMem(SimState* HostMem, SimState* DeviceMem, SimulationStruct* sim)
{
int rz_size = sim->det.nr*sim->det.nz;
int ra_size = sim->det.nr*sim->det.na;
// Copy A_rz, Rd_ra and Tt_ra
CUDA_SAFE_CALL( hipMemcpy(HostMem->A_rz,DeviceMem->A_rz,rz_size*sizeof(UINT64),hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy(HostMem->Rd_ra,DeviceMem->Rd_ra,ra_size*sizeof(UINT64),hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy(HostMem->Tt_ra,DeviceMem->Tt_ra,ra_size*sizeof(UINT64),hipMemcpyDeviceToHost) );
//Also copy the state of the RNG's
CUDA_SAFE_CALL( hipMemcpy(HostMem->x,DeviceMem->x,NUM_THREADS*sizeof(UINT64),hipMemcpyDeviceToHost) );
return 0;
}
//////////////////////////////////////////////////////////////////////////////
// Free Host Memory
//////////////////////////////////////////////////////////////////////////////
/*
 * Release the host-side result buffers owned by a SimState and reset the
 * pointers so a repeated call is harmless.
 *
 * The RNG seed arrays (a, x) are intentionally NOT freed here — they may
 * be shared across simulations.
 *
 * free(NULL) is a no-op per the C standard, so no NULL guards are needed.
 */
void FreeHostSimState(SimState *hstate)
{
    free(hstate->n_photons_left);
    hstate->n_photons_left = NULL;

    // DO NOT FREE RANDOM NUMBER SEEDS HERE.

    free(hstate->A_rz);
    hstate->A_rz = NULL;

    free(hstate->Rd_ra);
    hstate->Rd_ra = NULL;

    free(hstate->Tt_ra);
    hstate->Tt_ra = NULL;
}
//////////////////////////////////////////////////////////////////////////////
// Free GPU Memory
//////////////////////////////////////////////////////////////////////////////
/*
 * Release all device-side buffers: the SimState result/RNG arrays and the
 * per-thread photon state in tstates.  Every pointer is reset to NULL so a
 * second call is safe (hipFree(NULL) is a no-op).
 * NOTE(review): hipFree return codes are ignored here, unlike the
 * CUDA_SAFE_CALL-wrapped allocations — presumably acceptable at teardown.
 */
void FreeDeviceSimStates(SimState *dstate, GPUThreadStates *tstates)
{
hipFree(dstate->n_photons_left); dstate->n_photons_left = NULL;
hipFree(dstate->x); dstate->x = NULL;
hipFree(dstate->a); dstate->a = NULL;
hipFree(dstate->A_rz); dstate->A_rz = NULL;
hipFree(dstate->Rd_ra); dstate->Rd_ra = NULL;
hipFree(dstate->Tt_ra); dstate->Tt_ra = NULL;
hipFree(tstates->photon_x); tstates->photon_x = NULL;
hipFree(tstates->photon_y); tstates->photon_y = NULL;
hipFree(tstates->photon_z); tstates->photon_z = NULL;
hipFree(tstates->photon_ux); tstates->photon_ux = NULL;
hipFree(tstates->photon_uy); tstates->photon_uy = NULL;
hipFree(tstates->photon_uz); tstates->photon_uz = NULL;
hipFree(tstates->photon_w); tstates->photon_w = NULL;
hipFree(tstates->photon_sleft); tstates->photon_sleft = NULL;
hipFree(tstates->photon_layer); tstates->photon_layer = NULL;
hipFree(tstates->is_active); tstates->is_active = NULL;
}
//////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////// | c9e4246a2eb46abf2ffd1505a0c943dbf4b4bc67.cu | /*****************************************************************************
*
* GPU memory allocation, initialization, and transfer (Host <--> GPU)
*
****************************************************************************/
/*
* This file is part of GPUMCML.
*
* GPUMCML is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GPUMCML is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GPUMCML. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include "cudamcml_kernel.h"
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Initialize Device Constant Memory with read-only data
//////////////////////////////////////////////////////////////////////////////
/*
 * Upload all read-only simulation parameters to GPU constant memory.
 *
 * Copies the detector/grid descriptor into d_simparam, then builds a
 * host-side table of per-layer optical properties — with the reciprocal
 * interaction coefficient and the critical-angle cosines precomputed —
 * and copies it into d_layerspecs.
 *
 * Returns 0 on success, 1 if the layer count (tissue layers plus the two
 * ambient media) exceeds MAX_LAYERS.
 */
int InitDCMem(SimulationStruct *sim)
{
// Make sure that the number of layers is within the limit.
// n_layers includes the top and bottom ambient layers (+2).
UINT32 n_layers = sim->n_layers + 2;
if (n_layers > MAX_LAYERS) return 1;
SimParamGPU h_simparam;
h_simparam.num_layers = sim->n_layers; // not plus 2 here
h_simparam.init_photon_w = sim->start_weight;
h_simparam.dz = sim->det.dz;
h_simparam.dr = sim->det.dr;
h_simparam.na = sim->det.na;
h_simparam.nz = sim->det.nz;
h_simparam.nr = sim->det.nr;
CUDA_SAFE_CALL( cudaMemcpyToSymbol(d_simparam,
&h_simparam, sizeof(SimParamGPU)) );
LayerStructGPU h_layerspecs[MAX_LAYERS];
for (UINT32 i = 0; i < n_layers; ++i)
{
h_layerspecs[i].z0 = sim->layers[i].z_min;
h_layerspecs[i].z1 = sim->layers[i].z_max;
FLOAT n1 = sim->layers[i].n;
h_layerspecs[i].n = n1;
// TODO: sim->layer should not do any pre-computation.
// mutr appears to already hold 1/(interaction coefficient); muas
// recovers the coefficient itself — TODO confirm against SimulationStruct.
FLOAT rmuas = sim->layers[i].mutr;
h_layerspecs[i].muas = FP_ONE / rmuas;
h_layerspecs[i].rmuas = rmuas;
h_layerspecs[i].mua_muas = sim->layers[i].mua * rmuas;
h_layerspecs[i].g = sim->layers[i].g;
if (i == 0 || i == n_layers-1)
{
// Ambient layers: no total internal reflection bookkeeping.
h_layerspecs[i].cos_crit0 = MCML_FP_ZERO;
h_layerspecs[i].cos_crit1 = MCML_FP_ZERO;
}
else
{
// Cosine of the critical angle at the upper interface (layer i-1):
// sqrt(1 - (n2/n1)^2); zero when n1 <= n2 (no total internal reflection).
FLOAT n2 = sim->layers[i-1].n;
h_layerspecs[i].cos_crit0 = (n1 > n2) ?
sqrtf(FP_ONE - n2*n2/(n1*n1)) : MCML_FP_ZERO;
// Same for the lower interface (layer i+1).
n2 = sim->layers[i+1].n;
h_layerspecs[i].cos_crit1 = (n1 > n2) ?
sqrtf(FP_ONE - n2*n2/(n1*n1)) : MCML_FP_ZERO;
}
}
// Copy layer data to constant device memory
CUDA_SAFE_CALL( cudaMemcpyToSymbol(d_layerspecs,
&h_layerspecs, n_layers*sizeof(LayerStructGPU)) );
return 0;
}
//////////////////////////////////////////////////////////////////////////////
// Initialize Device Memory (global) for read/write data
//////////////////////////////////////////////////////////////////////////////
// DAVID
/*
 * Allocate and initialize all per-simulation read/write buffers.
 *
 * Device-only state: the remaining-photon counter and the per-thread RNG
 * state (a, x), seeded from the host copies.  Host+device state: the three
 * detector accumulation arrays (A_rz, Rd_ra, Tt_ra), zeroed on the device.
 * Per-thread photon state (tstates) is allocated on the device only; all
 * fields except the RNG seeds are initialized later by a kernel.
 *
 * Exits the process on host allocation failure; device-call failures are
 * handled by CUDA_SAFE_CALL.
 * NOTE(review): returns 1 (apparently meaning success) while InitDCMem
 * returns 0 on success — confirm callers handle both conventions.
 */
int InitSimStates(SimState* HostMem, SimState* DeviceMem,
GPUThreadStates *tstates, SimulationStruct* sim)
{
// Detector grid element counts: nr*nz for A_rz, nr*na for Rd_ra/Tt_ra.
int rz_size = sim->det.nr * sim->det.nz;
int ra_size = sim->det.nr * sim->det.na;
unsigned int size;
// Allocate n_photons_left (on device only)
size = sizeof(UINT32);
CUDA_SAFE_CALL( cudaMalloc((void**)&DeviceMem->n_photons_left, size) );
CUDA_SAFE_CALL( cudaMemcpy(DeviceMem->n_photons_left,
HostMem->n_photons_left, size, cudaMemcpyHostToDevice) );
// random number generation (on device only)
// a: one 32-bit value per thread; x: one 64-bit state word per thread.
size = NUM_THREADS * sizeof(UINT32);
CUDA_SAFE_CALL( cudaMalloc((void**)&DeviceMem->a, size) );
CUDA_SAFE_CALL( cudaMemcpy(DeviceMem->a, HostMem->a, size,
cudaMemcpyHostToDevice) );
size = NUM_THREADS * sizeof(UINT64);
CUDA_SAFE_CALL( cudaMalloc((void**)&DeviceMem->x, size) );
CUDA_SAFE_CALL( cudaMemcpy(DeviceMem->x, HostMem->x, size,
cudaMemcpyHostToDevice) );
// Allocate A_rz on host and device
size = rz_size * sizeof(UINT64);
HostMem->A_rz = (UINT64*)malloc(size);
if (HostMem->A_rz == NULL)
{
fprintf(stderr, "Error allocating HostMem->A_rz");
exit(1);
}
// On the device, we allocate multiple copies for less access contention.
//size *= N_A_RZ_COPIES;
CUDA_SAFE_CALL( cudaMalloc((void**)&DeviceMem->A_rz, size) );
CUDA_SAFE_CALL( cudaMemset(DeviceMem->A_rz, 0, size) );
// Allocate Rd_ra on host and device
size = ra_size * sizeof(UINT64);
HostMem->Rd_ra = (UINT64*)malloc(size);
if(HostMem->Rd_ra==NULL){printf("Error allocating HostMem->Rd_ra"); exit (1);}
CUDA_SAFE_CALL( cudaMalloc((void**)&DeviceMem->Rd_ra, size) );
CUDA_SAFE_CALL( cudaMemset(DeviceMem->Rd_ra, 0, size) );
// Allocate Tt_ra on host and device
size = ra_size * sizeof(UINT64);
HostMem->Tt_ra = (UINT64*)malloc(size);
if(HostMem->Tt_ra==NULL){printf("Error allocating HostMem->Tt_ra"); exit (1);}
CUDA_SAFE_CALL( cudaMalloc((void**)&DeviceMem->Tt_ra, size) );
CUDA_SAFE_CALL( cudaMemset(DeviceMem->Tt_ra, 0, size) );
/* Allocate and initialize GPU thread states on the device.
 *
 * We only initialize rnd_a and rnd_x here. For all other fields, whose
 * initial value is a known constant, we use a kernel to do the
 * initialization.
 */
// photon structure
// One FLOAT per thread for each of the eight photon state components.
size = NUM_THREADS * sizeof(FLOAT);
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_x, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_y, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_z, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_ux, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_uy, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_uz, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_w, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_sleft, size) );
// photon_layer and is_active both use one UINT32 per thread.
size = NUM_THREADS * sizeof(UINT32);
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->photon_layer, size) );
// thread active
CUDA_SAFE_CALL( cudaMalloc((void**)&tstates->is_active, size) );
return 1;
}
//////////////////////////////////////////////////////////////////////////////
// Transfer data from Device to Host memory after simulation
//////////////////////////////////////////////////////////////////////////////
/*
 * Copy simulation results from device to host after the kernels finish.
 *
 * Transfers the three detector accumulation arrays (A_rz, Rd_ra, Tt_ra)
 * and the per-thread RNG state x (so a subsequent run could continue the
 * random sequence — see the comment below).  The blocking cudaMemcpy calls
 * also serve as a synchronization point with prior GPU work.
 * Always returns 0.
 */
int CopyDeviceToHostMem(SimState* HostMem, SimState* DeviceMem, SimulationStruct* sim)
{
int rz_size = sim->det.nr*sim->det.nz;
int ra_size = sim->det.nr*sim->det.na;
// Copy A_rz, Rd_ra and Tt_ra
CUDA_SAFE_CALL( cudaMemcpy(HostMem->A_rz,DeviceMem->A_rz,rz_size*sizeof(UINT64),cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy(HostMem->Rd_ra,DeviceMem->Rd_ra,ra_size*sizeof(UINT64),cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy(HostMem->Tt_ra,DeviceMem->Tt_ra,ra_size*sizeof(UINT64),cudaMemcpyDeviceToHost) );
//Also copy the state of the RNG's
CUDA_SAFE_CALL( cudaMemcpy(HostMem->x,DeviceMem->x,NUM_THREADS*sizeof(UINT64),cudaMemcpyDeviceToHost) );
return 0;
}
//////////////////////////////////////////////////////////////////////////////
// Free Host Memory
//////////////////////////////////////////////////////////////////////////////
/*
 * Release the host-side result buffers owned by a SimState and reset the
 * pointers so a repeated call is harmless.
 *
 * The RNG seed arrays (a, x) are intentionally NOT freed here — they may
 * be shared across simulations.
 *
 * free(NULL) is a no-op per the C standard, so no NULL guards are needed.
 */
void FreeHostSimState(SimState *hstate)
{
    free(hstate->n_photons_left);
    hstate->n_photons_left = NULL;

    // DO NOT FREE RANDOM NUMBER SEEDS HERE.

    free(hstate->A_rz);
    hstate->A_rz = NULL;

    free(hstate->Rd_ra);
    hstate->Rd_ra = NULL;

    free(hstate->Tt_ra);
    hstate->Tt_ra = NULL;
}
//////////////////////////////////////////////////////////////////////////////
// Free GPU Memory
//////////////////////////////////////////////////////////////////////////////
/*
 * Release all device-side buffers: the SimState result/RNG arrays and the
 * per-thread photon state in tstates.  Every pointer is reset to NULL so a
 * second call is safe (cudaFree(NULL) is a no-op).
 * NOTE(review): cudaFree return codes are ignored here, unlike the
 * CUDA_SAFE_CALL-wrapped allocations — presumably acceptable at teardown.
 */
void FreeDeviceSimStates(SimState *dstate, GPUThreadStates *tstates)
{
cudaFree(dstate->n_photons_left); dstate->n_photons_left = NULL;
cudaFree(dstate->x); dstate->x = NULL;
cudaFree(dstate->a); dstate->a = NULL;
cudaFree(dstate->A_rz); dstate->A_rz = NULL;
cudaFree(dstate->Rd_ra); dstate->Rd_ra = NULL;
cudaFree(dstate->Tt_ra); dstate->Tt_ra = NULL;
cudaFree(tstates->photon_x); tstates->photon_x = NULL;
cudaFree(tstates->photon_y); tstates->photon_y = NULL;
cudaFree(tstates->photon_z); tstates->photon_z = NULL;
cudaFree(tstates->photon_ux); tstates->photon_ux = NULL;
cudaFree(tstates->photon_uy); tstates->photon_uy = NULL;
cudaFree(tstates->photon_uz); tstates->photon_uz = NULL;
cudaFree(tstates->photon_w); tstates->photon_w = NULL;
cudaFree(tstates->photon_sleft); tstates->photon_sleft = NULL;
cudaFree(tstates->photon_layer); tstates->photon_layer = NULL;
cudaFree(tstates->is_active); tstates->is_active = NULL;
}
//////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////// |
1a08e9210fd3520a1b6e8a28e4a25e555682d392.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#define N 512
/*
void Matriz_CPU_Mult(int A[N][N], int B[N][N], int C[N][N]) {
int n,m;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
int sum = 0;
for (int k = 0; k < N; k++) {
m = A[i][k];
n = B[k][j];
sum += m * n;
}
C[i][j] = sum;
}
}
}
*/
// Computes C = A * B for square N x N row-major matrices of double.
// One thread per output element: i indexes the column, j the row, and
// thread (i, j) writes c[j*N + i].  Launch with a 2D grid covering at
// least N x N threads; out-of-range threads exit via the bounds check.
// Fix: the accumulator was declared `int`, truncating every double
// partial product toward zero and risking overflow; accumulate in double.
__global__ void Matriz_GPU_Mult(double *a, double *b, double *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // column index
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // row index
    if (i < N && j < N) {
        double sum = 0.0;  // double accumulator: no truncation/overflow
        for (int k = 0; k < N; k++) {
            sum += a[j * N + k] * b[k * N + i];
        }
        c[j * N + i] = sum;
    }
}
// Host driver: fills two N x N matrices, multiplies them on the GPU with
// Matriz_GPU_Mult, and reports the kernel execution time.
int main() {
    double timeGPU; //, timeCPU;
    // 'static' keeps the ~6 MB of matrix storage (3 * N*N doubles for
    // N = 512) out of the thread stack, which automatic arrays of this
    // size would overflow on common default stack limits.
    static double A[N][N], B[N][N], C[N][N];
    double *d_a, *d_b, *d_c;
    int cont, i, j;
    // Initialization: every row of A and B holds the sequence 0..N-1.
    for (i = 0; i < N; i++) {
        cont = 0;
        for (j = 0; j < N; j++) {
            A[i][j] = cont;
            B[i][j] = cont;
            cont++;
        }
    }
    // Size of one full N x N matrix.  The original code allocated only
    // N * sizeof(double) (a single row), so the kernel wrote N*N elements
    // far past the end of every device buffer.
    size_t bytes = (size_t)N * N * sizeof(double);
    hipMalloc((void **) &d_a, bytes);
    hipMalloc((void **) &d_b, bytes);
    hipMalloc((void **) &d_c, bytes);
    hipMemcpy(d_a, A, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, B, bytes, hipMemcpyHostToDevice);
    // One thread per output element; round the grid up so N need not be
    // a multiple of the block dimensions.
    dim3 threadsPerBlock(32, 32);
    dim3 numBlocks((int)ceil((float)N/threadsPerBlock.x), (int)ceil((float)N/threadsPerBlock.y));
    clock_t startGPU = clock();
    hipLaunchKernelGGL(( Matriz_GPU_Mult), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c);
    // Kernel launches are asynchronous: wait for completion so the timer
    // measures execution, not just the launch call.
    hipDeviceSynchronize();
    timeGPU = ((double)(clock() - startGPU))/CLOCKS_PER_SEC;
    hipMemcpy(C, d_c, bytes, hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    // Report GPU execution time.
    printf("tiempo GPU = %f s\n",timeGPU);
    return 0;
}
| 1a08e9210fd3520a1b6e8a28e4a25e555682d392.cu | #include <stdio.h>
#include <time.h>
#define N 512
/*
void Matriz_CPU_Mult(int A[N][N], int B[N][N], int C[N][N]) {
int n,m;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
int sum = 0;
for (int k = 0; k < N; k++) {
m = A[i][k];
n = B[k][j];
sum += m * n;
}
C[i][j] = sum;
}
}
}
*/
// Computes C = A * B for square N x N row-major matrices of double.
// One thread per output element: i indexes the column, j the row, and
// thread (i, j) writes c[j*N + i].  Launch with a 2D grid covering at
// least N x N threads; out-of-range threads exit via the bounds check.
// Fix: the accumulator was declared `int`, truncating every double
// partial product toward zero and risking overflow; accumulate in double.
__global__ void Matriz_GPU_Mult(double *a, double *b, double *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // column index
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // row index
    if (i < N && j < N) {
        double sum = 0.0;  // double accumulator: no truncation/overflow
        for (int k = 0; k < N; k++) {
            sum += a[j * N + k] * b[k * N + i];
        }
        c[j * N + i] = sum;
    }
}
// Host driver: fills two N x N matrices, multiplies them on the GPU with
// Matriz_GPU_Mult, and reports the kernel execution time.
int main() {
    double timeGPU; //, timeCPU;
    // 'static' keeps the ~6 MB of matrix storage (3 * N*N doubles for
    // N = 512) out of the thread stack, which automatic arrays of this
    // size would overflow on common default stack limits.
    static double A[N][N], B[N][N], C[N][N];
    double *d_a, *d_b, *d_c;
    int cont, i, j;
    // Initialization: every row of A and B holds the sequence 0..N-1.
    for (i = 0; i < N; i++) {
        cont = 0;
        for (j = 0; j < N; j++) {
            A[i][j] = cont;
            B[i][j] = cont;
            cont++;
        }
    }
    // Size of one full N x N matrix.  The original code allocated only
    // N * sizeof(double) (a single row), so the kernel wrote N*N elements
    // far past the end of every device buffer.
    size_t bytes = (size_t)N * N * sizeof(double);
    cudaMalloc((void **) &d_a, bytes);
    cudaMalloc((void **) &d_b, bytes);
    cudaMalloc((void **) &d_c, bytes);
    cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, bytes, cudaMemcpyHostToDevice);
    // One thread per output element; round the grid up so N need not be
    // a multiple of the block dimensions.
    dim3 threadsPerBlock(32, 32);
    dim3 numBlocks((int)ceil((float)N/threadsPerBlock.x), (int)ceil((float)N/threadsPerBlock.y));
    clock_t startGPU = clock();
    Matriz_GPU_Mult<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c);
    // Kernel launches are asynchronous: wait for completion so the timer
    // measures execution, not just the launch call.
    cudaDeviceSynchronize();
    timeGPU = ((double)(clock() - startGPU))/CLOCKS_PER_SEC;
    cudaMemcpy(C, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Report GPU execution time.
    printf("tiempo GPU = %f s\n",timeGPU);
    return 0;
}
|
6d805dea7a2ec7813d4a14b9ae9f05a8def2cb95.hip | // !!! This is a file automatically generated by hipify!!!
#include "visit_writer.h"
#include <math.h>
#include <cmath>
#include <sstream>
#include <iostream>
#include <cstdlib>
#include <random>
#include <chrono>
#include <algorithm>
#include <future>
#include <vector>
#include "FluidGPU.cuh"
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
const int nspts = 8000; //number of solid particles
const int nbpts = 000;//1000; //number of solid particles
const int tpts = 4000;
//Storage for output
int vardims3[] = { 1,1 };
// Interleave the low 10 bits of (x, y, z) into a 30-bit Morton code:
// bit k of x lands at bit position 3k, bit k of y at 3k+1, and bit k of
// z at 3k+2.  Host-side counterpart of the device morton function.
int morton_host(unsigned int x, unsigned int y, unsigned int z) {
    // Spread the low 10 bits of v so consecutive bits end up 3 apart
    // (classic "part by 2" bit-twiddling sequence).
    auto spread = [](unsigned int v) -> unsigned int {
        v = (v | (v << 16)) & 0x030000FF;
        v = (v | (v << 8))  & 0x0300F00F;
        v = (v | (v << 4))  & 0x030C30C3;
        v = (v | (v << 2))  & 0x09249249;
        return v;
    };
    return spread(x) | (spread(y) << 1) | (spread(z) << 2);
}
// Inverse of morton_host for a single coordinate: extract every third bit
// of the Morton code x, starting at bit offset b (0 = x, 1 = y, 2 = z),
// and compact them into the low 10 bits of the result.
int demorton_host(unsigned int x, int b) {
    // Align the requested coordinate's bits onto positions 0, 3, 6, ...
    if (b == 1) {
        x = (x >> 1);   // y bits start at position 1
    } else if (b == 2) {
        x = (x >> 2);   // z bits start at position 2
    }
    // Compact bits 0, 3, 6, ... into contiguous low-order bits
    // (inverse of the "part by 2" spread).
    x &= 0x09249249;
    x = (x | (x >> 2)) & 0x030c30c3;
    x = (x | (x >> 4)) & 0x0300f00f;
    x = (x | (x >> 8)) & 0xff0000ff;
    x = (x | (x >> 16)) & 0x000003ff;
    return x;
}
// Host driver for the particle/fluid simulation: builds the initial solid
// and boundary particle sets, uploads them to the GPU, then runs tpts time
// steps of (sort by cell -> neighbour search -> physics kernels), dumping
// a frame path every 10 steps.
// NOTE(review): none of the host malloc()s or device hipMalloc()s are
// released before return — acceptable for a run-to-exit tool, but worth
// confirming.
int main(int argc, char **argv)
{
/*
std::cout << morton(30, 30, 30) << "\n";
for (int k = -1; k < 2; k++)
for (int j = -1; j < 2; j++)
for (int i = -1; i < 2; i++)
std::cout << morton(demorton(morton(30 + i, 30 + j, 30 + k), 0), demorton(morton(30 + i, 30 + j, 30 + k), 1), demorton(morton(30 + i, 30 + j, 30 + k), 2)) << "\n";
*/
//hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
// Per-cell population counters and their index table (27 = 3x3x3 stencil).
int *pop, *d_pop, *popidx, *d_popidx;
pop = (int *)malloc(sizeof(int)*27);
popidx = (int *)malloc(sizeof(int) * 27);
hipMalloc((void **)&d_pop, sizeof(int) * 27);
hipMalloc((void **)&d_popidx, sizeof(int) * 27);
for (int i = 0; i < 27; i++) { pop[i] = 0; popidx[i] = i; }
hipMemcpy(d_pop, pop, sizeof(int)*27, hipMemcpyHostToDevice);
hipMemcpy(d_popidx, popidx, sizeof(int) * 27, hipMemcpyHostToDevice);
size_t size = (nspts+nbpts) * sizeof(Particle);
Particle *SPptr;
Particle *d_SPptr;
SPptr = (Particle *)malloc(size); // Allocate particles on host
hipMalloc((void **)&d_SPptr, size); // Allocate particles on device
bool *neighbours;
bool *d_neighbours;
// NOTE(review): host side allocates sizeof(int) per element of a bool
// array (4x over-allocation) while the device side uses sizeof(bool).
neighbours = (bool *)malloc(sizeof(int)*nspts*(NUMCELLS)); // Allocate particles on host
hipMalloc((void **)&d_neighbours, sizeof(bool) * nspts * (NUMCELLS)); // Allocate particles on device
for (int i = 0; i < nspts * (NUMCELLS); i++)
neighbours[i] = false;
hipMemcpy(d_neighbours, neighbours, sizeof(bool) * nspts * (NUMCELLS), hipMemcpyHostToDevice);
// Managed (host+device visible) output buffers: positions plus the two
// scalar fields ("dens", "cellnumber") written by mykernel2.
float *spts;
hipMallocManaged(&spts, 3*(nspts + nbpts) * sizeof(float));
for (int i = 0; i < 3*(nspts + nbpts); i++)
spts[i] =0 ;
float *a3;
hipMallocManaged(&a3, (nspts + nbpts) * sizeof(float));
float *b3;
hipMallocManaged(&b3, (nspts + nbpts) * sizeof(float));
const char * const varnames3[] = { "dens", "cellnumber" };
float *arrays3[] = { (float*)a3, (float*)b3, };
//Set up Solid Particles
// Particles form a 15x15 grid per z-slice; cellnumber is a linear
// (x-major) index into the GRIDSIZE^3 uniform grid.
// NOTE(review): each `new Particle(...)` below is copied and then leaked.
for (int j = 0; j < nspts; j++) {
SPptr[j] = *(new Particle(-.16 + 0.04*((j / 15) % 15), -0.76 + 0.04*(j / 15 / 15), -0.20 + (j % 15)*0.04, 0., 0., 0.));
SPptr[j].index = j;
SPptr[j].solid = true;
SPptr[j].cellnumber = int((SPptr[j].xcoord - XMIN) / CELLSIZE)*GRIDSIZE*GRIDSIZE + int((SPptr[j].ycoord - YMIN) / CELLSIZE)*GRIDSIZE + int((SPptr[j].zcoord - ZMIN) /CELLSIZE);
//SPptr[j].cellnumber = morton_host(int((SPptr[j].xcoord - XMIN) / CELLSIZE), int((SPptr[j].ycoord - YMIN) / CELLSIZE), int((SPptr[j].zcoord - ZMIN)/CELLSIZE));
}
//Set up boundary particles
for (int i = 0; i < nbpts; i++) {
SPptr[nspts + i] = *(new Particle(-0.96 + 0.06*(i % 30), -0.96 + 0.06*(i / 30), -0.24, true));
SPptr[i+nspts].index = nspts+i;
SPptr[i + nspts].cellnumber = int((SPptr[i + nspts].xcoord - XMIN) / CELLSIZE)*GRIDSIZE*GRIDSIZE + int((SPptr[i + nspts].ycoord - YMIN) / CELLSIZE)*GRIDSIZE + int((SPptr[i + nspts].zcoord - ZMIN) / CELLSIZE);
//SPptr[i+nspts].cellnumber = morton_host(int((SPptr[i + nspts].xcoord - XMIN) / CELLSIZE), int((SPptr[i + nspts].ycoord - YMIN) / CELLSIZE), int((SPptr[i + nspts].zcoord - ZMIN) / CELLSIZE));
}
hipMemcpy(d_SPptr, SPptr, size, hipMemcpyHostToDevice);
///////Sort particles by cell number and keep track of when a new cell starts//////////
int *v_h, *v_d;
//int *cellstart_h, *cellstart_d;
const int N = nspts + nbpts; // Number of elements in arrays
size_t sizes = N * sizeof(int);
//size_t sizes2 = 2*NUMCELLS * sizeof(int);
v_h = (int *)malloc(sizes); // Allocate array on host
hipMalloc((void **)&v_d, sizes);// Allocate array on device
// v holds each particle's cell number and is used as the sort key.
for (int i = 0; i<N; i++)
{
v_h[i] = SPptr[i].cellnumber;
//std::cout << v_h[i] << "\n";
}
//hipMemcpy(cellstart_d, cellstart_h, sizes2, hipMemcpyHostToDevice);
hipMemcpy(v_d, v_h, sizes, hipMemcpyHostToDevice);
// Thrust wrappers over the raw device pointers for sort_by_key below.
thrust::device_ptr<Particle> t_a(d_SPptr);
thrust::device_ptr<int> t_v(v_d);
hipMemcpy(SPptr, d_SPptr, size, hipMemcpyDeviceToHost);
// start/end: per-cell [first, last] particle indices, -1 = empty cell.
int *start, *end, *d_start, *d_end;
size_t sizes2 = NUMCELLS * sizeof(int);
start = (int *)malloc(sizes2); // Allocate array on host
end = (int *)malloc(sizes2); // Allocate array on host
hipMalloc((void **)&d_start, sizes2);// Allocate array on device
hipMalloc((void **)&d_end, sizes2);// Allocate array on device
for (int i = 0; i<NUMCELLS; i++)
{
start[i] = -1;
end[i] = -1;
}
hipMemcpy(d_start, start, sizes2, hipMemcpyHostToDevice);
hipMemcpy(d_end, end, sizes2, hipMemcpyHostToDevice);
// Main time-stepping loop.
for (int t = 0; t < tpts; t++) {
std::cout << "t= " << t << "\n";
// NOTE(review): hipEvent_t `start` shadows the int* `start` declared
// above — legal but confusing.
hipEvent_t start, stop;
CUDA_CHECK_RETURN(hipEventCreate(&start));
CUDA_CHECK_RETURN(hipEventCreate(&stop));
float elapsedTime;
CUDA_CHECK_RETURN(hipEventRecord(start, 0));
hipDeviceSynchronize();
// Re-sort particles by cell key, then rebuild per-cell ranges and run
// the physics kernel.
thrust::sort_by_key(t_v, t_v + N, t_a);
findneighbours << <NUMCELLS, 1024 >> > (v_d, d_start, d_end, nspts+nbpts);
//std::cout << hipGetErrorName(hipGetLastError())<< "\n";
mykernel <<<NUMCELLS, 64 >> > (d_SPptr, v_d, d_start, d_end, nspts+nbpts);
//std::cout << hipGetErrorName(hipGetLastError())<< "\n";
CUDA_CHECK_RETURN(hipEventRecord(stop, 0));
CUDA_CHECK_RETURN(hipEventSynchronize(stop));
CUDA_CHECK_RETURN(hipEventElapsedTime(&elapsedTime, start, stop));
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipEventDestroy(start));
CUDA_CHECK_RETURN(hipEventDestroy(stop));
std::cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";
// Second pass: fills the managed output buffers (positions + fields).
mykernel2 <<<NUMCELLS, 1024>> > (d_SPptr, v_d, d_start, d_end, nspts + nbpts, spts, a3,b3);
//std::cout << hipGetErrorName(hipGetLastError()) << "\n";
// hipDeviceSynchronize();
if (t % 10 == 0) {
hipDeviceSynchronize();
//Write each frame to file
// Frame output is currently disabled (write_point_mesh commented out);
// only the path string is built.
std::ostringstream oss;
oss << "C:\\Users\\robbe\\Desktop\\Code\\anim_s" << t / 10 << ".vtk";
std::string var = oss.str();
const char* cstr = var.c_str();
//write_point_mesh(cstr, 0, nspts + nbpts, spts, 2, vardims3, varnames3, arrays3);
}
}
return 0;
}
| 6d805dea7a2ec7813d4a14b9ae9f05a8def2cb95.cu | #include "visit_writer.h"
#include <math.h>
#include <cmath>
#include <sstream>
#include <iostream>
#include <cstdlib>
#include <random>
#include <chrono>
#include <algorithm>
#include <future>
#include <vector>
#include "FluidGPU.cuh"
#include <cuda_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
const int nspts = 8000; //number of solid particles
const int nbpts = 000;//1000; //number of solid particles
const int tpts = 4000;
//Storage for output
int vardims3[] = { 1,1 };
// Interleave the low 10 bits of (x, y, z) into a 30-bit Morton code:
// bit k of x lands at bit position 3k, bit k of y at 3k+1, and bit k of
// z at 3k+2.  Each shift/mask pair below "spreads" a coordinate's bits
// so consecutive bits end up 3 positions apart (classic "part by 2").
int morton_host(unsigned int x, unsigned int y, unsigned int z) {
//int x = (bidx / GRIDSIZE / GRIDSIZE);
//int y = (bidx / GRIDSIZE % GRIDSIZE);
//int z = (bidx % GRIDSIZE);
x = (x | (x << 16)) & 0x030000FF;
x = (x | (x << 8)) & 0x0300F00F;
x = (x | (x << 4)) & 0x030C30C3;
x = (x | (x << 2)) & 0x09249249;
y = (y | (y << 16)) & 0x030000FF;
y = (y | (y << 8)) & 0x0300F00F;
y = (y | (y << 4)) & 0x030C30C3;
y = (y | (y << 2)) & 0x09249249;
z = (z | (z << 16)) & 0x030000FF;
z = (z | (z << 8)) & 0x0300F00F;
z = (z | (z << 4)) & 0x030C30C3;
z = (z | (z << 2)) & 0x09249249;
// Merge: x occupies bits 0,3,6,..., y bits 1,4,7,..., z bits 2,5,8,...
return x | (y << 1) | (z << 2);
}
// Inverse of morton_host for a single coordinate: extract every third bit
// of the Morton code x, starting at bit offset b (0 = x, 1 = y, 2 = z),
// and compact them into the low 10 bits of the result.
int demorton_host(unsigned int x, int b) {
//b should be 0 for x, 1 for y, 2 for z
// Align the requested coordinate's bits onto positions 0, 3, 6, ...
switch (b) {
case 0: break;
case 1: x = (x >> 1);
break;
case 2: x = (x >> 2);
break;
}
// Compact bits 0, 3, 6, ... into contiguous low bits (inverse spread).
x &= 0x09249249; // x = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0
x = (x | (x >> 2)) & 0x030c30c3; // x = ---- --98 ---- 76-- --54 ---- 32-- --10
x = (x | (x >> 4)) & 0x0300f00f; // x = ---- --98 ---- ---- 7654 ---- ---- 3210
x = (x | (x >> 8)) & 0xff0000ff; // x = ---- --98 ---- ---- ---- ---- 7654 3210
x = (x | (x >> 16)) & 0x000003ff; // x = ---- ---- ---- ---- ---- --98 7654 3210
return x;
}
// Host driver for the particle/fluid simulation: builds the initial solid
// and boundary particle sets, uploads them to the GPU, then runs tpts time
// steps of (sort by cell -> neighbour search -> physics kernels), dumping
// a frame path every 10 steps.
// NOTE(review): none of the host malloc()s or device cudaMalloc()s are
// released before return — acceptable for a run-to-exit tool, but worth
// confirming.
int main(int argc, char **argv)
{
/*
std::cout << morton(30, 30, 30) << "\n";
for (int k = -1; k < 2; k++)
for (int j = -1; j < 2; j++)
for (int i = -1; i < 2; i++)
std::cout << morton(demorton(morton(30 + i, 30 + j, 30 + k), 0), demorton(morton(30 + i, 30 + j, 30 + k), 1), demorton(morton(30 + i, 30 + j, 30 + k), 2)) << "\n";
*/
//cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
// Per-cell population counters and their index table (27 = 3x3x3 stencil).
int *pop, *d_pop, *popidx, *d_popidx;
pop = (int *)malloc(sizeof(int)*27);
popidx = (int *)malloc(sizeof(int) * 27);
cudaMalloc((void **)&d_pop, sizeof(int) * 27);
cudaMalloc((void **)&d_popidx, sizeof(int) * 27);
for (int i = 0; i < 27; i++) { pop[i] = 0; popidx[i] = i; }
cudaMemcpy(d_pop, pop, sizeof(int)*27, cudaMemcpyHostToDevice);
cudaMemcpy(d_popidx, popidx, sizeof(int) * 27, cudaMemcpyHostToDevice);
size_t size = (nspts+nbpts) * sizeof(Particle);
Particle *SPptr;
Particle *d_SPptr;
SPptr = (Particle *)malloc(size); // Allocate particles on host
cudaMalloc((void **)&d_SPptr, size); // Allocate particles on device
bool *neighbours;
bool *d_neighbours;
// NOTE(review): host side allocates sizeof(int) per element of a bool
// array (4x over-allocation) while the device side uses sizeof(bool).
neighbours = (bool *)malloc(sizeof(int)*nspts*(NUMCELLS)); // Allocate particles on host
cudaMalloc((void **)&d_neighbours, sizeof(bool) * nspts * (NUMCELLS)); // Allocate particles on device
for (int i = 0; i < nspts * (NUMCELLS); i++)
neighbours[i] = false;
cudaMemcpy(d_neighbours, neighbours, sizeof(bool) * nspts * (NUMCELLS), cudaMemcpyHostToDevice);
// Managed (host+device visible) output buffers: positions plus the two
// scalar fields ("dens", "cellnumber") written by mykernel2.
float *spts;
cudaMallocManaged(&spts, 3*(nspts + nbpts) * sizeof(float));
for (int i = 0; i < 3*(nspts + nbpts); i++)
spts[i] =0 ;
float *a3;
cudaMallocManaged(&a3, (nspts + nbpts) * sizeof(float));
float *b3;
cudaMallocManaged(&b3, (nspts + nbpts) * sizeof(float));
const char * const varnames3[] = { "dens", "cellnumber" };
float *arrays3[] = { (float*)a3, (float*)b3, };
//Set up Solid Particles
// Particles form a 15x15 grid per z-slice; cellnumber is a linear
// (x-major) index into the GRIDSIZE^3 uniform grid.
// NOTE(review): each `new Particle(...)` below is copied and then leaked.
for (int j = 0; j < nspts; j++) {
SPptr[j] = *(new Particle(-.16 + 0.04*((j / 15) % 15), -0.76 + 0.04*(j / 15 / 15), -0.20 + (j % 15)*0.04, 0., 0., 0.));
SPptr[j].index = j;
SPptr[j].solid = true;
SPptr[j].cellnumber = int((SPptr[j].xcoord - XMIN) / CELLSIZE)*GRIDSIZE*GRIDSIZE + int((SPptr[j].ycoord - YMIN) / CELLSIZE)*GRIDSIZE + int((SPptr[j].zcoord - ZMIN) /CELLSIZE);
//SPptr[j].cellnumber = morton_host(int((SPptr[j].xcoord - XMIN) / CELLSIZE), int((SPptr[j].ycoord - YMIN) / CELLSIZE), int((SPptr[j].zcoord - ZMIN)/CELLSIZE));
}
//Set up boundary particles
for (int i = 0; i < nbpts; i++) {
SPptr[nspts + i] = *(new Particle(-0.96 + 0.06*(i % 30), -0.96 + 0.06*(i / 30), -0.24, true));
SPptr[i+nspts].index = nspts+i;
SPptr[i + nspts].cellnumber = int((SPptr[i + nspts].xcoord - XMIN) / CELLSIZE)*GRIDSIZE*GRIDSIZE + int((SPptr[i + nspts].ycoord - YMIN) / CELLSIZE)*GRIDSIZE + int((SPptr[i + nspts].zcoord - ZMIN) / CELLSIZE);
//SPptr[i+nspts].cellnumber = morton_host(int((SPptr[i + nspts].xcoord - XMIN) / CELLSIZE), int((SPptr[i + nspts].ycoord - YMIN) / CELLSIZE), int((SPptr[i + nspts].zcoord - ZMIN) / CELLSIZE));
}
cudaMemcpy(d_SPptr, SPptr, size, cudaMemcpyHostToDevice);
///////Sort particles by cell number and keep track of when a new cell starts//////////
int *v_h, *v_d;
//int *cellstart_h, *cellstart_d;
const int N = nspts + nbpts; // Number of elements in arrays
size_t sizes = N * sizeof(int);
//size_t sizes2 = 2*NUMCELLS * sizeof(int);
v_h = (int *)malloc(sizes); // Allocate array on host
cudaMalloc((void **)&v_d, sizes);// Allocate array on device
// v holds each particle's cell number and is used as the sort key.
for (int i = 0; i<N; i++)
{
v_h[i] = SPptr[i].cellnumber;
//std::cout << v_h[i] << "\n";
}
//cudaMemcpy(cellstart_d, cellstart_h, sizes2, cudaMemcpyHostToDevice);
cudaMemcpy(v_d, v_h, sizes, cudaMemcpyHostToDevice);
// Thrust wrappers over the raw device pointers for sort_by_key below.
thrust::device_ptr<Particle> t_a(d_SPptr);
thrust::device_ptr<int> t_v(v_d);
cudaMemcpy(SPptr, d_SPptr, size, cudaMemcpyDeviceToHost);
// start/end: per-cell [first, last] particle indices, -1 = empty cell.
int *start, *end, *d_start, *d_end;
size_t sizes2 = NUMCELLS * sizeof(int);
start = (int *)malloc(sizes2); // Allocate array on host
end = (int *)malloc(sizes2); // Allocate array on host
cudaMalloc((void **)&d_start, sizes2);// Allocate array on device
cudaMalloc((void **)&d_end, sizes2);// Allocate array on device
for (int i = 0; i<NUMCELLS; i++)
{
start[i] = -1;
end[i] = -1;
}
cudaMemcpy(d_start, start, sizes2, cudaMemcpyHostToDevice);
cudaMemcpy(d_end, end, sizes2, cudaMemcpyHostToDevice);
// Main time-stepping loop.
for (int t = 0; t < tpts; t++) {
std::cout << "t= " << t << "\n";
// NOTE(review): cudaEvent_t `start` shadows the int* `start` declared
// above — legal but confusing.
cudaEvent_t start, stop;
CUDA_CHECK_RETURN(cudaEventCreate(&start));
CUDA_CHECK_RETURN(cudaEventCreate(&stop));
float elapsedTime;
CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
cudaDeviceSynchronize();
// Re-sort particles by cell key, then rebuild per-cell ranges and run
// the physics kernel.
thrust::sort_by_key(t_v, t_v + N, t_a);
findneighbours << <NUMCELLS, 1024 >> > (v_d, d_start, d_end, nspts+nbpts);
//std::cout << cudaGetErrorName(cudaGetLastError())<< "\n";
mykernel <<<NUMCELLS, 64 >> > (d_SPptr, v_d, d_start, d_end, nspts+nbpts);
//std::cout << cudaGetErrorName(cudaGetLastError())<< "\n";
CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaEventDestroy(start));
CUDA_CHECK_RETURN(cudaEventDestroy(stop));
std::cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";
// Second pass: fills the managed output buffers (positions + fields).
mykernel2 <<<NUMCELLS, 1024>> > (d_SPptr, v_d, d_start, d_end, nspts + nbpts, spts, a3,b3);
//std::cout << cudaGetErrorName(cudaGetLastError()) << "\n";
// cudaDeviceSynchronize();
if (t % 10 == 0) {
cudaDeviceSynchronize();
//Write each frame to file
// Frame output is currently disabled (write_point_mesh commented out);
// only the path string is built.
std::ostringstream oss;
oss << "C:\\Users\\robbe\\Desktop\\Code\\anim_s" << t / 10 << ".vtk";
std::string var = oss.str();
const char* cstr = var.c_str();
//write_point_mesh(cstr, 0, nspts + nbpts, spts, 2, vardims3, varnames3, arrays3);
}
}
return 0;
}
|
bffb0727295235cf83b2e3132c19d438073acd09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Append a BatchNorm layer (optionally fused with ReLU) to the model and
// return its output tensor.  Only 4-D (NCHW) inputs are supported.
Tensor FFModel::batch_norm(std::string name, Tensor input, bool relu)
{
  assert(input.numDim == 4); //Only support 4D BN for now
  // NOTE(review): task_is is default-constructed and never assigned before it
  // is handed to the BatchNorm constructor (which reads a task index space) --
  // presumably it should come from the model's task grid; confirm.
  IndexSpaceT<4> task_is;
  BatchNorm *bn = new BatchNorm(name, config, input, task_is, relu);
  layers.push_back(bn);
  return bn->outputs[0];
}
/*
  locals[0] = scale
  locals[1] = bias
  Constructor: creates the 4-D output tensor region (partitioned by
  restriction across the parallelism grid) plus the per-channel scale/bias
  parameter regions and their replicated, equal-partitioned gradients.
  NOTE(review): the _task_is parameter appears unused -- the domain below is
  taken from the member task_is; confirm this is intentional.
*/
BatchNorm::BatchNorm(std::string _name, FFConfig _config,
                     Tensor _input, IndexSpaceT<4> _task_is,
                     bool _relu)
: Op(_name, _input), relu(_relu), profiling(_config.profiling)
{
  Context ctx = _config.lg_ctx;
  HighLevelRuntime* runtime = _config.lg_hlr;
  Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
  num_replica = part_rect.volume();
  // Create output tensor (same extents as the input; adim[0..3] = w,h,c,n)
  int output_w = _input.adim[0];
  int output_h = _input.adim[1];
  int output_c = _input.adim[2];
  int output_n = _input.adim[3];
  // Degree of parallelism along each dimension of the task grid.
  int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
  int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
  int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
  int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
  FieldSpace fs = _config.field_space;
  Rect<4> output_rect(Point<4>(0, 0, 0, 0),
      Point<4>(output_w-1, output_h-1, output_c-1, output_n-1));
  IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect);
  LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
  LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
  // Per-task tile extents: w/h round up; c/n must divide evenly (asserted).
  int extent_w = (output_w + num_par_w - 1) / num_par_w;
  int extent_h = (output_h + num_par_h - 1) / num_par_h;
  int extent_c = output_c / num_par_c;
  int extent_n = output_n / num_par_n;
  assert(output_c % num_par_c == 0);
  assert(output_n % num_par_n == 0);
  Rect<4> ext(Point<4>(0, 0, 0, 0),
      Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1));
  // Diagonal transform so grid point (i,j,k,l) owns the tile starting at
  // (i*extent_w, j*extent_h, k*extent_c, l*extent_n).
  Transform<4, 4, coord_t> trans;
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      trans[i][j] = 0;
  trans[0][0] = extent_w;
  trans[1][1] = extent_h;
  trans[2][2] = extent_c;
  trans[3][3] = extent_n;
  IndexPartition output_ip =
    runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext);
  assert(runtime->is_index_partition_disjoint(ctx, output_ip));
  assert(runtime->is_index_partition_complete(ctx, output_ip));
  LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
  LogicalPartition output_grad_lp =
    runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
  // Scale/bias are 1-D of length C; their gradient regions hold one slice per
  // replica (num_replica * C) and are equal-partitioned across the grid.
  int bias_nc = num_replica * _input.adim[2]; /*input_channels*/
  Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1);
  Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1);
  IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
  IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
  LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
  LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs);
  LogicalRegion bias_grad_lr =
    runtime->create_logical_region(ctx, bias_grad_is, fs);
  LogicalRegion scale_grad_lr =
    runtime->create_logical_region(ctx, bias_grad_is, fs);
  IndexPartition bias_grad_ip =
    runtime->create_equal_partition(ctx, bias_grad_is, task_is);
  LogicalPartition bias_grad_lp =
    runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
  LogicalPartition scale_grad_lp =
    runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip);
  // Publish parameters: weights[0] = scale, weights[1] = bias.  The forward
  // regions are unpartitioned (NO_PART); only the gradients are partitioned.
  Parameter scale_tensor, bias_tensor;
  scale_tensor.region = scale_lr;
  scale_tensor.region_grad = scale_grad_lr;
  scale_tensor.part = LogicalPartition::NO_PART;
  scale_tensor.part_grad = scale_grad_lp;
  weights[0] = scale_tensor;
  bias_tensor.region = bias_lr;
  bias_tensor.region_grad = bias_grad_lr;
  bias_tensor.part = LogicalPartition::NO_PART;
  bias_tensor.part_grad = bias_grad_lp;
  weights[1] = bias_tensor;
  numWeights = 2;
  // Output inherits the input's metadata (dims), but uses the new regions.
  outputs[0] = _input;
  outputs[0].region = output_lr;
  outputs[0].part = output_lp;
  outputs[0].region_grad = output_grad_lr;
  outputs[0].part_grad = output_grad_lp;
  printf("Create bn layer: output(%d %d %d %d)\n",
         outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]);
  input_lps[0] = _input.part;
}
/*
  regions[0]: input
  regions[1]: output
  regions[2](I): scale
  regions[3](I): bias
  Per-point init task: builds the BatchNormMeta (cuDNN tensor descriptors,
  running/saved statistics buffers, optional ReLU descriptor) for one shard
  and returns it to the index launcher.
*/
__host__
OpMeta* BatchNorm::init_task(const Task *task,
                             const std::vector<PhysicalRegion> &regions,
                             Context ctx, Runtime *runtime)
{
  assert(regions.size() == 4);
  assert(task->regions.size() == 4);
  const BatchNorm* bm = (BatchNorm*) task->args;
  FFHandler handle = *((const FFHandler*) task->local_args);
  const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
  const AccessorWO<float, 4> acc_output(regions[1], FID_DATA);
  const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA);
  const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA);
  Rect<1> rect_scale, rect_bias;
  Rect<4> rect_input, rect_output;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  // NOTE(review): the four pointers below are never used in this task; they
  // appear to exist only to force-map the physical instances here.  Confirm
  // before removing.
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *output_ptr = acc_output.ptr(rect_output.lo);
  const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
  const float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  BatchNormMeta* m = new BatchNormMeta(handle);
#ifndef DISABLE_COMPUTATION
  m->relu = bm->relu;
  // Prefer the persistent spatial mode when the cuDNN version supports it.
  m->mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION >= 7000
  m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
  checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor));
  assert(rect_input == rect_output);
  // NCHW descriptors sized by this shard's local (pdim) extents.
  int input_w = rect_input.hi[0] - rect_input.lo[0] + 1;
  int input_h = rect_input.hi[1] - rect_input.lo[1] + 1;
  int channel = bm->inputs[0].pdim[2];
  checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
                                        CUDNN_TENSOR_NCHW,
                                        CUDNN_DATA_FLOAT,
                                        bm->inputs[0].pdim[3],
                                        channel, input_h, input_w));
  checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
                                        CUDNN_TENSOR_NCHW,
                                        CUDNN_DATA_FLOAT,
                                        bm->inputs[0].pdim[3],
                                        channel, input_h, input_w));
  checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
                                        CUDNN_TENSOR_NCHW,
                                        CUDNN_DATA_FLOAT,
                                        1, channel, 1, 1));
  // Per-channel running/saved statistics used by the BN forward/backward
  // calls.  Allocated once per shard; lifetime is tied to the BatchNormMeta.
  //float *runningMean, *runningVar, *saveMean, *saveVar;
  checkCUDA(hipMalloc(&m->runningMean, sizeof(float) * channel));
  checkCUDA(hipMalloc(&m->runningVar, sizeof(float) * channel));
  checkCUDA(hipMalloc(&m->saveMean, sizeof(float) * channel));
  checkCUDA(hipMalloc(&m->saveVar, sizeof(float) * channel));
  if (m->relu) {
    checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
    checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
                                            CUDNN_PROPAGATE_NAN, 0.0));
  }
#endif
  return m;
}
/*
  regions[0](O): scale, initialized to ones
  regions[1](O): bias, initialized to zeros
*/
__host__
void BatchNorm::init_para_task(const Task *task,
                               const std::vector<PhysicalRegion> &regions,
                               Context ctx, Runtime *runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  //const BatchNorm* bm = (BatchNorm*) task->args;
  const AccessorWO<float, 1> scale_acc(regions[0], FID_DATA);
  const AccessorWO<float, 1> bias_acc(regions[1], FID_DATA);
  Rect<1> scale_rect =
    runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  Rect<1> bias_rect =
    runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  assert(scale_acc.accessor.is_dense_arbitrary(scale_rect));
  assert(bias_acc.accessor.is_dense_arbitrary(bias_rect));
  float *scale_dev = scale_acc.ptr(scale_rect.lo);
  float *bias_dev = bias_acc.ptr(bias_rect.lo);
#ifdef PARAMETER_ALL_ONES
  hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(scale_rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      scale_dev, scale_rect.volume());
  hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(bias_rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      bias_dev, bias_rect.volume());
#else
  // Default initialization: scale = 1, bias = 0 (identity affine transform).
  hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(scale_rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      scale_dev, scale_rect.volume(), 1.0f);
  hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(bias_rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      bias_dev, bias_rect.volume(), 0.0f);
#endif
}
// Initialize the layer: seed scale/bias with a single task, then launch
// init_task across the parallelism grid and collect the per-point metas.
__host__
void BatchNorm::init(const FFModel& ff)
{
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // Step 1: initialize the scale (ones) and bias (zeros) parameters.
  {
    TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0));
    para_launcher.add_region_requirement(
        RegionRequirement(weights[0].region, WRITE_DISCARD, EXCLUSIVE, weights[0].region));
    para_launcher.add_field(0, FID_DATA);
    para_launcher.add_region_requirement(
        RegionRequirement(weights[1].region, WRITE_DISCARD, EXCLUSIVE, weights[1].region));
    para_launcher.add_field(1, FID_DATA);
    runtime->execute_task(ctx, para_launcher);
  }
  // Step 2: every grid point gets its own FFHandler so each shard can build
  // its descriptors locally.
  ArgumentMap arg_map;
  Rect<4> launch_rect = runtime->get_index_space_domain(ctx, task_is);
  int point_idx = 0;
  for (PointInRectIterator<4> pir(launch_rect); pir(); pir++) {
    FFHandler handler = ff.handlers[point_idx++];
    arg_map.set_point(*pir, TaskArgument(&handler, sizeof(FFHandler)));
  }
  IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is,
                              TaskArgument(this, sizeof(BatchNorm)), arg_map);
  // Region order must match init_task: 0=input, 1=output, 2=scale, 3=bias.
  init_launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  init_launcher.add_field(0, FID_DATA);
  init_launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
  init_launcher.add_field(1, FID_DATA);
  init_launcher.add_region_requirement(
      RegionRequirement(weights[0].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[0].region));
  init_launcher.add_field(2, FID_DATA);
  init_launcher.add_region_requirement(
      RegionRequirement(weights[1].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[1].region));
  init_launcher.add_field(3, FID_DATA);
  FutureMap fm = runtime->execute_index_space(ctx, init_launcher);
  fm.wait_all_results();
  // Collect the BatchNormMeta returned by each point's init_task.
  point_idx = 0;
  for (PointInRectIterator<4> pir(launch_rect); pir(); pir++) {
    meta[point_idx++] = fm.get_result<OpMeta*>(*pir);
  }
}
/*
  regions[0](I): input
  regions[1](O): output
  regions[2](I): scale
  regions[3](I): bias
  Forward task: zeroes the running statistics, then runs the cuDNN BN
  training-mode forward on this shard's tile.
*/
__host__
void BatchNorm::forward_task(const Task *task,
                             const std::vector<PhysicalRegion> &regions,
                             Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
  assert(regions.size() == 4);
  assert(task->regions.size() == 4);
  float alpha = 1.0f, beta = 0.0f;
  const BatchNorm* bm = (BatchNorm*) task->args;
  const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
  const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
  const AccessorWO<float, 4> acc_output(regions[1], FID_DATA);
  const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA);
  const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA);
  Rect<4> rect_input, rect_output;
  Rect<1> rect_scale, rect_bias;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *output_ptr = acc_output.ptr(rect_output.lo);
  const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
  const float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  hipEvent_t t_start, t_end;
  if (bm->profiling) {
    hipEventCreate(&t_start);
    hipEventCreate(&t_end);
    hipEventRecord(t_start);
  }
  hipStream_t stream;
  checkCUDA(hipStreamCreate(&stream));
  checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
  // Reset the running statistics; with exponentialAverageFactor == 1.0 below,
  // runningMean/Var end up holding exactly this batch's statistics.
  // NOTE(review): these kernels launch on the default stream while cuDNN runs
  // on `stream`; ordering relies on legacy default-stream synchronization
  // with blocking streams -- confirm.
  coord_t numChannels = bm->inputs[0].pdim[2];
  hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningMean, numChannels, 0.0f);
  hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningVar, numChannels, 0.0f);
  checkCUDNN(cudnnBatchNormalizationForwardTraining(
             m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, input_ptr,
             m->outputTensor, output_ptr, m->biasTensor, scale_ptr, bias_ptr,
             1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON,
             m->saveMean, m->saveVar));
  if (bm->profiling) {
    hipEventRecord(t_end);
    checkCUDA(hipEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
    hipEventDestroy(t_start);
    hipEventDestroy(t_end);
    printf("BatchNorm forward time (BF) = %.2fms\n", elapsed);
  }
  // Fix: the per-invocation stream was never released, leaking one stream on
  // every forward task.  hipStreamDestroy defers resource release until the
  // enqueued work completes, so no explicit synchronization is needed.
  checkCUDA(hipStreamDestroy(stream));
#endif
}
// Host-side launcher for the forward pass; region layout mirrors
// forward_task exactly.
__host__
void BatchNorm::forward(const FFModel& ff)
{
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // Hand each grid point the BatchNormMeta pointer produced by init().
  ArgumentMap arg_map;
  Rect<4> launch_rect = runtime->get_index_space_domain(ctx, task_is);
  int point_idx = 0;
  for (PointInRectIterator<4> pir(launch_rect); pir(); pir++) {
    OpMeta* mp = meta[point_idx++];
    arg_map.set_point(*pir, TaskArgument(&mp, sizeof(OpMeta*)));
  }
  IndexLauncher fwd_launcher(BATCHNORM_FWD_TASK_ID, task_is,
                             TaskArgument(this, sizeof(BatchNorm)), arg_map);
  // Region order must match forward_task: 0=input, 1=output, 2=scale, 3=bias.
  fwd_launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  fwd_launcher.add_field(0, FID_DATA);
  fwd_launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
  fwd_launcher.add_field(1, FID_DATA);
  fwd_launcher.add_region_requirement(
      RegionRequirement(weights[0].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[0].region));
  fwd_launcher.add_field(2, FID_DATA);
  fwd_launcher.add_region_requirement(
      RegionRequirement(weights[1].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[1].region));
  fwd_launcher.add_field(3, FID_DATA);
  runtime->execute_index_space(ctx, fwd_launcher);
}
/*
  regions[0](I): input
  regions[1](I/O): input_grad
  regions[2](I): output
  regions[3](I/O): output_grad
  regions[4](I): scale
  regions[5](I/O): scale_grad
  regions[6](I/O): bias_grad
  Backward task: optionally folds the ReLU derivative into output_grad, then
  runs the cuDNN BN backward using the statistics saved during forward.
*/
__host__
void BatchNorm::backward_task(const Task *task,
                              const std::vector<PhysicalRegion> &regions,
                              Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
  assert(regions.size() == 7);
  assert(task->regions.size() == 7);
  float alpha = 1.0f;
  //float beta = 0.0f;
  const BatchNorm* bm = (BatchNorm*) task->args;
  const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
  const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
  const AccessorRW<float, 4> acc_input_grad(regions[1], FID_DATA);
  const AccessorRO<float, 4> acc_output(regions[2], FID_DATA);
  const AccessorRW<float, 4> acc_output_grad(regions[3], FID_DATA);
  const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA);
  const AccessorRW<float, 1> acc_scale_grad(regions[5], FID_DATA);
  const AccessorRW<float, 1> acc_bias_grad(regions[6], FID_DATA);
  Rect<4> rect_input, rect_input_grad, rect_output, rect_output_grad;
  Rect<1> rect_scale, rect_scale_grad, rect_bias_grad;
  rect_input =
    runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_input_grad =
    runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_output =
    runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_output_grad =
    runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  rect_scale =
    runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
  rect_scale_grad =
    runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
  rect_bias_grad =
    runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
  // make sure all regions are dense
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad));
  assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
  assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad));
  assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo);
  const float *output_ptr = acc_output.ptr(rect_output.lo);
  float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo);
  const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
  float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo);
  float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
  hipEvent_t t_start, t_end;
  if (bm->profiling) {
    hipEventCreate(&t_start);
    hipEventCreate(&t_end);
    hipEventRecord(t_start);
  }
  hipStream_t stream;
  checkCUDA(hipStreamCreate(&stream));
  checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
  if (m->relu) {
    // Zero the gradient wherever the fused ReLU clamped the output, in place,
    // before the BN backward call.
    // NOTE(review): volume() may exceed int range for very large shards.
    int n = rect_output.volume();
    hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, n);
  }
  checkCUDNN(cudnnBatchNormalizationBackward(
             m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha,
             m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr,
             m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr,
             scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON,
             m->saveMean, m->saveVar));
  if (bm->profiling) {
    hipEventRecord(t_end);
    checkCUDA(hipEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
    hipEventDestroy(t_start);
    hipEventDestroy(t_end);
    printf("BatchNorm backward time = %.2fms\n", elapsed);
  }
  // Fix: the per-invocation stream was never released, leaking one stream on
  // every backward task.  hipStreamDestroy defers resource release until the
  // enqueued work completes, so no explicit synchronization is needed.
  checkCUDA(hipStreamDestroy(stream));
#endif
}
// Host-side launcher for the backward pass; region layout mirrors
// backward_task exactly.
__host__
void BatchNorm::backward(const FFModel& ff)
{
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // Hand each grid point the BatchNormMeta pointer produced by init().
  ArgumentMap arg_map;
  Rect<4> launch_rect = runtime->get_index_space_domain(ctx, task_is);
  int point_idx = 0;
  for (PointInRectIterator<4> pir(launch_rect); pir(); pir++) {
    OpMeta* mp = meta[point_idx++];
    arg_map.set_point(*pir, TaskArgument(&mp, sizeof(OpMeta*)));
  }
  IndexLauncher bwd_launcher(BATCHNORM_BWD_TASK_ID, task_is,
                             TaskArgument(this, sizeof(BatchNorm)), arg_map);
  // Region order must match backward_task:
  //   0=input(RO), 1=input_grad(RW), 2=output(RO), 3=output_grad(RW),
  //   4=scale(RO), 5=scale_grad(RW), 6=bias_grad(RW)
  bwd_launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  bwd_launcher.add_field(0, FID_DATA);
  bwd_launcher.add_region_requirement(
      RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
  bwd_launcher.add_field(1, FID_DATA);
  bwd_launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, outputs[0].region));
  bwd_launcher.add_field(2, FID_DATA);
  bwd_launcher.add_region_requirement(
      RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
  bwd_launcher.add_field(3, FID_DATA);
  bwd_launcher.add_region_requirement(
      RegionRequirement(weights[0].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[0].region));
  bwd_launcher.add_field(4, FID_DATA);
  bwd_launcher.add_region_requirement(
      RegionRequirement(weights[0].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, weights[0].region_grad));
  bwd_launcher.add_field(5, FID_DATA);
  bwd_launcher.add_region_requirement(
      RegionRequirement(weights[1].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, weights[1].region_grad));
  bwd_launcher.add_field(6, FID_DATA);
  FutureMap fm = runtime->execute_index_space(ctx, bwd_launcher);
}
__host__
void BatchNorm::update(const FFModel& ff)
{
  // Intentionally a no-op: BatchNorm's scale/bias parameters are not
  // synchronized across replicas yet.
  //FIXME: we didn't sync batch norm parameters for now
}
| bffb0727295235cf83b2e3132c19d438073acd09.cu | /* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Append a BatchNorm layer (optionally fused with ReLU) to the model and
// return its output tensor.  Only 4-D (NCHW) inputs are supported.
Tensor FFModel::batch_norm(std::string name, Tensor input, bool relu)
{
  assert(input.numDim == 4); //Only support 4D BN for now
  // NOTE(review): task_is is default-constructed and never assigned before it
  // is handed to the BatchNorm constructor (which reads a task index space) --
  // presumably it should come from the model's task grid; confirm.
  IndexSpaceT<4> task_is;
  BatchNorm *bn = new BatchNorm(name, config, input, task_is, relu);
  layers.push_back(bn);
  return bn->outputs[0];
}
/*
  locals[0] = scale
  locals[1] = bias
  Constructor: creates the 4-D output tensor region (partitioned by
  restriction across the parallelism grid) plus the per-channel scale/bias
  parameter regions and their replicated, equal-partitioned gradients.
  NOTE(review): the _task_is parameter appears unused -- the domain below is
  taken from the member task_is; confirm this is intentional.
*/
BatchNorm::BatchNorm(std::string _name, FFConfig _config,
                     Tensor _input, IndexSpaceT<4> _task_is,
                     bool _relu)
: Op(_name, _input), relu(_relu), profiling(_config.profiling)
{
  Context ctx = _config.lg_ctx;
  HighLevelRuntime* runtime = _config.lg_hlr;
  Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
  num_replica = part_rect.volume();
  // Create output tensor (same extents as the input; adim[0..3] = w,h,c,n)
  int output_w = _input.adim[0];
  int output_h = _input.adim[1];
  int output_c = _input.adim[2];
  int output_n = _input.adim[3];
  // Degree of parallelism along each dimension of the task grid.
  int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
  int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
  int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
  int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
  FieldSpace fs = _config.field_space;
  Rect<4> output_rect(Point<4>(0, 0, 0, 0),
      Point<4>(output_w-1, output_h-1, output_c-1, output_n-1));
  IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect);
  LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
  LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
  // Per-task tile extents: w/h round up; c/n must divide evenly (asserted).
  int extent_w = (output_w + num_par_w - 1) / num_par_w;
  int extent_h = (output_h + num_par_h - 1) / num_par_h;
  int extent_c = output_c / num_par_c;
  int extent_n = output_n / num_par_n;
  assert(output_c % num_par_c == 0);
  assert(output_n % num_par_n == 0);
  Rect<4> ext(Point<4>(0, 0, 0, 0),
      Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1));
  // Diagonal transform so grid point (i,j,k,l) owns the tile starting at
  // (i*extent_w, j*extent_h, k*extent_c, l*extent_n).
  Transform<4, 4, coord_t> trans;
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      trans[i][j] = 0;
  trans[0][0] = extent_w;
  trans[1][1] = extent_h;
  trans[2][2] = extent_c;
  trans[3][3] = extent_n;
  IndexPartition output_ip =
    runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext);
  assert(runtime->is_index_partition_disjoint(ctx, output_ip));
  assert(runtime->is_index_partition_complete(ctx, output_ip));
  LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
  LogicalPartition output_grad_lp =
    runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
  // Scale/bias are 1-D of length C; their gradient regions hold one slice per
  // replica (num_replica * C) and are equal-partitioned across the grid.
  int bias_nc = num_replica * _input.adim[2]; /*input_channels*/
  Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1);
  Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1);
  IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
  IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
  LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
  LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs);
  LogicalRegion bias_grad_lr =
    runtime->create_logical_region(ctx, bias_grad_is, fs);
  LogicalRegion scale_grad_lr =
    runtime->create_logical_region(ctx, bias_grad_is, fs);
  IndexPartition bias_grad_ip =
    runtime->create_equal_partition(ctx, bias_grad_is, task_is);
  LogicalPartition bias_grad_lp =
    runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
  LogicalPartition scale_grad_lp =
    runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip);
  // Publish parameters: weights[0] = scale, weights[1] = bias.  The forward
  // regions are unpartitioned (NO_PART); only the gradients are partitioned.
  Parameter scale_tensor, bias_tensor;
  scale_tensor.region = scale_lr;
  scale_tensor.region_grad = scale_grad_lr;
  scale_tensor.part = LogicalPartition::NO_PART;
  scale_tensor.part_grad = scale_grad_lp;
  weights[0] = scale_tensor;
  bias_tensor.region = bias_lr;
  bias_tensor.region_grad = bias_grad_lr;
  bias_tensor.part = LogicalPartition::NO_PART;
  bias_tensor.part_grad = bias_grad_lp;
  weights[1] = bias_tensor;
  numWeights = 2;
  // Output inherits the input's metadata (dims), but uses the new regions.
  outputs[0] = _input;
  outputs[0].region = output_lr;
  outputs[0].part = output_lp;
  outputs[0].region_grad = output_grad_lr;
  outputs[0].part_grad = output_grad_lp;
  printf("Create bn layer: output(%d %d %d %d)\n",
         outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]);
  input_lps[0] = _input.part;
}
/*
  regions[0]: input
  regions[1]: output
  regions[2](I): scale
  regions[3](I): bias
  Per-point init task: builds the BatchNormMeta (cuDNN tensor descriptors,
  running/saved statistics buffers, optional ReLU descriptor) for one shard
  and returns it to the index launcher.
*/
__host__
OpMeta* BatchNorm::init_task(const Task *task,
                             const std::vector<PhysicalRegion> &regions,
                             Context ctx, Runtime *runtime)
{
  assert(regions.size() == 4);
  assert(task->regions.size() == 4);
  const BatchNorm* bm = (BatchNorm*) task->args;
  FFHandler handle = *((const FFHandler*) task->local_args);
  const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
  const AccessorWO<float, 4> acc_output(regions[1], FID_DATA);
  const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA);
  const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA);
  Rect<1> rect_scale, rect_bias;
  Rect<4> rect_input, rect_output;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  // NOTE(review): the four pointers below are never used in this task; they
  // appear to exist only to force-map the physical instances here.  Confirm
  // before removing.
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *output_ptr = acc_output.ptr(rect_output.lo);
  const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
  const float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  BatchNormMeta* m = new BatchNormMeta(handle);
#ifndef DISABLE_COMPUTATION
  m->relu = bm->relu;
  // Prefer the persistent spatial mode when the cuDNN version supports it.
  m->mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION >= 7000
  m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
  checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor));
  assert(rect_input == rect_output);
  // NCHW descriptors sized by this shard's local (pdim) extents.
  int input_w = rect_input.hi[0] - rect_input.lo[0] + 1;
  int input_h = rect_input.hi[1] - rect_input.lo[1] + 1;
  int channel = bm->inputs[0].pdim[2];
  checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
                                        CUDNN_TENSOR_NCHW,
                                        CUDNN_DATA_FLOAT,
                                        bm->inputs[0].pdim[3],
                                        channel, input_h, input_w));
  checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
                                        CUDNN_TENSOR_NCHW,
                                        CUDNN_DATA_FLOAT,
                                        bm->inputs[0].pdim[3],
                                        channel, input_h, input_w));
  checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
                                        CUDNN_TENSOR_NCHW,
                                        CUDNN_DATA_FLOAT,
                                        1, channel, 1, 1));
  // Per-channel running/saved statistics used by the BN forward/backward
  // calls.  Allocated once per shard; lifetime is tied to the BatchNormMeta.
  //float *runningMean, *runningVar, *saveMean, *saveVar;
  checkCUDA(cudaMalloc(&m->runningMean, sizeof(float) * channel));
  checkCUDA(cudaMalloc(&m->runningVar, sizeof(float) * channel));
  checkCUDA(cudaMalloc(&m->saveMean, sizeof(float) * channel));
  checkCUDA(cudaMalloc(&m->saveVar, sizeof(float) * channel));
  if (m->relu) {
    checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
    checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
                                            CUDNN_PROPAGATE_NAN, 0.0));
  }
#endif
  return m;
}
/*
  regions[0](O): scale, initialized to ones
  regions[1](O): bias, initialized to zeros
*/
__host__
void BatchNorm::init_para_task(const Task *task,
                               const std::vector<PhysicalRegion> &regions,
                               Context ctx, Runtime *runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  //const BatchNorm* bm = (BatchNorm*) task->args;
  const AccessorWO<float, 1> scale_acc(regions[0], FID_DATA);
  const AccessorWO<float, 1> bias_acc(regions[1], FID_DATA);
  Rect<1> scale_rect =
    runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  Rect<1> bias_rect =
    runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  assert(scale_acc.accessor.is_dense_arbitrary(scale_rect));
  assert(bias_acc.accessor.is_dense_arbitrary(bias_rect));
  float *scale_dev = scale_acc.ptr(scale_rect.lo);
  float *bias_dev = bias_acc.ptr(bias_rect.lo);
#ifdef PARAMETER_ALL_ONES
  ones_kernel<<<GET_BLOCKS(scale_rect.volume()), CUDA_NUM_THREADS>>>(
      scale_dev, scale_rect.volume());
  ones_kernel<<<GET_BLOCKS(bias_rect.volume()), CUDA_NUM_THREADS>>>(
      bias_dev, bias_rect.volume());
#else
  // Default initialization: scale = 1, bias = 0 (identity affine transform).
  assign_kernel<<<GET_BLOCKS(scale_rect.volume()), CUDA_NUM_THREADS>>>(
      scale_dev, scale_rect.volume(), 1.0f);
  assign_kernel<<<GET_BLOCKS(bias_rect.volume()), CUDA_NUM_THREADS>>>(
      bias_dev, bias_rect.volume(), 0.0f);
#endif
}
// Initialize the layer: seed scale/bias with a single task, then launch
// init_task across the parallelism grid and collect the per-point metas.
__host__
void BatchNorm::init(const FFModel& ff)
{
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // Step 1: initialize the scale (ones) and bias (zeros) parameters.
  {
    TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0));
    para_launcher.add_region_requirement(
        RegionRequirement(weights[0].region, WRITE_DISCARD, EXCLUSIVE, weights[0].region));
    para_launcher.add_field(0, FID_DATA);
    para_launcher.add_region_requirement(
        RegionRequirement(weights[1].region, WRITE_DISCARD, EXCLUSIVE, weights[1].region));
    para_launcher.add_field(1, FID_DATA);
    runtime->execute_task(ctx, para_launcher);
  }
  // Step 2: every grid point gets its own FFHandler so each shard can build
  // its descriptors locally.
  ArgumentMap arg_map;
  Rect<4> launch_rect = runtime->get_index_space_domain(ctx, task_is);
  int point_idx = 0;
  for (PointInRectIterator<4> pir(launch_rect); pir(); pir++) {
    FFHandler handler = ff.handlers[point_idx++];
    arg_map.set_point(*pir, TaskArgument(&handler, sizeof(FFHandler)));
  }
  IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is,
                              TaskArgument(this, sizeof(BatchNorm)), arg_map);
  // Region order must match init_task: 0=input, 1=output, 2=scale, 3=bias.
  init_launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  init_launcher.add_field(0, FID_DATA);
  init_launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
  init_launcher.add_field(1, FID_DATA);
  init_launcher.add_region_requirement(
      RegionRequirement(weights[0].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[0].region));
  init_launcher.add_field(2, FID_DATA);
  init_launcher.add_region_requirement(
      RegionRequirement(weights[1].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[1].region));
  init_launcher.add_field(3, FID_DATA);
  FutureMap fm = runtime->execute_index_space(ctx, init_launcher);
  fm.wait_all_results();
  // Collect the BatchNormMeta returned by each point's init_task.
  point_idx = 0;
  for (PointInRectIterator<4> pir(launch_rect); pir(); pir++) {
    meta[point_idx++] = fm.get_result<OpMeta*>(*pir);
  }
}
/*
regions[0](I): input
regions[1](O): ouptut
regions[2](I): scale
regions[3](I): bias
*/
__host__
void BatchNorm::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 4);
assert(task->regions.size() == 4);
float alpha = 1.0f, beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
const AccessorWO<float, 4> acc_output(regions[1], FID_DATA);
const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA);
const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA);
Rect<4> rect_input, rect_output;
Rect<1> rect_scale, rect_bias;
rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
const float *input_ptr = acc_input.ptr(rect_input.lo);
float *output_ptr = acc_output.ptr(rect_output.lo);
const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
const float *bias_ptr = acc_bias.ptr(rect_bias.lo);
cudaEvent_t t_start, t_end;
if (bm->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
coord_t numChannels = bm->inputs[0].pdim[2];
assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningMean, numChannels, 0.0f);
assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningVar, numChannels, 0.0f);
checkCUDNN(cudnnBatchNormalizationForwardTraining(
m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, input_ptr,
m->outputTensor, output_ptr, m->biasTensor, scale_ptr, bias_ptr,
1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("BatchNorm forward time (BF) = %.2fms\n", elapsed);
}
#endif
}
__host__
void BatchNorm::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): scale
regions[5](I/O): scale_grad
regions[6](I/O): bias_grad
*/
__host__
void BatchNorm::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 7);
assert(task->regions.size() == 7);
float alpha = 1.0f;
//float beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
const AccessorRW<float, 4> acc_input_grad(regions[1], FID_DATA);
const AccessorRO<float, 4> acc_output(regions[2], FID_DATA);
const AccessorRW<float, 4> acc_output_grad(regions[3], FID_DATA);
const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA);
const AccessorRW<float, 1> acc_scale_grad(regions[5], FID_DATA);
const AccessorRW<float, 1> acc_bias_grad(regions[6], FID_DATA);
Rect<4> rect_input, rect_input_grad, rect_output, rect_output_grad;
Rect<1> rect_scale, rect_scale_grad, rect_bias_grad;
rect_input =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_input_grad =
runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_output =
runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_output_grad =
runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
rect_scale =
runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
rect_scale_grad =
runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
rect_bias_grad =
runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
// make sure all regions are dense
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad));
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad));
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad));
assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
const float *input_ptr = acc_input.ptr(rect_input.lo);
float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo);
const float *output_ptr = acc_output.ptr(rect_output.lo);
float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo);
const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo);
float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
cudaEvent_t t_start, t_end;
if (bm->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
if (m->relu) {
int n = rect_output.volume();
reluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(output_grad_ptr, output_ptr, n);
}
checkCUDNN(cudnnBatchNormalizationBackward(
m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha,
m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr,
m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr,
scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("BatchNorm backward time = %.2fms\n", elapsed);
}
#endif
}
__host__
void BatchNorm::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad (we only need grad tensors)
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I): filter
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
// regions[5](I/O): filter_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
}
__host__
void BatchNorm::update(const FFModel& ff)
{
//FIXME: we didn't sync batch norm parameters for now
}
|
1998ce86ec3e69bab192679881fe0be187ce0ce9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include "kernel_hip.cuh"
#include "runtime.cuh"
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool){
int warpIdxx = (threadIdx.x/warpSize);
__shared__ volatile int barID; // the ID for bar.sync
__shared__ volatile int smStartIndx; // the start pointer for free memory region of shared memory
__shared__ volatile int doneCtr[BP_NUM]; // number of warp in a task
__shared__ volatile gWarpStruct warpPoolDev[BP_NUM]; // warpPool
int taskPointer; //local pointer of task table
int taskStartP; //global pointer of task table
__shared__ volatile int barIDArray[syncNum]; // 16 barriers
__shared__ volatile int sharedTree[SH_TREE_SIZE]; //shared mem data structure
__shared__ volatile int warpCtr; // warp counter
__shared__ volatile int warpId;
__shared__ volatile int doneBarId; // thread block counter
__shared__ volatile int exit;
extern __shared__ volatile int shared_mem[];
int i;
int threadDone;
// Init warp pool
if((threadIdx.x & 0x1f) != 0)
warpPoolDev[(threadIdx.x & 0x1f)].exec = 0;
else
warpPoolDev[(threadIdx.x & 0x1f)].exec = -1;
barIDArray[(threadIdx.x & 0x0f)] = 0;
doneCtr[(threadIdx.x & 0x1f)] = 0;
taskPointer = 0;
exit = 0;
__threadfence_block();
if(threadIdx.x < warpSize){
while(!(*done)){
taskStartP = (taskPointer * BK_NUM + blockIdx.x);
__threadfence_block();
if(gTaskPool[taskStartP].readyId != -1 && doneCtr[taskPointer] == 0){
if(gTaskPool[gTaskPool[taskStartP].readyId].done == 1){
barID = -1;
smStartIndx = -1;
doneCtr[taskPointer] = gTaskPool[gTaskPool[taskStartP].readyId].thread*
gTaskPool[gTaskPool[taskStartP].readyId].block/warpSize;
//parallel scheduling
if(gTaskPool[gTaskPool[taskStartP].readyId].sync == 0){
warpCtr = doneCtr[taskPointer];
warpId = 0;
while(1){
threadDone = 1;
if(threadIdx.x > 0) {
threadDone = 0;
if(warpPoolDev[threadIdx.x].exec == 0){
if(atomicSub((int*)&warpCtr, 1) > 0){
warpPoolDev[threadIdx.x].warpId = atomicAdd((int*)&warpId, 1)*warpSize;
warpPoolDev[threadIdx.x].bufferNum = gTaskPool[taskStartP].readyId; // global pointer of task table
warpPoolDev[threadIdx.x].SMindex = smStartIndx; // shared mem. index
warpPoolDev[threadIdx.x].barId = barID; // index of threadblock
warpPoolDev[threadIdx.x].threadNum = gTaskPool[gTaskPool[taskStartP].readyId].thread; // num. of thread
warpPoolDev[threadIdx.x].taskId = taskPointer; // local pointer of task table
__threadfence_block(); // To make sure the exec. is worked after fence
warpPoolDev[threadIdx.x].exec = 1;
__threadfence_block(); //
} // End atomic
} // End if warpPoolDev
} // End if threadIdx
if(warpCtr <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
}
}// End while(1)
} //End sync != 0
if(gTaskPool[gTaskPool[taskStartP].readyId].sync != 0 && gTaskPool[gTaskPool[taskStartP].readyId].sharemem == 0){ // sync bit is on
for(i = 0; i < gTaskPool[gTaskPool[taskStartP].readyId].block; i++){ // Schedule block by block
// get barId
doneBarId = 1;
while(1){
threadDone = 1;
if(threadIdx.x < syncNum){
threadDone = 0;
if(barIDArray[threadIdx.x] == 0){
if(atomicSub((int*)&doneBarId, 1) > 0){
barIDArray[threadIdx.x] = gTaskPool[gTaskPool[taskStartP].readyId].thread/warpSize; //num. of thread in one block
barID = threadIdx.x;
__threadfence_block();
} // End atomicSub
} // End if barIDArray
} // End if threadIdx
if(doneBarId <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
} // End if all
} // End while 1
// parallel warp scheduling
warpCtr = gTaskPool[gTaskPool[taskStartP].readyId].thread/warpSize;
warpId = i*(gTaskPool[gTaskPool[taskStartP].readyId].thread/warpSize);
while(1){
threadDone = 1;
if(threadIdx.x > 0) {
threadDone = 0;
if(warpPoolDev[threadIdx.x].exec == 0){
if(atomicSub((int*)&warpCtr, 1) > 0){
warpPoolDev[threadIdx.x].warpId = atomicAdd((int*)&warpId, 1)*warpSize;
warpPoolDev[threadIdx.x].bufferNum = gTaskPool[taskStartP].readyId; // global pointer of task table
warpPoolDev[threadIdx.x].SMindex = smStartIndx; // shared mem. index
warpPoolDev[threadIdx.x].barId = barID; // index of threadblock
//printf("after barId:%d\n", barID);
warpPoolDev[threadIdx.x].threadNum = gTaskPool[gTaskPool[taskStartP].readyId].thread; // num. of thread
warpPoolDev[threadIdx.x].taskId = taskPointer; // local pointer of task table
__threadfence_block(); // To make sure the exec. is worked after fence
warpPoolDev[threadIdx.x].exec = 1;
__threadfence_block(); //
} // End atomic
} // End if warpPoolDev
} // End if threadIdx
if(warpCtr <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
}
}// End while(1)
} // End for block
} // End if sync
gTaskPool[gTaskPool[taskStartP].readyId].done = 0; // reset flag whenever task scheduling has been done
gTaskPool[taskStartP].readyId = -1;
} // End if ready flag
}
taskPointer++; // renew the local pointer of task table
if(taskPointer == BP_NUM)
taskPointer = 0;
}// End while done
exit = 1;
__threadfence_block();
}// End if thread < 32
else
{
while(!exit)
{
if(warpPoolDev[warpIdxx].exec == 1){
// kernel running here
FBCore((float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[0],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[1],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[2],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[3],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[4],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[5],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[6],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[7],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[8],
warpPoolDev[warpIdxx].warpId, warpPoolDev[warpIdxx].barId);
if((threadIdx.x & 0x1f) == 0){
// release barId
if(gTaskPool[warpPoolDev[warpIdxx].bufferNum].sync != 0){
atomicSub((int*)&barIDArray[warpPoolDev[warpIdxx].barId], 1);
}
if(atomicSub((int*)&doneCtr[warpPoolDev[warpIdxx].taskId], 1) == 1){ // when all warps in a task have been done
__threadfence_system();
gTaskPool[warpPoolDev[warpIdxx].bufferNum].ready = 0; //unset the ready flag
atomicAdd((int*)totalScheTasks,1); //update the global task counter
}
warpPoolDev[warpIdxx].exec = 0;
__threadfence_block();
} // End if exec
} // End if threadIdx.x
} // End while done
} // End else
}
| 1998ce86ec3e69bab192679881fe0be187ce0ce9.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include "kernel.cuh"
#include "runtime.cuh"
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool){
int warpIdxx = (threadIdx.x/warpSize);
__shared__ volatile int barID; // the ID for bar.sync
__shared__ volatile int smStartIndx; // the start pointer for free memory region of shared memory
__shared__ volatile int doneCtr[BP_NUM]; // number of warp in a task
__shared__ volatile gWarpStruct warpPoolDev[BP_NUM]; // warpPool
int taskPointer; //local pointer of task table
int taskStartP; //global pointer of task table
__shared__ volatile int barIDArray[syncNum]; // 16 barriers
__shared__ volatile int sharedTree[SH_TREE_SIZE]; //shared mem data structure
__shared__ volatile int warpCtr; // warp counter
__shared__ volatile int warpId;
__shared__ volatile int doneBarId; // thread block counter
__shared__ volatile int exit;
extern __shared__ volatile int shared_mem[];
int i;
int threadDone;
// Init warp pool
if((threadIdx.x & 0x1f) != 0)
warpPoolDev[(threadIdx.x & 0x1f)].exec = 0;
else
warpPoolDev[(threadIdx.x & 0x1f)].exec = -1;
barIDArray[(threadIdx.x & 0x0f)] = 0;
doneCtr[(threadIdx.x & 0x1f)] = 0;
taskPointer = 0;
exit = 0;
__threadfence_block();
if(threadIdx.x < warpSize){
while(!(*done)){
taskStartP = (taskPointer * BK_NUM + blockIdx.x);
__threadfence_block();
if(gTaskPool[taskStartP].readyId != -1 && doneCtr[taskPointer] == 0){
if(gTaskPool[gTaskPool[taskStartP].readyId].done == 1){
barID = -1;
smStartIndx = -1;
doneCtr[taskPointer] = gTaskPool[gTaskPool[taskStartP].readyId].thread*
gTaskPool[gTaskPool[taskStartP].readyId].block/warpSize;
//parallel scheduling
if(gTaskPool[gTaskPool[taskStartP].readyId].sync == 0){
warpCtr = doneCtr[taskPointer];
warpId = 0;
while(1){
threadDone = 1;
if(threadIdx.x > 0) {
threadDone = 0;
if(warpPoolDev[threadIdx.x].exec == 0){
if(atomicSub((int*)&warpCtr, 1) > 0){
warpPoolDev[threadIdx.x].warpId = atomicAdd((int*)&warpId, 1)*warpSize;
warpPoolDev[threadIdx.x].bufferNum = gTaskPool[taskStartP].readyId; // global pointer of task table
warpPoolDev[threadIdx.x].SMindex = smStartIndx; // shared mem. index
warpPoolDev[threadIdx.x].barId = barID; // index of threadblock
warpPoolDev[threadIdx.x].threadNum = gTaskPool[gTaskPool[taskStartP].readyId].thread; // num. of thread
warpPoolDev[threadIdx.x].taskId = taskPointer; // local pointer of task table
__threadfence_block(); // To make sure the exec. is worked after fence
warpPoolDev[threadIdx.x].exec = 1;
__threadfence_block(); //
} // End atomic
} // End if warpPoolDev
} // End if threadIdx
if(warpCtr <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
}
}// End while(1)
} //End sync != 0
if(gTaskPool[gTaskPool[taskStartP].readyId].sync != 0 && gTaskPool[gTaskPool[taskStartP].readyId].sharemem == 0){ // sync bit is on
for(i = 0; i < gTaskPool[gTaskPool[taskStartP].readyId].block; i++){ // Schedule block by block
// get barId
doneBarId = 1;
while(1){
threadDone = 1;
if(threadIdx.x < syncNum){
threadDone = 0;
if(barIDArray[threadIdx.x] == 0){
if(atomicSub((int*)&doneBarId, 1) > 0){
barIDArray[threadIdx.x] = gTaskPool[gTaskPool[taskStartP].readyId].thread/warpSize; //num. of thread in one block
barID = threadIdx.x;
__threadfence_block();
} // End atomicSub
} // End if barIDArray
} // End if threadIdx
if(doneBarId <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
} // End if all
} // End while 1
// parallel warp scheduling
warpCtr = gTaskPool[gTaskPool[taskStartP].readyId].thread/warpSize;
warpId = i*(gTaskPool[gTaskPool[taskStartP].readyId].thread/warpSize);
while(1){
threadDone = 1;
if(threadIdx.x > 0) {
threadDone = 0;
if(warpPoolDev[threadIdx.x].exec == 0){
if(atomicSub((int*)&warpCtr, 1) > 0){
warpPoolDev[threadIdx.x].warpId = atomicAdd((int*)&warpId, 1)*warpSize;
warpPoolDev[threadIdx.x].bufferNum = gTaskPool[taskStartP].readyId; // global pointer of task table
warpPoolDev[threadIdx.x].SMindex = smStartIndx; // shared mem. index
warpPoolDev[threadIdx.x].barId = barID; // index of threadblock
//printf("after barId:%d\n", barID);
warpPoolDev[threadIdx.x].threadNum = gTaskPool[gTaskPool[taskStartP].readyId].thread; // num. of thread
warpPoolDev[threadIdx.x].taskId = taskPointer; // local pointer of task table
__threadfence_block(); // To make sure the exec. is worked after fence
warpPoolDev[threadIdx.x].exec = 1;
__threadfence_block(); //
} // End atomic
} // End if warpPoolDev
} // End if threadIdx
if(warpCtr <= 0) threadDone = 1;
if(__all(threadDone == 1) != 0){
break;
}
}// End while(1)
} // End for block
} // End if sync
gTaskPool[gTaskPool[taskStartP].readyId].done = 0; // reset flag whenever task scheduling has been done
gTaskPool[taskStartP].readyId = -1;
} // End if ready flag
}
taskPointer++; // renew the local pointer of task table
if(taskPointer == BP_NUM)
taskPointer = 0;
}// End while done
exit = 1;
__threadfence_block();
}// End if thread < 32
else
{
while(!exit)
{
if(warpPoolDev[warpIdxx].exec == 1){
// kernel running here
FBCore((float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[0],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[1],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[2],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[3],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[4],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[5],
(float*)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[6],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[7],
(int)gTaskPool[warpPoolDev[warpIdxx].bufferNum].para[8],
warpPoolDev[warpIdxx].warpId, warpPoolDev[warpIdxx].barId);
if((threadIdx.x & 0x1f) == 0){
// release barId
if(gTaskPool[warpPoolDev[warpIdxx].bufferNum].sync != 0){
atomicSub((int*)&barIDArray[warpPoolDev[warpIdxx].barId], 1);
}
if(atomicSub((int*)&doneCtr[warpPoolDev[warpIdxx].taskId], 1) == 1){ // when all warps in a task have been done
__threadfence_system();
gTaskPool[warpPoolDev[warpIdxx].bufferNum].ready = 0; //unset the ready flag
atomicAdd((int*)totalScheTasks,1); //update the global task counter
}
warpPoolDev[warpIdxx].exec = 0;
__threadfence_block();
} // End if exec
} // End if threadIdx.x
} // End while done
} // End else
}
|
98e0998fae48b7d760678b1a0241f6391a59bbd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
static __device__ void daxpy(double a,double *b, double *c) {
c[0] += a * b[0];
c[1] += a * b[1];
c[2] += a * b[2];
c[3] += a * b[3];
c[4] += a * b[4];
c[5] += a * b[5];
c[6] += a * b[6];
c[7] += a * b[7];
c[8] += a * b[8];
c[9] += a * b[9];
c[10] += a * b[10];
c[11] += a * b[11];
c[12] += a * b[12];
c[13] += a * b[13];
c[14] += a * b[14];
c[15] += a * b[15];
}
__global__ void
dgemm_kernel_N_T_64_16_4_16_4(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose:
========
This routine computes
C = alpha* A*B^T + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4
This code should run for any matrix size.
=============================================================== */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y *16;
const int idt = ty * 16 + tx;
if( iby + tx >=n )
B+= iby+0;
else
B+= iby+tx;
/*
Taking care of boundary cases where K<4.
*/
if( ty >=k )
B+= __mul24( 0,ldb);
else
B+= __mul24( ty,ldb);
if( ibx + idt >= m )
A += ibx + 0 ;
else
A += ibx + idt;
int s2=lda, s3=2*lda, s4=3*lda ;
switch (k){
case 1:
s2=0; s3=0;s4=0 ;
break ;
case 2:
s2=lda; s3=0;s4=0 ;
break ;
case 3:
s2=lda; s3=2*lda;s4=0 ;
break ;
}
C += ibx +idt +__mul24( iby,ldc);
double Ap[4]={A[0], A[s2], A[s3], A[s4]};
double b=B[0];
const double *Bend = B + ldb*(k-k%4);
B+=4*ldb;
A+=4*lda;
__shared__ double Bb[4][16];
double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
if(k>7)
do {
double Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[s2];
Ap[2] = A[s3];
Ap[3] = A[s4];
b=B[0];
daxpy(Ab[0], &Bb[0][0], Cb);
daxpy(Ab[1], &Bb[1][0], Cb);
daxpy(Ab[2], &Bb[2][0], Cb);
daxpy(Ab[3], &Bb[3][0], Cb);
A+=4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
if(k>3){
Bb[ty][tx]=b;
int k1 = k-k%4;
if( (k1+ty) >=k)
B-=4*ldb;
else
B-=0*ldb;
if( (k1+0) >= k ) {s2=0 ;s3=0*lda;s4=0;A-=4*lda;} else
if( (k1+1) >= k ) {s2=0 ;s3=0*lda;s4=0;A-=0*lda;} else
if( (k1+2) >= k ) {s2=lda;s3=0*lda;s4=0;A-=0*lda;} else
if( (k1+3) >= k ) {s2=lda;s3=2*lda;s4=0;A-=0*lda;}
__syncthreads();
b=B[0];
daxpy(Ap[0], &Bb[0][0], Cb); Ap[0] = A[0];
daxpy(Ap[1], &Bb[1][0], Cb); Ap[1] = A[s2];
daxpy(Ap[2], &Bb[2][0], Cb); Ap[2] = A[s3];
daxpy(Ap[3], &Bb[3][0], Cb); Ap[3] = A[s4];
}
k=k%4;
if ( k!=0){
__syncthreads();
Bb[ty][tx]=b;
__syncthreads();
for(int i=0;i<k;i++){
daxpy(Ap[i],&Bb[i][0], Cb);
}
}
if( (iby+16)>=n) {
lda = n-iby;
}
else{
lda = 16;
}
if( (ibx+idt) >= m )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
C[15*ldc] =alpha*Cb[15] + beta * C[15*ldc];
break;
case 15:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
break;
case 14:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
break;
case 13:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
break;
case 12:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
break;
case 11:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0] =alpha*Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_kernel_N_T_64_16_4_16_4(double *C,
const double *A,
const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 16, 4 );
dim3 grid(m/64+(m%64!=0),n/16+(n%16!=0));
hipLaunchKernelGGL(( dgemm_kernel_N_T_64_16_4_16_4), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta);
}
| 98e0998fae48b7d760678b1a0241f6391a59bbd9.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
static __device__ void daxpy(double a,double *b, double *c) {
c[0] += a * b[0];
c[1] += a * b[1];
c[2] += a * b[2];
c[3] += a * b[3];
c[4] += a * b[4];
c[5] += a * b[5];
c[6] += a * b[6];
c[7] += a * b[7];
c[8] += a * b[8];
c[9] += a * b[9];
c[10] += a * b[10];
c[11] += a * b[11];
c[12] += a * b[12];
c[13] += a * b[13];
c[14] += a * b[14];
c[15] += a * b[15];
}
__global__ void
dgemm_kernel_N_T_64_16_4_16_4(double *C, const double *A, const double *B,
                              int m, int n, int k,
                              int lda, int ldb, int ldc,
                              double alpha, double beta)
{
    /*  -- MAGMA (version 1.4.0) --
           Univ. of Tennessee, Knoxville
           Univ. of California, Berkeley
           Univ. of Colorado, Denver
           August 2013

        Purpose:
        ========
        This routine computes
           C = alpha* A*B^T  + beta * C
        B is put into shared memory.
        Parameters Used:
           blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4
        This code should run for any matrix size.
        =============================================================== */
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;

    const int ibx = blockIdx.x * 64;   // first C row handled by this block
    const int iby = blockIdx.y * 16;   // first C column handled by this block
    const int idt = ty * 16 + tx;      // flat thread id, 0..63 = row within tile

    // Out-of-range threads are clamped onto valid addresses; their results
    // are discarded by the bounded epilogue loop at the end.
    if( iby + tx >= n )
        B += iby + 0;
    else
        B += iby + tx;
    /*
        Taking care of boundary cases where K < 4.
    */
    if( ty >= k )
        B += __mul24( 0, ldb );
    else
        B += __mul24( ty, ldb );

    if( ibx + idt >= m )
        A += ibx + 0;
    else
        A += ibx + idt;

    // Column strides into A for the 4 k-slices of a step; zeroed when k < 4
    // so the prefetch below never reads past the end of A.
    int s2 = lda, s3 = 2*lda, s4 = 3*lda;
    switch (k) {
        case 1: s2 = 0;   s3 = 0;     s4 = 0; break;
        case 2: s2 = lda; s3 = 0;     s4 = 0; break;
        case 3: s2 = lda; s3 = 2*lda; s4 = 0; break;
    }

    C += ibx + idt + __mul24( iby, ldc );

    // Software pipeline: prefetch the first 4 A values and one B value.
    double Ap[4] = {A[0], A[s2], A[s3], A[s4]};
    double b = B[0];

    const double *Bend = B + ldb*(k - k % 4);

    B += 4*ldb;
    A += 4*lda;

    __shared__ double Bb[4][16];
    double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};

    // Main loop over k in steps of 4, double-buffered through registers.
    if (k > 7)
        do {
            double Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};

            Bb[ty][tx] = b;
            __syncthreads();

            Ap[0] = A[0];
            Ap[1] = A[s2];
            Ap[2] = A[s3];
            Ap[3] = A[s4];
            b = B[0];

            daxpy(Ab[0], &Bb[0][0], Cb);
            daxpy(Ab[1], &Bb[1][0], Cb);
            daxpy(Ab[2], &Bb[2][0], Cb);
            daxpy(Ab[3], &Bb[3][0], Cb);

            A += 4*lda;
            B += 4*ldb;
            __syncthreads();
        } while (B < Bend);

    // Drain the pipeline: the last full step of 4, with the prefetch for the
    // k%4 tail clamped so no out-of-bounds element is read.
    if (k > 3) {
        Bb[ty][tx] = b;
        int k1 = k - k % 4;

        if( (k1 + ty) >= k )
            B -= 4*ldb;
        else
            B -= 0*ldb;

        if( (k1+0) >= k ) {s2=0  ; s3=0*lda; s4=0; A -= 4*lda;} else
        if( (k1+1) >= k ) {s2=0  ; s3=0*lda; s4=0; A -= 0*lda;} else
        if( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda;} else
        if( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda;}

        __syncthreads();

        b = B[0];

        daxpy(Ap[0], &Bb[0][0], Cb); Ap[0] = A[0];
        daxpy(Ap[1], &Bb[1][0], Cb); Ap[1] = A[s2];
        daxpy(Ap[2], &Bb[2][0], Cb); Ap[2] = A[s3];
        daxpy(Ap[3], &Bb[3][0], Cb); Ap[3] = A[s4];
    }

    // Tail: remaining k%4 rank-1 updates.
    k = k % 4;
    if ( k != 0 ) {
        __syncthreads();
        Bb[ty][tx] = b;
        __syncthreads();
        for (int i = 0; i < k; i++) {
            daxpy(Ap[i], &Bb[i][0], Cb);
        }
    }

    // Epilogue: write back C = alpha*Cb + beta*C.
    // ncols is the number of valid output columns for this thread (a fresh
    // local instead of clobbering the lda parameter as the original did);
    // zero when the thread's row lies outside the matrix.
    // This bounded loop performs exactly the same stores as the original
    // 16-arm unrolled switch.
    int ncols = ((iby + 16) >= n) ? (n - iby) : 16;
    if ( (ibx + idt) >= m )
        ncols = 0;

    for (int j = 0; j < ncols; j++)
        C[j*ldc] = alpha*Cb[j] + beta * C[j*ldc];
}
extern "C" void
magmablas_dgemm_kernel_N_T_64_16_4_16_4(double *C,
                                        const double *A,
                                        const double *B,
                                        magma_int_t m, magma_int_t n, magma_int_t k,
                                        magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
                                        double alpha, double beta)
{
    // 16x4 threads per block, each block computing a 64x16 tile of C;
    // ceiling division rounds the grid up over ragged matrix edges.
    dim3 block_dim( 16, 4 );
    dim3 grid_dim( (m + 63) / 64, (n + 15) / 16 );
    dgemm_kernel_N_T_64_16_4_16_4<<< grid_dim, block_dim, 0, magma_stream >>>(
        C, A, B,
        m, n, k,
        lda, ldb, ldc,
        alpha, beta);
}
|
38e605a2a5f997ff2887674b59f83ef7e8120bc2.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER

#include "column_filter.h"

namespace filter
{
    // Explicit instantiation of the templated column filter for float4 pixels
    // written to an int4 destination; the template body lives in column_filter.h.
    template void linearColumn<float4, int4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}

#endif /* CUDA_DISABLER */
| 38e605a2a5f997ff2887674b59f83ef7e8120bc2.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER

#include "column_filter.h"

namespace filter
{
    // Explicit instantiation of the templated column filter for float4 pixels
    // written to an int4 destination; the template body lives in column_filter.h.
    template void linearColumn<float4, int4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}

#endif /* CUDA_DISABLER */
|
56ead294d62af20093b3fb9963a95699a553feb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <numeric>
#include <float.h>
using namespace at;
// this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1])
// so if l is l_0 l_1 ... l_(tl-1) then this looks up idx in
// l' = BLANK l_0 BLANK l_1 BLANK ... BLANK l_(tl-1) BLANK
// - note that no bound-checking is done
// - it is important to only call it witth idx == 0 if the target length is 0
// - __restrict__ impact to be measured, see
// https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/
// Looks up position idx of the augmented target l' = BLANK l_0 BLANK l_1 ...:
// even positions are BLANK, odd position 2*j+1 maps to raw target symbol j.
// No bounds checking is performed (see the notes above this helper).
template <typename target_t>
__device__ static inline int64_t get_target_prime(
    const target_t* __restrict__ target,
    int64_t offset,
    int64_t stride,
    int64_t idx,
    int64_t BLANK) {
  return (idx % 2 == 0) ? BLANK : target[offset + stride * (idx / 2)];
}
// Numerically stable log(exp(a) + exp(b)): factor out the larger argument so
// the exponentials cannot overflow. When both arguments are -inf, pivot on 0
// instead so the result is -inf rather than NaN.
template <typename scalar_t>
__device__ static inline scalar_t safe_log_add(scalar_t a, scalar_t b)
{
  scalar_t pivot = (a > b) ? a : b;
  if (pivot == -INFINITY)
    pivot = 0;
  return ::log(::exp(a - pivot) + ::exp(b - pivot)) + pivot;
}
// this kernel is a relatively straightforward implementation of the alpha calculation in the forward backward algorithm (section 4.1).
// A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs and log_alpha).
// In total it would be more efficient to compute the beta in the same kernel (e.g. cudnn does this). While the beta are not
// needed for the loss itself (just the grad), we can return log_alpha+log_beta (so same space as currently) and the overhead
// is small and the use-case for loss without grad is relatively limited.
// We parallelize by batch and target sequence. Empirically, it is faster to loop over the input (log probs) sequence and do
// target in parallel, even if it means more frequent __syncthreads.
// In contrast to the cuDNN implementation, we allow large target lengths. For this we need that all previous `s` have been
// computed when we start a new block_s. This is why we have our own for loop here.
// Computes log_alpha (the forward variables of the CTC forward-backward
// algorithm, eqs. (6)/(7)) and the per-sample negative log likelihood
// (eq. (8)), all in log space for numerical stability.
// Thread layout: y indexes the batch, x walks positions s of the augmented
// target sequence; the time loop over t is sequential with a block-wide
// barrier because alpha(t, s) reads alpha(t-1, s), (s-1), (s-2).
// Fix vs. original: the b >= batch_size guard now precedes the loads of
// input_lengths[b] / target_lengths[b] / tg_batch_offsets[b]; previously
// padding threads in the last batch block read past those arrays.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data,
                              const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
                              const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
                              scalar_t* __restrict__ neg_log_likelihood_data,
                              int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
                              int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
                              const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
                              int64_t batch_size, int64_t BLANK) {
  constexpr scalar_t neginf = -INFINITY;

  // bookkeeping
  int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  if (b >= batch_size)
    return;  // guard BEFORE indexing the per-batch arrays below

  int64_t input_length = input_lengths[b];
  int64_t target_length = target_lengths[b];
  int64_t lp_batch_offset = b*lp_batch_stride;
  int64_t la_batch_offset = b*la_batch_stride;
  int64_t tg_batch_offset = tg_batch_offsets[b];

  // first row (t=0), the three equations for alpha_1 above eq (6)
  for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
    int64_t s = threadIdx.x + block_s;
    scalar_t la;
    switch (s) {
    case 0:
      la = log_probs_data[lp_batch_offset + lp_char_stride * BLANK];
      break;
    case 1:
      // s == 1 is only valid when there is at least one target symbol.
      la = target_length == 0 ? neginf
                              : log_probs_data
                                    [lp_batch_offset +
                                     lp_char_stride *
                                         get_target_prime(
                                             targets_data,
                                             tg_batch_offset,
                                             tg_target_stride,
                                             1,
                                             BLANK)];
      break;
    default:
      la = neginf;
    }
    if (s < 2*max_target_length+1)
      log_alpha_data[la_batch_offset + /* la_input_stride * 0 */ + la_target_stride * s] = la;
  }

  for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
    int64_t s = threadIdx.x + block_s;

    // These two only depend on s, so we can cache them.
    int64_t current_char;       // l_s in eq (6)
    bool have_three;            // flag which of the two cases in eq (6) we have
    if (s < 2 * target_length + 1 && target_length > 0) {
      current_char = get_target_prime(
          targets_data,
          tg_batch_offset,
          tg_target_stride,
          s,
          BLANK);
      have_three =
          ((s > 1) &&
           (get_target_prime(
                targets_data,
                tg_batch_offset,
                tg_target_stride,
                s - 2,
                BLANK) != current_char));
    } else {
      current_char = BLANK;
      have_three = false;
    }
    for (int64_t t=1; t < max_input_length; t++) {
      __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch
      if ((t < input_length) && (s < 2 * target_length + 1)) {
        // only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands,
        // lamax is the maximum for the logsumexp trick.
        scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * s];
        scalar_t lamax = la1;
        scalar_t la2, la3;
        if (s > 0) {
          la2 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-1)];
          if (la2 > lamax)
            lamax = la2;
        } else {
          la2 = neginf;
        }
        if (have_three) {
          la3 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-2)];
          if (la3 > lamax)
            lamax = la3;
        } else {
          la3 = neginf;
        }
        if (lamax == neginf) // when all are neginf. (then the whole thing is neginf, but we can pretend)
          lamax = 0;

        log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = ::log(::exp(la1-lamax)+::exp(la2-lamax)+::exp(la3-lamax))+lamax
          + log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_char];
      } else {
        // otherwise we just set to neginf
        if (s < 2*max_target_length+1)
          log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = neginf;
      }
    }
  }
  __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch

  // compute the loss (eq (8)): logsumexp of the two terminal alphas.
  if (threadIdx.x == 0) {
    scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2)];
    scalar_t l2 = target_length > 0
        ? log_alpha_data
              [la_batch_offset + la_input_stride * (input_length - 1) +
               la_target_stride * (target_length * 2 - 1)]
        : neginf;
    scalar_t m = ((l1 > l2) ? l1 : l2);
    m = ((m == neginf) ? 0 : m);
    scalar_t log_likelihood = ::log(::exp(l1-m)+::exp(l2-m))+m;
    neg_log_likelihood_data[b] = -log_likelihood;
  }
}
// The forward computation. Lot's of admin and a call to the alpha kernel.
// Note: we do not check that the labels are in the valid range. As we use
// them for indexing in the kernels, you'll see memory errors when you
// pass corrupt labels.
// We support both a 2-dimensional tensor as targets (one set of targets in each row) and
// a 1-dimensional tensor where all targets are concatenated (and we use target_lengths
// to figure out where they begin).
// We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the
// backward. The dispatch function will only return the loss.
// The forward computation: validates arguments, builds per-batch offset
// bookkeeping for both target layouts (2-D batch x max_target_length, or 1-D
// concatenated targets located via target_lengths), then launches the alpha
// kernel above.
// Returns (neg_log_likelihood [batch], log_alpha [batch, T, 2*max_tgt+1]);
// log_alpha is kept for the backward pass.
// Note: target labels are NOT range-checked here; corrupt labels index out of
// bounds inside the kernels.
// Fix vs. original: the kernel launch is now followed by an explicit
// hipGetLastError() check (it was commented out), so bad launch
// configurations surface here instead of at the next synchronizing call.
template<typename scalar_t, ScalarType target_scalar_type>
std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) {
  // log_probs: input_len x batch_size x num_labels
  // targets [int64]: batch_size x target_length OR sum(target_lengths)
  CheckedFrom c = "ctc_loss_gpu";
  using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
  auto log_probs_arg = TensorArg(log_probs, "log_probs", 1);
  auto targets_arg = TensorArg(targets, "targets", 2);
  checkAllSameGPU(c, {log_probs_arg, targets_arg});

  checkScalarType(c, targets_arg, target_scalar_type);
  checkDim(c, log_probs_arg, 3);
  checkDimRange(c, targets_arg, 1, 3);

  int64_t batch_size = log_probs.size(1);
  int64_t num_labels = log_probs.size(2);
  TORCH_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range");
  TORCH_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size");
  TORCH_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size");

  int64_t lp_input_stride = log_probs.stride(0);
  int64_t lp_char_stride = log_probs.stride(2);
  int64_t tg_target_stride;

  int64_t max_target_length = 0;
  // Per-batch start offsets into the (possibly concatenated) target buffer;
  // computed on CPU, then moved to the device.
  auto tg_batch_offsets = at::empty({batch_size}, at::device(at::kCPU).dtype(at::kLong));
  auto tg_batch_offsets_data = tg_batch_offsets.data_ptr<int64_t>();
  if (targets.dim() == 1) { // concatenated targets
    int64_t pos = 0;
    for (int64_t i = 0; i < batch_size; i++) {
      tg_batch_offsets_data[i] = pos;
      pos += target_lengths[i];
      if (max_target_length < target_lengths[i])
        max_target_length = target_lengths[i];
    }
    tg_target_stride = targets.stride(0);
    checkSize(c, targets_arg, 0, pos);
  }
  else { // batch x max_target_length
    // dim is 2
    int64_t tg_batch_stride = targets.stride(0);
    for (int64_t i = 0; i < batch_size; i++) {
      tg_batch_offsets_data[i] = i * tg_batch_stride;
      if (max_target_length < target_lengths[i])
        max_target_length = target_lengths[i];
    }
    tg_target_stride = targets.stride(1);
    checkSize(c, targets_arg, 0, batch_size);
    TORCH_CHECK(targets.size(1) >= max_target_length,
             "Expected tensor to have size at least ", max_target_length, " at dimension 1, but got size ", targets.size(1), " for ", targets_arg,
             " (while checking arguments for ", c, ")");
  }
  int64_t max_input_length = log_probs.size(0);
  for (int64_t b = 0; b < batch_size; b++) {
    TORCH_CHECK(input_lengths[b] <= max_input_length,
             "Expected input_lengths to have value at most ", max_input_length, ", but got value ", input_lengths[b],
             " (while checking arguments for ", c, ")");
  }

  auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong));
  auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong));
  tg_batch_offsets = tg_batch_offsets.cuda();

  Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options());
  Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options());

  // Launch configuration: give as many x-threads as fit the augmented target
  // length (rounded down to a power of two), the rest of the block to batch.
  constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
  int threads_target = max_threads;
  while (threads_target / 2 >= 2*max_target_length+1) {
    threads_target /= 2;
  }
  int threads_batch = ::min(max_threads / threads_target, (int) batch_size);
  dim3 block(threads_target, threads_batch);
  dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  hipLaunchKernelGGL(( ctc_loss_log_alpha_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
                 log_alpha.data_ptr<scalar_t>(),
                 log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
                 targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
                 neg_log_likelihood.data_ptr<scalar_t>(),
                 log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
                 log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
                 tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
                 batch_size, BLANK);
  // Kernel launches do not report configuration errors on their own; check
  // eagerly so failures are attributed to this launch.
  hipError_t launch_err = hipGetLastError();
  TORCH_CHECK(launch_err == hipSuccess,
              "ctc_loss_log_alpha_gpu_kernel launch failed: ", hipGetErrorString(launch_err));
  return std::make_tuple(neg_log_likelihood, log_alpha);
}
// The second (backward) half of the forward backward algorithm, (10) and (11). This is parallel to the
// alpha kernel above. (As mentioned above, it might make sense do the calculation in the alpha kernel.)
// The second (backward) half of the forward-backward algorithm, eqs. (10)
// and (11): fills log_beta, iterating time backwards. Structured in parallel
// to the alpha kernel above (y = batch, x = augmented target position s,
// sequential loop over t with a block-wide barrier per step).
// Fix vs. original: the b >= batch_size guard now precedes the loads of
// input_lengths[b] / target_lengths[b] / tg_batch_offsets[b]; previously
// padding threads in the last batch block read past those arrays.
template<typename scalar_t, typename target_t>
__global__ void
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data,
                                      const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
                                      const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
                                      int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
                                      int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
                                      const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
                                      int64_t batch_size, int64_t BLANK) {
  constexpr scalar_t neginf = -INFINITY;

  int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  if (b >= batch_size)
    return;  // guard BEFORE indexing the per-batch arrays below

  int64_t input_length = input_lengths[b];
  int64_t target_length = target_lengths[b];
  int64_t lp_batch_offset = b*lp_batch_stride;
  int64_t lb_batch_offset = b*lb_batch_stride;
  int64_t tg_batch_offset = tg_batch_offsets[b];

  // "first" row: the beta initialization before eq (10)
  // (t = input_length - 1, which differs per batch element)
  for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
    int64_t s = threadIdx.x + block_s;
    scalar_t lb;
    if (s == 2*target_length) {
      lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * BLANK];
    } else if (s == 2 * target_length - 1) { // false for target_length == 0
      int64_t current_target_prime = get_target_prime(
          targets_data,
          tg_batch_offset,
          tg_target_stride,
          s,
          BLANK);
      lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * current_target_prime];
    } else {
      lb = neginf;
    }
    if (s < 2*max_target_length+1) {
      log_beta_data[lb_batch_offset + (input_length-1) * lb_input_stride + lb_target_stride * s] = lb;
    }
  }

  // go backward in s
  for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
    int64_t s = threadIdx.x + block_s;
    int64_t current_target_prime;  // l'_s, cached per s
    bool have_three;               // whether the s+2 transition of eq (10) applies
    if (s < 2 * target_length + 1 && target_length > 0) {
      current_target_prime = get_target_prime(
          targets_data,
          tg_batch_offset,
          tg_target_stride,
          s,
          BLANK);
      have_three =
          ((s < 2 * target_length - 1) &&
           (get_target_prime(
                targets_data,
                tg_batch_offset,
                tg_target_stride,
                s + 2,
                BLANK) != current_target_prime));
    } else {
      current_target_prime = BLANK;
      have_three = false;
    }
    // now go backward in t. Note that we need to skip the last timestep that we did above.
    for (int64_t t=max_input_length-2; t>=0; t--) {
      __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item
      if ((t < input_length - 1) && (s < 2 * target_length + 1)) {
        // eq (10): logsumexp over the up-to-three successor betas at t+1,
        // with lbmax as the pivot of the logsumexp trick.
        scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * s];
        scalar_t lbmax = lb1;
        scalar_t lb2, lb3;

        if (s < 2*target_length) {
          lb2 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+1)];
          if (lb2 > lbmax)
            lbmax = lb2;
        } else {
          lb2 = neginf;
        }
        if (have_three) {
          lb3 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+2)];
          if (lb3 > lbmax)
            lbmax = lb3;
        } else {
          lb3 = neginf;
        }
        if (lbmax == neginf)
          lbmax = 0;

        scalar_t lb = ::log(::exp(lb1-lbmax)+::exp(lb2-lbmax)+::exp(lb3-lbmax))+lbmax
          + log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_target_prime];

        log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = lb;
      } else if (
          (s < 2 * max_target_length + 1) &&
          (((target_length == 0) && (s > 0)) || (s >= 2 * target_length + 1) ||
           (t >= input_length))) {
        // invalid (t, s) combinations are padded with -inf
        log_beta_data
            [lb_batch_offset + lb_input_stride * t + lb_target_stride * s] =
                neginf;
      }
    }
  }
}
// This implements the subtrahend of equation (16) for all *nonblank* characters.
// It assumes you have probs in gradient_data when called
// and it modifies gradient_data to be, the gradient.
// In order to facilitate this inplace update, We don't actually do this in logspace.
// (The other variant implemented uses log_space and the differences seem to be
// not so problematic at least with unit normal distributed test activations.)
// Internally this uses atomicAdd because different threads may write to the same
// gradient position.
// This is parallelised over b and s again.
// Note that for us, the Z of eqn (16) is actually constant for all t and it is the
// likelihood - this is why we use the negative log likelihood below.
// We also multiply by the input gradient to keep with standard autograd style.
// I took this trick from [2], for moderate alphabet sizes a log-space
// calculation (with an atomic log add) is similarly in performance, but for large
// alphabets the inplace nature is a considerable advantage.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_backward_collect_nonblank_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) {
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t s = threadIdx.x + blockIdx.x * blockDim.x; // note, this directly indexes into targets, not targets prime!
if (b >= batch_size)
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (s >= target_length)
return;
int64_t target = targets_data[tg_batch_offset + s * tg_target_stride];
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
if (zero_infinity && nll == INFINITY)
return;
for (int64_t t = 0; t < input_length; t++) {
scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * target];
//bug,gradient_datalpgr
// gpuAtomicAdd(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target],
// -::exp(log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * (s*2+1)]
// + log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * (s*2+1)]
// + nll - lp) * gr);
gpuAtomicAdd(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target],
-::exp(log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * (s*2+1)]
+ log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * (s*2+1)]
+ nll - lp));
// NOTE (original author, garbled): the header providing gpuAtomicMul could not be found.
gpuAtomicMul(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target],gr);
// gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target] *= gr;
}
}
// This is the naive implementation of equation (16). It is parallelised in batch and input timestep.
// It appears to be faster than the above method for small batch sizes.
// Collects the gradient for ALL labels (blank and non-blank) in one pass.
// One thread handles one (input timestep t, batch element b) pair:
// threadIdx/blockIdx.x indexes t, .y indexes b.
// Assumes gradient_data was pre-filled with -inf by the caller (see
// at::full_like(log_probs, neginf) in the host driver below), since it is
// used as a log-space accumulator before being converted to the gradient.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) {
constexpr scalar_t neginf = -INFINITY;
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t t = threadIdx.x + blockIdx.x * blockDim.x;
// Guard against grid overshoot in both dimensions.
if ((t >= max_input_length) || (b >= batch_size))
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
// collected[b, t, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s]
// i.e. log-sum-exp accumulate alpha*beta over all augmented-target positions
// s that map to the same character target'[s].
for (int s = 0; s < 2*max_target_length+1; s++) {
if (s < 2 * target_length + 1) { // if target_length == 0, s == 0
int64_t current_target_prime = get_target_prime(
targets_data,
tg_batch_offset,
tg_target_stride,
s,
BLANK);
scalar_t log_alpha_beta = (log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s]
+ log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s]);
scalar_t& lcab = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * current_target_prime];
if (lcab == neginf) {
// first contribution for this character: just store it
lcab = log_alpha_beta;
} else {
// log-sum-exp with the running value, using the max for stability
scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta);
lcab = ::log(::exp(lcab-max)+::exp(log_alpha_beta-max))+max;
}
}
}
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
// Convert the accumulated log(sum alpha*beta) into the gradient per eq (16):
// grad = (exp(lp) - exp(lcab + nll - lp)) * grad_out for valid timesteps;
// out-of-sequence timesteps (and infinite-loss batches when zero_infinity)
// get zero.
for (int64_t c = 0; c < num_labels; c++) {
scalar_t& res = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * c];
if (t < input_length && (! zero_infinity || nll != INFINITY)) {
// y_t^c - 1/(p(l|x) * y_t^c) * (sum alpha*beta) == exp(lp) - exp(res + nll - lp)
scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * c];
res = (::exp(lp)-::exp(res + nll - lp)) * gr;
}
else {
res = 0.;
}
}
}
// This is to zero gradients which corresponding to the out-of-sequence position
// Those gradients should not be used in any model update since the input
// elements are padded
template<typename scalar_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
// Launch layout: threadIdx/blockIdx.x indexes the timestep t, .y the batch b.
// For every (t, b) with t >= input_lengths[b], zeroes the whole label row of
// the gradient, since those positions are padding.
ctc_loss_zero_padded_gradients(
scalar_t* __restrict__ gradient_data, /* (T, B, D) layout */
const int64_t* __restrict__ input_lengths, /* (B, ) layout */
int64_t gr_timestep_stride,
int64_t gr_batch_stride,
int64_t gr_label_stride,
int64_t max_input_length, /* T */
int64_t batch_size, /* B */
int64_t num_labels /* D */ ) {
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t t = threadIdx.x + blockIdx.x * blockDim.x;
// Guard against grid overshoot in both dimensions.
if (b >= batch_size || t >= max_input_length) {
return;
}
// Fix: keep the length as int64_t. The original stored it in scalar_t;
// for float that loses exact integer representation above 2^24, so the
// comparison below could misbehave for extremely long sequences.
int64_t input_length = input_lengths[b];
if (t >= input_length) {
// int64_t loop counter to match num_labels' type (avoids narrowing).
for (int64_t l = 0; l < num_labels; l++)
gradient_data[
t * gr_timestep_stride + b * gr_batch_stride + l * gr_label_stride]
= 0.0f;
}
}
// The backward. It essentially computes eq 16 by using the above kernels.
// We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward.
// Host driver for the CTC backward pass (eq (16)): computes log_beta with a
// kernel, then collects alpha*beta into the gradient via either the "large
// problem" path (exp_out + blank-channel reduction + atomicAdd-based nonblank
// kernel) or the naive per-(t, b) collect kernel, and finally zeroes gradient
// entries beyond each sequence's input length.
template<typename scalar_t, ScalarType target_scalar_type>
Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) {
constexpr scalar_t neginf = -INFINITY;
// target_t mirrors the targets tensor's dtype: kInt -> int, else int64_t.
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
// log_probs layout: (input_len, batch_size, num_labels)
int64_t batch_size = log_probs.size(1);
int64_t num_labels = log_probs.size(2);
int64_t lp_input_stride = log_probs.stride(0);
int64_t lp_char_stride = log_probs.stride(2);
int64_t tg_target_stride;
int64_t max_target_length;
// Per-batch offsets into the targets tensor, computed on CPU then copied
// to the GPU below.
auto tg_batch_offsets = at::empty({batch_size}, TensorOptions(at::CPU(kLong)));
auto tg_batch_offsets_data = tg_batch_offsets.data_ptr<int64_t>();
if (targets.dim() == 1) { // concatenated targets
int64_t pos = 0;
max_target_length = 0;
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = pos;
pos += target_lengths[i];
if (max_target_length < target_lengths[i])
max_target_length = target_lengths[i];
}
tg_target_stride = targets.stride(0);
}
else { // batch x max_target_length
// dim is 2
int64_t tg_batch_stride = targets.stride(0);
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = i * tg_batch_stride;
}
tg_target_stride = targets.stride(1);
max_target_length = log_alpha.size(2)/2; // targets.size(1) might be larger
}
auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong));
auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong));
tg_batch_offsets = tg_batch_offsets.cuda();
Tensor log_beta = at::empty_like(log_alpha);
log_beta.fill_(neginf);
Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta))
// As above, there may be better configurations to use.
constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
int threads_target = max_threads;
while (threads_target / 2 >= 2*max_target_length+1) {
threads_target /= 2;
}
int threads_batch = ::min(max_threads / threads_target, (int) batch_size);
// Step 1: compute log_beta (the backward variables) for all batch elements.
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
dim3 block(threads_target, threads_batch);
dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
hipLaunchKernelGGL(( ctc_loss_backward_log_beta_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
log_beta.data_ptr<scalar_t>(),
log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
batch_size, BLANK);
// NOTE(review): kernel launch checks are commented out throughout this
// variant; consider re-enabling them to surface launch failures.
// TORCH_CUDA_KERNEL_LAUNCH_CHECK();
}
// Very crude heuristic for what is a small problem, based on linearly regressing problem dimensions on
// the (capped) difference of timings.
// Note that for OK problems target length <= input length, so we
// only consider input length.
bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450;
if (is_large) { // large alphabet, large batch
// this computes the probs, minuend in (16)
at::exp_out(grad, log_probs);
// now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that
// blanks are in every other position.
// maybe we should kernelize this, too.
// narrow(dim, start, length) takes the slice [start, start+length) along
// `dim`; here it selects the BLANK channel of the gradient.
auto grad_blank = grad.narrow(2, BLANK, 1);
grad_blank -= (at::logsumexp(log_alpha.as_strided({batch_size, log_alpha.size(1), max_target_length+1},
{log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2)*2})
+ log_beta.as_strided({batch_size, log_beta.size(1), max_target_length+1},
{log_beta.stride(0), log_beta.stride(1), log_beta.stride(2)*2}),
2, true)
.permute({1, 0, 2})
.add_(neg_log_likelihood.view({1, batch_size, 1}))
.sub_(log_probs.narrow(2, BLANK, 1))
.exp_()
);
// scale by output gradient (blanks and first summand of non-blanks)
// TODO (original note, garbled): concerned grad_blank and this scaling —
// verify the blank-channel interaction.
grad *= grad_out.view({1, batch_size, 1});
if (zero_infinity) {
grad = at::where(neg_log_likelihood.view({1, batch_size, 1}) == Scalar(INFINITY), at::zeros({}, grad.options()), grad);
}
// For the non-blank characters, we use a kernel to compute the subtrahend.
// Again we might configure block and grid in a better way.
int threads_target = max_threads; // shadows the outer threads_target on purpose
while (threads_target / 2 >= max_target_length && threads_target > 1) {
threads_target /= 2;
}
int threads_batch = ::min(max_threads / threads_target, (int) batch_size); // shadows the outer threads_batch
dim3 block(threads_target, threads_batch);
dim3 grid(
std::max<int>(
(max_target_length + threads_target - 1) / threads_target, 1),
(batch_size + threads_batch - 1) / threads_batch,
1);
hipLaunchKernelGGL(( ctc_loss_backward_collect_nonblank_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
grad.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(), grad_out.stride(0),
log_alpha.data_ptr<scalar_t>(), log_beta.data_ptr<scalar_t>(),
log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
neg_log_likelihood.data_ptr<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
batch_size, num_labels, BLANK, zero_infinity);
// TORCH_CUDA_KERNEL_LAUNCH_CHECK();
} else { // small problem, use naive algorithm
// Still no block/grid configuration guru...
int threads_input = max_threads;
while (threads_input / 2 >= log_probs.size(0) && threads_input > 1) {
threads_input /= 2;
}
threads_batch = ::min(max_threads / threads_input, (int) batch_size);
dim3 block(threads_input, threads_batch);
dim3 grid((log_probs.size(0) + threads_input-1)/threads_input, (batch_size+threads_batch-1)/threads_batch);
hipLaunchKernelGGL(( ctc_loss_backward_collect_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream,
grad.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(), grad_out.stride(0),
log_alpha.data_ptr<scalar_t>(), log_beta.data_ptr<scalar_t>(),
log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
neg_log_likelihood.data_ptr<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
batch_size, num_labels, BLANK, zero_infinity);
// TORCH_CUDA_KERNEL_LAUNCH_CHECK(); // catch launch errors
}
// zero those invalid gradient elements due to padding
{
int threads_input = max_threads;
// NOTE(review): unlike the loop in the small-problem branch, this loop has
// no `threads_input > 1` guard; it would spin forever if
// log_probs.size(0) == 0 — confirm T > 0 is guaranteed by the caller.
while (threads_input / 2 >= log_probs.size(0)) {
threads_input /= 2;
}
threads_batch = ::min(max_threads / threads_input, (int) batch_size);
dim3 block(threads_input, threads_batch);
dim3 grid(
(log_probs.size(0) + threads_input-1)/threads_input,
(batch_size+threads_batch-1)/threads_batch);
hipLaunchKernelGGL(( ctc_loss_zero_padded_gradients<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.data_ptr<scalar_t>(),
input_lengths_t.data_ptr<int64_t>(),
grad.stride(0),
grad.stride(1),
grad.stride(2),
grad.size(0),
grad.size(1),
grad.size(2)
);
// TORCH_CUDA_KERNEL_LAUNCH_CHECK();
}
return grad;
}
// Public entry point for the CTC forward pass on GPU.
// Dispatches on the log_probs floating dtype and the targets integer dtype,
// returning (neg_log_likelihood, log_alpha).
std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK, bool zero_infinity) {
  (void)zero_infinity; // only used for backward
  return AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_cuda", [&] {
    // Anything other than 64-bit targets goes through the int instantiation.
    if (targets.scalar_type() != kLong) {
      return ctc_loss_gpu_template<scalar_t, kInt>(log_probs, targets, input_lengths, target_lengths, BLANK);
    }
    return ctc_loss_gpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK);
  });
}
// Public entry point for the CTC backward pass on GPU.
// Dispatches on the log_probs floating dtype and the targets integer dtype.
Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage (alert intentionally disabled here).
  // globalContext().alertNotDeterministic("ctc_loss_backward_gpu");
  return AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_backward_cuda", [&] {
    // Anything other than 64-bit targets goes through the int instantiation.
    if (targets.scalar_type() != kLong) {
      return ctc_loss_backward_gpu_template<scalar_t, kInt>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
    }
    return ctc_loss_backward_gpu_template<scalar_t, kLong>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
  });
}
| 56ead294d62af20093b3fb9963a95699a553feb5.cu |
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <numeric>
#include <float.h>
using namespace at;
// this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1])
// so if l is l_0 l_1 ... l_(tl-1) then this looks up idx in
// l' = BLANK l_0 BLANK l_1 BLANK ... BLANK l_(tl-1) BLANK
// - note that no bound-checking is done
// - it is important to only call it witth idx == 0 if the target length is 0
// - __restrict__ impact to be measured, see
// https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/
template <typename target_t>
__device__ static inline int64_t get_target_prime(
const target_t* __restrict__ target,
int64_t offset,
int64_t stride,
int64_t idx,
int64_t BLANK) {
if (idx % 2 == 0) {
return BLANK;
} else {
return target[offset + stride * (idx / 2)];
}
}
template <typename scalar_t>
__device__ static inline scalar_t safe_log_add(scalar_t a, scalar_t b)
{
scalar_t m=((a > b) ? a : b);
if (m == -INFINITY)
m = 0;
return (std::log(std::exp(a-m) + std::exp(b-m)) + m);
}
// this kernel is a relatively straightforward implementation of the alpha calculation in the forward backward algorithm (section 4.1).
// A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs and log_alpha).
// In total it would be more efficient to compute the beta in the same kernel (e.g. cudnn does this). While the beta are not
// needed for the loss itself (just the grad), we can return log_alpha+log_beta (so same space as currently) and the overhead
// is small and the use-case for loss without grad is relatively limited.
// We parallelize by batch and target sequence. Empirically, it is faster to loop over the input (log probs) sequence and do
// target in parallel, even if it means more frequent __syncthreads.
// In contrast to the cuDNN implementation, we allow large target lengths. For this we need that all previous `s` have been
// computed when we start a new block_s. This is why we have our own for loop here.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
scalar_t* __restrict__ neg_log_likelihood_data,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
// bookkeeping
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (b >= batch_size)
return;
// first row (t=0), the three equations for alpha_1 above eq (6)
for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
int64_t s = threadIdx.x + block_s;
scalar_t la;
switch (s) {
case 0:
la = log_probs_data[lp_batch_offset + lp_char_stride * BLANK];
break;
case 1:
la = target_length == 0 ? neginf
: log_probs_data
[lp_batch_offset +
lp_char_stride *
get_target_prime(
targets_data,
tg_batch_offset,
tg_target_stride,
1,
BLANK)];
break;
default:
la = neginf;
}
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + /* la_input_stride * 0 */ + la_target_stride * s] = la;
}
for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) {
int64_t s = threadIdx.x + block_s;
// These two only depend on s, so we can cache them.
int64_t current_char; // l_s in eq (6)
bool have_three; // flag which of the two cases in eq (6) we have
if (s < 2 * target_length + 1 && target_length > 0) {
current_char = get_target_prime(
targets_data,
tg_batch_offset,
tg_target_stride,
s,
BLANK);
have_three =
((s > 1) &&
(get_target_prime(
targets_data,
tg_batch_offset,
tg_target_stride,
s - 2,
BLANK) != current_char));
} else {
current_char = BLANK;
have_three = false;
}
for (int64_t t=1; t < max_input_length; t++) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch
if ((t < input_length) && (s < 2 * target_length + 1)) {
// only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands,
// lamax is the maximum for the logsumexp trick.
scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * s];
scalar_t lamax = la1;
scalar_t la2, la3;
if (s > 0) {
la2 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-1)];
if (la2 > lamax)
lamax = la2;
} else {
la2 = neginf;
}
if (have_three) {
la3 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-2)];
if (la3 > lamax)
lamax = la3;
} else {
la3 = neginf;
}
if (lamax == neginf) // when all are neginf. (then the whole thing is neginf, but we can pretend)
lamax = 0;
log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = std::log(std::exp(la1-lamax)+std::exp(la2-lamax)+std::exp(la3-lamax))+lamax
+ log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_char];
} else {
// otherwise we just set to neginf
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = neginf;
}
}
}
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch
// compute the loss (eq (8))
if (threadIdx.x == 0) {
scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2)];
scalar_t l2 = target_length > 0
? log_alpha_data
[la_batch_offset + la_input_stride * (input_length - 1) +
la_target_stride * (target_length * 2 - 1)]
: neginf;
scalar_t m = ((l1 > l2) ? l1 : l2);
m = ((m == neginf) ? 0 : m);
scalar_t log_likelihood = std::log(std::exp(l1-m)+std::exp(l2-m))+m;
neg_log_likelihood_data[b] = -log_likelihood;
}
}
// The forward computation. Lot's of admin and a call to the alpha kernel.
// Note: we do not check that the labels are in the valid range. As we use
// them for indexing in the kernels, you'll see memory errors when you
// pass corrupt labels.
// We support both a 2-dimensional tensor as targets (one set of targets in each row) and
// a 1-dimensional tensor where all targets are concatenated (and we use target_lengths
// to figure out where they begin).
// We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the
// backward. The dispatch function will only return the loss.
template<typename scalar_t, ScalarType target_scalar_type>
std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) {
// log_probs: input_len x batch_size x num_labels
// targets [int64]: batch_size x target_length OR sum(target_lengths)
CheckedFrom c = "ctc_loss_gpu";
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
auto log_probs_arg = TensorArg(log_probs, "log_probs", 1);
auto targets_arg = TensorArg(targets, "targets", 2);
checkAllSameGPU(c, {log_probs_arg, targets_arg});
checkScalarType(c, targets_arg, target_scalar_type);
checkDim(c, log_probs_arg, 3);
checkDimRange(c, targets_arg, 1, 3);
int64_t batch_size = log_probs.size(1);
int64_t num_labels = log_probs.size(2);
TORCH_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range");
TORCH_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size");
TORCH_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size");
int64_t lp_input_stride = log_probs.stride(0);
int64_t lp_char_stride = log_probs.stride(2);
int64_t tg_target_stride;
int64_t max_target_length = 0;
auto tg_batch_offsets = at::empty({batch_size}, at::device(at::kCPU).dtype(at::kLong));
auto tg_batch_offsets_data = tg_batch_offsets.data_ptr<int64_t>();
if (targets.dim() == 1) { // concatenated targets
int64_t pos = 0;
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = pos;
pos += target_lengths[i];
if (max_target_length < target_lengths[i])
max_target_length = target_lengths[i];
}
tg_target_stride = targets.stride(0);
checkSize(c, targets_arg, 0, pos);
}
else { // batch x max_target_length
// dim is 2
int64_t tg_batch_stride = targets.stride(0);
for (int64_t i = 0; i < batch_size; i++) {
tg_batch_offsets_data[i] = i * tg_batch_stride;
if (max_target_length < target_lengths[i])
max_target_length = target_lengths[i];
}
tg_target_stride = targets.stride(1);
checkSize(c, targets_arg, 0, batch_size);
TORCH_CHECK(targets.size(1) >= max_target_length,
"Expected tensor to have size at least ", max_target_length, " at dimension 1, but got size ", targets.size(1), " for ", targets_arg,
" (while checking arguments for ", c, ")");
}
int64_t max_input_length = log_probs.size(0);
for (int64_t b = 0; b < batch_size; b++) {
TORCH_CHECK(input_lengths[b] <= max_input_length,
"Expected input_lengths to have value at most ", max_input_length, ", but got value ", input_lengths[b],
" (while checking arguments for ", c, ")");
}
auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong));
auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong));
tg_batch_offsets = tg_batch_offsets.cuda();
Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options());
Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options());
// Very likely, we could be more clever here, e.g. learning (or genralizing and reusing) from SoftMax.cu...
//constexpr int max_threads = std::is_same<scalar_t, float>::value ? 256 : 128; // we need 72 or so 32 bit registers for double
constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
int threads_target = max_threads;
while (threads_target / 2 >= 2*max_target_length+1) {
threads_target /= 2;
}
int threads_batch = std::min(max_threads / threads_target, (int) batch_size);
dim3 block(threads_target, threads_batch);
dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
ctc_loss_log_alpha_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>(
log_alpha.data_ptr<scalar_t>(),
log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
neg_log_likelihood.data_ptr<scalar_t>(),
// char_frequency_tensor.data_ptr<int>(),
// char_es_tensor.data_ptr<int>(),
// num_labels,
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
batch_size, BLANK);
// TORCH_CUDA_KERNEL_LAUNCH_CHECK();
return std::make_tuple(neg_log_likelihood, log_alpha);
}
// The second (backward) half of the forward backward algorithm, (10) and (11). This is parallel to the
// alpha kernel above. (As mentioned above, it might make sense do the calculation in the alpha kernel.)
template<typename scalar_t, typename target_t>
__global__ void
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
int64_t batch_size, int64_t BLANK) {
constexpr scalar_t neginf = -INFINITY;
int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = tg_batch_offsets[b];
if (b >= batch_size)
return;
// "first" row, the beta initiaization before eq (10) (t=target_length - differes per batch)
for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
int64_t s = threadIdx.x + block_s;
scalar_t lb;
if (s == 2*target_length) {
lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * BLANK];
} else if (s == 2 * target_length - 1) { // false for target_length == 0
int64_t current_target_prime = get_target_prime(
targets_data,
tg_batch_offset,
tg_target_stride,
s,
BLANK);
lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * current_target_prime];
} else {
lb = neginf;
}
if (s < 2*max_target_length+1) {
log_beta_data[lb_batch_offset + (input_length-1) * lb_input_stride + lb_target_stride * s] = lb;
}
}
// go backward in s
for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) {
int64_t s = threadIdx.x + block_s;
int64_t current_target_prime;
bool have_three;
if (s < 2 * target_length + 1 && target_length > 0) {
current_target_prime = get_target_prime(
targets_data,
tg_batch_offset,
tg_target_stride,
s,
BLANK);
have_three =
((s < 2 * target_length - 1) &&
(get_target_prime(
targets_data,
tg_batch_offset,
tg_target_stride,
s + 2,
BLANK) != current_target_prime));
} else {
current_target_prime = BLANK;
have_three = false;
}
// now go backward in t. Note that we need to skip the last timestep that we did above.
for (int64_t t=max_input_length-2; t>=0; t--) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item
if ((t < input_length - 1) && (s < 2 * target_length + 1)) {
scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * s];
scalar_t lbmax = lb1;
scalar_t lb2, lb3;
if (s < 2*target_length) {
lb2 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+1)];
if (lb2 > lbmax)
lbmax = lb2;
} else {
lb2 = neginf;
}
if (have_three) {
lb3 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+2)];
if (lb3 > lbmax)
lbmax = lb3;
} else {
lb3 = neginf;
}
if (lbmax == neginf)
lbmax = 0;
scalar_t lb = std::log(std::exp(lb1-lbmax)+std::exp(lb2-lbmax)+std::exp(lb3-lbmax))+lbmax
+ log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_target_prime];
log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = lb;
} else if (
(s < 2 * max_target_length + 1) &&
(((target_length == 0) && (s > 0)) || (s >= 2 * target_length + 1) ||
(t >= input_length))) {
log_beta_data
[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] =
neginf;
}
}
}
}
// This implements the subtrahend of equation (16) for all *nonblank* characters.
// It assumes you have probs in gradient_data when called
// and it modifies gradient_data to be, the gradient.
// In order to facilitate this inplace update, We don't actually do this in logspace.
// (The other variant implemented uses log_space and the differences seem to be
// not so problematic at least with unit normal distributed test activations.)
// Internally this uses atomicAdd because different threads may write to the same
// gradient position.
// This is parallelised over b and s again.
// Note that for us, the Z of eqn (16) is actually constant for all t and it is the
// likelihood - this is why we use the negative log likelihood below.
// We also multiply by the input gradient to keep with standard autograd style.
// I took this trick from [2], for moderate alphabet sizes a log-space
// calculation (with an atomic log add) is similarly in performance, but for large
// alphabets the inplace nature is a considerable advantage.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
// Accumulates the non-blank subtrahend of eq. (16) into gradient_data.
// One thread per (batch b, target position s); each thread loops over all
// valid input timesteps t and atomically adds its contribution, because
// repeated target characters make different s threads write the same
// gradient cell.
ctc_loss_backward_collect_nonblank_gpu_kernel(scalar_t* __restrict__ gradient_data,
                                      const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
                                      const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
                                      const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
                                      const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
                                      const scalar_t* __restrict__ neg_log_likelihood_data,
                                      int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
                                      int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
                                      int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
                                      int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
                                      const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
                                      int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) {
  int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  int64_t s = threadIdx.x + blockIdx.x * blockDim.x; // note, this directly indexes into targets, not targets prime!
  if (b >= batch_size)
    return;
  int64_t input_length = input_lengths[b];
  int64_t target_length = target_lengths[b];
  int64_t gr_batch_offset = b*gr_batch_stride;
  int64_t lp_batch_offset = b*lp_batch_stride;
  int64_t la_batch_offset = b*la_batch_stride;
  int64_t lb_batch_offset = b*lb_batch_stride;
  int64_t tg_batch_offset = tg_batch_offsets[b];
  if (s >= target_length)
    return;
  int64_t target = targets_data[tg_batch_offset + s * tg_target_stride];
  scalar_t nll = neg_log_likelihood_data[b];
  scalar_t gr = grad_out_data[b * grad_out_batch_stride];
  if (zero_infinity && nll == INFINITY)
    return;
  for (int64_t t = 0; t < input_length; t++) {
    scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * target];
    // Fold the grad_out scaling (gr) into the single atomic add. The previous
    // two-step form (unscaled gpuAtomicAdd followed by a separate gpuAtomicMul
    // by gr) was wrong twice over: (a) the add/mul pair is not atomic as a
    // unit, so contributions from other (s, t) threads landing in between were
    // scaled a spurious extra time, and (b) the minuend, already multiplied by
    // grad_out on the host before this launch, got re-scaled once per
    // contributing thread.
    gpuAtomicAdd(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target],
                 -std::exp(log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * (s*2+1)]
                           + log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * (s*2+1)]
                           + nll - lp) * gr);
  }
}
// This is the naive implementation of equation (16). It is parallelised in batch and input timestep.
// It appears to be faster than the above method for small batch sizes.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
// Naive backward for eq. (16): one thread per (batch b, input timestep t).
// Each thread first log-accumulates alpha+beta per output character into
// gradient_data (which the caller pre-filled with -inf), then converts that
// accumulator into the final gradient for every label c.
ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data,
                                     const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
                                     const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
                                     const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
                                     const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
                                     const scalar_t* __restrict__ neg_log_likelihood_data,
                                     int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
                                     int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
                                     int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
                                     int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
                                     const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
                                     int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) {
  constexpr scalar_t neginf = -INFINITY;
  int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  int64_t t = threadIdx.x + blockIdx.x * blockDim.x;
  if ((t >= max_input_length) || (b >= batch_size))
    return;
  int64_t input_length = input_lengths[b];
  int64_t target_length = target_lengths[b];
  int64_t gr_batch_offset = b*gr_batch_stride;
  int64_t lp_batch_offset = b*lp_batch_stride;
  int64_t la_batch_offset = b*la_batch_stride;
  int64_t lb_batch_offset = b*lb_batch_stride;
  int64_t tg_batch_offset = tg_batch_offsets[b];
  // collected[b, t, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s]
  // (this thread owns the whole (b, t) slice, so no atomics are needed here)
  for (int s = 0; s < 2*max_target_length+1; s++) {
    if (s < 2 * target_length + 1) { // if target_length == 0, s == 0
      int64_t current_target_prime = get_target_prime(
          targets_data,
          tg_batch_offset,
          tg_target_stride,
          s,
          BLANK);
      scalar_t log_alpha_beta = (log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s]
                                 + log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s]);
      scalar_t& lcab = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * current_target_prime];
      if (lcab == neginf) {
        lcab = log_alpha_beta;
      } else {
        // numerically stable in-place log-sum-exp: log(e^lcab + e^lab)
        scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta);
        lcab = std::log(std::exp(lcab-max)+std::exp(log_alpha_beta-max))+max;
      }
    }
  }
  scalar_t nll = neg_log_likelihood_data[b];
  scalar_t gr = grad_out_data[b * grad_out_batch_stride];
  for (int64_t c = 0; c < num_labels; c++) {
    scalar_t& res = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * c];
    if (t < input_length && (! zero_infinity || nll != INFINITY)) {
      // gradient = y_t^c - (1/(p(l|x) * y_t^c)) * (alpha*beta)
      //          = exp(lp) - exp(res + nll - lp), scaled by the incoming grad
      scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * c];
      res = (std::exp(lp)-std::exp(res + nll - lp)) * gr;
    }
    else {
      // out-of-sequence timestep, or infinite loss being zeroed out
      res = 0.;
    }
  }
}
// This is to zero gradients which corresponding to the out-of-sequence position
// Those gradients should not be used in any model update since the input
// elements are padded
template<typename scalar_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
// Zeroes gradient entries at padded (out-of-sequence) timesteps: those
// positions must not contribute to any model update. One thread per (b, t).
ctc_loss_zero_padded_gradients(
    scalar_t* __restrict__ gradient_data, /* (T, B, D) layout */
    const int64_t* __restrict__ input_lengths, /* (B, ) layout */
    int64_t gr_timestep_stride,
    int64_t gr_batch_stride,
    int64_t gr_label_stride,
    int64_t max_input_length, /* T */
    int64_t batch_size, /* B */
    int64_t num_labels /* D */ ) {
  int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  int64_t t = threadIdx.x + blockIdx.x * blockDim.x;
  if (b >= batch_size || t >= max_input_length) {
    return;
  }
  // Keep the length integral: the previous `scalar_t input_length` converted
  // the int64 length to float, which is lossy for lengths that are not exactly
  // representable and made the comparison below operate on a rounded value.
  int64_t input_length = input_lengths[b];
  if (t >= input_length) {
    // int64_t loop index to match num_labels' type (avoids narrowing).
    for (int64_t l = 0; l < num_labels; l++)
      gradient_data[
        t * gr_timestep_stride + b * gr_batch_stride + l * gr_label_stride]
        = scalar_t(0);
  }
}
// The backward. It essentially computes eq 16 by using the above kernels.
// We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward.
// The backward. It essentially computes eq (16) by using the kernels above.
// We don't do a lot of checking as we envision this to be called only when
// backpropagating through a (well-checked) forward.
template<typename scalar_t, ScalarType target_scalar_type>
Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
                                      const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) {
  constexpr scalar_t neginf = -INFINITY;
  using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
  int64_t batch_size = log_probs.size(1);
  int64_t num_labels = log_probs.size(2);
  int64_t lp_input_stride = log_probs.stride(0);
  int64_t lp_char_stride = log_probs.stride(2);
  int64_t tg_target_stride;
  int64_t max_target_length;
  // per-batch offsets into the (possibly concatenated) targets tensor,
  // built on CPU and copied to the device below
  auto tg_batch_offsets = at::empty({batch_size}, TensorOptions(at::CPU(kLong)));
  auto tg_batch_offsets_data = tg_batch_offsets.data_ptr<int64_t>();
  if (targets.dim() == 1) { // concatenated targets
    int64_t pos = 0;
    max_target_length = 0;
    for (int64_t i = 0; i < batch_size; i++) {
      tg_batch_offsets_data[i] = pos;
      pos += target_lengths[i];
      if (max_target_length < target_lengths[i])
        max_target_length = target_lengths[i];
    }
    tg_target_stride = targets.stride(0);
  }
  else { // batch x max_target_length
    // dim is 2
    int64_t tg_batch_stride = targets.stride(0);
    for (int64_t i = 0; i < batch_size; i++) {
      tg_batch_offsets_data[i] = i * tg_batch_stride;
    }
    tg_target_stride = targets.stride(1);
    max_target_length = log_alpha.size(2)/2; // targets.size(1) might be larger
  }
  auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong));
  auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong));
  tg_batch_offsets = tg_batch_offsets.cuda();
  Tensor log_beta = at::empty_like(log_alpha);
  log_beta.fill_(neginf);
  Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta))
  // As above, there may be better configurations to use.
  constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double
  int threads_target = max_threads;
  while (threads_target / 2 >= 2*max_target_length+1) {
    threads_target /= 2;
  }
  int threads_batch = std::min(max_threads / threads_target, (int) batch_size);
  // compute the backward (beta) probabilities
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  {
    dim3 block(threads_target, threads_batch);
    dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch);
    ctc_loss_backward_log_beta_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>
      (log_beta.data_ptr<scalar_t>(),
       log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
       targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
       log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
       log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
       tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
       batch_size, BLANK);
    // launch-error check re-enabled: without it a bad launch configuration
    // would be silently dropped and surface as garbage much later
    TORCH_CUDA_KERNEL_LAUNCH_CHECK();
  }
  // Very crude heuristic for what is a small problem., based on linearly regressing problem dimensions on
  // the (capped) difference of timings.
  // Note that for OK problems target length <= input length, so we
  // only consider input length.
  bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450;
  if (is_large) { // large alphabet, large batch
    // this computes the probs, minuend in (16)
    at::exp_out(grad, log_probs);
    // now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that
    // blanks are in every other position.
    // maybe we should kernelize this, too.
    // narrow(dim, start, length) takes the slice [start, start+length) along dim,
    // i.e. grad_blank is a VIEW of the blank column of grad.
    auto grad_blank = grad.narrow(2, BLANK, 1);
    grad_blank -= (at::logsumexp(log_alpha.as_strided({batch_size, log_alpha.size(1), max_target_length+1},
                                                      {log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2)*2})
                                 + log_beta.as_strided({batch_size, log_beta.size(1), max_target_length+1},
                                                       {log_beta.stride(0), log_beta.stride(1), log_beta.stride(2)*2}),
                                 2, true)
                   .permute({1, 0, 2})
                   .add_(neg_log_likelihood.view({1, batch_size, 1}))
                   .sub_(log_probs.narrow(2, BLANK, 1))
                   .exp_()
                   );
    // scale by output gradient (blanks and first summand of non-blanks);
    // since grad_blank aliases grad, this in-place multiply scales the blank
    // column too (the earlier TODO claiming otherwise was mistaken)
    grad *= grad_out.view({1, batch_size, 1});
    if (zero_infinity) {
      grad = at::where(neg_log_likelihood.view({1, batch_size, 1}) == Scalar(INFINITY), at::zeros({}, grad.options()), grad);
    }
    // For the non-blank characters, we use a kernel to compute the subtrahend.
    // Again we might configure block and grid in a better way.
    int threads_target = max_threads;
    while (threads_target / 2 >= max_target_length && threads_target > 1) {
      threads_target /= 2;
    }
    int threads_batch = std::min(max_threads / threads_target, (int) batch_size);
    dim3 block(threads_target, threads_batch);
    dim3 grid(
        std::max<int>(
            (max_target_length + threads_target - 1) / threads_target, 1),
        (batch_size + threads_batch - 1) / threads_batch,
        1);
    ctc_loss_backward_collect_nonblank_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>
      (grad.data_ptr<scalar_t>(),
       grad_out.data_ptr<scalar_t>(), grad_out.stride(0),
       log_alpha.data_ptr<scalar_t>(), log_beta.data_ptr<scalar_t>(),
       log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
       targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
       neg_log_likelihood.data_ptr<scalar_t>(),
       grad.stride(0), grad.stride(1), grad.stride(2),
       log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
       log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
       log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
       tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
       batch_size, num_labels, BLANK, zero_infinity);
    TORCH_CUDA_KERNEL_LAUNCH_CHECK();
  } else { // small problem, use naive algorithm
    // Still no block/grid configuration guru...
    int threads_input = max_threads;
    while (threads_input / 2 >= log_probs.size(0) && threads_input > 1) {
      threads_input /= 2;
    }
    threads_batch = std::min(max_threads / threads_input, (int) batch_size);
    dim3 block(threads_input, threads_batch);
    dim3 grid((log_probs.size(0) + threads_input-1)/threads_input, (batch_size+threads_batch-1)/threads_batch);
    ctc_loss_backward_collect_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>
      (grad.data_ptr<scalar_t>(),
       grad_out.data_ptr<scalar_t>(), grad_out.stride(0),
       log_alpha.data_ptr<scalar_t>(), log_beta.data_ptr<scalar_t>(),
       log_probs.data_ptr<scalar_t>(), input_lengths_t.data_ptr<int64_t>(), log_probs.size(0),
       targets.data_ptr<target_t>(), target_lengths_t.data_ptr<int64_t>(), max_target_length,
       neg_log_likelihood.data_ptr<scalar_t>(),
       grad.stride(0), grad.stride(1), grad.stride(2),
       log_probs.stride(0), log_probs.stride(1), log_probs.stride(2),
       log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2),
       log_beta.stride(0), log_beta.stride(1), log_beta.stride(2),
       tg_batch_offsets.data_ptr<int64_t>(), tg_target_stride,
       batch_size, num_labels, BLANK, zero_infinity);
    TORCH_CUDA_KERNEL_LAUNCH_CHECK(); // catch launch errors
  }
  // zero those invalid gradient elements due to padding
  {
    int threads_input = max_threads;
    while (threads_input / 2 >= log_probs.size(0)) {
      threads_input /= 2;
    }
    threads_batch = std::min(max_threads / threads_input, (int) batch_size);
    dim3 block(threads_input, threads_batch);
    dim3 grid(
        (log_probs.size(0) + threads_input-1)/threads_input,
        (batch_size+threads_batch-1)/threads_batch);
    ctc_loss_zero_padded_gradients<scalar_t><<<grid, block, 0, stream>>>(
        grad.data_ptr<scalar_t>(),
        input_lengths_t.data_ptr<int64_t>(),
        grad.stride(0),
        grad.stride(1),
        grad.stride(2),
        grad.size(0),
        grad.size(1),
        grad.size(2)
    );
    TORCH_CUDA_KERNEL_LAUNCH_CHECK();
  }
  return grad;
}
// Forward entry point: dispatches on the floating dtype of log_probs and the
// integer dtype of targets, then defers to the templated implementation.
std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK, bool zero_infinity) {
  (void)zero_infinity; // only used for backward
  return AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_cuda", [&] {
    return (targets.scalar_type() == kLong)
        ? ctc_loss_gpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK)
        : ctc_loss_gpu_template<scalar_t, kInt>(log_probs, targets, input_lengths, target_lengths, BLANK);
  });
}
// Backward entry point: dtype dispatch to the templated backward.
Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
                             const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage. Re-enabled (it had been
  // commented out) so users who opt into deterministic algorithms get the
  // documented alert instead of silent nondeterminism.
  globalContext().alertNotDeterministic("ctc_loss_backward_gpu");
  return AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_backward_cuda", [&] {
    if (targets.scalar_type() == kLong) {
      return ctc_loss_backward_gpu_template<scalar_t, kLong>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
    } else {
      return ctc_loss_backward_gpu_template<scalar_t, kInt>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
    }
  });
}
|
9055d7eb37c552bf73bf622abc777bce9a04eb08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 10.06.2019
//
#include <ops/declarable/helpers/cross.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Batched 3D cross product z = x × y, taken along the last dimension of the
// arrays (the loop reads/writes 3 consecutive last-dim elements per item).
// Dynamic shared memory holds one coordinate array of `rank` ints per thread.
// NOTE(review): the loop index is `uint`, so lenWithoutLastDim is assumed to
// fit in 32 bits — confirm for very large inputs.
template<typename T>
__global__ static void crossCuda(const void* vx, const Nd4jLong* xShapeInfo,
                                 const void* vy, const Nd4jLong* yShapeInfo,
                                 void* vz, const Nd4jLong* zShapeInfo) {
    __shared__ const T* x;
    __shared__ const T* y;
    __shared__ T* z;
    __shared__ int rank, *sharedMem;
    __shared__ Nd4jLong lenWithoutLastDim, totalThreads;
    // thread 0 decodes the untyped buffers and shape info once per block
    if (threadIdx.x == 0) {
        x = reinterpret_cast<const T*>(vx);
        y = reinterpret_cast<const T*>(vy);
        z = reinterpret_cast<T*>(vz);
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<int*>(shmem);
        totalThreads = gridDim.x * blockDim.x;
        rank = shape::rank(xShapeInfo);
        // number of 3-vectors = total length / last-dim extent (xShapeInfo[rank])
        lenWithoutLastDim = shape::length(xShapeInfo) / xShapeInfo[rank]; // shape::length(xShapeInfo) / 3;
    }
    __syncthreads();  // all threads wait for the shared setup above
    // per-thread scratch coordinates inside the dynamic shared buffer
    auto coords = sharedMem + threadIdx.x * rank;
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    // grid-stride loop over all 3-vectors
    for (uint i = tid; i < lenWithoutLastDim; i += totalThreads) {
        // coordinates of the i-th vector over the leading rank-1 dims
        shape::index2coords(i, rank - 1, xShapeInfo + 1, coords);
        coords[rank - 1] = 0;
        auto xOffset = shape::getOffset(xShapeInfo, coords);
        auto yOffset = shape::getOffset(yShapeInfo, coords);
        // load components 0, 1, 2 by stepping the last-dim stride
        const auto x0 = x[xOffset];
        const auto y0 = y[yOffset];
        xOffset += shape::stride(const_cast<Nd4jLong*>(xShapeInfo))[rank - 1];
        yOffset += shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[rank - 1];
        const auto x1 = x[xOffset];
        const auto y1 = y[yOffset];
        xOffset += shape::stride(const_cast<Nd4jLong*>(xShapeInfo))[rank - 1];
        yOffset += shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[rank - 1];
        const auto x2 = x[xOffset];
        const auto y2 = y[yOffset];
        // write the cross-product components in place
        auto zOffset = shape::getOffset(zShapeInfo, coords);
        z[zOffset] = x1 * y2 - x2 * y1;
        zOffset += shape::stride(const_cast<Nd4jLong*>(zShapeInfo))[rank - 1];
        z[zOffset] = x2 * y0 - x0 * y2;
        zOffset += shape::stride(const_cast<Nd4jLong*>(zShapeInfo))[rank - 1];
        z[zOffset] = x0 * y1 - x1 * y0;
    }
}
// Host-side wrapper: launches crossCuda<T> with the given grid/block/shared
// memory configuration on the supplied HIP stream. Asynchronous; the caller
// is responsible for synchronization.
template<typename T>
__host__ static void crossCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
                                       const void* vx, const Nd4jLong* xShapeInfo,
                                       const void* vy, const Nd4jLong* yShapeInfo,
                                       void* vz, const Nd4jLong* zShapeInfo) {
    hipLaunchKernelGGL(( crossCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
// explicit instantiation of the launcher for every numeric type
BUILD_SINGLE_TEMPLATE(template void crossCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo), NUMERIC_TYPES);
// Public entry: computes the batched cross product z = x × y on the device.
// Picks a launch configuration sized by the number of 3-vectors, reserves
// shared memory for one int coordinate array per thread (+ slack), and
// dispatches on x's dtype.
void crossBatched(sd::LaunchContext* context, NDArray *x, NDArray *y, NDArray *z) {
    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    // one thread per 3-vector: lengthOf()/sizeAt(-1) vectors in total
    const int blocksPerGrid = (x->lengthOf() / x->sizeAt(-1) + threadsPerBlock - 1) / threadsPerBlock;
    // rankOf() ints of coordinate scratch per thread, plus 128 bytes of slack
    const int sharedMem = sizeof(int) * threadsPerBlock * x->rankOf() + 128;
    PointersManager manager(context, "cross");
    NDArray::prepareSpecialUse({z}, {x, y});
    BUILD_SINGLE_SELECTOR(x->dataType(), crossCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), y->specialBuffer(), y->specialShapeInfo(), z->specialBuffer(), z->specialShapeInfo()), NUMERIC_TYPES);
    NDArray::registerSpecialUse({z}, {x, y});
    manager.synchronize();
}
}
}
} | 9055d7eb37c552bf73bf622abc777bce9a04eb08.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 10.06.2019
//
#include <ops/declarable/helpers/cross.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Batched 3D cross product z = x × y, taken along the last dimension of the
// arrays (the loop reads/writes 3 consecutive last-dim elements per item).
// Dynamic shared memory holds one coordinate array of `rank` ints per thread.
// NOTE(review): the loop index is `uint`, so lenWithoutLastDim is assumed to
// fit in 32 bits — confirm for very large inputs.
template<typename T>
__global__ static void crossCuda(const void* vx, const Nd4jLong* xShapeInfo,
                                 const void* vy, const Nd4jLong* yShapeInfo,
                                 void* vz, const Nd4jLong* zShapeInfo) {
    __shared__ const T* x;
    __shared__ const T* y;
    __shared__ T* z;
    __shared__ int rank, *sharedMem;
    __shared__ Nd4jLong lenWithoutLastDim, totalThreads;
    // thread 0 decodes the untyped buffers and shape info once per block
    if (threadIdx.x == 0) {
        x = reinterpret_cast<const T*>(vx);
        y = reinterpret_cast<const T*>(vy);
        z = reinterpret_cast<T*>(vz);
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<int*>(shmem);
        totalThreads = gridDim.x * blockDim.x;
        rank = shape::rank(xShapeInfo);
        // number of 3-vectors = total length / last-dim extent (xShapeInfo[rank])
        lenWithoutLastDim = shape::length(xShapeInfo) / xShapeInfo[rank]; // shape::length(xShapeInfo) / 3;
    }
    __syncthreads();  // all threads wait for the shared setup above
    // per-thread scratch coordinates inside the dynamic shared buffer
    auto coords = sharedMem + threadIdx.x * rank;
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    // grid-stride loop over all 3-vectors
    for (uint i = tid; i < lenWithoutLastDim; i += totalThreads) {
        // coordinates of the i-th vector over the leading rank-1 dims
        shape::index2coords(i, rank - 1, xShapeInfo + 1, coords);
        coords[rank - 1] = 0;
        auto xOffset = shape::getOffset(xShapeInfo, coords);
        auto yOffset = shape::getOffset(yShapeInfo, coords);
        // load components 0, 1, 2 by stepping the last-dim stride
        const auto x0 = x[xOffset];
        const auto y0 = y[yOffset];
        xOffset += shape::stride(const_cast<Nd4jLong*>(xShapeInfo))[rank - 1];
        yOffset += shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[rank - 1];
        const auto x1 = x[xOffset];
        const auto y1 = y[yOffset];
        xOffset += shape::stride(const_cast<Nd4jLong*>(xShapeInfo))[rank - 1];
        yOffset += shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[rank - 1];
        const auto x2 = x[xOffset];
        const auto y2 = y[yOffset];
        // write the cross-product components in place
        auto zOffset = shape::getOffset(zShapeInfo, coords);
        z[zOffset] = x1 * y2 - x2 * y1;
        zOffset += shape::stride(const_cast<Nd4jLong*>(zShapeInfo))[rank - 1];
        z[zOffset] = x2 * y0 - x0 * y2;
        zOffset += shape::stride(const_cast<Nd4jLong*>(zShapeInfo))[rank - 1];
        z[zOffset] = x0 * y1 - x1 * y0;
    }
}
// Host-side wrapper: launches crossCuda<T> with the given grid/block/shared
// memory configuration on the supplied CUDA stream. Asynchronous; the caller
// is responsible for synchronization.
template<typename T>
__host__ static void crossCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                       const void* vx, const Nd4jLong* xShapeInfo,
                                       const void* vy, const Nd4jLong* yShapeInfo,
                                       void* vz, const Nd4jLong* zShapeInfo) {
    crossCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
// explicit instantiation of the launcher for every numeric type
BUILD_SINGLE_TEMPLATE(template void crossCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo), NUMERIC_TYPES);
// Public entry: computes the batched cross product z = x × y on the device.
// Picks a launch configuration sized by the number of 3-vectors, reserves
// shared memory for one int coordinate array per thread (+ slack), and
// dispatches on x's dtype.
void crossBatched(sd::LaunchContext* context, NDArray *x, NDArray *y, NDArray *z) {
    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    // one thread per 3-vector: lengthOf()/sizeAt(-1) vectors in total
    const int blocksPerGrid = (x->lengthOf() / x->sizeAt(-1) + threadsPerBlock - 1) / threadsPerBlock;
    // rankOf() ints of coordinate scratch per thread, plus 128 bytes of slack
    const int sharedMem = sizeof(int) * threadsPerBlock * x->rankOf() + 128;
    PointersManager manager(context, "cross");
    NDArray::prepareSpecialUse({z}, {x, y});
    BUILD_SINGLE_SELECTOR(x->dataType(), crossCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), y->specialBuffer(), y->specialShapeInfo(), z->specialBuffer(), z->specialShapeInfo()), NUMERIC_TYPES);
    NDArray::registerSpecialUse({z}, {x, y});
    manager.synchronize();
}
}
}
} |
c9147626999de8c8a992abfe148bee7bb46e2107.hip | // !!! This is a file automatically generated by hipify!!!
/*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
//#define CUDA_UVM
/*** Allocate 1d array of floats ***/
/*** Allocate a 1d array of n floats (host malloc, or HIP managed memory when
 *** CUDA_UVM is defined). Returns NULL and prints a message on failure. ***/
extern "C"
float *alloc_1d_dbl(unsigned long long n)
{
  float *newmem = NULL;  /* stays NULL if allocation fails */
#ifndef CUDA_UVM
  newmem = (float *) malloc ((n * sizeof (float)));
#else
  /* Check the API status: on failure hipMallocManaged may leave the output
     pointer unmodified, so testing the (previously uninitialized) pointer
     against NULL did not reliably detect the error. */
  if (hipMallocManaged((void**)&newmem, (n * sizeof (float))) != hipSuccess) {
    newmem = NULL;
  }
#endif
  if (newmem == NULL) {
    printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
    return (NULL);
  }
  return newmem;
}
/*** Allocate 2d array of floats ***/
/*** Allocate a 2d array of m*n floats as ONE contiguous content buffer plus a
 *** row-pointer array; row i points at content + i*n. Returns NULL (after
 *** releasing any partial allocation) on failure. ***/
extern "C"
float **alloc_2d_dbl(unsigned long long m, unsigned long long n)
{
  /* unsigned long long index: the old `int i` could overflow/narrow when
     compared against the unsigned long long row count m */
  unsigned long long i;
  float **newmem = NULL;
  float *newmem_content = NULL;
#ifndef CUDA_UVM
  newmem = (float **) malloc ((m * sizeof (float *)));
  newmem_content = (float *) malloc ((m * n * sizeof (float)));
  if (newmem == NULL || newmem_content == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    /* release the half that did succeed instead of leaking it */
    free(newmem);
    free(newmem_content);
    return (NULL);
  }
#else
  /* check the API status; on failure the output pointer may be left
     unmodified, so force NULL to make the test below reliable */
  if (hipMallocManaged((void**)&newmem, (m * sizeof (float *))) != hipSuccess)
    newmem = NULL;
  if (hipMallocManaged((void**)&newmem_content, (m * n * sizeof (float))) != hipSuccess)
    newmem_content = NULL;
  if (newmem == NULL || newmem_content == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    hipFree(newmem);
    hipFree(newmem_content);
    return (NULL);
  }
#endif
  /* wire each row pointer into the contiguous content buffer */
  for (i = 0; i < m; i++) {
    newmem[i] = &newmem_content[i*n];
  }
  return (newmem);
}
/*** Allocate a BPNN with n_in inputs, n_hidden hidden units and n_out
 *** outputs. Each layer array carries one extra slot (index 0..n).
 *** Returns NULL only if the BPNN struct itself cannot be allocated;
 *** NOTE(review): sub-array allocation failures are not checked here. ***/
extern "C"
BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out)
{
  BPNN *newnet;
#ifndef CUDA_UVM
  newnet = (BPNN *) malloc (sizeof (BPNN));
#else
  hipMallocManaged((void**)&newnet, sizeof (BPNN));
#endif
  if (newnet == NULL) {
    printf("BPNN_CREATE: Couldn't allocate neural network\n");
    return (NULL);
  }
  /* layer sizes */
  newnet->input_n = n_in;
  newnet->hidden_n = n_hidden;
  newnet->output_n = n_out;
  /* unit activations and deltas ( +1 for the bias/0 slot ) */
  newnet->input_units = alloc_1d_dbl(n_in + 1);
  newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
  newnet->output_units = alloc_1d_dbl(n_out + 1);
  newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
  newnet->output_delta = alloc_1d_dbl(n_out + 1);
  newnet->target = alloc_1d_dbl(n_out + 1);
  /* weight matrices and their previous-step copies (for momentum) */
  newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  return (newnet);
}
/*** Release every allocation made by bpnn_internal_create.
 *** The 2D weight tables are allocated as ONE contiguous content buffer plus
 *** a row-pointer array (see alloc_2d_dbl): row i points INTO that buffer.
 *** The previous code freed every row pointer individually; rows 1..n are
 *** interior pointers never returned by malloc/hipMallocManaged, so freeing
 *** them is undefined behavior (and releases the content repeatedly). Free
 *** the content once through row 0, then the pointer array. ***/
extern "C"
void bpnn_free(BPNN *net)
{
#ifndef CUDA_UVM
  free((char *) net->input_units);
  free((char *) net->hidden_units);
  free((char *) net->output_units);
  free((char *) net->hidden_delta);
  free((char *) net->output_delta);
  free((char *) net->target);
  /* content buffer first (row 0), then the row-pointer array */
  free((char *) net->input_weights[0]);
  free((char *) net->input_weights);
  free((char *) net->input_prev_weights[0]);
  free((char *) net->input_prev_weights);
  free((char *) net->hidden_weights[0]);
  free((char *) net->hidden_weights);
  free((char *) net->hidden_prev_weights[0]);
  free((char *) net->hidden_prev_weights);
  free((char *) net);
#else
  hipFree((char *) net->input_units);
  hipFree((char *) net->hidden_units);
  hipFree((char *) net->output_units);
  hipFree((char *) net->hidden_delta);
  hipFree((char *) net->output_delta);
  hipFree((char *) net->target);
  /* content buffer first (row 0), then the row-pointer array */
  hipFree((char *) net->input_weights[0]);
  hipFree((char *) net->input_weights);
  hipFree((char *) net->input_prev_weights[0]);
  hipFree((char *) net->input_prev_weights);
  hipFree((char *) net->hidden_weights[0]);
  hipFree((char *) net->hidden_weights);
  hipFree((char *) net->hidden_prev_weights[0]);
  hipFree((char *) net->hidden_prev_weights);
  hipFree((char *) net);
#endif
}
| c9147626999de8c8a992abfe148bee7bb46e2107.cu | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
//#define CUDA_UVM
/*** Allocate 1d array of floats ***/
/*** Allocate a 1d array of n floats (host malloc, or CUDA managed memory when
 *** CUDA_UVM is defined). Returns NULL and prints a message on failure. ***/
extern "C"
float *alloc_1d_dbl(unsigned long long n)
{
  float *newmem = NULL;  /* stays NULL if allocation fails */
#ifndef CUDA_UVM
  newmem = (float *) malloc ((n * sizeof (float)));
#else
  /* Check the API status: on failure cudaMallocManaged may leave the output
     pointer unmodified, so testing the (previously uninitialized) pointer
     against NULL did not reliably detect the error. */
  if (cudaMallocManaged((void**)&newmem, (n * sizeof (float))) != cudaSuccess) {
    newmem = NULL;
  }
#endif
  if (newmem == NULL) {
    printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
    return (NULL);
  }
  return newmem;
}
/*** Allocate 2d array of floats ***/
/*** Allocate a 2d array of m*n floats as ONE contiguous content buffer plus a
 *** row-pointer array; row i points at content + i*n. Returns NULL (after
 *** releasing any partial allocation) on failure. ***/
extern "C"
float **alloc_2d_dbl(unsigned long long m, unsigned long long n)
{
  /* unsigned long long index: the old `int i` could overflow/narrow when
     compared against the unsigned long long row count m */
  unsigned long long i;
  float **newmem = NULL;
  float *newmem_content = NULL;
#ifndef CUDA_UVM
  newmem = (float **) malloc ((m * sizeof (float *)));
  newmem_content = (float *) malloc ((m * n * sizeof (float)));
  if (newmem == NULL || newmem_content == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    /* release the half that did succeed instead of leaking it */
    free(newmem);
    free(newmem_content);
    return (NULL);
  }
#else
  /* check the API status; on failure the output pointer may be left
     unmodified, so force NULL to make the test below reliable */
  if (cudaMallocManaged((void**)&newmem, (m * sizeof (float *))) != cudaSuccess)
    newmem = NULL;
  if (cudaMallocManaged((void**)&newmem_content, (m * n * sizeof (float))) != cudaSuccess)
    newmem_content = NULL;
  if (newmem == NULL || newmem_content == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    cudaFree(newmem);
    cudaFree(newmem_content);
    return (NULL);
  }
#endif
  /* wire each row pointer into the contiguous content buffer */
  for (i = 0; i < m; i++) {
    newmem[i] = &newmem_content[i*n];
  }
  return (newmem);
}
/*** Allocate a BPNN with n_in inputs, n_hidden hidden units and n_out
 *** outputs. Each layer array carries one extra slot (index 0..n).
 *** Returns NULL only if the BPNN struct itself cannot be allocated;
 *** NOTE(review): sub-array allocation failures are not checked here. ***/
extern "C"
BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out)
{
  BPNN *newnet;
#ifndef CUDA_UVM
  newnet = (BPNN *) malloc (sizeof (BPNN));
#else
  cudaMallocManaged((void**)&newnet, sizeof (BPNN));
#endif
  if (newnet == NULL) {
    printf("BPNN_CREATE: Couldn't allocate neural network\n");
    return (NULL);
  }
  /* layer sizes */
  newnet->input_n = n_in;
  newnet->hidden_n = n_hidden;
  newnet->output_n = n_out;
  /* unit activations and deltas ( +1 for the bias/0 slot ) */
  newnet->input_units = alloc_1d_dbl(n_in + 1);
  newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
  newnet->output_units = alloc_1d_dbl(n_out + 1);
  newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
  newnet->output_delta = alloc_1d_dbl(n_out + 1);
  newnet->target = alloc_1d_dbl(n_out + 1);
  /* weight matrices and their previous-step copies (for momentum) */
  newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  return (newnet);
}
/*** Release every allocation made by bpnn_internal_create.
 *** The 2D weight tables are allocated as ONE contiguous content buffer plus
 *** a row-pointer array (see alloc_2d_dbl): row i points INTO that buffer.
 *** The previous code freed every row pointer individually; rows 1..n are
 *** interior pointers never returned by malloc/cudaMallocManaged, so freeing
 *** them is undefined behavior (and releases the content repeatedly). Free
 *** the content once through row 0, then the pointer array. ***/
extern "C"
void bpnn_free(BPNN *net)
{
#ifndef CUDA_UVM
  free((char *) net->input_units);
  free((char *) net->hidden_units);
  free((char *) net->output_units);
  free((char *) net->hidden_delta);
  free((char *) net->output_delta);
  free((char *) net->target);
  /* content buffer first (row 0), then the row-pointer array */
  free((char *) net->input_weights[0]);
  free((char *) net->input_weights);
  free((char *) net->input_prev_weights[0]);
  free((char *) net->input_prev_weights);
  free((char *) net->hidden_weights[0]);
  free((char *) net->hidden_weights);
  free((char *) net->hidden_prev_weights[0]);
  free((char *) net->hidden_prev_weights);
  free((char *) net);
#else
  cudaFree((char *) net->input_units);
  cudaFree((char *) net->hidden_units);
  cudaFree((char *) net->output_units);
  cudaFree((char *) net->hidden_delta);
  cudaFree((char *) net->output_delta);
  cudaFree((char *) net->target);
  /* content buffer first (row 0), then the row-pointer array */
  cudaFree((char *) net->input_weights[0]);
  cudaFree((char *) net->input_weights);
  cudaFree((char *) net->input_prev_weights[0]);
  cudaFree((char *) net->input_prev_weights);
  cudaFree((char *) net->hidden_weights[0]);
  cudaFree((char *) net->hidden_weights);
  cudaFree((char *) net->hidden_prev_weights[0]);
  cudaFree((char *) net->hidden_prev_weights);
  cudaFree((char *) net);
#endif
}
29dd078b3dc7207464aeb64caa623c6cf83da446.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 12.06.2019
//
#include <ops/ops.h>
#include <ConstantTadHelper.h>
#include <PointersManager.h>
#include <ShapeUtils.h>
#include <ops/declarable/helpers/prefix.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Per-TAD prefix scan (cumulative Add or Prod), one thread block per TAD.
// Each TAD is processed in chunks of 2*blockDim.x elements; every chunk is
// scanned in dynamic shared memory with a Blelloch-style work-efficient scan
// (up-sweep reduction, then a down-sweep producing an EXCLUSIVE scan), and
// the thread's own input element is folded back in when exclusive == false.
// `lastElemInChunk` carries the running total of the previous chunk so the
// chunks chain into one scan over the whole TAD; `reverse` mirrors indices
// so the scan runs from the tail of the TAD.
// Launch: gridDim.x == numTads, dynamic shared mem >= 2*blockDim.x*sizeof(T).
template <typename T>
__global__ static void prefixPerBlockCuda(scalar::Ops op,
const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const Nd4jLong numTads, const Nd4jLong tadLen,
const bool exclusive, const bool reverse) {
__shared__ T *shared, lastElemInChunk;   // scan buffer + carry between chunks
__shared__ uint numTadChunks, blockDim2;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
shared = reinterpret_cast<T*>(shmem);
blockDim2 = 2 * blockDim.x;              // elements processed per chunk (2 per thread)
numTadChunks = (tadLen + blockDim2 - 1) / blockDim2; // ceil
}
__syncthreads();
// this block's TAD in the input and output buffers
const auto xTad = reinterpret_cast<const T*>(vx) + xTadOffsets[blockIdx.x];
auto zTad = reinterpret_cast<T*>(vz) + zTadOffsets[blockIdx.x];
Nd4jLong sharedInd(2 * threadIdx.x), leftArrInd, rightArrInd, step;
T xLeft, xRight;
for (uint i = 0; i < numTadChunks; ++i) {
// each thread owns two consecutive slots of the current chunk
leftArrInd = sharedInd + i * blockDim2;
rightArrInd = leftArrInd + 1;
if(reverse) {
// mirror indices so the scan consumes the TAD back-to-front
if(rightArrInd < tadLen) {
rightArrInd = tadLen - 1 - rightArrInd;
leftArrInd = tadLen - 1 - leftArrInd;
}
else if(leftArrInd < tadLen)
leftArrInd = tadLen - 1 - leftArrInd;
}
// stage this thread's pair in shared memory (copies kept in registers)
if(leftArrInd < tadLen)
shared[sharedInd] = xLeft = xTad[shape::getIndexOffset(leftArrInd, xTadShapeInfo, tadLen)];
// else
// shared[sharedInd] = (op == scalar::Add) ? 0 : 1;
if(rightArrInd < tadLen)
shared[sharedInd + 1] = xRight = xTad[shape::getIndexOffset(rightArrInd, xTadShapeInfo, tadLen)];
// else
// shared[sharedInd + 1] = (op == scalar::Add) ? 0 : 1;
// up-sweep: build the reduction tree in place
step = 1;
for (uint d = blockDim.x; d > 0; d /= 2) {
__syncthreads();
if(threadIdx.x < d) {
uint left = step * (sharedInd + 1) - 1;
uint right = step * (sharedInd + 2) - 1;
shared[right] = (op == scalar::Add) ? (shared[right] + shared[left]) : (shared[right] * shared[left]);
}
step *= 2;
}
// seed the down-sweep with the operation's identity (0 for Add, 1 for Prod)
if (threadIdx.x == 0)
shared[blockDim2 - 1] = (op == scalar::Add) ? 0 : 1;
__syncthreads();
// down-sweep: turn the tree into an exclusive scan of the chunk
for (uint d = 1; d < blockDim2; d *= 2) {
step /= 2;
__syncthreads();
if(threadIdx.x < d) {
uint left = step * (sharedInd + 1) - 1;
uint right = step * (sharedInd + 2) - 1;
T temp = shared[left];
shared[left] = shared[right];
shared[right] = (op == scalar::Add) ? (shared[right] + temp) : (shared[right] * temp);
}
}
__syncthreads();
// write results: fold own input back in for an inclusive scan, and fold in
// the carry from the previous chunk (if any)
if(leftArrInd < tadLen) {
T result = shared[sharedInd];
if(!exclusive)
result = (op == scalar::Add) ? result + xLeft : result * xLeft;
if(i > 0)
result = (op == scalar::Add) ? result + lastElemInChunk : result * lastElemInChunk;
zTad[shape::getIndexOffset(leftArrInd, zTadShapeInfo, tadLen)] = result;
}
if(rightArrInd < tadLen) {
T result = shared[sharedInd + 1];
if(!exclusive)
result = (op == scalar::Add) ? result + xRight : result * xRight;
if(i > 0)
result = (op == scalar::Add) ? result + lastElemInChunk : result * lastElemInChunk;
// NOTE(review): lastElemInChunk is read just above by every thread and
// overwritten below by the last thread with no barrier in between --
// looks like a potential shared-memory race across the block; confirm.
if(i < numTadChunks - 1 && threadIdx.x == blockDim.x - 1) // last element in chunk
lastElemInChunk = !exclusive ? result : (op == scalar::Add) ? result + xRight : result * xRight;
zTad[shape::getIndexOffset(rightArrInd, zTadShapeInfo, tadLen)] = result;
}
}
}
///////////////////////////////////////////////////////////////////
// Thin host-side wrapper: launches prefixPerBlockCuda<X> on the given HIP
// stream with one block per TAD.  sharedMem must be at least
// 2 * threadsPerBlock * sizeof(X) bytes (the kernel's chunk buffer).
template<typename X>
static void prefixPerBlockCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
scalar::Ops op,
const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const Nd4jLong numTads, const Nd4jLong tadLen,
const bool exclusive, const bool reverse) {
hipLaunchKernelGGL(( prefixPerBlockCuda<X>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, op, vx, xTadShapeInfo, xTadOffsets, vz, zTadShapeInfo, zTadOffsets, numTads, tadLen, exclusive, reverse);
}
///////////////////////////////////////////////////////////////////
// Computes a prefix scan (cumulative Add/Prod, selected by `op`) of x along
// the TADs defined by `dims`, writing the result into z.  One block is
// launched per TAD; each block receives 2 * blockSize * sizeof(T) bytes of
// dynamic shared memory (plus padding).
void prefix(nd4j::LaunchContext * context, scalar::Ops op, const NDArray* x, NDArray* z, const std::vector<int>& dims, bool exclusive, bool reverse) {
    // TAD packs describing how x and z decompose along `dims`
    auto xTads = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(x->getShapeInfo(), dims);
    auto zTads = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(z->getShapeInfo(), dims);
    const Nd4jLong tadCount  = xTads.numberOfTads();
    const Nd4jLong tadLength = x->lengthOf() / tadCount;
    // one block per TAD, two elements per thread staged in shared memory
    const int blockSize = MAX_NUM_THREADS / 2;
    const int gridSize  = tadCount;
    const int smemBytes = 2 * blockSize * x->sizeOfT() + 128;
    PointersManager ptrMgr(context, "prefix");
    NDArray::prepareSpecialUse({z}, {x});
    BUILD_SINGLE_SELECTOR(x->dataType(), prefixPerBlockCudaLauncher, (gridSize, blockSize, smemBytes, context->getCudaStream(), op, x->getSpecialBuffer(), xTads.platformShapeInfo(), xTads.platformOffsets(), z->specialBuffer(), zTads.platformShapeInfo(), zTads.platformOffsets(), tadCount, tadLength, exclusive, reverse), NUMERIC_TYPES);
    NDArray::registerSpecialUse({z}, {x});
    ptrMgr.synchronize();
}
///////////////////////////////////////////////////////////////////
// Convenience overload: empty dimension list -> the whole array is treated
// as a single TAD and scanned end-to-end.
void prefix(nd4j::LaunchContext * context, scalar::Ops op, const NDArray* x, NDArray* z, bool exclusive, bool reverse) {
prefix(context, op, x, z, {}, exclusive, reverse);
}
}
}
} | 29dd078b3dc7207464aeb64caa623c6cf83da446.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 12.06.2019
//
#include <ops/ops.h>
#include <ConstantTadHelper.h>
#include <PointersManager.h>
#include <ShapeUtils.h>
#include <ops/declarable/helpers/prefix.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Per-TAD prefix scan (cumulative Add or Prod), one thread block per TAD.
// Each TAD is processed in chunks of 2*blockDim.x elements; every chunk is
// scanned in dynamic shared memory with a Blelloch-style work-efficient scan
// (up-sweep reduction, then a down-sweep producing an EXCLUSIVE scan), and
// the thread's own input element is folded back in when exclusive == false.
// `lastElemInChunk` carries the running total of the previous chunk so the
// chunks chain into one scan over the whole TAD; `reverse` mirrors indices
// so the scan runs from the tail of the TAD.
// Launch: gridDim.x == numTads, dynamic shared mem >= 2*blockDim.x*sizeof(T).
template <typename T>
__global__ static void prefixPerBlockCuda(scalar::Ops op,
const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const Nd4jLong numTads, const Nd4jLong tadLen,
const bool exclusive, const bool reverse) {
__shared__ T *shared, lastElemInChunk;   // scan buffer + carry between chunks
__shared__ uint numTadChunks, blockDim2;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
shared = reinterpret_cast<T*>(shmem);
blockDim2 = 2 * blockDim.x;              // elements processed per chunk (2 per thread)
numTadChunks = (tadLen + blockDim2 - 1) / blockDim2; // ceil
}
__syncthreads();
// this block's TAD in the input and output buffers
const auto xTad = reinterpret_cast<const T*>(vx) + xTadOffsets[blockIdx.x];
auto zTad = reinterpret_cast<T*>(vz) + zTadOffsets[blockIdx.x];
Nd4jLong sharedInd(2 * threadIdx.x), leftArrInd, rightArrInd, step;
T xLeft, xRight;
for (uint i = 0; i < numTadChunks; ++i) {
// each thread owns two consecutive slots of the current chunk
leftArrInd = sharedInd + i * blockDim2;
rightArrInd = leftArrInd + 1;
if(reverse) {
// mirror indices so the scan consumes the TAD back-to-front
if(rightArrInd < tadLen) {
rightArrInd = tadLen - 1 - rightArrInd;
leftArrInd = tadLen - 1 - leftArrInd;
}
else if(leftArrInd < tadLen)
leftArrInd = tadLen - 1 - leftArrInd;
}
// stage this thread's pair in shared memory (copies kept in registers)
if(leftArrInd < tadLen)
shared[sharedInd] = xLeft = xTad[shape::getIndexOffset(leftArrInd, xTadShapeInfo, tadLen)];
// else
// shared[sharedInd] = (op == scalar::Add) ? 0 : 1;
if(rightArrInd < tadLen)
shared[sharedInd + 1] = xRight = xTad[shape::getIndexOffset(rightArrInd, xTadShapeInfo, tadLen)];
// else
// shared[sharedInd + 1] = (op == scalar::Add) ? 0 : 1;
// up-sweep: build the reduction tree in place
step = 1;
for (uint d = blockDim.x; d > 0; d /= 2) {
__syncthreads();
if(threadIdx.x < d) {
uint left = step * (sharedInd + 1) - 1;
uint right = step * (sharedInd + 2) - 1;
shared[right] = (op == scalar::Add) ? (shared[right] + shared[left]) : (shared[right] * shared[left]);
}
step *= 2;
}
// seed the down-sweep with the operation's identity (0 for Add, 1 for Prod)
if (threadIdx.x == 0)
shared[blockDim2 - 1] = (op == scalar::Add) ? 0 : 1;
__syncthreads();
// down-sweep: turn the tree into an exclusive scan of the chunk
for (uint d = 1; d < blockDim2; d *= 2) {
step /= 2;
__syncthreads();
if(threadIdx.x < d) {
uint left = step * (sharedInd + 1) - 1;
uint right = step * (sharedInd + 2) - 1;
T temp = shared[left];
shared[left] = shared[right];
shared[right] = (op == scalar::Add) ? (shared[right] + temp) : (shared[right] * temp);
}
}
__syncthreads();
// write results: fold own input back in for an inclusive scan, and fold in
// the carry from the previous chunk (if any)
if(leftArrInd < tadLen) {
T result = shared[sharedInd];
if(!exclusive)
result = (op == scalar::Add) ? result + xLeft : result * xLeft;
if(i > 0)
result = (op == scalar::Add) ? result + lastElemInChunk : result * lastElemInChunk;
zTad[shape::getIndexOffset(leftArrInd, zTadShapeInfo, tadLen)] = result;
}
if(rightArrInd < tadLen) {
T result = shared[sharedInd + 1];
if(!exclusive)
result = (op == scalar::Add) ? result + xRight : result * xRight;
if(i > 0)
result = (op == scalar::Add) ? result + lastElemInChunk : result * lastElemInChunk;
// NOTE(review): lastElemInChunk is read just above by every thread and
// overwritten below by the last thread with no barrier in between --
// looks like a potential shared-memory race across the block; confirm.
if(i < numTadChunks - 1 && threadIdx.x == blockDim.x - 1) // last element in chunk
lastElemInChunk = !exclusive ? result : (op == scalar::Add) ? result + xRight : result * xRight;
zTad[shape::getIndexOffset(rightArrInd, zTadShapeInfo, tadLen)] = result;
}
}
}
///////////////////////////////////////////////////////////////////
// Thin host-side wrapper: launches prefixPerBlockCuda<X> on the given CUDA
// stream with one block per TAD.  sharedMem must be at least
// 2 * threadsPerBlock * sizeof(X) bytes (the kernel's chunk buffer).
template<typename X>
static void prefixPerBlockCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
scalar::Ops op,
const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const Nd4jLong numTads, const Nd4jLong tadLen,
const bool exclusive, const bool reverse) {
prefixPerBlockCuda<X><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(op, vx, xTadShapeInfo, xTadOffsets, vz, zTadShapeInfo, zTadOffsets, numTads, tadLen, exclusive, reverse);
}
///////////////////////////////////////////////////////////////////
// Computes a prefix scan (cumulative Add/Prod, selected by `op`) of x along
// the TADs defined by `dims`, writing the result into z.  One block is
// launched per TAD; each block receives 2 * blockSize * sizeof(T) bytes of
// dynamic shared memory (plus padding).
void prefix(nd4j::LaunchContext * context, scalar::Ops op, const NDArray* x, NDArray* z, const std::vector<int>& dims, bool exclusive, bool reverse) {
    // TAD packs describing how x and z decompose along `dims`
    auto xTads = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(x->getShapeInfo(), dims);
    auto zTads = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(z->getShapeInfo(), dims);
    const Nd4jLong tadCount  = xTads.numberOfTads();
    const Nd4jLong tadLength = x->lengthOf() / tadCount;
    // one block per TAD, two elements per thread staged in shared memory
    const int blockSize = MAX_NUM_THREADS / 2;
    const int gridSize  = tadCount;
    const int smemBytes = 2 * blockSize * x->sizeOfT() + 128;
    PointersManager ptrMgr(context, "prefix");
    NDArray::prepareSpecialUse({z}, {x});
    BUILD_SINGLE_SELECTOR(x->dataType(), prefixPerBlockCudaLauncher, (gridSize, blockSize, smemBytes, context->getCudaStream(), op, x->getSpecialBuffer(), xTads.platformShapeInfo(), xTads.platformOffsets(), z->specialBuffer(), zTads.platformShapeInfo(), zTads.platformOffsets(), tadCount, tadLength, exclusive, reverse), NUMERIC_TYPES);
    NDArray::registerSpecialUse({z}, {x});
    ptrMgr.synchronize();
}
///////////////////////////////////////////////////////////////////
// Convenience overload: empty dimension list -> the whole array is treated
// as a single TAD and scanned end-to-end.
void prefix(nd4j::LaunchContext * context, scalar::Ops op, const NDArray* x, NDArray* z, bool exclusive, bool reverse) {
prefix(context, op, x, z, {}, exclusive, reverse);
}
}
}
} |
a2faeafe7da04ac5b4adb8d451d252bfb28b726b.hip | // !!! This is a file automatically generated by hipify!!!
#include "BufferedRandomNumberGenerator.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Draws one pseudo-random int and stores it in *result.
// `seed` selects the hiprand stream; each block uses its own sub-sequence
// (sequence id = blockIdx.x).  Every thread of a block writes the same
// *result slot, so the (commented-out) host wrapper launches it <<<1, 1>>>.
__global__ void generateRandomInts(unsigned int seed, int* result) {
    hiprandState_t rngState;
    // seed / sequence / offset: offset 0 means no extra advance per call
    hiprand_init(seed, blockIdx.x, 0, &rngState);
    *result = hiprand(&rngState);
}
/*
void CudaRNG::fillBuffer(int * buffer, int bufferSize) {
//BufferedRandomNumberGenerator::mtx.lock();
int* gpu_x;
hipMalloc((void**)&gpu_x, sizeof(int));
//generateRandomInts<<<1, 1 >>>(123, gpu_x);
int x;
hipMemcpy(&x, gpu_x, sizeof(int), hipMemcpyDeviceToHost);
printf("Random number = %d.\n", x);
hipFree(gpu_x);
//BufferedRandomNumberGenerator::mtx.unlock();
}*/ | a2faeafe7da04ac5b4adb8d451d252bfb28b726b.cu | #include "BufferedRandomNumberGenerator.h"
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Draws one pseudo-random int and stores it in *result.
// `seed` selects the curand stream; each block uses its own sub-sequence
// (sequence id = blockIdx.x).  Every thread of a block writes the same
// *result slot, so the (commented-out) host wrapper launches it <<<1, 1>>>.
__global__ void generateRandomInts(unsigned int seed, int* result) {
    curandState_t rngState;
    // seed / sequence / offset: offset 0 means no extra advance per call
    curand_init(seed, blockIdx.x, 0, &rngState);
    *result = curand(&rngState);
}
/*
void CudaRNG::fillBuffer(int * buffer, int bufferSize) {
//BufferedRandomNumberGenerator::mtx.lock();
int* gpu_x;
cudaMalloc((void**)&gpu_x, sizeof(int));
//generateRandomInts<<<1, 1 >>>(123, gpu_x);
int x;
cudaMemcpy(&x, gpu_x, sizeof(int), cudaMemcpyDeviceToHost);
printf("Random number = %d.\n", x);
cudaFree(gpu_x);
//BufferedRandomNumberGenerator::mtx.unlock();
}*/ |
a4c455f709eac581399f3adf11f3bf3354565a84.hip | // !!! This is a file automatically generated by hipify!!!
/*------------------------------------------------------------------------------
Copyright 2015 by Nicola Bombieri
H-BF is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* Univerity of Verona, Dept. of Computer Science
* federico.busato@univr.it
*/
#include <iostream>
#include <string>
#include <roctracer/roctx.h>
#include "../include/cuda_util.cuh"
namespace cuda_util {
/**
 * Aborts (via the project's __ERROR macro) when device 0 does not have at
 * least @p Req bytes of free global memory.
 *
 * @param Req  requested allocation size in bytes
 *
 * Fixes two defects in the original: the test was inverted (it errored when
 * free > Req, i.e. exactly when the request COULD be satisfied -- compare the
 * sibling memInfoCUDA, which returns `free > Req` as success), and the
 * "available" field of the message printed Req instead of free.
 */
void memCheckCUDA(size_t Req) {
    size_t free, total;
    hipMemGetInfo(&free, &total);
    if (free < Req)
        __ERROR("MEMORY TOO LOW. Req: " << (float) Req / (1<<20) <<
                " MB, available: " << (float) free / (1<<20) << " MB");
}
/**
 * Prints a summary of device-0 memory (total / free / requested, in MB, with
 * the request as a percentage of total) and reports whether the request fits.
 *
 * @param Req  requested allocation size in bytes
 * @return     true when the currently free memory exceeds Req
 */
bool memInfoCUDA(size_t Req) {
size_t free, total;
hipMemGetInfo(&free, &total);
std::cout << " Total Device Memory:\t" << (total >> 20) << " MB"
<< std::endl
<< " Free Device Memory:\t" << (free >> 20) << " MB"
<< std::endl
<< "Requested Device memory:\t" << (Req >> 20) << " MB"
<< "\t(" << ((Req >> 20) * 100) / (total >> 20) << "%)"
<< std::endl << std::endl;
return free > Req;
}
int deviceProperty::NUM_OF_STREAMING_MULTIPROCESSOR = 0;   // 0 == not queried yet
// Returns the number of streaming multiprocessors of device 0, querying the
// runtime once and caching the result in the static above.
// NOTE(review): the lazy init is a plain static int with no synchronization;
// concurrent first calls would race (likely harmless: both writers store the
// same value) -- confirm single-threaded first use.
int deviceProperty::getNum_of_SMs() {
if(NUM_OF_STREAMING_MULTIPROCESSOR == 0) {
hipDeviceProp_t devProperty;
hipGetDeviceProperties(&devProperty, 0);
NUM_OF_STREAMING_MULTIPROCESSOR = devProperty.multiProcessorCount;
}
return NUM_OF_STREAMING_MULTIPROCESSOR;
}
namespace NVTX {
/*void PushRange(std::string s, const int color) {
nvtxEventAttributes_t eventAttrib = {};
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = color; //colors[color_id];
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = s.c_str();
nvtxRangePushEx(&eventAttrib);
}
void PopRange() {
roctxRangePop();
}*/
}
// Queries device 0 and prints a one-shot summary of its capabilities
// (name/compute capability, SM count, thread limits, memory sizes, and
// per-thread shared-memory/register budgets).  When the optional SM build
// macro is defined, aborts if it does not match the device's SM count.
void cudaStatics() {
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
__CUDA_ERROR("statics");
std::cout << std::endl
<< "\t Graphic Card: " << devProp.name << " (cc: " << devProp.major
<< "." << devProp.minor << ")" << std::endl
<< "\t # SM: " << devProp.multiProcessorCount
<< "\t Threads per SM: " << devProp.maxThreadsPerMultiProcessor
<< "\t Max Resident Thread: " << devProp.multiProcessorCount *
devProp.maxThreadsPerMultiProcessor << std::endl
<< "\t Global Mem.: " << devProp.totalGlobalMem / (1 << 20) << " MB"
<< "\t Shared Mem.: " << devProp.sharedMemPerBlock / 1024 << " KB"
<< "\t L2 Cache: " << devProp.l2CacheSize / 1024
<< " KB" << std::endl
<< "\tsmemPerThreads: " << devProp.sharedMemPerBlock /
devProp.maxThreadsPerMultiProcessor << " Byte"
<< "\t regsPerThreads: " << devProp.regsPerBlock /
devProp.maxThreadsPerMultiProcessor
<< "\t regsPerSM: " << devProp.regsPerBlock << std::endl
<< std::endl;
// compile-time sanity check against the optional SM build macro
#if defined(SM)
if (devProp.multiProcessorCount != SM)
__ERROR("Wrong SM configuration: " << devProp.multiProcessorCount
<< " vs. " << SM)
#endif
}
} // @CudaUtil
| a4c455f709eac581399f3adf11f3bf3354565a84.cu | /*------------------------------------------------------------------------------
Copyright © 2015 by Nicola Bombieri
H-BF is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* Univerity of Verona, Dept. of Computer Science
* federico.busato@univr.it
*/
#include <iostream>
#include <string>
#include <nvToolsExt.h>
#include "../include/cuda_util.cuh"
namespace cuda_util {
/**
 * Aborts (via the project's __ERROR macro) when device 0 does not have at
 * least @p Req bytes of free global memory.
 *
 * @param Req  requested allocation size in bytes
 *
 * Fixes two defects in the original: the test was inverted (it errored when
 * free > Req, i.e. exactly when the request COULD be satisfied -- compare the
 * sibling memInfoCUDA, which returns `free > Req` as success), and the
 * "available" field of the message printed Req instead of free.
 */
void memCheckCUDA(size_t Req) {
    size_t free, total;
    cudaMemGetInfo(&free, &total);
    if (free < Req)
        __ERROR("MEMORY TOO LOW. Req: " << (float) Req / (1<<20) <<
                " MB, available: " << (float) free / (1<<20) << " MB");
}
/**
 * Prints a summary of device-0 memory (total / free / requested, in MB, with
 * the request as a percentage of total) and reports whether the request fits.
 *
 * @param Req  requested allocation size in bytes
 * @return     true when the currently free memory exceeds Req
 */
bool memInfoCUDA(size_t Req) {
size_t free, total;
cudaMemGetInfo(&free, &total);
std::cout << " Total Device Memory:\t" << (total >> 20) << " MB"
<< std::endl
<< " Free Device Memory:\t" << (free >> 20) << " MB"
<< std::endl
<< "Requested Device memory:\t" << (Req >> 20) << " MB"
<< "\t(" << ((Req >> 20) * 100) / (total >> 20) << "%)"
<< std::endl << std::endl;
return free > Req;
}
int deviceProperty::NUM_OF_STREAMING_MULTIPROCESSOR = 0;   // 0 == not queried yet
// Returns the number of streaming multiprocessors of device 0, querying the
// runtime once and caching the result in the static above.
// NOTE(review): the lazy init is a plain static int with no synchronization;
// concurrent first calls would race (likely harmless: both writers store the
// same value) -- confirm single-threaded first use.
int deviceProperty::getNum_of_SMs() {
if(NUM_OF_STREAMING_MULTIPROCESSOR == 0) {
cudaDeviceProp devProperty;
cudaGetDeviceProperties(&devProperty, 0);
NUM_OF_STREAMING_MULTIPROCESSOR = devProperty.multiProcessorCount;
}
return NUM_OF_STREAMING_MULTIPROCESSOR;
}
namespace NVTX {
/*void PushRange(std::string s, const int color) {
nvtxEventAttributes_t eventAttrib = {};
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = color; //colors[color_id];
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = s.c_str();
nvtxRangePushEx(&eventAttrib);
}
void PopRange() {
nvtxRangePop();
}*/
}
// Queries device 0 and prints a one-shot summary of its capabilities
// (name/compute capability, SM count, thread limits, memory sizes, and
// per-thread shared-memory/register budgets).  When the optional SM build
// macro is defined, aborts if it does not match the device's SM count.
void cudaStatics() {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
__CUDA_ERROR("statics");
std::cout << std::endl
<< "\t Graphic Card: " << devProp.name << " (cc: " << devProp.major
<< "." << devProp.minor << ")" << std::endl
<< "\t # SM: " << devProp.multiProcessorCount
<< "\t Threads per SM: " << devProp.maxThreadsPerMultiProcessor
<< "\t Max Resident Thread: " << devProp.multiProcessorCount *
devProp.maxThreadsPerMultiProcessor << std::endl
<< "\t Global Mem.: " << devProp.totalGlobalMem / (1 << 20) << " MB"
<< "\t Shared Mem.: " << devProp.sharedMemPerBlock / 1024 << " KB"
<< "\t L2 Cache: " << devProp.l2CacheSize / 1024
<< " KB" << std::endl
<< "\tsmemPerThreads: " << devProp.sharedMemPerBlock /
devProp.maxThreadsPerMultiProcessor << " Byte"
<< "\t regsPerThreads: " << devProp.regsPerBlock /
devProp.maxThreadsPerMultiProcessor
<< "\t regsPerSM: " << devProp.regsPerBlock << std::endl
<< std::endl;
// compile-time sanity check against the optional SM build macro
#if defined(SM)
if (devProp.multiProcessorCount != SM)
__ERROR("Wrong SM configuration: " << devProp.multiProcessorCount
<< " vs. " << SM)
#endif
}
|
9399a5b3d2a23196f2b73b42ebb2e92c7eff0318.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include "utilities/graph_utils.cuh"
#include "graph.hpp"
#include "rmm_utils.h"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
// Volume of neighboors (*weight_s)
// Fills work[row] with the "volume" of each vertex's neighborhood:
//   - weighted:   sum of v[neighbor] over the row's CSR entries, computed
//                 cooperatively across threadIdx.x by parallel_prefix_sum
//                 (project helper -- presumably an x-dimension reduction over
//                 the given index list; confirm), with lane x==0 writing the
//                 total;
//   - unweighted: simply the row's degree.
// Rows are distributed over the y dimension of the grid/block.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_row_sum(vertex_t n,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *v,
weight_t *work) {
vertex_t row;
edge_t start, end, length;
weight_t sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y;
row < n;
row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
//compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0)
work[row] = sum;
} else {
work[row] = static_cast<weight_t>(length);
}
}
}
// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
// For every CSR edge j = (row, col) computes
//   weight_s[j]  = work[row] + work[col]           (sum of the two volumes)
//   weight_i[j] += volume of the neighborhood intersection
// The smaller adjacency list is taken as the reference; each of its entries
// is binary-searched in the other list (column indices are sorted per row)
// and matches are accumulated into weight_i[j] with atomicAdd, since several
// x-threads contribute to the same edge.  weight_i must be zero-initialised
// by the caller.  Thread mapping: rows -> z, edges of a row -> y,
// reference-row entries -> x.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is(vertex_t n,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s) {
edge_t i, j, Ni, Nj;
vertex_t row, col;
// NOTE(review): `match` is declared vertex_t but stores an edge index
// (`middle`, an edge_t) -- potential narrowing if edge_t is wider; confirm.
vertex_t ref, cur, ref_col, cur_col, match;
weight_t ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z ;
row < n ;
row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y ;
j < csrPtr[row + 1] ;
j += gridDim.y * blockDim.y) {
col = csrInd[j];
//find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[j] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x ;
i < csrPtr[ref + 1] ;
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[j], ref_val);
}
}
}
}
}
// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
// Using list of node pairs
// Same as jaccard_is, but over an explicit list of (first_pair[idx],
// second_pair[idx]) vertex pairs instead of the graph's own edges:
//   weight_s[idx]  = work[first] + work[second]
//   weight_i[idx] += volume of the neighborhood intersection (atomicAdd)
// weight_i is assumed pre-zeroed by the caller.  Thread mapping:
// pairs -> z, reference-row entries -> x.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is_pairs(edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s) {
edge_t i, idx, Ni, Nj, match;
vertex_t row, col, ref, cur, ref_col, cur_col;
weight_t ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z ;
idx < num_pairs ;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
//find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[idx] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x ;
i < csrPtr[ref + 1] ;
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[idx], ref_val);
}
}
}
}
//Jaccard weights (*weight)
// Final Jaccard coefficient per entry: |A ∩ B| / |A ∪ B|, where the union
// volume is recovered as (sum of the two volumes) - (intersection volume).
// Grid-stride loop over all e entries along the x dimension.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_jw(edge_t e,
           weight_t const *weight_i,
           weight_t const *weight_s,
           weight_t *weight_j) {
    for (edge_t idx = threadIdx.x + blockIdx.x * blockDim.x;
         idx < e;
         idx += gridDim.x * blockDim.x) {
        const weight_t intersectVol = weight_i[idx];
        const weight_t unionVol     = weight_s[idx] - intersectVol;
        weight_j[idx] = intersectVol / unionVol;
    }
}
// Host driver for Jaccard over every edge of the CSR graph:
//  1) jaccard_row_sum fills work[] with per-vertex neighborhood volumes,
//  2) weight_i is zeroed and jaccard_is accumulates per-edge intersection
//     volumes (weight_i) and volume sums (weight_s),
//  3) jaccard_jw converts them into the final coefficients in weight_j.
// All launches go to the default stream; hipDeviceSynchronize blocks the
// host until the row sums finish before the fill step.  Always returns 0.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard(vertex_t n,
edge_t e,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration: rows over the y dimension
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( jaccard_row_sum<weighted, vertex_t, edge_t, weight_t>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work);
hipDeviceSynchronize();
// zero the per-edge intersection accumulators before jaccard_is adds into them
fill(e, weight_i, weight_t{0.0});
//setup launch configuration: rows -> z, edges -> y, list entries -> x
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); //1;
//launch kernel
hipLaunchKernelGGL(( jaccard_is<weighted, vertex_t, edge_t, weight_t>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration: flat 1-D sweep over the e edges
nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( jaccard_jw<weighted, vertex_t, edge_t, weight_t>) , dim3(nblocks), dim3(nthreads), 0, 0, e,
weight_i,
weight_s,
weight_j);
return 0;
}
// Host driver for Jaccard over an explicit list of num_pairs vertex pairs
// (first_pair/second_pair) instead of the graph's edges.  Same three-stage
// pipeline as jaccard(), except weight_i is expected to be zero-initialised
// by the caller (see the commented-out fill below).  Always returns 0.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard_pairs(vertex_t n,
edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration: rows over the y dimension
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( jaccard_row_sum<weighted, vertex_t, edge_t, weight_t>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work);
hipDeviceSynchronize();
// NOTE: caller must have initialized weight_i with 0.0
//fill(num_pairs, weight_i, weight_t{0.0});
//setup launch configuration: pairs -> z, list entries -> x
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); //1;
//launch kernel
hipLaunchKernelGGL(( jaccard_is_pairs<weighted, vertex_t, edge_t, weight_t>) , dim3(nblocks), dim3(nthreads), 0, 0, num_pairs,
csrPtr,
csrInd,
first_pair,
second_pair,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration: flat 1-D sweep over the pairs
nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (edge_t) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( jaccard_jw<weighted, vertex_t, edge_t, weight_t>) , dim3(nblocks), dim3(nthreads), 0, 0, num_pairs,
weight_i,
weight_s,
weight_j);
return 0;
}
} //namespace detail
// Public entry point: computes the Jaccard coefficient for every edge of
// `graph`, writing one value per edge into `result` (device pointer, must
// hold graph.number_of_edges entries).  Pass weights == nullptr for the
// unweighted variant; otherwise `weights` supplies per-vertex weights.
// Scratch buffers (per-edge intersection/sum volumes, per-vertex volumes)
// are RMM device vectors released on return.
template <typename VT, typename ET, typename WT>
void jaccard(experimental::GraphCSR<VT,ET,WT> const &graph,
WT const *weights,
WT *result) {
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");
rmm::device_vector<WT> weight_i(graph.number_of_edges);
rmm::device_vector<WT> weight_s(graph.number_of_edges);
rmm::device_vector<WT> work(graph.number_of_vertices);
// dispatch on the compile-time `weighted` flag of the detail implementation
if (weights == nullptr) {
cugraph::detail::jaccard<false, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::jaccard<true, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
// Public entry point: computes the Jaccard coefficient for an explicit list of
// `num_pairs` vertex pairs (first[k], second[k]) into `result`.
template <typename VT, typename ET, typename WT>
void jaccard_list(experimental::GraphCSR<VT,ET,WT> const &graph,
                  WT const *weights,
                  ET num_pairs,
                  VT const *first,
                  VT const *second,
                  WT *result) {
  CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");
  CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter: first is NULL");
  CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter: second in NULL");

  // weight_i must start zeroed: the intersection kernel accumulates into it
  // with atomicAdd and the detail driver skips the explicit fill.
  rmm::device_vector<WT> weight_i(num_pairs, WT{0.0});
  rmm::device_vector<WT> weight_s(num_pairs);
  rmm::device_vector<WT> work(graph.number_of_vertices);

  WT *d_work     = work.data().get();
  WT *d_weight_i = weight_i.data().get();
  WT *d_weight_s = weight_s.data().get();

  if (weights != nullptr) {
    cugraph::detail::jaccard_pairs<true, VT, ET, WT>(graph.number_of_vertices,
                                                     num_pairs,
                                                     graph.offsets,
                                                     graph.indices,
                                                     first,
                                                     second,
                                                     weights,
                                                     d_work,
                                                     d_weight_i,
                                                     d_weight_s,
                                                     result);
  } else {
    cugraph::detail::jaccard_pairs<false, VT, ET, WT>(graph.number_of_vertices,
                                                      num_pairs,
                                                      graph.offsets,
                                                      graph.indices,
                                                      first,
                                                      second,
                                                      weights,
                                                      d_work,
                                                      d_weight_i,
                                                      d_weight_s,
                                                      result);
  }
}
// Explicit instantiations of the public jaccard/jaccard_list entry points for
// every supported (vertex, edge, weight) type combination exported here.
template void jaccard<int32_t, int32_t, float>(experimental::GraphCSR<int32_t,int32_t,float> const &, float const *, float *);
template void jaccard<int32_t, int32_t, double>(experimental::GraphCSR<int32_t,int32_t,double> const &, double const *, double *);
template void jaccard<int64_t, int64_t, float>(experimental::GraphCSR<int64_t,int64_t,float> const &, float const *, float *);
template void jaccard<int64_t, int64_t, double>(experimental::GraphCSR<int64_t,int64_t,double> const &, double const *, double *);
template void jaccard_list<int32_t, int32_t, float>(experimental::GraphCSR<int32_t,int32_t,float> const &, float const *, int32_t, int32_t const *, int32_t const *, float *);
template void jaccard_list<int32_t, int32_t, double>(experimental::GraphCSR<int32_t,int32_t,double> const &, double const *, int32_t, int32_t const *, int32_t const *, double *);
template void jaccard_list<int64_t, int64_t, float>(experimental::GraphCSR<int64_t,int64_t,float> const &, float const *, int64_t, int64_t const *, int64_t const *, float *);
template void jaccard_list<int64_t, int64_t, double>(experimental::GraphCSR<int64_t,int64_t,double> const &, double const *, int64_t, int64_t const *, int64_t const *, double *);
} //namespace cugraph
| 9399a5b3d2a23196f2b73b42ebb2e92c7eff0318.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include "utilities/graph_utils.cuh"
#include "graph.hpp"
#include "rmm_utils.h"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
// Computes each row's neighborhood volume into work[row]: the sum of the
// neighbors' weights v[] when `weighted`, otherwise simply the row degree.
// Rows are strided along the y dimension; the x dimension cooperates inside
// parallel_prefix_sum for the weighted reduction.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_row_sum(vertex_t n,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *v,
weight_t *work) {
vertex_t row;
edge_t start, end, length;
weight_t sum;
// stride over rows along y so any grid size covers all n rows
for (row = threadIdx.y + blockIdx.y * blockDim.y;
row < n;
row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
//compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
// only the first lane along x publishes the reduced sum
if (threadIdx.x == 0)
work[row] = sum;
} else {
work[row] = static_cast<weight_t>(length);
}
}
}
// For every CSR edge (row, col): accumulates the intersection volume into
// weight_i[j] (via atomicAdd — weight_i must be zeroed before launch, see the
// fill() in the host driver) and writes the combined neighborhood volume
// weight_s[j] = work[row] + work[col].
// Thread mapping: z strides over rows, y over the edges of a row, and x scans
// the shorter ("reference") adjacency list, binary-searching the longer one.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is(vertex_t n,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s) {
edge_t i, j, Ni, Nj;
vertex_t row, col;
vertex_t ref, cur, ref_col, cur_col, match;
weight_t ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z ;
row < n ;
row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y ;
j < csrPtr[row + 1] ;
j += gridDim.y * blockDim.y) {
col = csrInd[j];
//find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[j] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x ;
i < csrPtr[ref + 1] ;
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
// weighted: a common neighbor contributes its vertex weight
ref_val = v[ref_col];
} else {
// unweighted: each common neighbor counts as 1
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[j], ref_val);
}
}
}
}
}
// Pair-list variant of jaccard_is: for each requested pair
// (first_pair[idx], second_pair[idx]) accumulates the intersection volume
// into weight_i[idx] (atomicAdd — buffer must be pre-zeroed by the caller)
// and writes weight_s[idx] = work[row] + work[col].
// Thread mapping: z strides over pairs, x scans the shorter adjacency list
// while binary-searching the longer one.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is_pairs(edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s) {
edge_t i, idx, Ni, Nj, match;
vertex_t row, col, ref, cur, ref_col, cur_col;
weight_t ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z ;
idx < num_pairs ;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
//find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[idx] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x ;
i < csrPtr[ref + 1] ;
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[idx], ref_val);
}
}
}
}
// Final stage: weight_j[j] = intersection / union, where the union volume is
// recovered as Wu = weight_s[j] - weight_i[j] (|A|+|B| minus |A∩B|).
// Flat 1D grid-stride loop over the e entries.
// NOTE(review): there is no guard for Wu == 0 (e.g. two isolated vertices),
// which yields 0/0 — confirm downstream consumers tolerate NaN here.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_jw(edge_t e,
weight_t const *weight_i,
weight_t const *weight_s,
weight_t *weight_j) {
edge_t j;
weight_t Wi, Ws, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x ;
j < e ;
j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Ws = weight_s[j];
Wu = Ws - Wi;
weight_j[j] = (Wi / Wu);
}
}
// Host driver computing Jaccard weights for all e CSR edges.
// Pipeline: jaccard_row_sum -> fill(weight_i, 0) -> jaccard_is -> jaccard_jw.
// All launches use the default stream; the explicit synchronize after the
// row-sum kernel guarantees work[] is complete before jaccard_is reads it.
// Always returns 0 (launch errors are not checked).
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard(vertex_t n,
edge_t e,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration: rows strided along y
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
//launch kernel
jaccard_row_sum<weighted, vertex_t, edge_t, weight_t> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work);
cudaDeviceSynchronize();
// zero the intersection accumulator before jaccard_is atomically adds into it
fill(e, weight_i, weight_t{0.0});
//setup launch configuration: rows on z, row edges on y, neighbor scan on x
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); //1;
//launch kernel
jaccard_is<weighted, vertex_t, edge_t, weight_t> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration: flat 1D grid over the e edges
nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
jaccard_jw<weighted, vertex_t, edge_t, weight_t> <<<nblocks, nthreads>>>(e,
weight_i,
weight_s,
weight_j);
return 0;
}
// Host driver computing Jaccard weights for an explicit list of vertex pairs.
// Same pipeline as jaccard() but with the pair-list intersection kernel; the
// zeroing fill is skipped because callers pass a zero-initialized weight_i
// (see the NOTE below). Always returns 0.
template<bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard_pairs(vertex_t n,
edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration: rows strided along y for the row-sum kernel
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
//launch kernel
jaccard_row_sum<weighted, vertex_t, edge_t, weight_t> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work);
// ensure work[] is complete before the intersection kernel reads it
cudaDeviceSynchronize();
// NOTE: weight_i vector is zero-initialized by the caller
//fill(num_pairs, weight_i, weight_t{0.0});
//setup launch configuration: pairs strided along z, neighbor scan along x
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); //1;
//launch kernel
jaccard_is_pairs<weighted, vertex_t, edge_t, weight_t> <<<nblocks, nthreads>>>(num_pairs,
csrPtr,
csrInd,
first_pair,
second_pair,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration: flat 1D grid over the pair list
nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (edge_t) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
jaccard_jw<weighted, vertex_t, edge_t, weight_t> <<<nblocks, nthreads>>>(num_pairs,
weight_i,
weight_s,
weight_j);
return 0;
}
} //namespace detail
// Public entry point: computes the Jaccard coefficient for every edge of the
// CSR graph into `result`. When `weights` is non-null the weighted variant of
// the detail kernels is selected at compile time via the template flag.
template <typename VT, typename ET, typename WT>
void jaccard(experimental::GraphCSR<VT,ET,WT> const &graph,
             WT const *weights,
             WT *result) {
  CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");

  // Scratch buffers: per-edge intersection/sum volumes and per-row sums.
  rmm::device_vector<WT> weight_i(graph.number_of_edges);
  rmm::device_vector<WT> weight_s(graph.number_of_edges);
  rmm::device_vector<WT> work(graph.number_of_vertices);

  WT *d_work     = work.data().get();
  WT *d_weight_i = weight_i.data().get();
  WT *d_weight_s = weight_s.data().get();

  if (weights != nullptr) {
    cugraph::detail::jaccard<true, VT, ET, WT>(graph.number_of_vertices,
                                               graph.number_of_edges,
                                               graph.offsets,
                                               graph.indices,
                                               weights,
                                               d_work,
                                               d_weight_i,
                                               d_weight_s,
                                               result);
  } else {
    cugraph::detail::jaccard<false, VT, ET, WT>(graph.number_of_vertices,
                                                graph.number_of_edges,
                                                graph.offsets,
                                                graph.indices,
                                                weights,
                                                d_work,
                                                d_weight_i,
                                                d_weight_s,
                                                result);
  }
}
// Public entry point: computes the Jaccard coefficient for an explicit list of
// `num_pairs` vertex pairs (first[k], second[k]) into `result`.
template <typename VT, typename ET, typename WT>
void jaccard_list(experimental::GraphCSR<VT,ET,WT> const &graph,
                  WT const *weights,
                  ET num_pairs,
                  VT const *first,
                  VT const *second,
                  WT *result) {
  CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");
  CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter: first is NULL");
  CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter: second in NULL");

  // weight_i must start zeroed: the intersection kernel accumulates into it
  // with atomicAdd and the detail driver skips the explicit fill.
  rmm::device_vector<WT> weight_i(num_pairs, WT{0.0});
  rmm::device_vector<WT> weight_s(num_pairs);
  rmm::device_vector<WT> work(graph.number_of_vertices);

  WT *d_work     = work.data().get();
  WT *d_weight_i = weight_i.data().get();
  WT *d_weight_s = weight_s.data().get();

  if (weights != nullptr) {
    cugraph::detail::jaccard_pairs<true, VT, ET, WT>(graph.number_of_vertices,
                                                     num_pairs,
                                                     graph.offsets,
                                                     graph.indices,
                                                     first,
                                                     second,
                                                     weights,
                                                     d_work,
                                                     d_weight_i,
                                                     d_weight_s,
                                                     result);
  } else {
    cugraph::detail::jaccard_pairs<false, VT, ET, WT>(graph.number_of_vertices,
                                                      num_pairs,
                                                      graph.offsets,
                                                      graph.indices,
                                                      first,
                                                      second,
                                                      weights,
                                                      d_work,
                                                      d_weight_i,
                                                      d_weight_s,
                                                      result);
  }
}
// Explicit instantiations of the public jaccard/jaccard_list entry points for
// every supported (vertex, edge, weight) type combination exported here.
template void jaccard<int32_t, int32_t, float>(experimental::GraphCSR<int32_t,int32_t,float> const &, float const *, float *);
template void jaccard<int32_t, int32_t, double>(experimental::GraphCSR<int32_t,int32_t,double> const &, double const *, double *);
template void jaccard<int64_t, int64_t, float>(experimental::GraphCSR<int64_t,int64_t,float> const &, float const *, float *);
template void jaccard<int64_t, int64_t, double>(experimental::GraphCSR<int64_t,int64_t,double> const &, double const *, double *);
template void jaccard_list<int32_t, int32_t, float>(experimental::GraphCSR<int32_t,int32_t,float> const &, float const *, int32_t, int32_t const *, int32_t const *, float *);
template void jaccard_list<int32_t, int32_t, double>(experimental::GraphCSR<int32_t,int32_t,double> const &, double const *, int32_t, int32_t const *, int32_t const *, double *);
template void jaccard_list<int64_t, int64_t, float>(experimental::GraphCSR<int64_t,int64_t,float> const &, float const *, int64_t, int64_t const *, int64_t const *, float *);
template void jaccard_list<int64_t, int64_t, double>(experimental::GraphCSR<int64_t,int64_t,double> const &, double const *, int64_t, int64_t const *, int64_t const *, double *);
} //namespace cugraph
|
36d661ef042b8fe784731a841cc63d78bc1acdeb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "gru_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "blas.h"
}
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
// Forward pass on the GPU: each layer consumes the previous layer's
// output_gpu. Each layer's delta_gpu is zeroed first so gradients from the
// previous iteration do not leak into this one.
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
state.input = l.output_gpu;
}
}
// Backward pass: walks the layers in reverse, wiring each layer's input/delta
// to the previous layer's output_gpu/delta_gpu (layer 0 gets the original
// network input and delta saved on entry).
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
// Applies the accumulated updates of every layer that has an update_gpu hook,
// using the effective batch size (batch * subdivisions) and current rate.
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
// Uploads one batch of inputs/truths (allocating the device buffers on first
// use, reusing them afterwards) and runs forward + backward in training mode.
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu(net, state);
backward_network_gpu(net, state);
}
// One training step on a single batch; applies the weight update only every
// `subdivisions` batches. Returns the network cost for this batch.
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
// Arguments for train_thread; heap-allocated by train_network_in_thread and
// freed by the thread itself (ownership transfers with the pointer).
typedef struct {
network net;
data d;
float *err;
} train_args;
// pthread entry point: copies its argument struct, releases it, binds the
// thread to the replica's GPU, and stores the training error through *err.
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
/*
** Spawns a joinable worker thread that trains `net` on data part `d` and
** writes the resulting error through `err`. The heap-allocated argument
** struct is freed by train_thread once copied.
** Fix: the calloc result was previously dereferenced without a NULL check,
** crashing with a NULL dereference on allocation failure; fail loudly via
** the file's existing error() path instead (matching the pthread_create
** failure handling below).
*/
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
if(!ptr) error("Memory allocation failed");
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
/*
** Copies a layer's accumulated update buffers (bias/weight/scale updates)
** from device to host. Only CONVOLUTIONAL and CONNECTED layers carry
** host-mirrored update buffers; other layer types are left untouched.
*/
void pull_updates(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
            cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
            if (l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
            break;
        case CONNECTED:
            cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
            cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}

/*
** Copies a layer's host-side update buffers back to the device
** (inverse of pull_updates).
*/
void push_updates(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
            cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
            if (l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
            break;
        case CONNECTED:
            cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
            cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}
// Applies a single layer's accumulated updates (if it has an update_gpu hook)
// with the network's effective batch size and current learning rate.
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
/*
** Accumulates (axpy with alpha = 1) layer l's host-side parameters into the
** running totals held in `base`. Only CONVOLUTIONAL and CONNECTED layers
** participate; used by sync_layer to average weights across replicas.
*/
void merge_weights(layer l, layer base)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
            axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
            if (l.scales) {
                axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
            }
            break;
        case CONNECTED:
            axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
            axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
            break;
        default:
            break;
    }
}

/*
** Scales a layer's host-side parameters in place by s (sync_layer passes
** 1/n after merge_weights to turn the sums into an average).
*/
void scale_weights(layer l, float s)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            scal_cpu(l.n, s, l.biases, 1);
            scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
            if (l.scales) {
                scal_cpu(l.n, s, l.scales, 1);
            }
            break;
        case CONNECTED:
            scal_cpu(l.outputs, s, l.biases, 1);
            scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
            break;
        default:
            break;
    }
}
/*
** Copies a layer's parameters (biases, weights, optional scales) from the
** device to the host mirrors. Only CONVOLUTIONAL and CONNECTED layers are
** handled; other types are no-ops.
*/
void pull_weights(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_pull_array(l.biases_gpu, l.biases, l.n);
            cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
            if (l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
            break;
        case CONNECTED:
            cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
            cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}

/*
** Copies a layer's host-side parameters back to the device
** (inverse of pull_weights).
*/
void push_weights(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_push_array(l.biases_gpu, l.biases, l.n);
            cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
            if (l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
            break;
        case CONNECTED:
            cuda_push_array(l.biases_gpu, l.biases, l.outputs);
            cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}
/*
** Uploads the averaged parameters held in `base` (host side) into layer l's
** device buffers. Note the scale condition tests base.scales, since base
** owns the source arrays.
*/
void distribute_weights(layer l, layer base)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_push_array(l.biases_gpu, base.biases, l.n);
            cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
            if (base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
            break;
        case CONNECTED:
            cuda_push_array(l.biases_gpu, base.biases, l.outputs);
            cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}

/*
** Accumulates (axpy with alpha = 1) layer l's host-side update buffers into
** the totals held in `base`.
*/
void merge_updates(layer l, layer base)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
            axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
            if (l.scale_updates) {
                axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
            }
            break;
        case CONNECTED:
            axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
            axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
            break;
        default:
            break;
    }
}

/*
** Uploads the merged update buffers held in `base` (host side) into layer l's
** device buffers (counterpart of distribute_weights for updates).
*/
void distribute_updates(layer l, layer base)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
            cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
            if (base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
            break;
        case CONNECTED:
            cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
            cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}
// Averages layer j's parameters across the n replicas: pull every replica's
// copy to the host, sum into replica 0's host buffers, scale by 1/n, then
// push the averaged values back to every replica's device.
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
// base now holds the sum over all replicas; divide to get the mean
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
// Arguments for sync_layer_thread; heap-allocated by sync_layer_in_thread
// and freed by the thread after use.
typedef struct{
network *nets;
int n;
int j;
} sync_args;
// pthread entry point: syncs one layer across all replicas, then releases
// the argument struct.
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
/*
** Spawns a joinable worker thread that averages layer j across the n
** replicas (see sync_layer). The argument struct is freed by the thread.
** Fix: the calloc result was previously dereferenced without a NULL check,
** crashing with a NULL dereference on allocation failure; fail loudly via
** the file's existing error() path instead (matching the pthread_create
** failure handling below).
*/
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
if(!ptr) error("Memory allocation failed");
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
// Synchronizes every layer of every replica, one worker thread per layer,
// and joins them all. Also advances the shared `seen` counter to account for
// the batches the other n-1 replicas processed since the last sync, then
// copies it to every replica.
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
// Multi-GPU training step: splits d into n equal parts, trains each replica
// in its own thread, and returns the mean error. Every `interval` batches the
// replicas' weights are averaged via sync_nets.
// NOTE(review): the calloc results are not checked for NULL here — same
// pattern as the thread helpers above.
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
// the dataset must split exactly across the n replicas
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//hipDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//hipDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
/*
** Returns the host pointer to layer i's output, first copying the device
** output into it. REGION layers are exempt from the copy.
*/
float *get_network_output_layer_gpu(network net, int i)
{
    layer l = net.layers[i];
    if (l.type == REGION) {
        return l.output;
    }
    cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
    return l.output;
}

/*
** Returns the (host-side) output of the last non-COST layer.
*/
float *get_network_output_gpu(network net)
{
    int i = net.n - 1;
    while (i > 0 && net.layers[i].type == COST) {
        --i;
    }
    return get_network_output_layer_gpu(net, i);
}
// Inference entry point: uploads `input` to a fresh device buffer, runs the
// forward pass only (train = 0, no truth/delta), frees the device copy, and
// returns the last non-COST layer's output. The returned pointer aliases
// that layer's host-side output buffer — do not free it.
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
state.input = cuda_make_array(input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
cuda_free(state.input);
return out;
}
| 36d661ef042b8fe784731a841cc63d78bc1acdeb.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "gru_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "blas.h"
}
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
// Forward pass on the GPU: each layer consumes the previous layer's
// output_gpu; delta_gpu is zeroed first so stale gradients do not leak in.
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
state.input = l.output_gpu;
}
}
// Backward pass: walks the layers in reverse, wiring each layer's input/delta
// to the previous layer's buffers (layer 0 gets the originals saved on entry).
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
// Applies the accumulated updates of every layer that has an update_gpu hook.
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
// Uploads one batch of inputs/truths (allocating the device buffers on first
// use, reusing them afterwards) and runs forward + backward in training mode.
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu(net, state);
backward_network_gpu(net, state);
}
// One training step on a single batch; applies the weight update only every
// `subdivisions` batches. Returns the network cost for this batch.
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
// Arguments for train_thread; heap-allocated by train_network_in_thread and
// freed by the thread itself (ownership transfers with the pointer).
typedef struct {
network net;
data d;
float *err;
} train_args;
// pthread entry point: copies its argument struct, releases it, binds the
// thread to the replica's GPU, and stores the training error through *err.
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
// Spawns a joinable worker thread training `net` on data part `d`.
// NOTE(review): the calloc result is not checked before being dereferenced.
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
/*
** Copies a layer's accumulated update buffers (bias/weight/scale updates)
** from device to host. Only CONVOLUTIONAL and CONNECTED layers carry
** host-mirrored update buffers; other layer types are left untouched.
*/
void pull_updates(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
            cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
            if (l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
            break;
        case CONNECTED:
            cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
            cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}

/*
** Copies a layer's host-side update buffers back to the device
** (inverse of pull_updates).
*/
void push_updates(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
            cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
            if (l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
            break;
        case CONNECTED:
            cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
            cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}
// Applies a single layer's accumulated updates (if it has an update_gpu hook)
// with the network's effective batch size and current learning rate.
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
/*
** Accumulates (axpy with alpha = 1) layer l's host-side parameters into the
** running totals held in `base`. Only CONVOLUTIONAL and CONNECTED layers
** participate; used by sync_layer to average weights across replicas.
*/
void merge_weights(layer l, layer base)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
            axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
            if (l.scales) {
                axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
            }
            break;
        case CONNECTED:
            axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
            axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
            break;
        default:
            break;
    }
}

/*
** Scales a layer's host-side parameters in place by s (sync_layer passes
** 1/n after merge_weights to turn the sums into an average).
*/
void scale_weights(layer l, float s)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            scal_cpu(l.n, s, l.biases, 1);
            scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
            if (l.scales) {
                scal_cpu(l.n, s, l.scales, 1);
            }
            break;
        case CONNECTED:
            scal_cpu(l.outputs, s, l.biases, 1);
            scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
            break;
        default:
            break;
    }
}
/*
** Copies a layer's parameters (biases, weights, optional scales) from the
** device to the host mirrors. Only CONVOLUTIONAL and CONNECTED layers are
** handled; other types are no-ops.
*/
void pull_weights(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_pull_array(l.biases_gpu, l.biases, l.n);
            cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
            if (l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
            break;
        case CONNECTED:
            cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
            cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}

/*
** Copies a layer's host-side parameters back to the device
** (inverse of pull_weights).
*/
void push_weights(layer l)
{
    switch (l.type) {
        case CONVOLUTIONAL:
            cuda_push_array(l.biases_gpu, l.biases, l.n);
            cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
            if (l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
            break;
        case CONNECTED:
            cuda_push_array(l.biases_gpu, l.biases, l.outputs);
            cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
            break;
        default:
            break;
    }
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
/* Accumulate layer l's host-side gradient buffers into the accumulator
 * layer `base` (same structure as merge_weights, but for *_updates arrays). */
void merge_updates(layer l, layer base)
{
    if (l.type == CONVOLUTIONAL) {
        axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
        axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
        if (l.scale_updates) {
            axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
        }
    } else if(l.type == CONNECTED) {
        axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
        axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
    }
}
/* Push the host-side gradient buffers of `base` out to layer l's GPU
 * update buffers (counterpart of merge_updates). */
void distribute_updates(layer l, layer base)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
        cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
        if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
        cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
    }
}
/* Synchronize layer j across all n network replicas (one per GPU):
 * pull every replica's weights to the host, average them into nets[0]'s
 * copy, then push the average back to every replica.
 * NOTE: cuda_set_device is called before each per-replica transfer so the
 * copy targets that replica's GPU. */
void sync_layer(network *nets, int n, int j)
{
    //printf("Syncing layer %d\n", j);
    int i;
    network net = nets[0];
    layer base = net.layers[j];
    cuda_set_device(net.gpu_index);
    pull_weights(base);
    /* accumulate every other replica's weights into base (host side) */
    for (i = 1; i < n; ++i) {
        cuda_set_device(nets[i].gpu_index);
        layer l = nets[i].layers[j];
        pull_weights(l);
        merge_weights(l, base);
    }
    /* turn the sum into an average */
    scale_weights(base, 1./n);
    /* broadcast the averaged weights to all replicas */
    for (i = 0; i < n; ++i) {
        cuda_set_device(nets[i].gpu_index);
        layer l = nets[i].layers[j];
        distribute_weights(l, base);
    }
    //printf("Done syncing layer %d\n", j);
}
/* Argument bundle handed (heap-allocated) to sync_layer_thread:
 * the replica array, its length, and the layer index to synchronize. */
typedef struct{
    network *nets;  /* all network replicas */
    int n;          /* number of replicas */
    int j;          /* layer index to sync */
} sync_args;
/* pthread entry point: unpacks the heap-allocated sync_args, syncs one
 * layer across all replicas, and frees the argument struct (the thread
 * owns `ptr`; the creator must not free it). */
void *sync_layer_thread(void *ptr)
{
    sync_args args = *(sync_args*)ptr;
    sync_layer(args.nets, args.n, args.j);
    free(ptr);
    return 0;
}
/* Spawn a thread that synchronizes layer j across the n replicas.
 * Returns the thread handle; the caller must pthread_join it.
 * The calloc'd argument struct is freed by the thread itself. */
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
    pthread_t thread;
    sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
    ptr->nets = nets;
    ptr->n = n;
    ptr->j = j;
    if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
    return thread;
}
/* Average the weights of all n replicas, one thread per layer, then join.
 * Also advances the shared `seen` counter to account for the images the
 * other n-1 replicas processed since the last sync. */
void sync_nets(network *nets, int n, int interval)
{
    int j;
    int layers = nets[0].n;
    pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));

    /* credit work done by the other replicas, then propagate the counter */
    *nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
    for (j = 0; j < n; ++j){
        *nets[j].seen = *nets[0].seen;
    }
    /* one sync thread per layer; join them all before returning */
    for (j = 0; j < layers; ++j) {
        threads[j] = sync_layer_in_thread(nets, n, j);
    }
    for (j = 0; j < layers; ++j) {
        pthread_join(threads[j], 0);
    }
    free(threads);
}
/* Train n replicas in parallel, one thread per replica, on disjoint slices
 * of batch `d`. Every `interval` batches the replicas' weights are averaged
 * via sync_nets. Returns the mean loss across replicas. */
float train_networks(network *nets, int n, data d, int interval)
{
    int i;
    int batch = nets[0].batch;
    int subdivisions = nets[0].subdivisions;
    /* d must split evenly into n per-replica mini-batches */
    assert(batch * subdivisions * n == d.X.rows);
    pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
    float *errors = (float *) calloc(n, sizeof(float));

    float sum = 0;
    for(i = 0; i < n; ++i){
        data p = get_data_part(d, i, n);
        threads[i] = train_network_in_thread(nets[i], p, errors + i);
    }
    for(i = 0; i < n; ++i){
        pthread_join(threads[i], 0);
        //printf("%f\n", errors[i]);
        sum += errors[i];
    }
    //cudaDeviceSynchronize();
    if (get_current_batch(nets[0]) % interval == 0) {
        printf("Syncing... ");
        fflush(stdout);
        sync_nets(nets, n, interval);
        printf("Done!\n");
    }
    //cudaDeviceSynchronize();
    free(threads);
    free(errors);
    return (float)sum/(n);
}
/* Return layer i's host-side output, first copying it down from the GPU
 * (REGION layers already keep their output on the host, so they skip the copy). */
float *get_network_output_layer_gpu(network net, int i)
{
    layer l = net.layers[i];
    if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
    return l.output;
}
/* Return the output of the last non-COST layer of the network.
 * NOTE(review): the loop condition is i > 0, so layer 0 is returned without
 * a type check when every later layer is COST — confirm this is intended. */
float *get_network_output_gpu(network net)
{
    int i;
    for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
    return get_network_output_layer_gpu(net, i);
}
/* Run a forward pass on the GPU for host buffer `input`
 * (get_network_input_size(net) * net.batch floats) and return a pointer to
 * the host-side output of the last non-COST layer. Inference only:
 * train/truth/delta are zeroed in the state. */
float *network_predict_gpu(network net, float *input)
{
    cuda_set_device(net.gpu_index);
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.index = 0;
    state.net = net;
    state.input = cuda_make_array(input, size);  /* upload input to the device */
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    forward_network_gpu(net, state);
    float *out = get_network_output_gpu(net);    /* also copies output to host */
    cuda_free(state.input);
    return out;
}
|
54937efd9ce53a95692cc42877d60707ea2e740c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/fractionalmaxpool3dgradwithfixedksize_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
// Zero-fill `output` (outer_size elements) with a grid-stride loop, so any
// launch configuration covers the whole buffer.
template <typename T>
__global__ void InitOutput(T *output, const int64_t outer_size) {
  T zero = 0;
  const size_t step = blockDim.x * gridDim.x;
  size_t id = blockIdx.x * blockDim.x + threadIdx.x;
  while (id < outer_size) {
    output[id] = zero;
    id += step;
  }
}
// Scatter kernel for FractionalMaxPool3D backward: each out_backprop element
// is atomically added into `output` at the flat offset stored in argmax
// (offset within the (D,H,W) volume of sample posn / channel posc).
// `output` must be zero-initialized before launch (see InitOutput).
template <typename T, typename S>
__global__ void Fractionalmaxpool3dgradwithfixedksize(const T *origin_input, const T *out_backprop, S *argmax,
                                                      T *output, int64_t outputD, int64_t outputH, int64_t outputW,
                                                      int64_t N, int64_t C, int64_t inputD, int64_t inputH,
                                                      int64_t inputW, const int64_t out_backprop_size) {
  // Grid-stride loop over every element of out_backprop.
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < out_backprop_size; pos += blockDim.x * gridDim.x) {
    const int posn = pos / (C * outputD * outputH * outputW);
    const int posc = pos / (outputD * outputH * outputW) % C;
    S maxind = argmax[pos];
    // Atomic: several output positions may map to the same argmax slot.
    MsAtomicAdd(output + (posn * C + posc) * inputD * inputH * inputW + maxind, out_backprop[pos]);
    // BUG FIX: removed the `return;` that was inside this loop — it made each
    // thread stop after its first element, silently dropping gradients
    // whenever out_backprop_size exceeds the number of launched threads.
  }
}
// Host-side launcher: zero-fills the gradient buffer, then runs the scatter
// kernel that accumulates each out_backprop element at its argmax location.
// outer_size is presumably the total element count of `output` — confirm at
// the call site. Both launches are asynchronous on `cuda_stream`.
template <typename T, typename S>
void CalFractionalmaxpool3dgradwithfixedksize(const T *origin_input, const T *out_backprop, S *argmax, T *output,
                                              int64_t outputD, int64_t outputH, int64_t outputW, int64_t inputN,
                                              int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
                                              const int64_t outer_size, const int64_t out_backprop_size,
                                              const uint32_t &device_id, hipStream_t cuda_stream) {
  // Zero first: the scatter kernel accumulates with atomic adds.
  hipLaunchKernelGGL(( InitOutput), dim3(CUDA_BLOCKS(device_id, outer_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, output, outer_size);
  hipLaunchKernelGGL(( Fractionalmaxpool3dgradwithfixedksize), dim3(CUDA_BLOCKS(device_id, out_backprop_size)), dim3(CUDA_THREADS(device_id)), 0,
                     cuda_stream, origin_input, out_backprop, argmax, output, outputD, outputH,
                                                             outputW, inputN, inputC, inputD, inputH, inputW,
                                                             out_backprop_size);
  return;
}
// Explicit instantiations exported from this translation unit: every
// supported combination of data type (half/float/double/int32/int64) and
// argmax index type (int32/int64).
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<half, int32_t>(
  const half *origin_input, const half *out_backprop, int32_t *argmax, half *output, int64_t outputD, int64_t outputH,
  int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<float, int32_t>(
  const float *origin_input, const float *out_backprop, int32_t *argmax, float *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<double, int32_t>(
  const double *origin_input, const double *out_backprop, int32_t *argmax, double *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int32_t, int32_t>(
  const int32_t *origin_input, const int32_t *out_backprop, int32_t *argmax, int32_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int64_t, int32_t>(
  const int64_t *origin_input, const int64_t *out_backprop, int32_t *argmax, int64_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<half, int64_t>(
  const half *origin_input, const half *out_backprop, int64_t *argmax, half *output, int64_t outputD, int64_t outputH,
  int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<float, int64_t>(
  const float *origin_input, const float *out_backprop, int64_t *argmax, float *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<double, int64_t>(
  const double *origin_input, const double *out_backprop, int64_t *argmax, double *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int32_t, int64_t>(
  const int32_t *origin_input, const int32_t *out_backprop, int64_t *argmax, int32_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int64_t, int64_t>(
  const int64_t *origin_input, const int64_t *out_backprop, int64_t *argmax, int64_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, hipStream_t cuda_stream);
| 54937efd9ce53a95692cc42877d60707ea2e740c.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/fractionalmaxpool3dgradwithfixedksize_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
// Zero-fill `output` (outer_size elements) with a grid-stride loop, so any
// launch configuration covers the whole buffer.
template <typename T>
__global__ void InitOutput(T *output, const int64_t outer_size) {
  T zero = 0;
  for (size_t id = blockIdx.x * blockDim.x + threadIdx.x; id < outer_size; id += blockDim.x * gridDim.x) {
    output[id] = zero;
  }
  return;
}
// Scatter kernel for FractionalMaxPool3D backward: each out_backprop element
// is atomically added into `output` at the flat offset stored in argmax
// (offset within the (D,H,W) volume of sample posn / channel posc).
// `output` must be zero-initialized before launch (see InitOutput).
template <typename T, typename S>
__global__ void Fractionalmaxpool3dgradwithfixedksize(const T *origin_input, const T *out_backprop, S *argmax,
                                                      T *output, int64_t outputD, int64_t outputH, int64_t outputW,
                                                      int64_t N, int64_t C, int64_t inputD, int64_t inputH,
                                                      int64_t inputW, const int64_t out_backprop_size) {
  // Grid-stride loop over every element of out_backprop.
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < out_backprop_size; pos += blockDim.x * gridDim.x) {
    const int posn = pos / (C * outputD * outputH * outputW);
    const int posc = pos / (outputD * outputH * outputW) % C;
    S maxind = argmax[pos];
    // Atomic: several output positions may map to the same argmax slot.
    MsAtomicAdd(output + (posn * C + posc) * inputD * inputH * inputW + maxind, out_backprop[pos]);
    // BUG FIX: removed the `return;` that was inside this loop — it made each
    // thread stop after its first element, silently dropping gradients
    // whenever out_backprop_size exceeds the number of launched threads.
  }
}
// Host-side launcher: zero-fills the gradient buffer, then runs the scatter
// kernel that accumulates each out_backprop element at its argmax location.
// outer_size is presumably the total element count of `output` — confirm at
// the call site. Both launches are asynchronous on `cuda_stream`.
template <typename T, typename S>
void CalFractionalmaxpool3dgradwithfixedksize(const T *origin_input, const T *out_backprop, S *argmax, T *output,
                                              int64_t outputD, int64_t outputH, int64_t outputW, int64_t inputN,
                                              int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
                                              const int64_t outer_size, const int64_t out_backprop_size,
                                              const uint32_t &device_id, cudaStream_t cuda_stream) {
  // Zero first: the scatter kernel accumulates with atomic adds.
  InitOutput<<<CUDA_BLOCKS(device_id, outer_size), CUDA_THREADS(device_id), 0, cuda_stream>>>(output, outer_size);
  Fractionalmaxpool3dgradwithfixedksize<<<CUDA_BLOCKS(device_id, out_backprop_size), CUDA_THREADS(device_id), 0,
                                          cuda_stream>>>(origin_input, out_backprop, argmax, output, outputD, outputH,
                                                         outputW, inputN, inputC, inputD, inputH, inputW,
                                                         out_backprop_size);
  return;
}
// Explicit instantiations exported from this translation unit: every
// supported combination of data type (half/float/double/int32/int64) and
// argmax index type (int32/int64).
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<half, int32_t>(
  const half *origin_input, const half *out_backprop, int32_t *argmax, half *output, int64_t outputD, int64_t outputH,
  int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<float, int32_t>(
  const float *origin_input, const float *out_backprop, int32_t *argmax, float *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<double, int32_t>(
  const double *origin_input, const double *out_backprop, int32_t *argmax, double *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int32_t, int32_t>(
  const int32_t *origin_input, const int32_t *out_backprop, int32_t *argmax, int32_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int64_t, int32_t>(
  const int64_t *origin_input, const int64_t *out_backprop, int32_t *argmax, int64_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<half, int64_t>(
  const half *origin_input, const half *out_backprop, int64_t *argmax, half *output, int64_t outputD, int64_t outputH,
  int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<float, int64_t>(
  const float *origin_input, const float *out_backprop, int64_t *argmax, float *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<double, int64_t>(
  const double *origin_input, const double *out_backprop, int64_t *argmax, double *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int32_t, int64_t>(
  const int32_t *origin_input, const int32_t *out_backprop, int64_t *argmax, int32_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalFractionalmaxpool3dgradwithfixedksize<int64_t, int64_t>(
  const int64_t *origin_input, const int64_t *out_backprop, int64_t *argmax, int64_t *output, int64_t outputD,
  int64_t outputH, int64_t outputW, int64_t inputN, int64_t inputC, int64_t inputD, int64_t inputH, int64_t inputW,
  const int64_t outer_size, const int64_t out_backprop_size, const uint32_t &device_id, cudaStream_t cuda_stream);
|
571943ef52564bdea55bc9e3a179ef412a6564dd.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// int8 tensor layouts: NCxHWx<32> activations, CxRSKx<32> filters.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
// Tile shapes: threadblock / warp / tensor-op instruction.
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
// Epilogue: bias add + linear combination + HSwish with int8 clamping.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
    int8_t, 8, int32_t, int32_t, float>;
// SM75 tensor-op int8 convolution configuration for this specialization.
using Convolution = cutlass::convolution::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutSrc, int32_t, LayoutSrc, int32_t,
    cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
        cutlass::convolution::ConvType::kConvolution>,
    2, 16, 16, true>;
// Explicit instantiation of the generic wrapper for the configuration above.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const int8_t* d_src,
    const int8_t* d_filter,
    const int32_t* d_bias,
    const int8_t* d_z,
    int8_t* d_dst,
    int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 571943ef52564bdea55bc9e3a179ef412a6564dd.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// int8 tensor layouts: NCxHWx<32> activations, CxRSKx<32> filters.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
// Tile shapes: threadblock / warp / tensor-op instruction.
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
// Epilogue: bias add + linear combination + HSwish with int8 clamping.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
    int8_t, 8, int32_t, int32_t, float>;
// SM75 tensor-op int8 convolution configuration for this specialization.
using Convolution = cutlass::convolution::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutSrc, int32_t, LayoutSrc, int32_t,
    cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
        cutlass::convolution::ConvType::kConvolution>,
    2, 16, 16, true>;
// Explicit instantiation of the generic wrapper for the configuration above.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const int8_t* d_src,
    const int8_t* d_filter,
    const int32_t* d_bias,
    const int8_t* d_z,
    int8_t* d_dst,
    int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
047edca7ca172b741297f99205d0a666e034e98b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "../gpu/Region.h"
// Smoke test: builds a small message-passing graph (three binary variables,
// unary potentials on each, pairwise potentials forming a 3-cycle), runs the
// CUDA async RMP solver on it, and prints the elapsed time.
int main(int, char**) {
    std::cout << "Test..." << std::endl;

    /*
    unsigned int flags = 0;
    gpuErrchk(hipGetDeviceFlags(&flags));
    flags |= hipDeviceScheduleYield;
    gpuErrchk(hipSetDeviceFlags(flags));
    */

    MPGraph<double, int> g;
    // hipMallocManaged((void**)&g, sizeof(g));
    // Potential tables (unary over 2 states, pairwise over 2x2 states).
    double dataUnary[2]{0.0, 1.0};
    //double dataUnaryStrong[2]{1.0, 3.0};
    double dataPair[4]{1.0, 0.0, 0.0, 1.0};
    //double dataPairAsym[4]{3.5, 0.5, 0.2, 1.0};

    g.AddVariables({ 2, 2, 2 });
    std::vector<MPGraph<double, int>::PotentialID> pots;
    pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataUnary, 2)));
    //pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataUnary, 2)));
    //pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataUnary, 2)));
    pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataPair, 4)));
    //pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataPairAsym, 4)));

    // One unary region per variable, one pairwise region per variable pair.
    std::vector<MPGraph<double, int>::RegionID> regs;
    for (int k = 0; k < 3; ++k) {
        regs.push_back(g.AddRegion(1.0, std::vector<int>{k}, pots[0]));
    }
    regs.push_back(g.AddRegion(1.0, { 0, 1 }, pots[1]));
    regs.push_back(g.AddRegion(1.0, { 1, 2 }, pots[1]));
    regs.push_back(g.AddRegion(1.0, { 0, 2 }, pots[1]));

    // Connect each unary region to the two pairwise regions containing it.
    g.AddConnection(regs[0], regs[3]);
    g.AddConnection(regs[0], regs[5]);
    g.AddConnection(regs[1], regs[3]);
    g.AddConnection(regs[1], regs[4]);
    g.AddConnection(regs[2], regs[4]);
    g.AddConnection(regs[2], regs[5]);
    g.AllocateMessageMemory();

    //AsyncRMP<float, int> ARMP;
    //ARMP.RunMP(g, 1.0f);
    //return 0;

    g.CopyMessageMemory();
    CPrecisionTimer CTmr;
    CTmr.Start();
    CudaAsyncRMPThread<double, int> ARMP;
    ARMP.CudaRunMP(g, 1.0, 100,1000, 100);
    std::cout << CTmr.Stop() << std::endl;
    g.DeallocateGpuGraph();
    return 0;
}
| 047edca7ca172b741297f99205d0a666e034e98b.cu | #include <iostream>
#include "../gpu/Region.h"
// Smoke test: builds a small message-passing graph (three binary variables,
// unary potentials on each, pairwise potentials forming a 3-cycle), runs the
// CUDA async RMP solver on it, and prints the elapsed time.
int main(int, char**) {
    std::cout << "Test..." << std::endl;

    /*
    unsigned int flags = 0;
    gpuErrchk(cudaGetDeviceFlags(&flags));
    flags |= cudaDeviceScheduleYield;
    gpuErrchk(cudaSetDeviceFlags(flags));
    */

    MPGraph<double, int> g;
    // cudaMallocManaged((void**)&g, sizeof(g));
    // Potential tables (unary over 2 states, pairwise over 2x2 states).
    double dataUnary[2]{0.0, 1.0};
    //double dataUnaryStrong[2]{1.0, 3.0};
    double dataPair[4]{1.0, 0.0, 0.0, 1.0};
    //double dataPairAsym[4]{3.5, 0.5, 0.2, 1.0};

    g.AddVariables({ 2, 2, 2 });
    std::vector<MPGraph<double, int>::PotentialID> pots;
    pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataUnary, 2)));
    //pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataUnary, 2)));
    //pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataUnary, 2)));
    pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataPair, 4)));
    //pots.push_back(g.AddPotential(MPGraph<double, int>::PotentialVector(dataPairAsym, 4)));

    // One unary region per variable, one pairwise region per variable pair.
    std::vector<MPGraph<double, int>::RegionID> regs;
    for (int k = 0; k < 3; ++k) {
        regs.push_back(g.AddRegion(1.0, std::vector<int>{k}, pots[0]));
    }
    regs.push_back(g.AddRegion(1.0, { 0, 1 }, pots[1]));
    regs.push_back(g.AddRegion(1.0, { 1, 2 }, pots[1]));
    regs.push_back(g.AddRegion(1.0, { 0, 2 }, pots[1]));

    // Connect each unary region to the two pairwise regions containing it.
    g.AddConnection(regs[0], regs[3]);
    g.AddConnection(regs[0], regs[5]);
    g.AddConnection(regs[1], regs[3]);
    g.AddConnection(regs[1], regs[4]);
    g.AddConnection(regs[2], regs[4]);
    g.AddConnection(regs[2], regs[5]);
    g.AllocateMessageMemory();

    //AsyncRMP<float, int> ARMP;
    //ARMP.RunMP(g, 1.0f);
    //return 0;

    g.CopyMessageMemory();
    CPrecisionTimer CTmr;
    CTmr.Start();
    CudaAsyncRMPThread<double, int> ARMP;
    ARMP.CudaRunMP(g, 1.0, 100,1000, 100);
    std::cout << CTmr.Stop() << std::endl;
    g.DeallocateGpuGraph();
    return 0;
}
|
d2b37f07d10dbbdcc5bc4242dff49533149598a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO \
1 // halo width along one direction when advancing to the next iteration
#define BENCH_PRINT
void run(int argc, char **argv);
int rows, cols;
int *data;
int **wall;
int *result;
#define M_SEED 9
int pyramid_height;
FILE *fp = fopen("result.txt", "w");
//#define BENCH_PRINT
/* Parse command-line dimensions, allocate the rows x cols weight grid
 * (`data`, row-indexed through `wall`) and the `result` row, and fill the
 * grid with deterministic pseudo-random weights (fixed seed M_SEED).
 * Exits if the argument count is wrong. */
void init(int argc, char **argv) {
  if (argc == 4) {
    cols = atoi(argv[1]);
    rows = atoi(argv[2]);
    pyramid_height = atoi(argv[3]);
  } else {
    fprintf(fp, "Usage: dynproc row_len col_len pyramid_height\n");
    exit(0);
  }
  /* one flat allocation; wall[n] aliases row n of data */
  data = new int[rows * cols];
  wall = new int *[rows];
  for (int n = 0; n < rows; n++)
    wall[n] = data + cols * n;
  result = new int[cols];

  int seed = M_SEED;
  srand(seed);

  for (int i = 0; i < rows; i++)
  {
    for (int j = 0; j < cols; j++)
    {
      wall[i][j] = rand() % 10;
    }
  }
#ifdef BENCH_PRINT
  /* dump the generated grid to result.txt for verification */
  for (int i = 0; i < rows; i++)
  {
    for (int j = 0; j < cols; j++)
    {
      fprintf(fp, "%d ", wall[i][j]);
    }
    fprintf(fp, "\n");
  }
#endif
}
/* Print an error message to stderr (does not terminate, despite the name). */
void fatal(char *s) { fprintf(stderr, "error: %s\n", s); }
#define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max))
#define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x)
#define MIN(a, b) ((a) <= (b) ? (a) : (b))
// Dynamic-programming step kernel (pathfinder): advances `iteration` rows of
// the min-path table for one tile of columns per block. HALO columns on each
// side of a block's tile are recomputed redundantly so blocks need no
// communication within one pyramid of steps.
//   gpuSrc     - path values at row `startStep` (input row)
//   gpuWall    - weight rows, indexed as cols * (startStep + i) + xidx
//   gpuResults - path values after `iteration` steps (output row)
__global__ void dynproc_kernel(int iteration, int *gpuWall, int *gpuSrc,
                               int *gpuResults, int cols, int rows,
                               int startStep, int border) {
  __shared__ int prev[BLOCK_SIZE];

  int bx = blockIdx.x;
  int tx = threadIdx.x;

  // Each block owns one non-overlapping "small block" of columns; the rest
  // of the BLOCK_SIZE lanes cover the shrinking halo.
  int small_block_cols = BLOCK_SIZE - iteration * HALO * 2;

  // Column range of the tile (halo included) handled by this block.
  int blkX = small_block_cols * bx - border;
  int blkXmax = blkX + BLOCK_SIZE - 1;

  // Global column this thread is responsible for.
  int xidx = blkX + tx;

  // Portion of the tile that falls inside the input data.
  int validXmin = (blkX < 0) ? -blkX : 0;
  int validXmax = (blkXmax > cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - cols + 1)
                                       : BLOCK_SIZE - 1;

  // Neighbor lanes, clamped to the valid range so we never read a
  // shared-memory slot holding no valid data.
  // BUG FIX: the clamps were missing.
  int W = tx - 1;
  int E = tx + 1;
  W = (W < validXmin) ? validXmin : W;
  E = (E > validXmax) ? validXmax : E;

  bool isValid = IN_RANGE(tx, validXmin, validXmax);

  if (IN_RANGE(xidx, 0, cols - 1)) {
    prev[tx] = gpuSrc[xidx];
  }
  __syncthreads();  // prev[] must be fully populated before any lane reads it

  bool computed;
  int res = 0;
  // BUG FIX: loop ran only iteration-1 steps, dropping one DP row.
  for (int i = 0; i < iteration; i++) {
    computed = false;
    // The computable interior shrinks by one column per step as the halo
    // becomes stale.
    if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && isValid) {
      computed = true;
      int left = prev[W];
      int up = prev[tx];
      int right = prev[E];
      // BUG FIX: shortest was initialized to 0 and left/up were unused.
      int shortest = MIN(left, up);
      shortest = MIN(shortest, right);
      res = shortest + gpuWall[cols * (startStep + i) + xidx];
    }
    __syncthreads();
    if (computed)  // fold this step's result back for the next iteration
      prev[tx] = res;
    __syncthreads();  // [Ronny] Added sync to avoid race on prev Aug. 14 2012
  }

  // Only threads that computed through the final step (the non-overlapping
  // small block) publish their result.
  // BUG FIX: previously wrote a never-assigned `res` (always 0).
  if (computed) {
    gpuResults[xidx] = res;
  }
}
/*
   compute N time steps
*/
/* Run the DP over all rows, `pyramid_height` rows per kernel launch.
 * gpuResult[src]/gpuResult[dst] ping-pong between input and output rows;
 * returns the index of the buffer holding the final result. */
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols,
              int pyramid_height, int blockCols, int borderCols) {
  dim3 dimBlock(BLOCK_SIZE);
  dim3 dimGrid(blockCols);

  int src = 1, dst = 0;
  for (int t = 0; t < rows - 1; t += pyramid_height) {
    int temp = src;
    src = dst;
    dst = temp;
    /* last pyramid may be shorter than pyramid_height */
    hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
        MIN(pyramid_height, rows - t - 1), gpuWall, gpuResult[src],
        gpuResult[dst], cols, rows, t, borderCols);
  }
  return dst;
}
/* Entry point: pin the process to device DEVICE when more than one GPU is
 * present, then run the benchmark. */
int main(int argc, char **argv) {
  int num_devices;
  hipGetDeviceCount(&num_devices);
  if (num_devices > 1)
    hipSetDevice(DEVICE);

  run(argc, argv);

  return EXIT_SUCCESS;
}
/* Full benchmark: build the input grid, upload it, run the pyramid DP on the
 * GPU, copy the final row back, optionally dump input/result, and free
 * everything. NOTE(review): `fp` (opened at file scope) is never fclose'd. */
void run(int argc, char **argv) {
  init(argc, argv);

  /* --------------- pyramid parameters --------------- */
  int borderCols = (pyramid_height)*HALO;
  int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO * 2;
  int blockCols = cols / smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1);

  fprintf(fp,
          "pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: "
          "%d\nblockGrid:[%d]\ntargetBlock:[%d]\n",
          pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols,
          smallBlockCol);

  int *gpuWall, *gpuResult[2];
  int size = rows * cols;

  /* ping-pong row buffers; buffer 0 starts as the first input row */
  hipMalloc((void **)&gpuResult[0], sizeof(int) * cols);
  hipMalloc((void **)&gpuResult[1], sizeof(int) * cols);
  hipMemcpy(gpuResult[0], data, sizeof(int) * cols, hipMemcpyHostToDevice);
  /* remaining rows (weights) live in gpuWall */
  hipMalloc((void **)&gpuWall, sizeof(int) * (size - cols));
  hipMemcpy(gpuWall, data + cols, sizeof(int) * (size - cols),
            hipMemcpyHostToDevice);

  int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height,
                            blockCols, borderCols);

  /* blocking copy also synchronizes with the kernels above */
  hipMemcpy(result, gpuResult[final_ret], sizeof(int) * cols,
            hipMemcpyDeviceToHost);

#ifdef BENCH_PRINT
  for (int i = 0; i < cols; i++)
    fprintf(fp, "%d ", data[i]);
  fprintf(fp, "\n");
  for (int i = 0; i < cols; i++)
    fprintf(fp, "%d ", result[i]);
  fprintf(fp, "\n");
#endif

  hipFree(gpuWall);
  hipFree(gpuResult[0]);
  hipFree(gpuResult[1]);

  delete[] data;
  delete[] wall;
  delete[] result;
}
| d2b37f07d10dbbdcc5bc4242dff49533149598a4.cu | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO \
1 // halo width along one direction when advancing to the next iteration
#define BENCH_PRINT
void run(int argc, char **argv);
int rows, cols;
int *data;
int **wall;
int *result;
#define M_SEED 9
int pyramid_height;
FILE *fp = fopen("result.txt", "w");
//#define BENCH_PRINT
/* Parse command-line dimensions, allocate the rows x cols weight grid
 * (`data`, row-indexed through `wall`) and the `result` row, and fill the
 * grid with deterministic pseudo-random weights (fixed seed M_SEED).
 * Exits if the argument count is wrong. */
void init(int argc, char **argv) {
  if (argc == 4) {
    cols = atoi(argv[1]);
    rows = atoi(argv[2]);
    pyramid_height = atoi(argv[3]);
  } else {
    fprintf(fp, "Usage: dynproc row_len col_len pyramid_height\n");
    exit(0);
  }
  /* one flat allocation; wall[n] aliases row n of data */
  data = new int[rows * cols];
  wall = new int *[rows];
  for (int n = 0; n < rows; n++)
    wall[n] = data + cols * n;
  result = new int[cols];

  int seed = M_SEED;
  srand(seed);

  for (int i = 0; i < rows; i++)
  {
    for (int j = 0; j < cols; j++)
    {
      wall[i][j] = rand() % 10;
    }
  }
#ifdef BENCH_PRINT
  /* dump the generated grid to result.txt for verification */
  for (int i = 0; i < rows; i++)
  {
    for (int j = 0; j < cols; j++)
    {
      fprintf(fp, "%d ", wall[i][j]);
    }
    fprintf(fp, "\n");
  }
#endif
}
/* Print an error message to stderr (does not terminate, despite the name). */
void fatal(char *s) { fprintf(stderr, "error: %s\n", s); }
#define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max))
#define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x)
#define MIN(a, b) ((a) <= (b) ? (a) : (b))
// Dynamic-programming step kernel (pathfinder): advances `iteration` rows of
// the min-path table for one tile of columns per block. HALO columns on each
// side of a block's tile are recomputed redundantly so blocks need no
// communication within one pyramid of steps.
//   gpuSrc     - path values at row `startStep` (input row)
//   gpuWall    - weight rows, indexed as cols * (startStep + i) + xidx
//   gpuResults - path values after `iteration` steps (output row)
__global__ void dynproc_kernel(int iteration, int *gpuWall, int *gpuSrc,
                               int *gpuResults, int cols, int rows,
                               int startStep, int border) {
  __shared__ int prev[BLOCK_SIZE];

  int bx = blockIdx.x;
  int tx = threadIdx.x;

  // Each block owns one non-overlapping "small block" of columns; the rest
  // of the BLOCK_SIZE lanes cover the shrinking halo.
  int small_block_cols = BLOCK_SIZE - iteration * HALO * 2;

  // Column range of the tile (halo included) handled by this block.
  int blkX = small_block_cols * bx - border;
  int blkXmax = blkX + BLOCK_SIZE - 1;

  // Global column this thread is responsible for.
  int xidx = blkX + tx;

  // Portion of the tile that falls inside the input data.
  int validXmin = (blkX < 0) ? -blkX : 0;
  int validXmax = (blkXmax > cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - cols + 1)
                                       : BLOCK_SIZE - 1;

  // Neighbor lanes, clamped to the valid range so we never read a
  // shared-memory slot holding no valid data.
  // BUG FIX: the clamps were missing.
  int W = tx - 1;
  int E = tx + 1;
  W = (W < validXmin) ? validXmin : W;
  E = (E > validXmax) ? validXmax : E;

  bool isValid = IN_RANGE(tx, validXmin, validXmax);

  if (IN_RANGE(xidx, 0, cols - 1)) {
    prev[tx] = gpuSrc[xidx];
  }
  __syncthreads();  // prev[] must be fully populated before any lane reads it

  bool computed;
  int res = 0;
  // BUG FIX: loop ran only iteration-1 steps, dropping one DP row.
  for (int i = 0; i < iteration; i++) {
    computed = false;
    // The computable interior shrinks by one column per step as the halo
    // becomes stale.
    if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && isValid) {
      computed = true;
      int left = prev[W];
      int up = prev[tx];
      int right = prev[E];
      // BUG FIX: shortest was initialized to 0 and left/up were unused.
      int shortest = MIN(left, up);
      shortest = MIN(shortest, right);
      res = shortest + gpuWall[cols * (startStep + i) + xidx];
    }
    __syncthreads();
    if (computed)  // fold this step's result back for the next iteration
      prev[tx] = res;
    __syncthreads();  // [Ronny] Added sync to avoid race on prev Aug. 14 2012
  }

  // Only threads that computed through the final step (the non-overlapping
  // small block) publish their result.
  // BUG FIX: previously wrote a never-assigned `res` (always 0).
  if (computed) {
    gpuResults[xidx] = res;
  }
}
/*
compute N time steps
*/
/*
 * Advance the DP wavefront over all rows, pyramid_height rows per launch.
 * gpuResult[0]/gpuResult[1] are ping-pong row buffers; gpuResult[0] must
 * hold the first maze row on entry.  Returns the index (0 or 1) of the
 * buffer written by the final launch.
 */
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols,
              int pyramid_height, int blockCols, int borderCols) {
  dim3 block(BLOCK_SIZE);
  dim3 grid(blockCols);
  int dst = 0; /* buffer most recently written (source for the next launch) */
  for (int t = 0; t < rows - 1; t += pyramid_height) {
    /* ping-pong: last destination becomes the new source */
    int src = dst;
    dst = 1 - dst;
    dynproc_kernel<<<grid, block>>>(MIN(pyramid_height, rows - t - 1),
                                    gpuWall, gpuResult[src], gpuResult[dst],
                                    cols, rows, t, borderCols);
  }
  return dst;
}
/* Entry point: select the configured device when several GPUs are present,
 * then run the benchmark. */
int main(int argc, char **argv) {
  int device_count = 0;
  cudaGetDeviceCount(&device_count);
  if (device_count > 1) {
    cudaSetDevice(DEVICE);
  }
  run(argc, argv);
  return EXIT_SUCCESS;
}
// End-to-end benchmark driver: build the maze on the host, stage it on the
// device, run the pyramid DP, copy the final row back, and log to result.txt.
// NOTE(review): every cudaMalloc/cudaMemcpy return code is ignored and there
// is no cudaGetLastError() after the launches in calc_path — failures would
// surface only as garbage output.
void run(int argc, char **argv) {
  init(argc, argv);
  /* --------------- pyramid parameters --------------- */
  int borderCols = (pyramid_height)*HALO;
  int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO * 2;
  // ceil(cols / smallBlockCol): one block per small block of columns
  int blockCols = cols / smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1);
  fprintf(fp,
          "pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: "
          "%d\nblockGrid:[%d]\ntargetBlock:[%d]\n",
          pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols,
          smallBlockCol);
  int *gpuWall, *gpuResult[2];
  int size = rows * cols;
  // Two one-row ping-pong buffers; buffer 0 seeded with the first maze row.
  cudaMalloc((void **)&gpuResult[0], sizeof(int) * cols);
  cudaMalloc((void **)&gpuResult[1], sizeof(int) * cols);
  cudaMemcpy(gpuResult[0], data, sizeof(int) * cols, cudaMemcpyHostToDevice);
  // Remaining rows 1..rows-1 live in gpuWall (hence size - cols).
  cudaMalloc((void **)&gpuWall, sizeof(int) * (size - cols));
  cudaMemcpy(gpuWall, data + cols, sizeof(int) * (size - cols),
             cudaMemcpyHostToDevice);
  int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height,
                            blockCols, borderCols);
  // Blocking copy: also synchronizes with the kernels launched above.
  cudaMemcpy(result, gpuResult[final_ret], sizeof(int) * cols,
             cudaMemcpyDeviceToHost);
#ifdef BENCH_PRINT
  for (int i = 0; i < cols; i++)
    fprintf(fp, "%d ", data[i]);
  fprintf(fp, "\n");
  for (int i = 0; i < cols; i++)
    fprintf(fp, "%d ", result[i]);
  fprintf(fp, "\n");
#endif
  cudaFree(gpuWall);
  cudaFree(gpuResult[0]);
  cudaFree(gpuResult[1]);
  delete[] data;
  delete[] wall;
  delete[] result;
}
|
2ad0622184d55984e3ff307c060429f6a5e4b645.hip | // !!! This is a file automatically generated by hipify!!!
#include "mtbs_cu.h"
#include <pthread.h>
#include "tbs_sd.h"
static skrun_t *g_skruns;
static BOOL *g_mtbs_done;
static BOOL *skrun_dones;
static unsigned skrid_done_min;
static unsigned cur_skrid_host;
static BOOL checker_done;
static pthread_t checker;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static hipStream_t strm_submit;
unsigned n_queued_kernels = MAX_QUEUED_KERNELS;
unsigned short *g_mATs;
unsigned char *g_mtb_epochs;
#include "sched_dyn.cuh"
/*
 * Sweep up to n_checks in-flight kernel slots starting at skrid_done_min,
 * latching device-side completion flags (g_mtbs_done) into host-side
 * completion state (skrun_dones) and advancing the contiguous-done
 * watermark skrid_done_min.  Broadcasts `cond` when anything changed so
 * submit/wait threads re-check.
 *
 * Caller must hold `mutex` (all callers in this file do).
 */
static void
notify_done_skruns(unsigned n_checks)
{
	unsigned min_new = skrid_done_min;
	BOOL notify = FALSE;
	unsigned i, idx;
	idx = skrid_done_min;
	for (i = 0; i < n_checks; i++) {
		if (!skrun_dones[idx]) {
			if (g_mtbs_done[idx]) {
				/* device reported completion: latch it and clear the flag */
				notify = TRUE;
				skrun_dones[idx] = TRUE;
				g_mtbs_done[idx] = FALSE;
			}
		}
		if (skrun_dones[idx]) {
			/* advance the watermark only over a contiguous done prefix */
			if (min_new == idx) {
				min_new = (min_new + 1) % n_queued_kernels;
				notify = TRUE;
			}
		}
		idx = (idx + 1) % n_queued_kernels;
	}
	skrid_done_min = min_new;
	if (notify)
		pthread_cond_broadcast(&cond);
}
/*
 * Checker thread body: periodically scans the in-flight slot window
 * [skrid_done_min, cur_skrid_host) and publishes completions to waiters,
 * sleeping 100us between sweeps, until fini sets checker_done.
 *
 * Fix: the window size was previously computed from cur_skrid_host and
 * skrid_done_min BEFORE acquiring `mutex`, racing with submit_skrun_dyn()
 * and notify_done_skruns() which update those counters under the lock.
 * The snapshot is now taken with the mutex held.
 */
static void *
skruns_checkfunc(void *arg)
{
	while (!checker_done) {
		pthread_mutex_lock(&mutex);
		/* number of slots submitted but not yet past the done watermark */
		unsigned n_checks = (cur_skrid_host + n_queued_kernels - skrid_done_min) % n_queued_kernels;
		if (n_checks > 0) {
			notify_done_skruns(n_checks);
		}
		pthread_mutex_unlock(&mutex);
		usleep(100);
	}
	return NULL;
}
/*
 * Queue one kernel-run descriptor into the ring of device-side slots.
 * Blocks (on `cond`) while the ring is full, then asynchronously copies
 * *skr into slot cur_skrid_host on strm_submit and advances the cursor.
 * Returns a 1-based slot id (skrid) packed into an sk_t handle; slot
 * index = skrid - 1.
 * NOTE(review): the HtoD copy return code is unchecked, and `vstream`
 * is unused here — presumably consumed by other scheduler variants.
 */
static sk_t
submit_skrun_dyn(vstream_t vstream, skrun_t *skr)
{
	skrid_t skrid;
	pthread_mutex_lock(&mutex);
	while (skrid_done_min == (cur_skrid_host + 1) % n_queued_kernels) {
		/* full */
		pthread_cond_wait(&cond, &mutex);
	}
	skrid = cur_skrid_host + 1;
	skrun_dones[skrid - 1] = FALSE;
	cuMemcpyHtoDAsync((hipDeviceptr_t)(g_skruns + cur_skrid_host), skr, sizeof(skrun_t), strm_submit);
	/* No synchronization needed */
	cur_skrid_host = (cur_skrid_host + 1) % n_queued_kernels;
	pthread_mutex_unlock(&mutex);
	return (sk_t)(long long)skrid;
}
/*
 * Block until the run identified by handle `sk` (1-based skrid) completes,
 * then copy its integer result from device memory into *pres.
 * The checker thread broadcasts `cond` as completions are latched.
 * NOTE(review): if checker_done is set while waiting, this proceeds to read
 * the result even though the run may not have finished — confirm intended.
 */
static void
wait_skrun_dyn(sk_t sk, vstream_t vstream, int *pres)
{
	skrun_t *skr;
	skrid_t skrid = (skrid_t)(long long)sk;
	pthread_mutex_lock(&mutex);
	while (!checker_done && !skrun_dones[skrid - 1])
		pthread_cond_wait(&cond, &mutex);
	pthread_mutex_unlock(&mutex);
	skr = g_skruns + (skrid - 1);
	/* async DtoH on the submit stream, then block until it lands */
	cuMemcpyDtoHAsync(pres, (hipDeviceptr_t)&skr->res, sizeof(int), strm_submit);
	hipStreamSynchronize(strm_submit);
}
/*
 * One-time setup for the dynamic scheduler: create the submit stream,
 * allocate the device-side skrun ring, the host-pinned completion flags,
 * and the scheduler tables (g_mATs, g_mtb_epochs), start the checker
 * thread, then hand the device pointers to the GPU via setup_sched_dyn.
 * Exits the process (code 12) if the setup kernel cannot be invoked.
 * NOTE(review): stream creation, allocations, and pthread_create are all
 * unchecked; a NULL from mtbs_cudaMalloc would fault later.
 */
static void
init_skrun_dyn(void)
{
	void *params[4];
	unsigned i;
	hipStreamCreate__(&strm_submit, hipStreamNonBlocking);
	g_skruns = (skrun_t *)mtbs_cudaMalloc(sizeof(skrun_t) * n_queued_kernels);
	/* host-pinned so the device can flag completions without a copy */
	hipMemAllocHost((void **)&g_mtbs_done, sizeof(BOOL) * n_queued_kernels);
	for (i = 0; i < n_queued_kernels; i++) {
		g_mtbs_done[i] = FALSE;
	}
	skrun_dones = (BOOL *)calloc(n_queued_kernels, sizeof(BOOL));
	pthread_create(&checker, NULL, skruns_checkfunc, NULL);
	g_mATs = (unsigned short *)mtbs_cudaMalloc(EPOCH_MAX * n_max_mtbs * sizeof(unsigned short));
	g_mtb_epochs = (unsigned char *)mtbs_cudaMalloc(n_max_mtbs);
	params[0] = &g_mATs;
	params[1] = &g_mtb_epochs;
	params[2] = &g_skruns;
	params[3] = &g_mtbs_done;
	if (!invoke_kernel_func("setup_sched_dyn", params)) {
		exit(12);
	}
}
/*
 * Tear down the dynamic scheduler: stop and join the checker thread,
 * then free the device-side tables.
 * NOTE(review): waiters blocked in wait_skrun_dyn are not woken here (no
 * cond broadcast after setting checker_done), and strm_submit,
 * g_mtbs_done (pinned host memory), and skrun_dones are never released.
 */
static void
fini_skrun_dyn(void)
{
	void *retval;
	checker_done = TRUE;
	pthread_join(checker, &retval);
	mtbs_cudaFree(g_skruns);
	mtbs_cudaFree(g_mATs);
	mtbs_cudaFree(g_mtb_epochs);
}
sched_t sched_sd_dynamic = {
"dynamic",
TBS_TYPE_SD_DYNAMIC,
"func_macro_TB_dyn",
init_skrun_dyn,
fini_skrun_dyn,
submit_skrun_dyn,
wait_skrun_dyn,
};
| 2ad0622184d55984e3ff307c060429f6a5e4b645.cu | #include "mtbs_cu.h"
#include <pthread.h>
#include "tbs_sd.h"
static skrun_t *g_skruns;
static BOOL *g_mtbs_done;
static BOOL *skrun_dones;
static unsigned skrid_done_min;
static unsigned cur_skrid_host;
static BOOL checker_done;
static pthread_t checker;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static CUstream strm_submit;
unsigned n_queued_kernels = MAX_QUEUED_KERNELS;
unsigned short *g_mATs;
unsigned char *g_mtb_epochs;
#include "sched_dyn.cuh"
static void
notify_done_skruns(unsigned n_checks)
{
unsigned min_new = skrid_done_min;
BOOL notify = FALSE;
unsigned i, idx;
idx = skrid_done_min;
for (i = 0; i < n_checks; i++) {
if (!skrun_dones[idx]) {
if (g_mtbs_done[idx]) {
notify = TRUE;
skrun_dones[idx] = TRUE;
g_mtbs_done[idx] = FALSE;
}
}
if (skrun_dones[idx]) {
if (min_new == idx) {
min_new = (min_new + 1) % n_queued_kernels;
notify = TRUE;
}
}
idx = (idx + 1) % n_queued_kernels;
}
skrid_done_min = min_new;
if (notify)
pthread_cond_broadcast(&cond);
}
/*
 * Checker thread body: periodically scans the in-flight slot window
 * [skrid_done_min, cur_skrid_host) and publishes completions to waiters,
 * sleeping 100us between sweeps, until fini sets checker_done.
 *
 * Fix: the window size was previously computed from cur_skrid_host and
 * skrid_done_min BEFORE acquiring `mutex`, racing with submit_skrun_dyn()
 * and notify_done_skruns() which update those counters under the lock.
 * The snapshot is now taken with the mutex held.
 */
static void *
skruns_checkfunc(void *arg)
{
	while (!checker_done) {
		pthread_mutex_lock(&mutex);
		/* number of slots submitted but not yet past the done watermark */
		unsigned n_checks = (cur_skrid_host + n_queued_kernels - skrid_done_min) % n_queued_kernels;
		if (n_checks > 0) {
			notify_done_skruns(n_checks);
		}
		pthread_mutex_unlock(&mutex);
		usleep(100);
	}
	return NULL;
}
static sk_t
submit_skrun_dyn(vstream_t vstream, skrun_t *skr)
{
skrid_t skrid;
pthread_mutex_lock(&mutex);
while (skrid_done_min == (cur_skrid_host + 1) % n_queued_kernels) {
/* full */
pthread_cond_wait(&cond, &mutex);
}
skrid = cur_skrid_host + 1;
skrun_dones[skrid - 1] = FALSE;
cuMemcpyHtoDAsync((CUdeviceptr)(g_skruns + cur_skrid_host), skr, sizeof(skrun_t), strm_submit);
/* No synchronization needed */
cur_skrid_host = (cur_skrid_host + 1) % n_queued_kernels;
pthread_mutex_unlock(&mutex);
return (sk_t)(long long)skrid;
}
static void
wait_skrun_dyn(sk_t sk, vstream_t vstream, int *pres)
{
skrun_t *skr;
skrid_t skrid = (skrid_t)(long long)sk;
pthread_mutex_lock(&mutex);
while (!checker_done && !skrun_dones[skrid - 1])
pthread_cond_wait(&cond, &mutex);
pthread_mutex_unlock(&mutex);
skr = g_skruns + (skrid - 1);
cuMemcpyDtoHAsync(pres, (CUdeviceptr)&skr->res, sizeof(int), strm_submit);
cuStreamSynchronize(strm_submit);
}
static void
init_skrun_dyn(void)
{
void *params[4];
unsigned i;
cuStreamCreate(&strm_submit, CU_STREAM_NON_BLOCKING);
g_skruns = (skrun_t *)mtbs_cudaMalloc(sizeof(skrun_t) * n_queued_kernels);
cuMemAllocHost((void **)&g_mtbs_done, sizeof(BOOL) * n_queued_kernels);
for (i = 0; i < n_queued_kernels; i++) {
g_mtbs_done[i] = FALSE;
}
skrun_dones = (BOOL *)calloc(n_queued_kernels, sizeof(BOOL));
pthread_create(&checker, NULL, skruns_checkfunc, NULL);
g_mATs = (unsigned short *)mtbs_cudaMalloc(EPOCH_MAX * n_max_mtbs * sizeof(unsigned short));
g_mtb_epochs = (unsigned char *)mtbs_cudaMalloc(n_max_mtbs);
params[0] = &g_mATs;
params[1] = &g_mtb_epochs;
params[2] = &g_skruns;
params[3] = &g_mtbs_done;
if (!invoke_kernel_func("setup_sched_dyn", params)) {
exit(12);
}
}
static void
fini_skrun_dyn(void)
{
void *retval;
checker_done = TRUE;
pthread_join(checker, &retval);
mtbs_cudaFree(g_skruns);
mtbs_cudaFree(g_mATs);
mtbs_cudaFree(g_mtb_epochs);
}
sched_t sched_sd_dynamic = {
"dynamic",
TBS_TYPE_SD_DYNAMIC,
"func_macro_TB_dyn",
init_skrun_dyn,
fini_skrun_dyn,
submit_skrun_dyn,
wait_skrun_dyn,
};
|
1688974bf0f32e2f17f377f4154ebaaa0a262b84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void _bcnn_add_scalar_kernel(int n, float a, float *y)
{
int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < n)
y[i] += a;
} | 1688974bf0f32e2f17f377f4154ebaaa0a262b84.cu | #include "includes.h"
// Elementwise y[i] += a for the first n floats of y.
// Flattens a (possibly 2-D) grid of 1-D blocks into a linear index; the
// i < n guard handles the partial tail block.
__global__ void _bcnn_add_scalar_kernel(int n, float a, float *y)
{
    int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] += a;
}
5cb38204bac9a10a4d9337a7592122a68a2fa782.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
 * Accumulate one axpy step into a 16-element register tile: c[k] += a * b[k].
 * Behaviorally identical to the hand-unrolled original; the fixed-trip
 * loop is fully unrolled by the compiler, preserving operation order.
 */
static __device__ void saxpy(float a, float *b, float *c) {
#pragma unroll
    for (int k = 0; k < 16; ++k)
        c[k] += a * b[k];
}
__global__ void
sgemm_kernel_T_N_32_32_8_8_8(float *C, const float *A, const float *B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose:
========
This routine computes
C = alpha* A^T*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=32 blk_N=32 blk_K=8 nthd_x=8 nthd_y=8
This code should run for any matrix size.
=============================================================== */
const int ibx = blockIdx.x *32;
const int iby = blockIdx.y *32;
const int tx = threadIdx.y ;
const int ty = threadIdx.x ;
int idt = tx * 8 + ty;
if( ty >=k )
A += __mul24(ibx ,lda)+0;
else
A += __mul24(ibx ,lda)+ty;
if( (ibx + tx ) >= m )
A += __mul24(0,lda);
else
A += __mul24(tx,lda);
if( (iby+tx) >=n )
B+= __mul24(iby+0,ldb);
else
B+= __mul24(iby+tx,ldb) ;
if( ty>=k)
B+=0;
else
B+= ty;
C += ibx +idt%32 +__mul24( iby+16*(idt/32),ldc);
lda = lda *8 ;
ldb = ldb *8 ;
int as1=0, as2=lda, as3=2*lda , as4 =3*lda;
int bs1=0 , bs2=ldb , bs3=2*ldb , bs4=3*ldb ;
switch(k){
case 1: as2=0 ; as3 = 0*lda;as4=0; bs2=0 ; bs3 = 0*ldb; bs4=0; break;
case 2: as2=lda ; as3 = 0*lda;as4=0; bs2=ldb ; bs3 = 0*ldb; bs4=0; break;
case 3: as2=lda ; as3 = 2*lda;as4=0; bs2=ldb ; bs3 = 2*ldb; bs4=0; break;
}
if( (ibx + tx ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +8 ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +16) >=m ) { as1=0; as2=1*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +24) >=m ) { as1=0; as2=1*lda; as3=2*lda ; as4 =0*lda; }
if( (iby + tx ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +8 ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +16) >=n ) { bs1=0; bs2=1*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +24) >=n ) { bs1=0; bs2=1*ldb; bs3=2*ldb ; bs4 =0*ldb; }
float b= B[bs1];
float b1=B[bs2];
float b2=B[bs3];
float b3=B[bs4];
float Ap[4]={A[as1], A[as2], A[as3],A[as4]};
const float *Bend = B + (k-k%8);
B+=8;
A+=8;
__shared__ float Bb[8][33];
__shared__ float ABb[32][9];
float Cb[16] = {0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0};
const int l = 17*(idt/32) ;
int idt1 = idt ;
idt = idt % 32 ;
if(k>15){
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
saxpy(ABb[idt][0], &Bb[0][l], Cb);Ap[0]=A[as1];
saxpy(ABb[idt][1], &Bb[1][l], Cb);Ap[1]=A[as2];
saxpy(ABb[idt][2], &Bb[2][l], Cb);Ap[2]=A[as3];
saxpy(ABb[idt][3], &Bb[3][l], Cb);Ap[3]=A[as4];
saxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
saxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
saxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
saxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if(k>7){
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k-k%8;
if(as1+ty>=k){ bs1=0*ldb;bs2=0*ldb;bs3=0*ldb;bs4=0*ldb;B-=8;}
if(as1+ty>=k){ as1=0*lda;as2=0*lda;as3=0*lda;as4=0*lda;A-=8;}
as1=0;
saxpy(ABb[idt][0], &Bb[0][l], Cb);
Ap[0]=A[as1];
saxpy(ABb[idt][1], &Bb[1][l], Cb);
Ap[1]=A[as2];
saxpy(ABb[idt][2], &Bb[2][l], Cb);
Ap[2]=A[as3];
saxpy(ABb[idt][3], &Bb[3][l], Cb);
Ap[3]=A[as4];
saxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
saxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
saxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
saxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
}
k=k%8;
if ( k!=0){
__syncthreads();
Bb[ty][tx]= b;
Bb[ty][tx+8] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx][ty]= Ap[0];
ABb[tx+8][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0;i<k;i++){
saxpy(ABb[idt][i],&Bb[i][l], Cb);
}
}
if( (iby+16*(idt1/32+1))>=n) {
lda = n-iby-16*(idt1/32);
}
else {
lda = 16;
}
if( (ibx+idt) >= m )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
C[15*ldc] =alpha*Cb[15] + beta * C[15*ldc];
break;
case 15:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
break;
case 14:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
break;
case 13:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
break;
case 12:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
break;
case 11:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0] =alpha*Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_kernel_T_N_32_32_8_8_8(float *C,
const float *A,
const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta)
{
dim3 threads( 8, 8 );
dim3 grid(m/32+(m%32!=0),n/32+(n%32!=0));
hipLaunchKernelGGL(( sgemm_kernel_T_N_32_32_8_8_8), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, n, k,
lda, ldb, ldc,
alpha , beta ) ;
}
| 5cb38204bac9a10a4d9337a7592122a68a2fa782.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
 * Accumulate one axpy step into a 16-element register tile: c[k] += a * b[k].
 * Behaviorally identical to the hand-unrolled original; the fixed-trip
 * loop is fully unrolled by the compiler, preserving operation order.
 */
static __device__ void saxpy(float a, float *b, float *c) {
#pragma unroll
    for (int k = 0; k < 16; ++k)
        c[k] += a * b[k];
}
__global__ void
sgemm_kernel_T_N_32_32_8_8_8(float *C, const float *A, const float *B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose:
========
This routine computes
C = alpha* A^T*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=32 blk_N=32 blk_K=8 nthd_x=8 nthd_y=8
This code should run for any matrix size.
=============================================================== */
const int ibx = blockIdx.x *32;
const int iby = blockIdx.y *32;
const int tx = threadIdx.y ;
const int ty = threadIdx.x ;
int idt = tx * 8 + ty;
if( ty >=k )
A += __mul24(ibx ,lda)+0;
else
A += __mul24(ibx ,lda)+ty;
if( (ibx + tx ) >= m )
A += __mul24(0,lda);
else
A += __mul24(tx,lda);
if( (iby+tx) >=n )
B+= __mul24(iby+0,ldb);
else
B+= __mul24(iby+tx,ldb) ;
if( ty>=k)
B+=0;
else
B+= ty;
C += ibx +idt%32 +__mul24( iby+16*(idt/32),ldc);
lda = lda *8 ;
ldb = ldb *8 ;
int as1=0, as2=lda, as3=2*lda , as4 =3*lda;
int bs1=0 , bs2=ldb , bs3=2*ldb , bs4=3*ldb ;
switch(k){
case 1: as2=0 ; as3 = 0*lda;as4=0; bs2=0 ; bs3 = 0*ldb; bs4=0; break;
case 2: as2=lda ; as3 = 0*lda;as4=0; bs2=ldb ; bs3 = 0*ldb; bs4=0; break;
case 3: as2=lda ; as3 = 2*lda;as4=0; bs2=ldb ; bs3 = 2*ldb; bs4=0; break;
}
if( (ibx + tx ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +8 ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +16) >=m ) { as1=0; as2=1*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +24) >=m ) { as1=0; as2=1*lda; as3=2*lda ; as4 =0*lda; }
if( (iby + tx ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +8 ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +16) >=n ) { bs1=0; bs2=1*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +24) >=n ) { bs1=0; bs2=1*ldb; bs3=2*ldb ; bs4 =0*ldb; }
float b= B[bs1];
float b1=B[bs2];
float b2=B[bs3];
float b3=B[bs4];
float Ap[4]={A[as1], A[as2], A[as3],A[as4]};
const float *Bend = B + (k-k%8);
B+=8;
A+=8;
__shared__ float Bb[8][33];
__shared__ float ABb[32][9];
float Cb[16] = {0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0};
const int l = 17*(idt/32) ;
int idt1 = idt ;
idt = idt % 32 ;
if(k>15){
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
saxpy(ABb[idt][0], &Bb[0][l], Cb);Ap[0]=A[as1];
saxpy(ABb[idt][1], &Bb[1][l], Cb);Ap[1]=A[as2];
saxpy(ABb[idt][2], &Bb[2][l], Cb);Ap[2]=A[as3];
saxpy(ABb[idt][3], &Bb[3][l], Cb);Ap[3]=A[as4];
saxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
saxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
saxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
saxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if(k>7){
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k-k%8;
if(as1+ty>=k){ bs1=0*ldb;bs2=0*ldb;bs3=0*ldb;bs4=0*ldb;B-=8;}
if(as1+ty>=k){ as1=0*lda;as2=0*lda;as3=0*lda;as4=0*lda;A-=8;}
as1=0;
saxpy(ABb[idt][0], &Bb[0][l], Cb);
Ap[0]=A[as1];
saxpy(ABb[idt][1], &Bb[1][l], Cb);
Ap[1]=A[as2];
saxpy(ABb[idt][2], &Bb[2][l], Cb);
Ap[2]=A[as3];
saxpy(ABb[idt][3], &Bb[3][l], Cb);
Ap[3]=A[as4];
saxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
saxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
saxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
saxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
}
k=k%8;
if ( k!=0){
__syncthreads();
Bb[ty][tx]= b;
Bb[ty][tx+8] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx][ty]= Ap[0];
ABb[tx+8][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0;i<k;i++){
saxpy(ABb[idt][i],&Bb[i][l], Cb);
}
}
if( (iby+16*(idt1/32+1))>=n) {
lda = n-iby-16*(idt1/32);
}
else {
lda = 16;
}
if( (ibx+idt) >= m )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
C[15*ldc] =alpha*Cb[15] + beta * C[15*ldc];
break;
case 15:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
break;
case 14:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
break;
case 13:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
break;
case 12:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
break;
case 11:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0] =alpha*Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
// Host launcher for the tiled transpose-sgemm kernel:
//     C = alpha * A^T * B + beta * C
// (column-major leading dimensions lda/ldb/ldc, MAGMA convention).
// Grid: one 8x8-thread block per 32x32 tile of C, rounded up so arbitrary
// m, n are covered; launched on the global magma_stream.
// NOTE(review): no cudaGetLastError() check after the launch.
extern "C" void
magmablas_sgemm_kernel_T_N_32_32_8_8_8(float *C,
                                       const float *A,
                                       const float *B,
                                       magma_int_t m, magma_int_t n, magma_int_t k,
                                       magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
                                       float alpha, float beta)
{
        dim3 threads( 8, 8 );
        dim3 grid(m/32+(m%32!=0),n/32+(n%32!=0));
        sgemm_kernel_T_N_32_32_8_8_8<<< grid, threads, 0, magma_stream >>>(C, A, B,
                                                                           m, n, k,
                                                                           lda, ldb, ldc,
                                                                           alpha , beta ) ;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.