hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
05c59cd18048f457f5bb16944c4471dee2ab7e05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Dense GEMM wrappers: C = alpha * op(A) * op(B) + beta * C for row-major
// caller matrices. hipBLAS is column-major, so the wrappers swap A/B and
// M/N (computing C^T column-major, which is C row-major).
template <>
void caffe_gpu_gemm<float,float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision variant; identical layout trick as the float overload.
template <>
void caffe_gpu_gemm<double,double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// fp16-storage GEMM with fp32 math: operands are half, alpha/beta are float.
// NOTE(review): cublasSgemmEx and HIPBLAS_DATA_HALF look like un-hipified
// CUDA leftovers from the automatic conversion — confirm they resolve on the
// target hipBLAS version (newer hipBLAS spells these hipblasSgemmEx /
// HIPBLAS_R_16F).
template <>
void caffe_gpu_gemm<float16,float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float16* A, const float16* B, const float beta,
float16* C) {
// Note that cublas follows fortran order.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, HIPBLAS_DATA_HALF, ldb, A, HIPBLAS_DATA_HALF,
lda, &beta, C, HIPBLAS_DATA_HALF, N));
}
// All-fp16 GEMM (storage and math): dispatches to hipblasHgemm, passing the
// raw .data payload of the project float16 wrapper type.
template <>
void caffe_gpu_gemm<float16,float16>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const float16* B, const float16 beta,
float16* C) {
// Note that cublas follows fortran order.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasHgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha.data, &B->data, ldb, &A->data,
lda, &beta.data, &C->data, N));
}
// GEMV wrappers: y = alpha * op(A) * x + beta * y for a row-major M x N
// matrix A. The transpose flag is deliberately inverted (NoTrans -> OP_T)
// because row-major A is the transpose of what column-major hipBLAS sees.
template <>
void caffe_gpu_gemv<float,float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
// Double-precision variant of the above.
template <>
void caffe_gpu_gemv<double,double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
#if !NATIVE_FP16
// fp16 GEMV emulated as a GEMM with a single column (n = 1), since no
// half-precision gemv entry point is used here.
// NOTE(review): cublasSgemmEx / HIPBLAS_DATA_HALF appear to be un-hipified
// CUDA leftovers — verify against the target hipBLAS headers.
template <>
void caffe_gpu_gemv<float16, float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float16* A, const float16* x,
const float beta, float16* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// Dimensions of the emulated (m x k) * (k x 1) product.
int m = (cuTransA == HIPBLAS_OP_N) ? N : M;
int k = (cuTransA == HIPBLAS_OP_N) ? M : N;
int LDA = (cuTransA == HIPBLAS_OP_N) ? m : k;
// int LDB = (cuTransA == HIPBLAS_OP_N) ? k : m;
int LDC = m;
CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransA, HIPBLAS_OP_N,
m, 1, k, &alpha, A, HIPBLAS_DATA_HALF, LDA, x, HIPBLAS_DATA_HALF, k, &beta,
y, HIPBLAS_DATA_HALF, LDC));
}
#else
// NATIVE_FP16 build: fp16 alpha/beta are widened to fp32 on the host and the
// product is again emulated as an (m x k) * (k x 1) GEMM.
template <>
void caffe_gpu_gemv<float16, float16>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float16 alpha, const float16* A, const float16* x,
const float16 beta, float16* y) {
float alpha_fp32 = cpu_half2float(alpha);
float beta_fp32 = cpu_half2float(beta);
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int m = (cuTransA == HIPBLAS_OP_N) ? N : M;
int k = (cuTransA == HIPBLAS_OP_N) ? M : N;
int LDA = (cuTransA == HIPBLAS_OP_N) ? m : k;
// int LDB = (cuTransA == HIPBLAS_OP_N) ? k : m;
int LDC = m;
CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransA, HIPBLAS_OP_N,
m, 1, k, &alpha_fp32, A, HIPBLAS_DATA_HALF, LDA, x, HIPBLAS_DATA_HALF, k, &beta_fp32,
y, HIPBLAS_DATA_HALF, LDC));
}
#endif
// AXPY: Y = alpha * X + Y. float/double go through hipBLAS; fp16 storage
// uses a custom grid-stride kernel with the math type given by T_MATH.
template <>
void caffe_gpu_axpy<float,float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double,double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Grid-stride AXPY kernel: loads T_STORE, computes in T_MATH, stores T_STORE.
template <typename T_STORE, typename T_MATH>
__global__
void axpy_kernel(const int N, const T_MATH alpha, const T_STORE *x, T_STORE *y)
{
for (int idx = threadIdx.x + blockDim.x*blockIdx.x; idx < N; idx += blockDim.x*gridDim.x) {
y[idx] = Get<T_STORE>( alpha * Get<T_MATH>(x[idx]) + Get<T_MATH>(y[idx]) );
}
}
// fp16 storage, fp32 math.
template <>
void caffe_gpu_axpy<float16,float>(const int N, const float alpha, const float16* x, float16 *y) {
hipLaunchKernelGGL(( axpy_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, x, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage and math.
template <>
void caffe_gpu_axpy<float16,float16>(const int N, const float16 alpha, const float16* x, float16 *y) {
hipLaunchKernelGGL(( axpy_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, x, y);
CUDA_POST_KERNEL_CHECK;
}
// Copies N bytes from X to Y; hipMemcpyDefault lets the runtime infer the
// direction (host/device) from the pointer types. No-op when X == Y.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
// SCAL: X = alpha * X. float/double via hipBLAS; fp16 via a grid-stride
// kernel that computes in T_MATH.
template <>
void caffe_gpu_scal<float,float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double,double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// In-place grid-stride scaling kernel.
template <typename T_STORE, typename T_MATH>
__global__
void scal_kernel(const int N, const T_MATH alpha, T_STORE *X)
{
for (int idx = threadIdx.x + blockDim.x*blockIdx.x; idx < N; idx += blockDim.x*gridDim.x) {
X[idx] = Get<T_STORE>( alpha * Get<T_MATH>(X[idx]));
}
}
template <>
void caffe_gpu_scal<float16,float>(const int N, const float alpha, float16 *X) {
hipLaunchKernelGGL(( scal_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, X);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_scal<float16,float16>(const int N, const float16 alpha, float16 *X) {
hipLaunchKernelGGL(( scal_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, X);
CUDA_POST_KERNEL_CHECK;
}
// AXPBY: Y = alpha * X + beta * Y. float/double compose scal + axpy
// (two kernel launches); fp16 uses a single fused kernel.
template <>
void caffe_gpu_axpby<float,float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float,float>(N, beta, Y);
caffe_gpu_axpy<float,float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double,double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double,double>(N, beta, Y);
caffe_gpu_axpy<double,double>(N, alpha, X, Y);
}
// Fused Y = alpha*X + beta*Y, computing in T_MATH, storing T_STORE.
template <typename T_STORE, typename T_MATH>
__global__
void axpby_kernel(const int N, const T_MATH alpha, const T_STORE* X,
const T_MATH beta, T_STORE* Y)
{
CUDA_KERNEL_LOOP(idx, N) {
Y[idx] = Get<T_STORE>( alpha * Get<T_MATH>(X[idx]) + beta * Get<T_MATH>(Y[idx]) );
}
}
template <>
void caffe_gpu_axpby<float16,float>(const int N, const float alpha, const float16* X,
const float beta, float16* Y)
{
hipLaunchKernelGGL(( axpby_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N,alpha,X,beta,Y);
CUDA_POST_KERNEL_CHECK;
}
// Fused Y = alpha * X + beta * Y with fp16 storage and fp16 math.
// Bug fix: this specialization previously launched axpby_kernel<float16,float>
// (the fp32-math kernel) — a copy/paste from the <float16,float> overload —
// instead of the fp16-math kernel its <float16,float16> template arguments
// request.
template <>
void caffe_gpu_axpby<float16,float16>(const int N, const float16 alpha, const float16* X,
const float16 beta, float16* Y)
{
hipLaunchKernelGGL(( axpby_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N,alpha,X,beta,Y);
CUDA_POST_KERNEL_CHECK;
}
// Dot products. float/double use hipBLAS; fp16 uses the custom single-block
// reduction kernel below.
template <>
void caffe_gpu_dot<float,float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double,double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// Binary-op functors for a thrust::inner_product formulation of the fp16 dot
// product; currently unused (the thrust call sites below are commented out).
struct float16_dot_reduce {
__host__ __device__
float operator()(const float& x, const float& y)
{
return x + y;
}
};
struct float16_dot_mult {
__host__ __device__
float operator()(float16& x, float16& y)
{
return Get<float>(x) * Get<float>(y);
}
};
// Single-block dot-product reduction.
// Preconditions (from the fixed cache size and the s=128 start): must be
// launched with exactly one block of exactly 256 threads; writes the scalar
// result to *out from thread 0.
template <typename Dtype, typename Mtype>
__global__
void gpu_dot_kernel(const int N, const Dtype *x, const Dtype *y, Mtype *out)
{
__shared__ Mtype cache[256];
const int tidx = threadIdx.x;
cache[tidx] = Get<Mtype>(0);
// Each thread accumulates a strided partial sum in Mtype precision.
for (int i=tidx; i<N; i+=blockDim.x) {
cache[tidx] += Get<Mtype>(x[i]) * Get<Mtype>(y[i]);
}
__syncthreads();
// Tree reduction over the 256-entry cache; barrier is outside the divergent
// branch so all threads reach it.
for (int s=128; s > 0; s >>= 1) {
if (tidx < s) cache[tidx] += cache[tidx+s];
__syncthreads();
}
if (tidx == 0) *out = cache[tidx];
}
// Dot product of two fp16 vectors, accumulated in fp32 on the device.
// gpu_dot_kernel requires exactly one block of 256 threads (fixed shared
// cache), which is what is launched here.
// Fixes: hipMalloc/hipMemcpy/hipFree return codes were previously ignored
// (a sticky earlier error would silently corrupt *out); the redundant second
// CUDA_POST_KERNEL_CHECK after hipFree is dropped.
template <>
void caffe_gpu_dot<float16, float>(const int n, const float16* x, const float16* y,
float *out)
{
// float ret = thrust::inner_product(x, x+n, y, init, float16_dot_reduce(), float16_dot_mult());
// *out = ret;
float *res;
CUDA_CHECK(hipMalloc(&res, sizeof(float)));
hipLaunchKernelGGL(( gpu_dot_kernel<float16,float>), dim3(1),dim3(256), 0, 0, n, x, y, res);
CUDA_POST_KERNEL_CHECK;
// Blocking copy also synchronizes with the kernel before reading the result.
CUDA_CHECK(hipMemcpy(out, res, sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(res));
}
// Dot product of two fp16 vectors with fp16 accumulation (lower precision
// than the <float16,float> overload). Requires the 1x256 launch shape of
// gpu_dot_kernel.
// Fixes: hipMalloc/hipMemcpy/hipFree return codes were previously ignored;
// the redundant trailing CUDA_POST_KERNEL_CHECK after hipFree is dropped.
template <>
void caffe_gpu_dot<float16, float16>(const int n, const float16* x, const float16* y, float16 *out)
{
float16 *res;
CUDA_CHECK(hipMalloc(&res, sizeof(float16)));
hipLaunchKernelGGL(( gpu_dot_kernel<float16,float16>), dim3(1),dim3(256), 0, 0, n, x, y, res);
CUDA_POST_KERNEL_CHECK;
// Blocking copy also synchronizes with the kernel before reading the result.
CUDA_CHECK(hipMemcpy(out, res, sizeof(float16), hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(res));
}
// ASUM: *y = sum_i |x[i]|. float/double via hipBLAS; fp16 via the custom
// single-block reduction kernel below.
template <>
void caffe_gpu_asum<float,float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double,double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// Functor for a thrust::reduce formulation of the fp16 asum; currently
// unused (the thrust call site below is commented out).
struct float16_asum_reduce
{
__host__ __device__
float operator()(const float& a, const float16& b)
{
return a + fabs(Get<float>(b));
}
};
// Single-block absolute-sum reduction.
// Preconditions (from the fixed cache size and the s=128 start): must be
// launched with exactly one block of exactly 256 threads; thread 0 writes
// the scalar result to *out.
template <typename Dtype, typename Mtype>
__global__
void gpu_asum_kernel(const int N, const Dtype *x, Mtype *out)
{
__shared__ Mtype cache[256];
const int tidx = threadIdx.x;
cache[tidx] = Get<Mtype>(0);
for (int i=tidx; i<N; i+=blockDim.x) {
cache[tidx] += Get<Mtype>(fabs(x[i]));
}
__syncthreads();
// Tree reduction; the barrier is outside the divergent branch.
for (int s=128; s > 0; s >>= 1) {
if (tidx < s) cache[tidx] += cache[tidx+s];
__syncthreads();
}
if (tidx == 0) *out = cache[tidx];
}
// Sum of absolute values of an fp16 vector, accumulated in fp32.
// Requires the 1x256 launch shape of gpu_asum_kernel.
// Bug fix: the temporary device scalar was never released (hipFree was
// missing — compare with caffe_gpu_dot<float16,float>), leaking device
// memory on every call; runtime-API return codes are now checked as well.
template <>
void caffe_gpu_asum<float16,float>(const int n, const float16* x, float* y)
{
// float init = 0.0f;
// float result = thrust::reduce(x, x+n, init, float16_asum_reduce());
// *y = result;
float *res;
CUDA_CHECK(hipMalloc(&res, sizeof(float)));
hipLaunchKernelGGL(( gpu_asum_kernel<float16,float>), dim3(1),dim3(256), 0, 0, n,x,res);
CUDA_POST_KERNEL_CHECK;
// Blocking copy synchronizes with the kernel before the result is read.
CUDA_CHECK(hipMemcpy(y, res, sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(res));
}
// Sum of absolute values with fp16 storage and fp16 accumulation.
// Requires the 1x256 launch shape of gpu_asum_kernel.
// Bug fix: the temporary device scalar was never released (missing hipFree),
// leaking device memory on every call; runtime-API return codes are now
// checked as well.
template <>
void caffe_gpu_asum<float16,float16>(const int n, const float16* x, float16* y)
{
float16 *res;
CUDA_CHECK(hipMalloc(&res, sizeof(float16)));
hipLaunchKernelGGL(( gpu_asum_kernel<float16,float16>), dim3(1),dim3(256), 0, 0, n,x,res);
CUDA_POST_KERNEL_CHECK;
// Blocking copy synchronizes with the kernel before the result is read.
CUDA_CHECK(hipMemcpy(y, res, sizeof(float16), hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(res));
}
// SCALE: y = alpha * x (out of place). float/double compose copy + scal;
// fp16 uses a single fused kernel.
template <>
void caffe_gpu_scale<float,float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double,double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Fused out-of-place scaling kernel, computing in T_MATH.
template <typename T_STORE, typename T_MATH>
__global__
void scale_kernel(const int n, const T_MATH alpha, const T_STORE* x, T_STORE* y)
{
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = Get<T_STORE>( alpha * Get<T_MATH>(x[idx]) );
}
}
template <>
void caffe_gpu_scale<float16,float>(const int n, const float alpha, const float16 *x,
float16 *y)
{
hipLaunchKernelGGL(( scale_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(n)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n,alpha,x,y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_scale<float16,float16>(const int n, const float16 alpha, const float16 *x,
float16* y) {
hipLaunchKernelGGL(( scale_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(n)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n,alpha,x,y);
CUDA_POST_KERNEL_CHECK;
}
// Fill kernel: y[i] = alpha (converted to the storage type).
template <typename Dtype, typename Mtype>
__global__ void set_kernel(const int n, const Mtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>(alpha);
}
}
// Sets all N elements of Y to alpha. A zero fill takes the fast
// byte-wise hipMemset path; any other value needs the kernel.
template <typename Dtype, typename Mtype>
void caffe_gpu_set(const int N, const Mtype alpha, Dtype* Y) {
if (alpha == 0.) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiations for the storage/math type pairs used by the rest
// of the library.
template void caffe_gpu_set<int,int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float,float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double,double>(const int N, const double alpha, double* Y);
template void caffe_gpu_set<float16,CAFFE_FP16_MTYPE>(const int N,
const CAFFE_FP16_MTYPE alpha, float16* Y);
// y[i] += alpha, computing in the math type Mtype.
template <typename Dtype, typename Mtype>
__global__ void add_scalar_kernel(const int n, const Mtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>(alpha + Get<Mtype>(y[index]));
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_add_scalar(const int N, const CAFFE_FP16_MTYPE alpha, float16* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float16,CAFFE_FP16_MTYPE>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = a + b, computed in Mtype, stored as Dtype.
template <typename Dtype, typename Mtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>( Get<Mtype>(a[index]) + Get<Mtype>(b[index]) );
}
}
template <>
void caffe_gpu_add<float,float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_add<double,double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_add<float16,CAFFE_FP16_MTYPE>(const int N, const float16* a, const float16* b,
float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float16,CAFFE_FP16_MTYPE>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = a - b, computed in Mtype, stored as Dtype.
template <typename Dtype, typename Mtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>( Get<Mtype>(a[index]) - Get<Mtype>(b[index]) );
}
}
template <>
void caffe_gpu_sub<float,float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_sub<double,double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_sub<float16,CAFFE_FP16_MTYPE>(const int N, const float16* a, const float16* b,
float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float16,CAFFE_FP16_MTYPE>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = a * b, computed in Mtype, stored as Dtype.
template <typename Dtype, typename Mtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>( Get<Mtype>(a[index]) * Get<Mtype>(b[index]) );
}
}
template <>
void caffe_gpu_mul<float, float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_mul<double,double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage with fp32 math.
template <>
void caffe_gpu_mul<float16,float>(const int N, const float16* a,
const float16* b, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage and math.
template <>
void caffe_gpu_mul<float16,float16>(const int N, const float16* a,
const float16* b, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = a / b, computed in Mtype, stored as Dtype.
// No guard against b[index] == 0; callers must ensure nonzero divisors.
template <typename Dtype, typename Mtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>( Get<Mtype>(a[index]) / Get<Mtype>(b[index]) );
}
}
template <>
void caffe_gpu_div<float,float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_div<double,double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage with fp32 math.
template <>
void caffe_gpu_div<float16,float>(const int N, const float16* a,
const float16* b, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage and math.
template <>
void caffe_gpu_div<float16,float16>(const int N, const float16* a,
const float16* b, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = |a|, computed in Mtype, stored as Dtype.
template <typename Dtype, typename Mtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>( abs(Get<Mtype>(a[index])) );
}
}
template <>
void caffe_gpu_abs<float,float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_abs<double,double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage with fp32 math.
template <>
void caffe_gpu_abs<float16,float>(const int N, const float16* a, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage and math.
template <>
void caffe_gpu_abs<float16,float16>(const int N, const float16* a, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = exp(a), computed in Mtype, stored as Dtype.
template <typename Dtype, typename Mtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>( exp(Get<Mtype>(a[index])) );
}
}
template <>
void caffe_gpu_exp<float,float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_exp<double,double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage and math.
template <>
void caffe_gpu_exp<float16,float16>(const int N, const float16* a, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = exp(a) with fp16 storage and fp32 intermediate math.
// Bug fix: this specialization previously instantiated
// exp_kernel<float16,float16> (fp16 math), silently ignoring the fp32 math
// type its <float16,float> template arguments request.
template <>
void caffe_gpu_exp<float16,float>(const int N, const float16* a, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = log(a) in the storage type (no separate math type, unlike
// the other unary kernels in this file).
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
// Elementwise y = log(a) launchers.
// Consistency fix: these three launchers were the only ones in the file that
// did not run CUDA_POST_KERNEL_CHECK after the launch, so a bad launch
// configuration or in-kernel fault would surface (confusingly) at some later
// synchronizing call instead of here.
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_log<float16>(const int N, const float16* a, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
CUDA_POST_KERNEL_CHECK;
}
// Elementwise y = a ^ alpha, computed in Mtype, stored as Dtype.
template <typename Dtype, typename Mtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Mtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = Get<Dtype>( pow(Get<Mtype>(a[index]), alpha) );
}
}
template <>
void caffe_gpu_powx<float,float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_powx<double,double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double,double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage and math.
template <>
void caffe_gpu_powx<float16,float16>(const int N, const float16* a,
const float16 alpha, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float16,float16>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
}
// fp16 storage with fp32 math.
template <>
void caffe_gpu_powx<float16,float>(const int N, const float16* a,
const float alpha, float16* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float16,float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = Get<Dtype>( (Mtype(0) < Get<Mtype>(x[index]))
- (Get<Mtype>(x[index]) < Mtype(0))) );
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = Get<Dtype>( signbit(Get<Mtype>(x[index]))) );
// Per-element popcount-of-XOR kernels used by the (unimplemented) Hamming
// distance functions below.
// NOTE(review): static_cast<uint32_t>(a[index]) converts the numeric VALUE of
// the float, not its bit pattern (a reinterpret/bit-cast would be needed for
// a true bitwise Hamming distance) — this looks like the reason the
// TestHammingDistanceGPU unit test mentioned below fails; confirm intent.
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
// 64-bit variant for double inputs; same value-vs-bit-pattern caveat.
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
// fp16 variant: widens to float first, then applies the same value cast.
__global__ void popch_kernel(const int n, const float16* a,
const float16* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(Get<float>(a[index])) ^
static_cast<uint32_t>(Get<float>(b[index])));
}
}
// Hamming distance between two device vectors. All three specializations are
// intentionally disabled: NOT_IMPLEMENTED fires before the kernel launch and
// the code after it is currently dead (kept for when the kernels are fixed).
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<float16>(const int n, const float16* x,
const float16* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popch_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
// Grid-stride elementwise type conversion: out[i] = (T_OUT) in[i].
// Safe under any launch configuration because of the stride loop.
template <typename T_IN, typename T_OUT>
__global__
void convert_kernel(const int n, const T_IN* in, T_OUT* out)
{
for (int idx=threadIdx.x+blockIdx.x*blockDim.x; idx<n; idx+=blockDim.x*gridDim.x) {
out[idx] = Get<T_OUT>(in[idx]);
}
}
// Host launcher for convert_kernel: converts n elements from T_IN to T_OUT.
// The ad-hoc n/512+1 grid is safe because the kernel uses a grid-stride loop.
// Consistency fix: added the CUDA_POST_KERNEL_CHECK that every other launcher
// in this file performs, so launch errors surface immediately.
template <typename T_IN, typename T_OUT>
void caffe_gpu_convert(const int n, const T_IN* in, T_OUT* out)
{
hipLaunchKernelGGL(( convert_kernel<T_IN,T_OUT>), dim3(n / 512 + 1), dim3(512), 0, 0, n, in, out);
CUDA_POST_KERNEL_CHECK;
}
// Fills r with n random 32-bit integers from the shared curand generator.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
// Uniform samples on [a, b): generate on [0, 1), then scale and shift in
// place, skipping either step when it would be the identity.
template <>
void caffe_gpu_rng_uniform<float,float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal<float,float>(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar<float,float>(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double,double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal<double,double>(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar<double,double>(n, a, r);
}
}
// fp16 output: samples are generated in fp32 in a temporary device buffer,
// scaled/shifted there, then converted down to fp16.
template <>
void caffe_gpu_rng_uniform<float16,float>(const int n, const float a, const float b,
float16* r) {
thrust::device_vector<float> rf(n);
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), thrust::raw_pointer_cast(rf.data()), n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal<float,float>(n, range, thrust::raw_pointer_cast(rf.data()));
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar<float,float>(n, a, thrust::raw_pointer_cast(rf.data()));
}
caffe_gpu_convert<float,float16>(n, thrust::raw_pointer_cast(rf.data()), r);
}
// Delegates to the fp32-bounds overload.
// NOTE(review): relies on an implicit float16 -> float conversion for a and
// b — confirm the project float16 type provides it.
template <>
void caffe_gpu_rng_uniform<float16,float16>(const int n, const float16 a, const float16 b,
float16* r) {
caffe_gpu_rng_uniform<float16,float>(n, a, b, r);
}
// Gaussian samples with mean mu and standard deviation sigma.
// NOTE(review): curand/hiprand normal generators typically require n to be
// even — confirm callers guarantee this.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
// fp16 output: generate in fp32 into a temporary device buffer, then convert.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float16* r) {
// TODO: call fp16-based version of hiprandGenerateNormal when it becomes available.
thrust::device_vector<float> rf(n);
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), thrust::raw_pointer_cast(rf.data()), n, mu, sigma));
caffe_gpu_convert<float,float16>(n, thrust::raw_pointer_cast(rf.data()), r);
}
// Same as above with fp16 parameters.
// NOTE(review): mu and sigma are passed where floats are expected — relies
// on an implicit float16 -> float conversion; confirm the type provides it.
template <>
void caffe_gpu_rng_gaussian(const int n, const float16 mu, const float16 sigma,
float16* r) {
// TODO: call fp16-based version of hiprandGenerateNormal when it becomes available.
thrust::device_vector<float> rf(n);
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), thrust::raw_pointer_cast(rf.data()), n, mu, sigma));
caffe_gpu_convert<float,float16>(n, thrust::raw_pointer_cast(rf.data()), r);
}
} // namespace caffe
| 05c59cd18048f457f5bb16944c4471dee2ab7e05.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// GEMM, fp32: C = alpha * op(A) * op(B) + beta * C.
// cuBLAS is column-major, so the operands are swapped (B is passed first) to
// produce a row-major C without an explicit transpose.
template <>
void caffe_gpu_gemm<float,float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float* A, const float* B, const float beta,
    float* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// GEMM, fp64 variant of the above.
template <>
void caffe_gpu_gemm<double,double>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const double alpha, const double* A, const double* B, const double beta,
    double* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// GEMM on fp16 data with fp32 scalars via cublasSgemmEx (half in/out,
// single-precision compute).
template <>
void caffe_gpu_gemm<float16,float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float16* A, const float16* B, const float beta,
    float16* C) {
  // Note that cublas follows fortran order.
  const int lda = (TransA == CblasNoTrans) ? K : M;
  const int ldb = (TransB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, CUBLAS_DATA_HALF, ldb, A, CUBLAS_DATA_HALF,
      lda, &beta, C, CUBLAS_DATA_HALF, N));
}
// GEMM entirely in fp16 via cublasHgemm; scalars and matrix pointers are
// passed through the raw .data member of float16.
template <>
void caffe_gpu_gemm<float16,float16>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float16 alpha, const float16* A, const float16* B, const float16 beta,
    float16* C) {
  // Note that cublas follows fortran order.
  const int lda = (TransA == CblasNoTrans) ? K : M;
  const int ldb = (TransB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_CHECK(cublasHgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha.data, &B->data, ldb, &A->data,
      lda, &beta.data, &C->data, N));
}
// GEMV, fp32: y = alpha * op(A) * x + beta * y.  The transpose flag is
// inverted because a row-major A is A^T to column-major cuBLAS.
template <>
void caffe_gpu_gemv<float,float>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float alpha, const float* A, const float* x,
    const float beta, float* y) {
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// GEMV, fp64 variant of the above.
template <>
void caffe_gpu_gemv<double,double>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const double alpha, const double* A, const double* x,
    const double beta, double* y) {
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
#if !NATIVE_FP16
// GEMV on fp16 data with fp32 scalars, expressed as an (m x k) * (k x 1)
// GEMM through cublasSgemmEx (no half-precision gemv is used here).
template <>
void caffe_gpu_gemv<float16, float>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float alpha, const float16* A, const float16* x,
    const float beta, float16* y) {
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  int m = (cuTransA == CUBLAS_OP_N) ? N : M;
  int k = (cuTransA == CUBLAS_OP_N) ? M : N;
  int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
  // int LDB = (cuTransA == CUBLAS_OP_N) ? k : m;
  int LDC = m;
  CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransA, CUBLAS_OP_N,
      m, 1, k, &alpha, A, CUBLAS_DATA_HALF, LDA, x, CUBLAS_DATA_HALF, k, &beta,
      y, CUBLAS_DATA_HALF, LDC));
}
#else
// GEMV with fp16 scalars: alpha/beta are widened on the host, then the same
// GEMM-as-GEMV formulation is used.
template <>
void caffe_gpu_gemv<float16, float16>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float16 alpha, const float16* A, const float16* x,
    const float16 beta, float16* y) {
  float alpha_fp32 = cpu_half2float(alpha);
  float beta_fp32 = cpu_half2float(beta);
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  int m = (cuTransA == CUBLAS_OP_N) ? N : M;
  int k = (cuTransA == CUBLAS_OP_N) ? M : N;
  int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
  // int LDB = (cuTransA == CUBLAS_OP_N) ? k : m;
  int LDC = m;
  CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransA, CUBLAS_OP_N,
      m, 1, k, &alpha_fp32, A, CUBLAS_DATA_HALF, LDA, x, CUBLAS_DATA_HALF, k, &beta_fp32,
      y, CUBLAS_DATA_HALF, LDC));
}
#endif
// AXPY, fp32: Y = alpha * X + Y (cuBLAS).
template <>
void caffe_gpu_axpy<float,float>(const int N, const float alpha, const float* X,
    float* Y) {
  CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// AXPY, fp64: Y = alpha * X + Y (cuBLAS).
template <>
void caffe_gpu_axpy<double,double>(const int N, const double alpha, const double* X,
    double* Y) {
  CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Grid-stride AXPY kernel: storage type T_STORE, arithmetic type T_MATH.
template <typename T_STORE, typename T_MATH>
__global__
void axpy_kernel(const int N, const T_MATH alpha, const T_STORE *x, T_STORE *y)
{
  for (int idx = threadIdx.x + blockDim.x*blockIdx.x; idx < N; idx += blockDim.x*gridDim.x) {
    y[idx] = Get<T_STORE>( alpha * Get<T_MATH>(x[idx]) + Get<T_MATH>(y[idx]) );
  }
}
// AXPY on fp16 data with fp32 arithmetic.
template <>
void caffe_gpu_axpy<float16,float>(const int N, const float alpha, const float16* x, float16 *y) {
  axpy_kernel<float16,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, alpha, x, y);
  CUDA_POST_KERNEL_CHECK;
}
// AXPY on fp16 data with fp16 arithmetic.
template <>
void caffe_gpu_axpy<float16,float16>(const int N, const float16 alpha, const float16* x, float16 *y) {
  axpy_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, alpha, x, y);
  CUDA_POST_KERNEL_CHECK;
}
// Copy N bytes between any combination of host/device buffers; a no-op when
// source and destination alias.  cudaMemcpyDefault lets the runtime infer the
// transfer direction from the pointer kinds.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
  if (X == Y) {
    return;
  }
  CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault));  // NOLINT(caffe/alt_fn)
}
// SCAL, fp32: X *= alpha, in place (cuBLAS).
template <>
void caffe_gpu_scal<float,float>(const int N, const float alpha, float *X) {
  CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// SCAL, fp64: X *= alpha, in place (cuBLAS).
template <>
void caffe_gpu_scal<double,double>(const int N, const double alpha, double *X) {
  CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Grid-stride in-place scaling kernel; arithmetic done in T_MATH.
template <typename T_STORE, typename T_MATH>
__global__
void scal_kernel(const int N, const T_MATH alpha, T_STORE *X)
{
  for (int idx = threadIdx.x + blockDim.x*blockIdx.x; idx < N; idx += blockDim.x*gridDim.x) {
    X[idx] = Get<T_STORE>( alpha * Get<T_MATH>(X[idx]));
  }
}
// SCAL on fp16 data, fp32 arithmetic.
template <>
void caffe_gpu_scal<float16,float>(const int N, const float alpha, float16 *X) {
  scal_kernel<float16,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, alpha, X);
  CUDA_POST_KERNEL_CHECK;
}
// SCAL on fp16 data, fp16 arithmetic.
template <>
void caffe_gpu_scal<float16,float16>(const int N, const float16 alpha, float16 *X) {
  scal_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, alpha, X);
  CUDA_POST_KERNEL_CHECK;
}
// AXPBY, fp32: Y = alpha * X + beta * Y, composed from scal then axpy.
template <>
void caffe_gpu_axpby<float,float>(const int N, const float alpha, const float* X,
    const float beta, float* Y) {
  caffe_gpu_scal<float,float>(N, beta, Y);
  caffe_gpu_axpy<float,float>(N, alpha, X, Y);
}
// AXPBY, fp64 variant of the above.
template <>
void caffe_gpu_axpby<double,double>(const int N, const double alpha, const double* X,
    const double beta, double* Y) {
  caffe_gpu_scal<double,double>(N, beta, Y);
  caffe_gpu_axpy<double,double>(N, alpha, X, Y);
}
// Fused AXPBY kernel: Y = alpha*X + beta*Y, arithmetic in T_MATH.
template <typename T_STORE, typename T_MATH>
__global__
void axpby_kernel(const int N, const T_MATH alpha, const T_STORE* X,
    const T_MATH beta, T_STORE* Y)
{
  CUDA_KERNEL_LOOP(idx, N) {
    Y[idx] = Get<T_STORE>( alpha * Get<T_MATH>(X[idx]) + beta * Get<T_MATH>(Y[idx]) );
  }
}
// AXPBY on fp16 data, fp32 arithmetic.
template <>
void caffe_gpu_axpby<float16,float>(const int N, const float alpha, const float16* X,
    const float beta, float16* Y)
{
  axpby_kernel<float16,float><<<CAFFE_GET_BLOCKS(N),CAFFE_CUDA_NUM_THREADS>>>(N,alpha,X,beta,Y);
  CUDA_POST_KERNEL_CHECK;
}
// AXPBY on fp16 data with fp16 arithmetic: Y = alpha*X + beta*Y.
// Fix: instantiate the kernel with T_MATH = float16 so the half-precision
// alpha/beta are used as such, matching the <Dtype,Mtype> convention of
// caffe_gpu_scal<float16,float16> and caffe_gpu_axpy<float16,float16>
// (the original instantiated axpby_kernel<float16,float>).
template <>
void caffe_gpu_axpby<float16,float16>(const int N, const float16 alpha, const float16* X,
    const float16 beta, float16* Y)
{
  axpby_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N),CAFFE_CUDA_NUM_THREADS>>>(N,alpha,X,beta,Y);
  CUDA_POST_KERNEL_CHECK;
}
// DOT, fp32 (cuBLAS); the scalar result is written to host memory *out.
template <>
void caffe_gpu_dot<float,float>(const int n, const float* x, const float* y,
    float* out) {
  CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// DOT, fp64 (cuBLAS).
template <>
void caffe_gpu_dot<double,double>(const int n, const double* x, const double* y,
    double * out) {
  CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// Binary-op functors for a thrust::inner_product-based fp16 dot product.
// Currently unused -- kept for the commented-out thrust path below.
struct float16_dot_reduce {
  __host__ __device__
  float operator()(const float& x, const float& y)
  {
    return x + y;
  }
};
struct float16_dot_mult {
  __host__ __device__
  float operator()(float16& x, float16& y)
  {
    return Get<float>(x) * Get<float>(y);
  }
};
// Single-block dot-product reduction.  Assumes a <<<1,256>>> launch: the
// shared cache holds exactly 256 partials and the tree reduction starts at
// stride 128.
template <typename Dtype, typename Mtype>
__global__
void gpu_dot_kernel(const int N, const Dtype *x, const Dtype *y, Mtype *out)
{
  __shared__ Mtype cache[256];
  const int tidx = threadIdx.x;
  cache[tidx] = Get<Mtype>(0);
  // Each thread strides over the input accumulating its partial sum.
  for (int i=tidx; i<N; i+=blockDim.x) {
    cache[tidx] += Get<Mtype>(x[i]) * Get<Mtype>(y[i]);
  }
  __syncthreads();
  // Tree reduction in shared memory; the barrier is outside the `if` so all
  // threads reach it each iteration.
  for (int s=128; s > 0; s >>= 1) {
    if (tidx < s) cache[tidx] += cache[tidx+s];
    __syncthreads();
  }
  if (tidx == 0) *out = cache[tidx];
}
// DOT on fp16 data with fp32 accumulation.  A single-block reduction kernel
// writes the scalar into device scratch, which is then copied back to *out.
// Fix: the cudaMalloc/cudaMemcpy/cudaFree calls are now checked with
// CUDA_CHECK (they were unchecked), and the redundant CUDA_POST_KERNEL_CHECK
// that followed cudaFree was dropped.
template <>
void caffe_gpu_dot<float16, float>(const int n, const float16* x, const float16* y,
    float *out)
{
  // float ret = thrust::inner_product(x, x+n, y, init, float16_dot_reduce(), float16_dot_mult());
  // *out = ret;
  float *res;
  CUDA_CHECK(cudaMalloc(&res, sizeof(float)));
  gpu_dot_kernel<float16,float><<<1,256>>>(n, x, y, res);
  CUDA_POST_KERNEL_CHECK;
  CUDA_CHECK(cudaMemcpy(out, res, sizeof(float), cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(res));
}
// DOT on fp16 data with fp16 accumulation (lower precision than above).
template <>
void caffe_gpu_dot<float16, float16>(const int n, const float16* x, const float16* y, float16 *out)
{
  float16 *res;
  CUDA_CHECK(cudaMalloc(&res, sizeof(float16)));
  gpu_dot_kernel<float16,float16><<<1,256>>>(n, x, y, res);
  CUDA_POST_KERNEL_CHECK;
  CUDA_CHECK(cudaMemcpy(out, res, sizeof(float16), cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(res));
}
// ASUM, fp32: *y = sum(|x[i]|) (cuBLAS).
template <>
void caffe_gpu_asum<float,float>(const int n, const float* x, float* y) {
  CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
// ASUM, fp64 (cuBLAS).
template <>
void caffe_gpu_asum<double,double>(const int n, const double* x, double* y) {
  CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// Reduction functor for a thrust-based fp16 asum.  Currently unused -- kept
// for the commented-out thrust path below.
struct float16_asum_reduce
{
  __host__ __device__
  float operator()(const float& a, const float16& b)
  {
    return a + fabs(Get<float>(b));
  }
};
// Single-block absolute-sum reduction; assumes a <<<1,256>>> launch, same
// shape as gpu_dot_kernel above.  NOTE(review): fabs() is applied to the raw
// Dtype before the Get<Mtype> conversion -- for Dtype=float16 this relies on
// a fabs overload/promotion for float16; confirm.
template <typename Dtype, typename Mtype>
__global__
void gpu_asum_kernel(const int N, const Dtype *x, Mtype *out)
{
  __shared__ Mtype cache[256];
  const int tidx = threadIdx.x;
  cache[tidx] = Get<Mtype>(0);
  for (int i=tidx; i<N; i+=blockDim.x) {
    cache[tidx] += Get<Mtype>(fabs(x[i]));
  }
  __syncthreads();
  for (int s=128; s > 0; s >>= 1) {
    if (tidx < s) cache[tidx] += cache[tidx+s];
    __syncthreads();
  }
  if (tidx == 0) *out = cache[tidx];
}
// ASUM on fp16 data with fp32 accumulation, via a single-block reduction.
// Fix: the device scratch buffer `res` was never released in either fp16
// specialization (a per-call device-memory leak; the dot equivalents do call
// cudaFree).  All CUDA API calls are now also checked with CUDA_CHECK.
template <>
void caffe_gpu_asum<float16,float>(const int n, const float16* x, float* y)
{
  // float init = 0.0f;
  // float result = thrust::reduce(x, x+n, init, float16_asum_reduce());
  // *y = result;
  float *res;
  CUDA_CHECK(cudaMalloc(&res, sizeof(float)));
  gpu_asum_kernel<float16,float><<<1,256>>>(n,x,res);
  CUDA_POST_KERNEL_CHECK;
  CUDA_CHECK(cudaMemcpy(y,res,sizeof(float),cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(res));
}
// ASUM on fp16 data with fp16 accumulation (see leak fix above).
template <>
void caffe_gpu_asum<float16,float16>(const int n, const float16* x, float16* y)
{
  float16 *res;
  CUDA_CHECK(cudaMalloc(&res, sizeof(float16)));
  gpu_asum_kernel<float16,float16><<<1,256>>>(n,x,res);
  CUDA_POST_KERNEL_CHECK;
  CUDA_CHECK(cudaMemcpy(y,res,sizeof(float16),cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(res));
}
// SCALE, fp32: y = alpha * x (cuBLAS copy, then in-place scal).
template <>
void caffe_gpu_scale<float,float>(const int n, const float alpha, const float *x,
    float* y) {
  CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// SCALE, fp64 variant of the above.
template <>
void caffe_gpu_scale<double,double>(const int n, const double alpha, const double *x,
    double* y) {
  CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Out-of-place scaling kernel: y = alpha * x, arithmetic in T_MATH.
template <typename T_STORE, typename T_MATH>
__global__
void scale_kernel(const int n, const T_MATH alpha, const T_STORE* x, T_STORE* y)
{
  CUDA_KERNEL_LOOP(idx, n) {
    y[idx] = Get<T_STORE>( alpha * Get<T_MATH>(x[idx]) );
  }
}
// SCALE on fp16 data, fp32 arithmetic.
template <>
void caffe_gpu_scale<float16,float>(const int n, const float alpha, const float16 *x,
    float16 *y)
{
  scale_kernel<float16,float><<<CAFFE_GET_BLOCKS(n),CAFFE_CUDA_NUM_THREADS>>>(n,alpha,x,y);
  CUDA_POST_KERNEL_CHECK;
}
// SCALE on fp16 data, fp16 arithmetic.
template <>
void caffe_gpu_scale<float16,float16>(const int n, const float16 alpha, const float16 *x,
    float16* y) {
  scale_kernel<float16,float16><<<CAFFE_GET_BLOCKS(n),CAFFE_CUDA_NUM_THREADS>>>(n,alpha,x,y);
  CUDA_POST_KERNEL_CHECK;
}
// Fill kernel: y[i] = alpha for all i.
template <typename Dtype, typename Mtype>
__global__ void set_kernel(const int n, const Mtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>(alpha);
  }
}
// Fill Y with alpha.  alpha == 0 is special-cased as a cudaMemset, which
// assumes zero is represented as all-zero bytes for every instantiated Dtype.
template <typename Dtype, typename Mtype>
void caffe_gpu_set(const int N, const Mtype alpha, Dtype* Y) {
  if (alpha == 0.) {
    CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N));  // NOLINT(caffe/alt_fn)
    return;
  }
  // NOLINT_NEXT_LINE(whitespace/operators)
  set_kernel<Dtype,Mtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, alpha, Y);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiations of caffe_gpu_set.
template void caffe_gpu_set<int,int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float,float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double,double>(const int N, const double alpha, double* Y);
template void caffe_gpu_set<float16,CAFFE_FP16_MTYPE>(const int N,
    const CAFFE_FP16_MTYPE alpha, float16* Y);
// In-place scalar addition kernel: y[i] += alpha, arithmetic in Mtype.
template <typename Dtype, typename Mtype>
__global__ void add_scalar_kernel(const int n, const Mtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>(alpha + Get<Mtype>(y[index]));
  }
}
// Y += alpha, fp32.
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_scalar_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, alpha, Y);
  CUDA_POST_KERNEL_CHECK;
}
// Y += alpha, fp64.
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_scalar_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, alpha, Y);
  CUDA_POST_KERNEL_CHECK;
}
// Y += alpha on fp16 data; arithmetic type is the build-configured
// CAFFE_FP16_MTYPE.
template <>
void caffe_gpu_add_scalar(const int N, const CAFFE_FP16_MTYPE alpha, float16* Y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_scalar_kernel<float16,CAFFE_FP16_MTYPE><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, alpha, Y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise addition kernel: y = a + b, arithmetic in Mtype.
template <typename Dtype, typename Mtype>
__global__ void add_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>( Get<Mtype>(a[index]) + Get<Mtype>(b[index]) );
  }
}
// y = a + b, fp32.
template <>
void caffe_gpu_add<float,float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a + b, fp64.
template <>
void caffe_gpu_add<double,double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a + b on fp16 data; arithmetic in CAFFE_FP16_MTYPE.
template <>
void caffe_gpu_add<float16,CAFFE_FP16_MTYPE>(const int N, const float16* a, const float16* b,
    float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_kernel<float16,CAFFE_FP16_MTYPE><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise subtraction kernel: y = a - b, arithmetic in Mtype.
template <typename Dtype, typename Mtype>
__global__ void sub_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>( Get<Mtype>(a[index]) - Get<Mtype>(b[index]) );
  }
}
// y = a - b, fp32.
template <>
void caffe_gpu_sub<float,float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sub_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a - b, fp64.
template <>
void caffe_gpu_sub<double,double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sub_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a - b on fp16 data; arithmetic in CAFFE_FP16_MTYPE.
template <>
void caffe_gpu_sub<float16,CAFFE_FP16_MTYPE>(const int N, const float16* a, const float16* b,
    float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sub_kernel<float16,CAFFE_FP16_MTYPE><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise multiplication kernel: y = a * b, arithmetic in Mtype.
template <typename Dtype, typename Mtype>
__global__ void mul_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>( Get<Mtype>(a[index]) * Get<Mtype>(b[index]) );
  }
}
// y = a * b, fp32.
template <>
void caffe_gpu_mul<float, float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a * b, fp64.
template <>
void caffe_gpu_mul<double,double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a * b on fp16 data, fp32 arithmetic.
template <>
void caffe_gpu_mul<float16,float>(const int N, const float16* a,
    const float16* b, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<float16,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a * b on fp16 data, fp16 arithmetic.
template <>
void caffe_gpu_mul<float16,float16>(const int N, const float16* a,
    const float16* b, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise division kernel: y = a / b, arithmetic in Mtype.  No guard
// against b[index] == 0; behavior then follows the arithmetic type.
template <typename Dtype, typename Mtype>
__global__ void div_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>( Get<Mtype>(a[index]) / Get<Mtype>(b[index]) );
  }
}
// y = a / b, fp32.
template <>
void caffe_gpu_div<float,float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a / b, fp64.
template <>
void caffe_gpu_div<double,double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a / b on fp16 data, fp32 arithmetic.
template <>
void caffe_gpu_div<float16,float>(const int N, const float16* a,
    const float16* b, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<float16,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a / b on fp16 data, fp16 arithmetic.
template <>
void caffe_gpu_div<float16,float16>(const int N, const float16* a,
    const float16* b, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise absolute-value kernel, arithmetic in Mtype.  NOTE(review):
// abs() is called on the Mtype value -- for Mtype=float this must resolve to
// a floating-point overload (CUDA math), not C's integer abs; confirm.
template <typename Dtype, typename Mtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>( abs(Get<Mtype>(a[index])) );
  }
}
// y = |a|, fp32.
template <>
void caffe_gpu_abs<float,float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  abs_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = |a|, fp64.
template <>
void caffe_gpu_abs<double,double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  abs_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = |a| on fp16 data, fp32 arithmetic.
template <>
void caffe_gpu_abs<float16,float>(const int N, const float16* a, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  abs_kernel<float16,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = |a| on fp16 data, fp16 arithmetic.
template <>
void caffe_gpu_abs<float16,float16>(const int N, const float16* a, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  abs_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise exponential kernel, arithmetic in Mtype.
template <typename Dtype, typename Mtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>( exp(Get<Mtype>(a[index])) );
  }
}
// y = exp(a), fp32.
template <>
void caffe_gpu_exp<float,float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  exp_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = exp(a), fp64.
template <>
void caffe_gpu_exp<double,double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  exp_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = exp(a) on fp16 data, fp16 arithmetic.
template <>
void caffe_gpu_exp<float16,float16>(const int N, const float16* a, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  exp_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = exp(a) on fp16 data with fp32 arithmetic.
// Fix: instantiate exp_kernel with Mtype = float as the <float16,float>
// specialization requests -- the original launched exp_kernel<float16,float16>,
// inconsistent with the <float16,float> pattern used by abs/mul/div above.
template <>
void caffe_gpu_exp<float16,float>(const int N, const float16* a, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  exp_kernel<float16,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise natural-log kernel; log() is applied directly to the Dtype
// value.  NOTE(review): for Dtype=float16 this relies on a log overload or
// implicit promotion for float16 -- confirm.
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = log(a[index]);
  }
}
// y = log(a), fp32.
// Fix: added CUDA_POST_KERNEL_CHECK after each launch -- these three were the
// only launch wrappers in this file that did not check for launch errors.
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = log(a), fp64.
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = log(a) on fp16 data (log applied to the float16 value itself).
template <>
void caffe_gpu_log<float16>(const int N, const float16* a, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  log_kernel<float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise power kernel: y = a ^ alpha, arithmetic in Mtype.
template <typename Dtype, typename Mtype>
__global__ void powx_kernel(const int n, const Dtype* a,
    const Mtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = Get<Dtype>( pow(Get<Mtype>(a[index]), alpha) );
  }
}
// y = a^alpha, fp32.
template <>
void caffe_gpu_powx<float,float>(const int N, const float* a,
    const float alpha, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<float,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a^alpha, fp64.
template <>
void caffe_gpu_powx<double,double>(const int N, const double* a,
    const double alpha, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<double,double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a^alpha on fp16 data, fp16 arithmetic.
template <>
void caffe_gpu_powx<float16,float16>(const int N, const float16* a,
    const float16 alpha, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<float16,float16><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
  CUDA_POST_KERNEL_CHECK;
}
// y = a^alpha on fp16 data, fp32 arithmetic.
template <>
void caffe_gpu_powx<float16,float>(const int N, const float16* a,
    const float alpha, float16* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<float16,float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
  CUDA_POST_KERNEL_CHECK;
}
// sign(x): +1/0/-1 computed branch-free as (0 < x) - (x < 0);
// sgnbit(x): the sign bit of x via signbit().
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = Get<Dtype>( (Mtype(0) < Get<Mtype>(x[index]))
    - (Get<Mtype>(x[index]) < Mtype(0))) );
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = Get<Dtype>( signbit(Get<Mtype>(x[index]))) );
// Per-element popcount-of-XOR kernels used by caffe_gpu_hamming_distance.
// NOTE(review): the static_casts are value conversions (truncation toward
// zero), not bit reinterpretations of the float pattern -- likely related to
// why hamming distance below is marked NOT_IMPLEMENTED; confirm intent.
__global__ void popc_kernel(const int n, const float* a,
    const float* b, uint8_t* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = __popc(static_cast<uint32_t>(a[index]) ^
        static_cast<uint32_t>(b[index]));
  }
}
// fp64 variant using the 64-bit popcount.
__global__ void popcll_kernel(const int n, const double* a,
    const double* b, uint8_t* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
        static_cast<uint64_t>(b[index]));
  }
}
// fp16 variant: values are widened to float first, then value-converted.
__global__ void popch_kernel(const int n, const float16* a,
    const float16* b, uint8_t* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = __popc(static_cast<uint32_t>(Get<float>(a[index])) ^
        static_cast<uint32_t>(Get<float>(b[index])));
  }
}
// Hamming distance, fp32.  Known-broken and guarded with NOT_IMPLEMENTED
// (see the referenced failing unit test); the code after the macro is the
// intended implementation, unreachable if NOT_IMPLEMENTED aborts -- confirm
// the macro's behavior.
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
    const float* y) {
  // TODO: Fix caffe_gpu_hamming_distance (see failing unit test
  // TestHammingDistanceGPU in test_math_functions.cpp).
  NOT_IMPLEMENTED;
  thrust::device_vector<uint8_t> popcounts(n);
  // NOLINT_NEXT_LINE(whitespace/operators)
  popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
      n, x, y, thrust::raw_pointer_cast(popcounts.data()));
  return thrust::reduce(popcounts.begin(), popcounts.end(),
      (uint32_t) 0, thrust::plus<uint32_t>());
}
// Hamming distance, fp64 (same status as above).
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
    const double* y) {
  // TODO: Fix caffe_gpu_hamming_distance (see failing unit test
  // TestHammingDistanceGPU in test_math_functions.cpp).
  NOT_IMPLEMENTED;
  thrust::device_vector<uint8_t> popcounts(n);
  // NOLINT_NEXT_LINE(whitespace/operators)
  popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
      n, x, y, thrust::raw_pointer_cast(popcounts.data()));
  return thrust::reduce(popcounts.begin(), popcounts.end(),
      /* NOLINT_NEXT_LINE(build/include_what_you_use) */
      (uint32_t) 0, thrust::plus<uint32_t>());
}
// Hamming distance, fp16 (same status as above).
template <>
uint32_t caffe_gpu_hamming_distance<float16>(const int n, const float16* x,
    const float16* y) {
  // TODO: Fix caffe_gpu_hamming_distance (see failing unit test
  // TestHammingDistanceGPU in test_math_functions.cpp).
  NOT_IMPLEMENTED;
  thrust::device_vector<uint8_t> popcounts(n);
  // NOLINT_NEXT_LINE(whitespace/operators)
  popch_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
      n, x, y, thrust::raw_pointer_cast(popcounts.data()));
  return thrust::reduce(popcounts.begin(), popcounts.end(),
      /* NOLINT_NEXT_LINE(build/include_what_you_use) */
      (uint32_t) 0, thrust::plus<uint32_t>());
}
// Element-wise cast of an array from T_IN to T_OUT through the Get<>
// conversion helpers.  Grid-stride loop: correct under any launch shape.
template <typename T_IN, typename T_OUT>
__global__
void convert_kernel(const int n, const T_IN* in, T_OUT* out)
{
  const int stride = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
    out[i] = Get<T_OUT>(in[i]);
  }
}
// Cast n elements from T_IN to T_OUT on the device.  The grid-stride kernel
// tolerates the over-provisioned n/512+1 grid.
// Fix: added CUDA_POST_KERNEL_CHECK after the launch, for consistency with
// every other launch wrapper in this file.
template <typename T_IN, typename T_OUT>
void caffe_gpu_convert(const int n, const T_IN* in, T_OUT* out)
{
  convert_kernel<T_IN,T_OUT><<<n / 512 + 1, 512>>>(n, in, out);
  CUDA_POST_KERNEL_CHECK;
}
// Fill r with n raw 32-bit random integers.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
// Uniform RNG in [a, b), fp32: generate in [0,1), then scale and shift.
// The scale/shift launches are skipped when they would be identities.
template <>
void caffe_gpu_rng_uniform<float,float>(const int n, const float a, const float b,
    float* r) {
  CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
  const float range = b - a;
  if (range != static_cast<float>(1)) {
    caffe_gpu_scal<float,float>(n, range, r);
  }
  if (a != static_cast<float>(0)) {
    caffe_gpu_add_scalar<float,float>(n, a, r);
  }
}
// Uniform RNG in [a, b), fp64.
template <>
void caffe_gpu_rng_uniform<double,double>(const int n, const double a, const double b,
    double* r) {
  CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
  const double range = b - a;
  if (range != static_cast<double>(1)) {
    caffe_gpu_scal<double,double>(n, range, r);
  }
  if (a != static_cast<double>(0)) {
    caffe_gpu_add_scalar<double,double>(n, a, r);
  }
}
// Uniform RNG into fp16: draw fp32 uniforms into a staging vector, rescale,
// then narrow to float16.
template <>
void caffe_gpu_rng_uniform<float16,float>(const int n, const float a, const float b,
    float16* r) {
  thrust::device_vector<float> rf(n);
  CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), thrust::raw_pointer_cast(rf.data()), n));
  const float range = b - a;
  if (range != static_cast<float>(1)) {
    caffe_gpu_scal<float,float>(n, range, thrust::raw_pointer_cast(rf.data()));
  }
  if (a != static_cast<float>(0)) {
    caffe_gpu_add_scalar<float,float>(n, a, thrust::raw_pointer_cast(rf.data()));
  }
  caffe_gpu_convert<float,float16>(n, thrust::raw_pointer_cast(rf.data()), r);
}
// Half/half entry point: delegates to the <float16,float> specialization;
// a and b are presumably widened to float at the call -- confirm float16
// provides that conversion.
template <>
void caffe_gpu_rng_uniform<float16,float16>(const int n, const float16 a, const float16 b,
    float16* r) {
  caffe_gpu_rng_uniform<float16,float>(n, a, b, r);
}
// Gaussian RNG, fp32 output.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
    float* r) {
  CURAND_CHECK(
      curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
// Gaussian RNG, fp64 output.
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
    double* r) {
  CURAND_CHECK(
      curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
// Gaussian RNG into fp16 with fp32 parameters: sample fp32 normals into a
// staging vector, then convert down to float16.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
    float16* r) {
  // TODO: call fp16-based version of curandGenerateNormal when it becomes available.
  thrust::device_vector<float> rf(n);
  CURAND_CHECK(
      curandGenerateNormal(Caffe::curand_generator(), thrust::raw_pointer_cast(rf.data()), n, mu, sigma));
  caffe_gpu_convert<float,float16>(n, thrust::raw_pointer_cast(rf.data()), r);
}
// Gaussian RNG into fp16 with fp16 parameters; mu/sigma are presumably
// widened to float for the fp32 generator call -- confirm the conversion.
template <>
void caffe_gpu_rng_gaussian(const int n, const float16 mu, const float16 sigma,
    float16* r) {
  // TODO: call fp16-based version of curandGenerateNormal when it becomes available.
  thrust::device_vector<float> rf(n);
  CURAND_CHECK(
      curandGenerateNormal(Caffe::curand_generator(), thrust::raw_pointer_cast(rf.data()), n, mu, sigma));
  caffe_gpu_convert<float,float16>(n, thrust::raw_pointer_cast(rf.data()), r);
}
} // namespace caffe
|
18bbb7a8be2b0c057ddc7e84642fad9788759584.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author sgazeos@gmail.com
//
#include <ops/declarable/helpers/weights.h>
namespace nd4j {
namespace ops {
namespace helpers {
// Device helper: for each element of the (int-typed) input equal to `val`,
// atomically accumulate into the single scalar at outputBuffer -- the
// matching weight when weightsBuffer is non-null, otherwise +1 (a count).
// Threads of one block cover the input with a block-stride loop; concurrent
// updates are safe because every addition goes through nd4j_atomicAdd.
template <typename T>
static __device__ void adjustWeightsKernelD(void* inputBuffer, Nd4jLong* inputShape,
                        void* weightsBuffer, Nd4jLong* weightsShape,
                        void* outputBuffer, Nd4jLong inputLength, Nd4jLong weightsLength,
                        Nd4jLong outputLength, int val) {
    // typedef Nd4jLong T;
    auto tid = threadIdx.x;
    //int threadCount = gridDim.x * blockDim.x;
    // NOTE(review): outputPart/offset are never used by the live code --
    // leftovers of the commented-out shared-memory staging path below.
    __shared__ T* outputPart;
    __shared__ Nd4jLong offset;
    //for (int e = 0; e < inputLength; e++) {
    for (Nd4jLong e = tid; e < inputLength; e += blockDim.x) {
        Nd4jLong xOffset = shape::getIndexOffset(e, inputShape, inputLength);
        int current = *(reinterpret_cast<int*>(inputBuffer) + xOffset);
        if (current == val) {
            //printf("%lld\n", xOffset);
            //Nd4jLong zOffset = shape::getIndexOffset(val, outputShape, outputLength);
            if (weightsBuffer != nullptr) {
                Nd4jLong yOffset = shape::getIndexOffset(e, weightsShape, weightsLength);
                //atomicAdd();
                //*reinterpret_cast<int *>(outputBuffer) += reinterpret_cast<int *>(weightsBuffer)[yOffset];
                nd4j::math::atomics::nd4j_atomicAdd(reinterpret_cast<T *>(outputBuffer), reinterpret_cast<T *>(weightsBuffer)[yOffset]); //output->p(val, output->e<T>(val) + 1);
                // atomicAdd(reinterpret_cast<int *>(outputBuffer), reinterpret_cast<int *>(weightsBuffer)[yOffset]); //output->p(val, output->e<T>(val) + 1);
            }
            else {
                //*reinterpret_cast<int *>(outputBuffer) += int(1);
                //printf("outputBuffer[0] = %d\n", static_cast<int>(*(reinterpret_cast<T *>(outputBuffer))));
                nd4j::math::atomics::nd4j_atomicAdd(reinterpret_cast<T *>(outputBuffer), T(1)); //output->p(val, output->e<T>(val) + 1);
                // atomicAdd(reinterpret_cast<int *>(outputBuffer), int(1)); //output->p(val, output->e<T>(val) + 1);
                // printf("outputBuffer[%ld] = %d\n", zOffset, static_cast<int>(*(reinterpret_cast<T *>(outputBuffer) + zOffset)));
            }
            //printf("xOffset is %ld, zOffset is %ld\n", xOffset, zOffset);
        }
    }
    //  if (threadIdx.x + offset < outputLength)
    //      reinterpret_cast<T *>(outputBuffer)[threadIdx.x + offset] = outputPart[threadIdx.x];
}
template <typename T>
static __global__ void adjustWeightsKernel(void* inputBuffer, Nd4jLong* inputShape,
void* weightsBuffer, Nd4jLong* weightsShape,
void* outputBuffer, Nd4jLong* outputShape,
int minLength, int maxLength) {
//auto tid = blockIdx.x * blockDim.x + threadIdx.x; // * blockDim.x; // + threadIdx.x;
int threadCount = gridDim.x * blockDim.x;
Nd4jLong inputLength = shape::length(inputShape);
Nd4jLong weightsLength = 0;
if (weightsBuffer != nullptr)
weightsLength = shape::length(weightsShape);
Nd4jLong outputLength = shape::length(outputShape);
Nd4jLong borderLen = 1;//outputLength / gridDim.x + outputLength % gridDim.x;
for (Nd4jLong e = blockIdx.x; e < outputLength; e += threadCount) {
//if (blockIdx.x < outputLength) {
//if (e + threadCount < outputLength) {
Nd4jLong zOffset = shape::getIndexOffset(e, outputShape, outputLength);
//printf("%d %d %d\n", blockIdx.x, blockDim.x, threadIdx.x);
//Nd4jLong borderLen = 1;
T* outputBufferZ = reinterpret_cast<T*>(outputBuffer) + zOffset;
adjustWeightsKernelD<T>(inputBuffer, inputShape, weightsBuffer, weightsShape, (void*)outputBufferZ,
inputLength, weightsLength, outputLength, (int)zOffset);
}
}
template <typename T>
static void adjustWeights_(nd4j::LaunchContext * context, NDArray* input, NDArray* weights, NDArray* output, int minLength, int maxLength) {
// for (int e = 0; e < input->lengthOf(); e++) {
// int val = input->e<int>(e);
// if (val < maxLength) {
// if (weights != nullptr)
// output->p(val, output->e<T>(val) + weights->e<T>(e));
// else
// output->p(val, output->e<T>(val) + 1);
// }
// }
dim3 launchDims(256, 512, 8192);
auto stream = context->getCudaStream();
hipLaunchKernelGGL(( adjustWeightsKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, input->specialBuffer(),
input->getSpecialShapeInfo(), weights?weights->specialBuffer():nullptr, weights?weights->getSpecialShapeInfo():nullptr,
output->specialBuffer(), output->specialShapeInfo(), minLength, maxLength);
}
void adjustWeights(nd4j::LaunchContext * context, NDArray* input, NDArray* weights, NDArray* output, int minLength, int maxLength) {
BUILD_SINGLE_SELECTOR(output->dataType(), adjustWeights_, (context, input, weights, output, minLength, maxLength), GENERIC_NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void adjustWeights_, (nd4j::LaunchContext * context, NDArray* input, NDArray* weights, NDArray* output, int minLength, int maxLength), GENERIC_NUMERIC_TYPES);
}
}
} | 18bbb7a8be2b0c057ddc7e84642fad9788759584.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author sgazeos@gmail.com
//
#include <ops/declarable/helpers/weights.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __device__ void adjustWeightsKernelD(void* inputBuffer, Nd4jLong* inputShape,
void* weightsBuffer, Nd4jLong* weightsShape,
void* outputBuffer, Nd4jLong inputLength, Nd4jLong weightsLength,
Nd4jLong outputLength, int val) {
// typedef Nd4jLong T;
auto tid = threadIdx.x;
//int threadCount = gridDim.x * blockDim.x;
__shared__ T* outputPart;
__shared__ Nd4jLong offset;
//for (int e = 0; e < inputLength; e++) {
for (Nd4jLong e = tid; e < inputLength; e += blockDim.x) {
Nd4jLong xOffset = shape::getIndexOffset(e, inputShape, inputLength);
int current = *(reinterpret_cast<int*>(inputBuffer) + xOffset);
if (current == val) {
//printf("%lld\n", xOffset);
//Nd4jLong zOffset = shape::getIndexOffset(val, outputShape, outputLength);
if (weightsBuffer != nullptr) {
Nd4jLong yOffset = shape::getIndexOffset(e, weightsShape, weightsLength);
//atomicAdd();
//*reinterpret_cast<int *>(outputBuffer) += reinterpret_cast<int *>(weightsBuffer)[yOffset];
nd4j::math::atomics::nd4j_atomicAdd(reinterpret_cast<T *>(outputBuffer), reinterpret_cast<T *>(weightsBuffer)[yOffset]); //output->p(val, output->e<T>(val) + 1);
// atomicAdd(reinterpret_cast<int *>(outputBuffer), reinterpret_cast<int *>(weightsBuffer)[yOffset]); //output->p(val, output->e<T>(val) + 1);
}
else {
//*reinterpret_cast<int *>(outputBuffer) += int(1);
//printf("outputBuffer[0] = %d\n", static_cast<int>(*(reinterpret_cast<T *>(outputBuffer))));
nd4j::math::atomics::nd4j_atomicAdd(reinterpret_cast<T *>(outputBuffer), T(1)); //output->p(val, output->e<T>(val) + 1);
// atomicAdd(reinterpret_cast<int *>(outputBuffer), int(1)); //output->p(val, output->e<T>(val) + 1);
// printf("outputBuffer[%ld] = %d\n", zOffset, static_cast<int>(*(reinterpret_cast<T *>(outputBuffer) + zOffset)));
}
//printf("xOffset is %ld, zOffset is %ld\n", xOffset, zOffset);
}
}
// if (threadIdx.x + offset < outputLength)
// reinterpret_cast<T *>(outputBuffer)[threadIdx.x + offset] = outputPart[threadIdx.x];
}
template <typename T>
static __global__ void adjustWeightsKernel(void* inputBuffer, Nd4jLong* inputShape,
void* weightsBuffer, Nd4jLong* weightsShape,
void* outputBuffer, Nd4jLong* outputShape,
int minLength, int maxLength) {
//auto tid = blockIdx.x * blockDim.x + threadIdx.x; // * blockDim.x; // + threadIdx.x;
int threadCount = gridDim.x * blockDim.x;
Nd4jLong inputLength = shape::length(inputShape);
Nd4jLong weightsLength = 0;
if (weightsBuffer != nullptr)
weightsLength = shape::length(weightsShape);
Nd4jLong outputLength = shape::length(outputShape);
Nd4jLong borderLen = 1;//outputLength / gridDim.x + outputLength % gridDim.x;
for (Nd4jLong e = blockIdx.x; e < outputLength; e += threadCount) {
//if (blockIdx.x < outputLength) {
//if (e + threadCount < outputLength) {
Nd4jLong zOffset = shape::getIndexOffset(e, outputShape, outputLength);
//printf("%d %d %d\n", blockIdx.x, blockDim.x, threadIdx.x);
//Nd4jLong borderLen = 1;
T* outputBufferZ = reinterpret_cast<T*>(outputBuffer) + zOffset;
adjustWeightsKernelD<T>(inputBuffer, inputShape, weightsBuffer, weightsShape, (void*)outputBufferZ,
inputLength, weightsLength, outputLength, (int)zOffset);
}
}
template <typename T>
static void adjustWeights_(nd4j::LaunchContext * context, NDArray* input, NDArray* weights, NDArray* output, int minLength, int maxLength) {
// for (int e = 0; e < input->lengthOf(); e++) {
// int val = input->e<int>(e);
// if (val < maxLength) {
// if (weights != nullptr)
// output->p(val, output->e<T>(val) + weights->e<T>(e));
// else
// output->p(val, output->e<T>(val) + 1);
// }
// }
dim3 launchDims(256, 512, 8192);
auto stream = context->getCudaStream();
adjustWeightsKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(input->specialBuffer(),
input->getSpecialShapeInfo(), weights?weights->specialBuffer():nullptr, weights?weights->getSpecialShapeInfo():nullptr,
output->specialBuffer(), output->specialShapeInfo(), minLength, maxLength);
}
void adjustWeights(nd4j::LaunchContext * context, NDArray* input, NDArray* weights, NDArray* output, int minLength, int maxLength) {
BUILD_SINGLE_SELECTOR(output->dataType(), adjustWeights_, (context, input, weights, output, minLength, maxLength), GENERIC_NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void adjustWeights_, (nd4j::LaunchContext * context, NDArray* input, NDArray* weights, NDArray* output, int minLength, int maxLength), GENERIC_NUMERIC_TYPES);
}
}
} |
26b9fe5095de3676795137c631ea7961565d5096.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <stdlib.h>
// vec length is max for 1 block
__global__ void reduce(float *vec, float *res, size_t len) {
// len == 2^k
// 2 * idx + licznik
// licznik = len / (2^i)
// i -> numer iteracji (od 1)
// zalozenie -> len == 2^k dla k \in netural
__shared__ float svec[len];
svec[threadIdx.x * 2] = vec[threadIdx.x * 2];
svec[threadIdx.x * 2 + 1] = vec[threadIdx.x * 2 + 1];
int i = 1;
while (len > 2) {
len /= 2;
if (threadIdx.x % i == 0) {
svec[threadIdx.x * 2] += svec[threadIdx.x * 2 + i];
}
i *= 2;
__syncthreads();
}
*res = svec[0];
}
float main() {
float *vec_cpu;
size_t size = LEN * sizeof(float);
vec_cpu = (float*)malloc(size);
for (float i = 0; i < LEN; i++) {
vec_cpu[i] = i;
}
float *vec_gpu, *res_gpu;
hipMalloc((void**)&vec_gpu, size);
hipMemcpy(vec_gpu, vec_cpu, size, hipMemcpyHostToDevice);
hipMalloc((void**)&res_gpu, sizeof(float));
hipLaunchKernelGGL(( reduce), dim3(1), dim3(1024), 0, 0, vec_gpu, res_gpu, LEN);
hipMemcpy(res_cpu, res_gpu, size, hipMemcpyDeviceToHost);
hipFree(res_gpu);
hipFree(vec_gpu);
prfloatf("%d\n", *res_cpu);
free(res_cpu);
free(vec_cpu);
return 0;
} | 26b9fe5095de3676795137c631ea7961565d5096.cu | #include <cuda_runtime_api.h>
#include <stdlib.h>
// vec length is max for 1 block
__global__ void reduce(float *vec, float *res, size_t len) {
// len == 2^k
// 2 * idx + licznik
// licznik = len / (2^i)
// i -> numer iteracji (od 1)
// zalozenie -> len == 2^k dla k \in netural
__shared__ float svec[len];
svec[threadIdx.x * 2] = vec[threadIdx.x * 2];
svec[threadIdx.x * 2 + 1] = vec[threadIdx.x * 2 + 1];
int i = 1;
while (len > 2) {
len /= 2;
if (threadIdx.x % i == 0) {
svec[threadIdx.x * 2] += svec[threadIdx.x * 2 + i];
}
i *= 2;
__syncthreads();
}
*res = svec[0];
}
float main() {
float *vec_cpu;
size_t size = LEN * sizeof(float);
vec_cpu = (float*)malloc(size);
for (float i = 0; i < LEN; i++) {
vec_cpu[i] = i;
}
float *vec_gpu, *res_gpu;
cudaMalloc((void**)&vec_gpu, size);
cudaMemcpy(vec_gpu, vec_cpu, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&res_gpu, sizeof(float));
reduce<<<1, 1024>>>(vec_gpu, res_gpu, LEN);
cudaMemcpy(res_cpu, res_gpu, size, cudaMemcpyDeviceToHost);
cudaFree(res_gpu);
cudaFree(vec_gpu);
prfloatf("%d\n", *res_cpu);
free(res_cpu);
free(vec_cpu);
return 0;
} |
8ed5f4f2d4765b6c56a628b8516f42ecd655396c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "include/LBMkernels.cuh"
#include "include/utils.cuh"
#include "include/SWE.cuh"
#include "include/PDEfeq.cuh"
#include "include/BC.cuh"
#include "../include/structs.h"
#include "../include/macros.h"
__device__ void calculateMacroscopic(prec* localMacroscopic, prec* localf, prec e){
localMacroscopic[0] = localf[0] + (localf[1] + localf[2] + localf[3] + localf[4]) + (localf[5] + localf[6] + localf[7] + localf[8]);
localMacroscopic[1] = e * ((localf[1] - localf[3]) + (localf[5] - localf[6] - localf[7] + localf[8])) / localMacroscopic[0];
localMacroscopic[2] = e * ((localf[2] - localf[4]) + (localf[5] + localf[6] - localf[7] - localf[8])) / localMacroscopic[0];
}
__global__ void LBMpull(const configStruct config,
const prec* __restrict__ b, const unsigned char* __restrict__ binary1,
const unsigned char* __restrict__ binary2, const prec* __restrict__ f1,
prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < config.Lx*config.Ly) {
unsigned char b1 = binary1[i];
unsigned char b2 = binary2[i];
if(b1 != 0 || b2 != 0){
int ex[8] = {1,0,-1,0,1,-1,-1,1};
int ey[8] = {0,1,0,-1,1,1,-1,-1};
prec forcing[8];
#if PDE == 1
calculateForcingSWE(forcing, h, b, config.e, i, config.Lx, ex, ey);
#elif PDE == 5
calculateForcingUser(forcing, h, b, config.e, i, config.Lx, ex, ey);
#else
for (int j = 0; j < 8; j++)
forcing[j] = 0;
#endif
prec localf[9];
localf[0] = f1[i];
for (int j = 1; j < 9; j++){
if(((b1>>(j-1)) & 1) & (~(b2>>(j-1)) & 1))
localf[j] = f1[IDXcm(IDX(i, j, config.Lx, ex, ey), j, config.Lx, config.Ly)] + forcing[j-1];
else if((~(b1>>(j-1)) & 1) & (~(b2>>(j-1)) & 1))
localf[j] = f1[IDXcm(i, j, config.Lx, config.Ly)];
}
for (int j = 1; j < 9; j++)
if((~(b1>>(j-1)) & 1) & ((b2>>(j-1)) & 1))
#if BC1 == 1
OBC(localf, f1, i, j, config.Lx, config.Ly);
#elif BC1 == 2
PBC(localf, f1, i, j, config.Lx, config.Ly, ex, ey);
#elif BC1 == 3
BBBC(localf, j);
#elif BC1 == 4
SBC(localf, j, b1, b2);
#elif BC1 == 5
UBC1(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#elif BC1 == 6
UBC2(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#endif
#if BC2 != 0
for (int j = 1; j < 9; j++)
if(((b1>>(j-1)) & 1) & ((b2>>(j-1)) & 1))
#if BC2 == 1
localf[j] = OBC(localf, f1, i, j, config.Lx, config.Ly);
#elif BC2 == 2
localf[j] = PBC(localf, f1, i, j, config.Lx, config.Ly, ex, ey);
#elif BC2 == 3
localf[j] = BBBC(localf, j);
#elif BC2 == 4
localf[j] = SBC(localf, j, b1, b2);
#elif BC2 == 5
localf[j] = BC1User(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#elif BC2 == 6
localf[j] = BC2User(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#endif
#endif
prec localMacroscopic[3];
calculateMacroscopic(localMacroscopic, localf, config.e);
h[i] = localMacroscopic[0];
prec feq[9];
#if PDE == 1
calculateFeqSWE(feq, localMacroscopic, config.e);
#elif PDE == 2
calculateFeqHE(feq, localMacroscopic, config.e);
#elif PDE == 3
calculateFeqWE(feq, localMacroscopic, config.e);
#elif PDE == 4
calculateFeqNSE(feq, localMacroscopic, config.e);
#elif PDE == 5
calculateFeqUser(feq, localMacroscopic, config.e);
#endif
for (int j = 0; j < 9; j++)
f2[IDXcm(i, j, config.Lx, config.Ly)] = localf[j] - (localf[j] - feq[j]) / config.tau;
}
}
}
| 8ed5f4f2d4765b6c56a628b8516f42ecd655396c.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "include/LBMkernels.cuh"
#include "include/utils.cuh"
#include "include/SWE.cuh"
#include "include/PDEfeq.cuh"
#include "include/BC.cuh"
#include "../include/structs.h"
#include "../include/macros.h"
__device__ void calculateMacroscopic(prec* localMacroscopic, prec* localf, prec e){
localMacroscopic[0] = localf[0] + (localf[1] + localf[2] + localf[3] + localf[4]) + (localf[5] + localf[6] + localf[7] + localf[8]);
localMacroscopic[1] = e * ((localf[1] - localf[3]) + (localf[5] - localf[6] - localf[7] + localf[8])) / localMacroscopic[0];
localMacroscopic[2] = e * ((localf[2] - localf[4]) + (localf[5] + localf[6] - localf[7] - localf[8])) / localMacroscopic[0];
}
__global__ void LBMpull(const configStruct config,
const prec* __restrict__ b, const unsigned char* __restrict__ binary1,
const unsigned char* __restrict__ binary2, const prec* __restrict__ f1,
prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < config.Lx*config.Ly) {
unsigned char b1 = binary1[i];
unsigned char b2 = binary2[i];
if(b1 != 0 || b2 != 0){
int ex[8] = {1,0,-1,0,1,-1,-1,1};
int ey[8] = {0,1,0,-1,1,1,-1,-1};
prec forcing[8];
#if PDE == 1
calculateForcingSWE(forcing, h, b, config.e, i, config.Lx, ex, ey);
#elif PDE == 5
calculateForcingUser(forcing, h, b, config.e, i, config.Lx, ex, ey);
#else
for (int j = 0; j < 8; j++)
forcing[j] = 0;
#endif
prec localf[9];
localf[0] = f1[i];
for (int j = 1; j < 9; j++){
if(((b1>>(j-1)) & 1) & (~(b2>>(j-1)) & 1))
localf[j] = f1[IDXcm(IDX(i, j, config.Lx, ex, ey), j, config.Lx, config.Ly)] + forcing[j-1];
else if((~(b1>>(j-1)) & 1) & (~(b2>>(j-1)) & 1))
localf[j] = f1[IDXcm(i, j, config.Lx, config.Ly)];
}
for (int j = 1; j < 9; j++)
if((~(b1>>(j-1)) & 1) & ((b2>>(j-1)) & 1))
#if BC1 == 1
OBC(localf, f1, i, j, config.Lx, config.Ly);
#elif BC1 == 2
PBC(localf, f1, i, j, config.Lx, config.Ly, ex, ey);
#elif BC1 == 3
BBBC(localf, j);
#elif BC1 == 4
SBC(localf, j, b1, b2);
#elif BC1 == 5
UBC1(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#elif BC1 == 6
UBC2(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#endif
#if BC2 != 0
for (int j = 1; j < 9; j++)
if(((b1>>(j-1)) & 1) & ((b2>>(j-1)) & 1))
#if BC2 == 1
localf[j] = OBC(localf, f1, i, j, config.Lx, config.Ly);
#elif BC2 == 2
localf[j] = PBC(localf, f1, i, j, config.Lx, config.Ly, ex, ey);
#elif BC2 == 3
localf[j] = BBBC(localf, j);
#elif BC2 == 4
localf[j] = SBC(localf, j, b1, b2);
#elif BC2 == 5
localf[j] = BC1User(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#elif BC2 == 6
localf[j] = BC2User(localf, f1, i, j, config.Lx, config.Ly, ex, ey, b1, b2);
#endif
#endif
prec localMacroscopic[3];
calculateMacroscopic(localMacroscopic, localf, config.e);
h[i] = localMacroscopic[0];
prec feq[9];
#if PDE == 1
calculateFeqSWE(feq, localMacroscopic, config.e);
#elif PDE == 2
calculateFeqHE(feq, localMacroscopic, config.e);
#elif PDE == 3
calculateFeqWE(feq, localMacroscopic, config.e);
#elif PDE == 4
calculateFeqNSE(feq, localMacroscopic, config.e);
#elif PDE == 5
calculateFeqUser(feq, localMacroscopic, config.e);
#endif
for (int j = 0; j < 9; j++)
f2[IDXcm(i, j, config.Lx, config.Ly)] = localf[j] - (localf[j] - feq[j]) / config.tau;
}
}
}
|
0bc7ee2b9b28129501cdbfc2624aaf38e6e9f596.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <vector>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "tensor.h"
#define MAX(x, y) ((x>y) ? x : y)
double s_max_gflops = 0.;
int time_sgemm(Tensor<float> A, Tensor<float> B, Tensor<float> C, bool a_t, bool b_t, hipblasHandle_t cublas_handle) {
const float alpha = 1.f / static_cast<float>(A.dims()[1]);
const float beta = 1.f;
int m = C.dims()[0];
int k = a_t ? A.dims()[0] : A.dims()[1];
int n = C.dims()[1];
int numRepeats = 50;
// Warm up
hipblasStatus_t stat = hipblasSgemm(cublas_handle,
a_t ? HIPBLAS_OP_T : HIPBLAS_OP_N,
b_t ? HIPBLAS_OP_T : HIPBLAS_OP_N,
m,
n,
k,
&alpha,
A.begin(), A.dims()[0],
B.begin(), B.dims()[0],
&beta,
C.begin(), C.dims()[0]);
if (stat != HIPBLAS_STATUS_SUCCESS) {
throw std::runtime_error("sgemm failed");
}
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < numRepeats; ++i) {
hipblasStatus_t stat = hipblasSgemm(cublas_handle,
a_t ? HIPBLAS_OP_T : HIPBLAS_OP_N,
b_t ? HIPBLAS_OP_T : HIPBLAS_OP_N,
m,
n,
k,
&alpha,
A.begin(), A.dims()[0],
B.begin(), B.dims()[0],
&beta,
C.begin(), C.dims()[0]);
if (stat != HIPBLAS_STATUS_SUCCESS) {
throw std::runtime_error("sgemm failed");
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
return static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / numRepeats);
}
double cal_gflops(int m, int n, int k, double usec)
{
double flops = 2. * m * n * k;
double gflops = (1E-9*flops) / (1E-6*usec);
return gflops;
}
void gemm(int m, int n, int k, bool a_t, bool b_t, hipblasHandle_t cublas_handle, hiprandGenerator_t curand_gen)
{
double time, gflops;
float flag = 0.;
auto a = rand_float({m, k}, curand_gen);
auto b = rand_float({a_t ? m : (b_t ? n : k), b_t ? k : n}, curand_gen);
auto c = zeros({a_t ? k : m, n}, flag);
time = time_sgemm(a, b, c, a_t, b_t, cublas_handle);
gflops = cal_gflops(m, n, k, time);
s_max_gflops = MAX(gflops, s_max_gflops);
std::cout << std::setw(15) << std::setprecision(6) << time;
std::cout << std::setw(15) << std::setprecision(6) << gflops;
std::cout << std::endl;
}
int main(int argc, char **argv) {
int start = 64;
int end = 10240;
if (argc < 3) {
return 0;
}
start = std::atoi(argv[1]);
end = std::atoi(argv[2]);
hipFree(0);
hipblasHandle_t cublas_handle;
hipblasStatus_t status = hipblasCreate(&cublas_handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cout << "CUBLAS init failed" << std::endl;
}
hiprandGenerator_t curand_gen;
hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
std::vector<std::tuple<int, int, int, bool, bool>> problems;
for (int i = start; i <= end; i = i + 64) {
problems.push_back(std::make_tuple(i, i, i, false, false));
}
std::cout << "[Time and GFLOPS Result]" << std::endl;
std::cout << std::setfill(' ');
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(7) << "a_t" << std::setw(7) << "b_t";
std::cout << std::setw(15) << "Time (usec)" << std::setw(15) << "GFLOPS";
std::cout << std::endl;
for (const auto &problem : problems) {
int m, n, k;
bool a_t, b_t;
std::tie(m, n, k, a_t, b_t) = problem;
std::cout << std::setw(7) << m;
std::cout << std::setw(7) << n;
std::cout << std::setw(7) << k;
std::cout << std::setw(7) << a_t ? "t" : "n";
std::cout << std::setw(7) << b_t ? "t" : "n";
gemm(m, n, k, a_t, b_t, cublas_handle, curand_gen);
}
std::cout << "[Peak GFLOPS]" << std::endl << std::setprecision(6) << s_max_gflops << std::endl;
hipblasDestroy(cublas_handle);
hiprandDestroyGenerator(curand_gen);
return 0;
}
| 0bc7ee2b9b28129501cdbfc2624aaf38e6e9f596.cu | #include <cstdlib>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <vector>
#include <cuda.h>
#include <cublas_v2.h>
#include <curand.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "tensor.h"
#define MAX(x, y) ((x>y) ? x : y)
double s_max_gflops = 0.;
int time_sgemm(Tensor<float> A, Tensor<float> B, Tensor<float> C, bool a_t, bool b_t, cublasHandle_t cublas_handle) {
const float alpha = 1.f / static_cast<float>(A.dims()[1]);
const float beta = 1.f;
int m = C.dims()[0];
int k = a_t ? A.dims()[0] : A.dims()[1];
int n = C.dims()[1];
int numRepeats = 50;
// Warm up
cublasStatus_t stat = cublasSgemm(cublas_handle,
a_t ? CUBLAS_OP_T : CUBLAS_OP_N,
b_t ? CUBLAS_OP_T : CUBLAS_OP_N,
m,
n,
k,
&alpha,
A.begin(), A.dims()[0],
B.begin(), B.dims()[0],
&beta,
C.begin(), C.dims()[0]);
if (stat != CUBLAS_STATUS_SUCCESS) {
throw std::runtime_error("sgemm failed");
}
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < numRepeats; ++i) {
cublasStatus_t stat = cublasSgemm(cublas_handle,
a_t ? CUBLAS_OP_T : CUBLAS_OP_N,
b_t ? CUBLAS_OP_T : CUBLAS_OP_N,
m,
n,
k,
&alpha,
A.begin(), A.dims()[0],
B.begin(), B.dims()[0],
&beta,
C.begin(), C.dims()[0]);
if (stat != CUBLAS_STATUS_SUCCESS) {
throw std::runtime_error("sgemm failed");
}
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
return static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / numRepeats);
}
double cal_gflops(int m, int n, int k, double usec)
{
double flops = 2. * m * n * k;
double gflops = (1E-9*flops) / (1E-6*usec);
return gflops;
}
void gemm(int m, int n, int k, bool a_t, bool b_t, cublasHandle_t cublas_handle, curandGenerator_t curand_gen)
{
double time, gflops;
float flag = 0.;
auto a = rand_float({m, k}, curand_gen);
auto b = rand_float({a_t ? m : (b_t ? n : k), b_t ? k : n}, curand_gen);
auto c = zeros({a_t ? k : m, n}, flag);
time = time_sgemm(a, b, c, a_t, b_t, cublas_handle);
gflops = cal_gflops(m, n, k, time);
s_max_gflops = MAX(gflops, s_max_gflops);
std::cout << std::setw(15) << std::setprecision(6) << time;
std::cout << std::setw(15) << std::setprecision(6) << gflops;
std::cout << std::endl;
}
int main(int argc, char **argv) {
int start = 64;
int end = 10240;
if (argc < 3) {
return 0;
}
start = std::atoi(argv[1]);
end = std::atoi(argv[2]);
cudaFree(0);
cublasHandle_t cublas_handle;
cublasStatus_t status = cublasCreate(&cublas_handle);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cout << "CUBLAS init failed" << std::endl;
}
curandGenerator_t curand_gen;
curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
std::vector<std::tuple<int, int, int, bool, bool>> problems;
for (int i = start; i <= end; i = i + 64) {
problems.push_back(std::make_tuple(i, i, i, false, false));
}
std::cout << "[Time and GFLOPS Result]" << std::endl;
std::cout << std::setfill(' ');
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(7) << "a_t" << std::setw(7) << "b_t";
std::cout << std::setw(15) << "Time (usec)" << std::setw(15) << "GFLOPS";
std::cout << std::endl;
for (const auto &problem : problems) {
int m, n, k;
bool a_t, b_t;
std::tie(m, n, k, a_t, b_t) = problem;
std::cout << std::setw(7) << m;
std::cout << std::setw(7) << n;
std::cout << std::setw(7) << k;
std::cout << std::setw(7) << a_t ? "t" : "n";
std::cout << std::setw(7) << b_t ? "t" : "n";
gemm(m, n, k, a_t, b_t, cublas_handle, curand_gen);
}
std::cout << "[Peak GFLOPS]" << std::endl << std::setprecision(6) << s_max_gflops << std::endl;
cublasDestroy(cublas_handle);
curandDestroyGenerator(curand_gen);
return 0;
}
|
57a0ce720e54725253e874d152784c8ae6697e3e.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <array/DataTypeUtils.h>
#include <exceptions/allocation_exception.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <memory/MemoryCounter.h>
#include <system/op_boilerplate.h>
#include "../DataBuffer.h"
namespace sd {
void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) {
// allocate new buffer
int8_t* newBuffer = nullptr;
int8_t* newSpecialBuffer = nullptr;
ALLOCATE_SPECIAL(newSpecialBuffer, _workspace, size, int8_t);
// copy data from existing buffer
if (_primaryBuffer != nullptr) {
// there's non-zero chance that primary buffer doesn't exist yet
ALLOCATE(newBuffer, _workspace, size, int8_t);
std::memcpy(newBuffer, _primaryBuffer, _lenInBytes);
if (_isOwnerPrimary) {
auto ipb = reinterpret_cast<int8_t*>(_primaryBuffer);
RELEASE(ipb, _workspace);
}
_primaryBuffer = newBuffer;
_isOwnerPrimary = true;
}
hipMemcpy(newSpecialBuffer, _specialBuffer, _lenInBytes, hipMemcpyDeviceToDevice);
if (_isOwnerSpecial) {
auto isb = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(isb, _workspace);
}
_specialBuffer = newSpecialBuffer;
_lenInBytes = size;
_isOwnerSpecial = true;
}
}
void DataBuffer::showBufferLimited() {
#if defined(DEBUG_VEDA_LOGS)
float* x = (float*)_primaryBuffer;
size_t size = getLenInBytes();
size = size > 80 ? 80 : 0;
sd_debug("cpu: %p\n", (void*)x);
for (int i = 0; i < size / sizeof(float); i++) sd_debug("%f, ", x[i]);
sd_debug("%s", "\n");
#endif
}
void DataBuffer::showCounters(const char* msg1, const char* msg2) {
#if defined(HAVE_VEDA) && defined(DEBUG_VEDA_LOGS)
sd_debug("%s %s || primary %p special %p :: wP: %d wS: %d rP: %d rS: %d\n", msg1, msg2, _primaryBuffer,
_specialBuffer, (int)_writePrimary.load(), (int)_writeSpecial.load(), (int)_readPrimary.load(),
(int)_readSpecial.load());
#endif
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateSpecial() {
if (_specialBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = sd::AffinityManager::currentDeviceId();
if (_workspace == nullptr)
if (!sd::memory::MemoryCounter::getInstance().validate(getLenInBytes()))
throw sd::allocation_exception::build("Requested amount exceeds device limits",
sd::memory::MemoryCounter::getInstance().deviceLimit(deviceId),
getLenInBytes());
ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t);
_isOwnerSpecial = true;
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countIn(deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countIn(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) {
if (isPrimaryActual() && !forceSync) {
return;
}
allocatePrimary();
auto res = hipStreamSynchronize(*context->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary failed to to some previous kernel failre", res);
res = hipMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToHost);
if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary hipMemcpy failed", res);
readPrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToSpecial(const bool forceSync) {
// in this case there's nothing to do here
if (_primaryBuffer == nullptr) return;
if (isSpecialActual() && !forceSync) {
return;
}
allocateSpecial();
auto res = hipMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), hipMemcpyHostToDevice);
if (res != 0) throw cuda_exception::build("DataBuffer::syncToSpecial hipMemcpy failed", res);
readSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::deleteSpecial() {
if (_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) {
auto p = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(p, _workspace);
_specialBuffer = nullptr;
_isOwnerSpecial = false;
// count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countOut(_deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countOut(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setCountersToZero() {
_counter.store(0L);
_writePrimary.store(0L);
_writeSpecial.store(0L);
_readPrimary.store(0L);
_readSpecial.store(0L);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyCounters(const DataBuffer& other) {
_counter.store(other._counter);
_writePrimary.store(other._readSpecial);
_writeSpecial.store(other._readPrimary);
_readPrimary.store(other._writeSpecial);
_readSpecial.store(other._writePrimary);
}
////////////////////////////////////////////////////////////////////////
// Copies up to sizeToCopyinBytes from `other` into THIS buffer's device-side
// storage, reading from whichever of other's buffers is current. Offsets are
// element counts, scaled by each buffer's own element size.
void DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetOther) { // copies only to special buffer
// nothing to copy when the source owns no buffers at all
if (other._primaryBuffer == nullptr && other._specialBuffer == nullptr) return;
// a zero size means "copy the whole source buffer"
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = other.getLenInBytes();
if (sizeToCopyinBytes == 0) return;
if (other.isPrimaryActual()) {
// source's host copy is current: host -> device transfer
auto res = hipMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
other.readPrimary();
} else {
// source's device copy is current: device -> device transfer
auto res = hipMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, hipMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res);
other.readSpecial();
}
// destination's device copy is now the most recent
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
// Copies raw host memory into this buffer's device-side storage. Both
// offsets are element counts scaled by THIS buffer's element size; the host
// pointer is assumed to hold elements of the same data type (TODO confirm).
void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetHostBuffer) { // copies only to special buffer
if (hostBuffer == nullptr) return;
// zero size means "copy this buffer's full length"
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = getLenInBytes();
if (sizeToCopyinBytes == 0) return;
auto res =
hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType),
sizeToCopyinBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
// device copy is now the most recent
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) {
  // Drop whatever device buffer we currently hold before adopting the new one.
  deleteSpecial();
  _isOwnerSpecial = isOwnerSpecial;
  _specialBuffer = special;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case)
  // The device-side buffer is mandatory; the host mirror only on request.
  allocateSpecial();
  if (allocBoth) {
    allocatePrimary();
  }
}
////////////////////////////////////////////////////////////////////////
// Zeroes the device buffer (and optionally the host mirror). Blocks on the
// default stream so the memset is complete on return.
void DataBuffer::setToZeroBuffers(const bool both) {
  // previously the memset result was silently discarded; check it like every
  // other runtime call in this file
  auto res = hipMemsetAsync(special(), 0, getLenInBytes(), *LaunchContext::defaultContext()->getCudaStream());
  if (res != 0) throw cuda_exception::build("DataBuffer::setToZeroBuffers: hipMemsetAsync failed!", res);
  res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
  if (res != 0) throw cuda_exception::build("DataBuffer::setToZeroBuffers: streamSync failed!", res);
  writeSpecial();
  if (both) {
    // clear the host-side mirror as well and mark it up to date
    memset(primary(), 0, getLenInBytes());
    readPrimary();
  }
}
/////////////////////////
// Copies src's most up-to-date buffer into dst's device buffer, then blocks
// on the default stream so the copy is complete on return.
void DataBuffer::memcpy(const DataBuffer& dst, const DataBuffer& src) {
if (src._lenInBytes > dst._lenInBytes)
throw std::runtime_error("DataBuffer::memcpy: Source data buffer is larger than destination");
int res = 0;
if (src.isSpecialActual()) {
res = hipMemcpyAsync(dst._specialBuffer, src._specialBuffer, src.getLenInBytes(), hipMemcpyDeviceToDevice,
*LaunchContext::defaultContext()->getCudaStream());
} else if (src.isPrimaryActual()) {
res = hipMemcpyAsync(dst._specialBuffer, src._primaryBuffer, src.getLenInBytes(), hipMemcpyHostToDevice,
*LaunchContext::defaultContext()->getCudaStream());
}
// NOTE(review): if neither of src's buffers is marked actual nothing is
// copied, yet dst is still stamped as written below -- confirm intent.
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: hipMemcpyAsync failed!", res);
res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: streamSync failed!", res);
dst.writeSpecial();
}
////////////////////////////////////////////////////////////////////////
// Moves the device buffer out of its workspace into a freshly allocated
// standalone buffer and takes ownership of the new allocation.
void DataBuffer::migrate() {
  memory::Workspace* newWorkspace = nullptr;
  void* newBuffer;
  ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t);
  auto res = hipMemcpy(newBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToDevice);
  // fix: the copy above is the synchronous hipMemcpy, so report it as such
  if (res != 0) throw cuda_exception::build("DataBuffer::migrate: hipMemcpy failed!", res);
  if (_isOwnerSpecial) {
    // now we're releasing original buffer
    RELEASE_SPECIAL(_specialBuffer, _workspace);
  }
  _isOwnerSpecial = true;
  _specialBuffer = newBuffer;
}
////////////////////////////////////////////////////////////////////////
// Each access bumps the shared monotonic counter and stamps the matching
// read/write marker; a side is "actual" if it saw the most recent activity.
void DataBuffer::writePrimary() const { _writePrimary = ++_counter; }
void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; }
void DataBuffer::readPrimary() const { _readPrimary = ++_counter; }
void DataBuffer::readSpecial() const { _readSpecial = ++_counter; }
// host copy is current if it was read or written after the last device write
bool DataBuffer::isPrimaryActual() const {
return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load());
}
// device copy is current if it was read or written after the last host write
bool DataBuffer::isSpecialActual() const {
return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load());
}
} // namespace sd
| 57a0ce720e54725253e874d152784c8ae6697e3e.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <array/DataTypeUtils.h>
#include <exceptions/allocation_exception.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <memory/MemoryCounter.h>
#include <system/op_boilerplate.h>
#include "../DataBuffer.h"
namespace sd {
// Grows both buffers to `size` bytes, preserving existing contents; a request
// that does not exceed the current capacity is a no-op.
void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) {
// allocate new buffer
int8_t* newBuffer = nullptr;
int8_t* newSpecialBuffer = nullptr;
ALLOCATE_SPECIAL(newSpecialBuffer, _workspace, size, int8_t);
// copy data from existing buffer
if (_primaryBuffer != nullptr) {
// there's non-zero chance that primary buffer doesn't exist yet
ALLOCATE(newBuffer, _workspace, size, int8_t);
std::memcpy(newBuffer, _primaryBuffer, _lenInBytes);
if (_isOwnerPrimary) {
auto ipb = reinterpret_cast<int8_t*>(_primaryBuffer);
RELEASE(ipb, _workspace);
}
_primaryBuffer = newBuffer;
_isOwnerPrimary = true;
}
// NOTE(review): the cudaMemcpy result is unchecked and _specialBuffer may
// be null here -- confirm callers guarantee an existing device buffer.
cudaMemcpy(newSpecialBuffer, _specialBuffer, _lenInBytes, cudaMemcpyDeviceToDevice);
if (_isOwnerSpecial) {
auto isb = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(isb, _workspace);
}
_specialBuffer = newSpecialBuffer;
_lenInBytes = size;
_isOwnerSpecial = true;
}
}
// Debug helper: dumps up to the first 80 bytes of the host buffer as floats.
// Compiled to a no-op unless DEBUG_VEDA_LOGS is defined.
void DataBuffer::showBufferLimited() {
#if defined(DEBUG_VEDA_LOGS)
  float* x = (float*)_primaryBuffer;
  size_t size = getLenInBytes();
  // fix: clamp to 80 bytes; the old `? 80 : 0` printed nothing at all for
  // buffers of 80 bytes or less, defeating the purpose of the dump
  size = size > 80 ? 80 : size;
  sd_debug("cpu: %p\n", (void*)x);
  for (int i = 0; i < size / sizeof(float); i++) sd_debug("%f, ", x[i]);
  sd_debug("%s", "\n");
#endif
}
// Debug helper: logs both buffer pointers plus the four read/write stamps,
// prefixed by the two caller-supplied tags. No-op unless both HAVE_VEDA and
// DEBUG_VEDA_LOGS are defined.
void DataBuffer::showCounters(const char* msg1, const char* msg2) {
#if defined(HAVE_VEDA) && defined(DEBUG_VEDA_LOGS)
sd_debug("%s %s || primary %p special %p :: wP: %d wS: %d rP: %d rS: %d\n", msg1, msg2, _primaryBuffer,
_specialBuffer, (int)_writePrimary.load(), (int)_writeSpecial.load(), (int)_readPrimary.load(),
(int)_readSpecial.load());
#endif
}
////////////////////////////////////////////////////////////////////////
// Lazily allocates the device-side buffer. Validates against the per-device
// memory limit (non-workspace allocations only) and records the allocation
// with the global memory counters.
void DataBuffer::allocateSpecial() {
if (_specialBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = sd::AffinityManager::currentDeviceId();
// workspace allocations are pre-budgeted, so only validate otherwise
if (_workspace == nullptr)
if (!sd::memory::MemoryCounter::getInstance().validate(getLenInBytes()))
throw sd::allocation_exception::build("Requested amount exceeds device limits",
sd::memory::MemoryCounter::getInstance().deviceLimit(deviceId),
getLenInBytes());
ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t);
_isOwnerSpecial = true;
// count the allocation against the device, unless it lives in a workspace
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countIn(deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countIn(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
// Pulls the device buffer down to the host. Skipped when the host copy is
// already current, unless forceSync is set.
void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) {
  if (isPrimaryActual() && !forceSync) {
    return;
  }
  allocatePrimary();
  // synchronize first so an earlier asynchronous kernel failure is surfaced
  // here with a meaningful message rather than blamed on the memcpy
  auto res = cudaStreamSynchronize(*context->getCudaStream());
  // fix: message previously read "failed to to some previous kernel failre"
  if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary failed due to some previous kernel failure", res);
  res = cudaMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToHost);
  if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary cudaMemcpy failed", res);
  readPrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToSpecial(const bool forceSync) {
  // Without a host-side buffer there is nothing to push to the device.
  if (_primaryBuffer == nullptr) return;
  // Device copy already current and not forced -> no work to do.
  if (!forceSync && isSpecialActual()) return;
  allocateSpecial();
  const auto status = cudaMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), cudaMemcpyHostToDevice);
  if (status != 0) throw cuda_exception::build("DataBuffer::syncToSpecial cudaMemcpy failed", status);
  // mark the device copy as the most recently touched side
  readSpecial();
}
////////////////////////////////////////////////////////////////////////
// Releases the device-side (special) buffer, but only when this DataBuffer
// owns it and the buffer is both present and non-empty.
void DataBuffer::deleteSpecial() {
if (_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) {
auto p = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(p, _workspace);
_specialBuffer = nullptr;
_isOwnerSpecial = false;
// count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countOut(_deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countOut(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setCountersToZero() {
  // Reset the shared monotonic counter first, then every read/write stamp;
  // the individual stores are independent of one another.
  _counter.store(0L);
  _writePrimary.store(0L);
  _readPrimary.store(0L);
  _writeSpecial.store(0L);
  _readSpecial.store(0L);
}
////////////////////////////////////////////////////////////////////////
// Seeds this buffer's counters from another DataBuffer.
void DataBuffer::copyCounters(const DataBuffer& other) {
_counter.store(other._counter);
// NOTE(review): the write stamps are seeded from the OTHER buffer's read
// stamps (and vice versa) rather than field-for-field. This matches the
// upstream implementation but is undocumented -- confirm intent before
// "fixing" the apparent cross-assignment.
_writePrimary.store(other._readSpecial);
_writeSpecial.store(other._readPrimary);
_readPrimary.store(other._writeSpecial);
_readSpecial.store(other._writePrimary);
}
////////////////////////////////////////////////////////////////////////
// Copies up to sizeToCopyinBytes from `other` into THIS buffer's device-side
// storage, reading from whichever of other's buffers is current. Offsets are
// element counts, scaled by each buffer's own element size.
void DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetOther) { // copies only to special buffer
// nothing to copy when the source owns no buffers at all
if (other._primaryBuffer == nullptr && other._specialBuffer == nullptr) return;
// a zero size means "copy the whole source buffer"
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = other.getLenInBytes();
if (sizeToCopyinBytes == 0) return;
if (other.isPrimaryActual()) {
// source's host copy is current: host -> device transfer
auto res = cudaMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
other.readPrimary();
} else {
// source's device copy is current: device -> device transfer
auto res = cudaMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, cudaMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res);
other.readSpecial();
}
// destination's device copy is now the most recent
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
// Copies raw host memory into this buffer's device-side storage. Both
// offsets are element counts scaled by THIS buffer's element size; the host
// pointer is assumed to hold elements of the same data type (TODO confirm).
void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetHostBuffer) { // copies only to special buffer
if (hostBuffer == nullptr) return;
// zero size means "copy this buffer's full length"
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = getLenInBytes();
if (sizeToCopyinBytes == 0) return;
auto res =
cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType),
sizeToCopyinBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
// device copy is now the most recent
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) {
  // Drop whatever device buffer we currently hold before adopting the new one.
  deleteSpecial();
  _isOwnerSpecial = isOwnerSpecial;
  _specialBuffer = special;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case)
  // The device-side buffer is mandatory; the host mirror only on request.
  allocateSpecial();
  if (allocBoth) {
    allocatePrimary();
  }
}
////////////////////////////////////////////////////////////////////////
// Zeroes the device buffer (and optionally the host mirror). Blocks on the
// default stream so the memset is complete on return.
void DataBuffer::setToZeroBuffers(const bool both) {
  // previously the memset result was silently discarded; check it like every
  // other runtime call in this file
  auto res = cudaMemsetAsync(special(), 0, getLenInBytes(), *LaunchContext::defaultContext()->getCudaStream());
  if (res != 0) throw cuda_exception::build("DataBuffer::setToZeroBuffers: cudaMemsetAsync failed!", res);
  res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
  if (res != 0) throw cuda_exception::build("DataBuffer::setToZeroBuffers: streamSync failed!", res);
  writeSpecial();
  if (both) {
    // clear the host-side mirror as well and mark it up to date
    memset(primary(), 0, getLenInBytes());
    readPrimary();
  }
}
/////////////////////////
// Copies src's most up-to-date buffer into dst's device buffer, then blocks
// on the default stream so the copy is complete on return.
void DataBuffer::memcpy(const DataBuffer& dst, const DataBuffer& src) {
if (src._lenInBytes > dst._lenInBytes)
throw std::runtime_error("DataBuffer::memcpy: Source data buffer is larger than destination");
int res = 0;
if (src.isSpecialActual()) {
res = cudaMemcpyAsync(dst._specialBuffer, src._specialBuffer, src.getLenInBytes(), cudaMemcpyDeviceToDevice,
*LaunchContext::defaultContext()->getCudaStream());
} else if (src.isPrimaryActual()) {
res = cudaMemcpyAsync(dst._specialBuffer, src._primaryBuffer, src.getLenInBytes(), cudaMemcpyHostToDevice,
*LaunchContext::defaultContext()->getCudaStream());
}
// NOTE(review): if neither of src's buffers is marked actual nothing is
// copied, yet dst is still stamped as written below -- confirm intent.
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: cudaMemcpyAsync failed!", res);
res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: streamSync failed!", res);
dst.writeSpecial();
}
////////////////////////////////////////////////////////////////////////
// Moves the device buffer out of its workspace into a freshly allocated
// standalone buffer and takes ownership of the new allocation.
void DataBuffer::migrate() {
  memory::Workspace* newWorkspace = nullptr;
  void* newBuffer;
  ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t);
  auto res = cudaMemcpy(newBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToDevice);
  // fix: the copy above is the synchronous cudaMemcpy, so report it as such
  if (res != 0) throw cuda_exception::build("DataBuffer::migrate: cudaMemcpy failed!", res);
  if (_isOwnerSpecial) {
    // now we're releasing original buffer
    RELEASE_SPECIAL(_specialBuffer, _workspace);
  }
  _isOwnerSpecial = true;
  _specialBuffer = newBuffer;
}
////////////////////////////////////////////////////////////////////////
// Each access bumps the shared monotonic counter and stamps the matching
// read/write marker; a side is "actual" if it saw the most recent activity.
void DataBuffer::writePrimary() const { _writePrimary = ++_counter; }
void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; }
void DataBuffer::readPrimary() const { _readPrimary = ++_counter; }
void DataBuffer::readSpecial() const { _readSpecial = ++_counter; }
// host copy is current if it was read or written after the last device write
bool DataBuffer::isPrimaryActual() const {
return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load());
}
// device copy is current if it was read or written after the last host write
bool DataBuffer::isSpecialActual() const {
return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load());
}
} // namespace sd
|
4cd23a07dffbd27e1d7a67721078ea8c06423a60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
// Default debug sink: forwards the message to stdout.
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
// Currently installed sink; replaced via ncvSetDebugOutputHandler().
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
// Routes a debug message through the installed handler.
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
// Installs a custom debug sink (no null check -- caller must pass a valid one).
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
// Rounds `what` up to the next multiple of `alignment`.
// Precondition (as asserted elsewhere in this file): alignment is a power of two.
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
    const Ncv32u mask = alignment - 1;
    return (what + mask) & ~mask;
}
// Resets a memory pointer to the "unset" state.
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
// Resets a memory segment (pointer plus size) to the "unset" state.
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
// Copies `sz` bytes between host/device memory, selecting the hipMemcpy
// direction from the two residence types. A non-zero `cuStream` chooses the
// asynchronous path (the async copy is NOT synchronized here -- caller's
// responsibility). Returns NCV_MEM_RESIDENCE_ERROR for unsupported
// combinations.
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
// host -> host: plain memcpy
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
// device -> host
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
// host -> device
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
// device -> device
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
// 2D (pitched) variant of memSegCopyHelper: copies `height` rows of
// `widthbytes` bytes each between pitched buffers. Host-to-host falls back to
// a row-by-row memcpy; device paths use hipMemcpy2D[Async]. The async path is
// NOT synchronized here. Returns NCV_MEM_RESIDENCE_ERROR for unsupported
// residence combinations.
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
// host -> host: copy each row, honoring the two pitches
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
// device -> host
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
// host -> device
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
// device -> device
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
// Counting-mode constructor: no backing memory is allocated
// (_memType == NCVMemoryTypeNone); alloc() only tracks the peak size.
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment_)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment_),
bReusesMemory(false)
{
// alignment must be a power of two for the alignUp() arithmetic to work
NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
// Allocating constructor: grabs a `capacity`-byte arena of the requested
// memory type, or adopts `reusePtr` (no ownership) when it is non-NULL.
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment_, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment_)
{
NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
// own a fresh arena of the requested residence
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
// external arena: track but never free it
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
// sentinel non-NULL value so isInitialized() succeeds for empty arenas;
// the dtor explicitly skips freeing this value
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
// Frees the arena if (and only if) this allocator owns it; reused or sentinel
// (0x1, zero-capacity) arenas are left alone. Warns when live allocations
// remain at destruction time.
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
// release with the API matching the arena's residence
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
// Bump-allocates `size` bytes (rounded up to the alignment) from the arena.
// In counting mode no pointer is produced; only the peak size is tracked.
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
// NOTE(review): the Ncv32u cast truncates requests >= 4GB -- confirm callers
// never ask for that much.
size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->currentSize += size;
this->_maxSize = ::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
// Pops the most recent allocation off the stack. Segments MUST be freed in
// strict LIFO order (enforced via NCV_ALLOCATOR_DEALLOC_ORDER).
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
// the segment must be exactly the top of the stack
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
    // Same logic with the && / || precedence spelled out explicitly:
    // (power-of-two alignment AND counting mode) OR a real backing arena.
    const NcvBool alignmentOk = (this->_alignment & (this->_alignment - 1)) == 0;
    return (alignmentOk && isCounting()) || (this->allocBegin != NULL);
}
// True when the allocator only measures sizes (no backing memory).
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
// Residence type of the arena this allocator hands out.
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
// Alignment (power of two) applied to every allocation.
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
// Peak concurrent bytes ever requested (the quantity counting mode measures).
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
// Allocator backed directly by hipMalloc / hipHostMalloc / malloc, one native
// allocation per segment. Counting mode is not supported.
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment_)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment_)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
// Warns when segments were never returned via dealloc().
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
// Allocates `size` bytes natively for the configured residence type. The
// size bookkeeping is aligned, but the returned pointer's alignment is
// whatever the underlying allocator provides.
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
// NOTE(review): malloc result is not NULL-checked here, unlike the CUDA
// paths above -- confirm acceptable for this code base.
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->_maxSize = ::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
// Returns a segment's native allocation to the matching free routine and
// updates the size bookkeeping.
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);
// free with the API matching the segment's residence
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
// A native allocator is usable as soon as a non-zero alignment is configured.
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
// Native allocators always produce real memory, never just counts.
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
// Residence type of the memory this allocator hands out.
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
// Alignment used for the size bookkeeping.
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
// Peak concurrent bytes ever allocated.
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
// Platform-specific monotonic "moment" type plus conversion helpers used by
// the NcvTimer functions below.
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
// Win32 implementation: QueryPerformanceCounter ticks plus their frequency.
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
// uses the averaged frequency of the two samples
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
// POSIX implementation backed by gettimeofday().
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
// Millisecond difference between two moments.
// fix: include the microsecond component -- the old body truncated to whole
// seconds, inconsistent with the Win32 variant and the microseconds helper.
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 +
            (double)t2->tv.tv_usec - (double)t1->tv.tv_usec) / 1000.0;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
// Opaque timer handle: the start (t1) and end (t2) moments of one measurement.
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
// Allocates a timer and records the starting moment.
NcvTimer ncvStartTimer(void)
{
    struct _NcvTimer *timer = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
    _ncvQueryMoment(&timer->t1);
    return timer;
}
// Records the end moment, returns the elapsed microseconds and frees the timer.
double ncvEndQueryTimerUs(NcvTimer t)
{
    _ncvQueryMoment(&t->t2);
    const double elapsed = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
    free(t);
    return elapsed;
}
// Records the end moment, returns the elapsed milliseconds and frees the timer.
double ncvEndQueryTimerMs(NcvTimer t)
{
    _ncvQueryMoment(&t->t2);
    const double elapsed = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
    free(t);
    return elapsed;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[each*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[each*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
if (blockId > numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
hipStream_t cuStream)
{
(void)cuStream;
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( drawRects<T>), dim3(grid), dim3(block), 0, 0, d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
#endif /* CUDA_DISABLER */ | 4cd23a07dffbd27e1d7a67721078ea8c06423a60.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment_)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment_),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment_, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment_)
{
NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->currentSize += size;
this->_maxSize = std::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
return ((this->_alignment & (this->_alignment-1)) == 0) && isCounting() || this->allocBegin != NULL;
}
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment_)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment_)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->_maxSize = std::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[each*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[each*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
if (blockId > numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
cudaStream_t cuStream)
{
(void)cuStream;
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
drawRects<T><<<grid, block>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
#endif /* CUDA_DISABLER */ |
e000a218a019b7770ccfb6791901f9268071c536.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <iostream>
#include "../../shared/include/utility.h"
int main()
{
std::cout << "==== Sample 04 - Legacy Thread Scheduling ====\n" << std::endl;
/*
This code will launch a particular test kernel.
It will launch 4 threads in total.
The program code is structured such that each
thread enters one of 4 possible branches and then
atomically increments a GPU variable N times:
.---- N operations by Thread 0
----X
/ '---- N operations by Thread 1
----X
\ .---- N operations by Thread 2
----X
'---- N operations by Thread 3
Each thread will document consecutive ranges of
values it observed for the incremented variable.
Basically, this will give us an idea how threads
take turns running in this branching scenario.
Expected output: 4 consecutive ranges, one for
each thread, taking 128 consecutive turns until
they have completed their N steps.
Disclaimer: behavior depends somewhat on compiler's
effort to optimize code. Results may vary.
*/
constexpr int N = 128;
// Using a utility function for demonstration
samplesutil::run2NestedBranchesForNSteps(N);
return 0;
}
| e000a218a019b7770ccfb6791901f9268071c536.cu | #include <cuda_runtime_api.h>
#include <iostream>
#include "../../shared/include/utility.h"
int main()
{
std::cout << "==== Sample 04 - Legacy Thread Scheduling ====\n" << std::endl;
/*
This code will launch a particular test kernel.
It will launch 4 threads in total.
The program code is structured such that each
thread enters one of 4 possible branches and then
atomically increments a GPU variable N times:
.---- N operations by Thread 0
----X
/ '---- N operations by Thread 1
----X
\ .---- N operations by Thread 2
----X
'---- N operations by Thread 3
Each thread will document consecutive ranges of
values it observed for the incremented variable.
Basically, this will give us an idea how threads
take turns running in this branching scenario.
Expected output: 4 consecutive ranges, one for
each thread, taking 128 consecutive turns until
they have completed their N steps.
Disclaimer: behavior depends somewhat on compiler's
effort to optimize code. Results may vary.
*/
constexpr int N = 128;
// Using a utility function for demonstration
samplesutil::run2NestedBranchesForNSteps(N);
return 0;
}
|
a7306626f2bbe2b84be95f7e90d51468b564d0a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#define N 100
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x; // this thread handles the data at its thread id
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_a, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_c, N * sizeof(int) ) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( hipMemcpy( dev_a, a, N * sizeof(int),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b, b, N * sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( add), dim3(10),dim3(10), 0, 0, dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( hipMemcpy( c, dev_c, N * sizeof(int),
hipMemcpyDeviceToHost ) );
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
HANDLE_ERROR( hipFree( dev_a ) );
HANDLE_ERROR( hipFree( dev_b ) );
HANDLE_ERROR( hipFree( dev_c ) );
return 0;
}
| a7306626f2bbe2b84be95f7e90d51468b564d0a8.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#define N 100
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x; // this thread handles the data at its thread id
if (tid < N)
c[tid] = a[tid] + b[tid];
}
// Host driver: fills two N-element arrays, adds them on the GPU, and prints
// the results.  All CUDA API calls are checked through HANDLE_ERROR (book.h).
int main( void ) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // allocate the memory on the GPU
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );
    // fill the arrays 'a' and 'b' on the CPU
    for (int i=0; i<N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int),
                              cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int),
                              cudaMemcpyHostToDevice ) );
    // 10 blocks x 10 threads = 100 threads, one per element (N == 100)
    add<<<10,10>>>( dev_a, dev_b, dev_c );
    // Kernel launches return no status themselves; without this check a bad
    // launch configuration would fail silently and c would hold garbage.
    HANDLE_ERROR( cudaGetLastError() );
    // copy the array 'c' back from the GPU to the CPU (cudaMemcpy is
    // synchronous, so it also waits for the kernel to finish)
    HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int),
                              cudaMemcpyDeviceToHost ) );
    // display the results
    for (int i=0; i<N; i++) {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }
    // free the memory allocated on the GPU
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaFree( dev_b ) );
    HANDLE_ERROR( cudaFree( dev_c ) );
    return 0;
}
|
b4d4dd58764306ecd14793f5754d4a9e7bf26312.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
#include "boost/math/common_factor.hpp"
namespace caffe {
// im2col with a per-tap "constellation" sampling pattern: instead of one
// scalar dilation, each kernel row i / column j carries its own offset read
// from the device arrays dilation_h[i] / dilation_w[j].  One thread handles
// one (channel, output-row, output-col) triple and copies all
// kernel_h * kernel_w taps for that location into data_col; taps falling
// outside the image are written as 0 (implicit zero padding).
template <typename Dtype>
__global__ void im2col_gpu_kernel_constellation(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int* dilation_h, const int* dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Decompose the flat thread index into (channel, output row, output col).
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
// First column-buffer channel written by this thread (one per tap).
const int c_col = c_im * kernel_h * kernel_w;
// Top-left corner of the receptive field in image coordinates.
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += c_im * height * width;
for (int i = 0; i < kernel_h; ++i) {
// Per-row offset from the constellation table (replaces i * dilation).
const int dilation_h_ = dilation_h[i];
const int h_im = h_offset + dilation_h_;
for (int j = 0; j < kernel_w; ++j) {
const int dilation_w_ = dilation_w[j];
const int w_im = w_offset + dilation_w_;
*data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[ h_im * width + w_im] : 0;
// Consecutive taps are one full output plane apart in data_col.
data_col_ptr += height_col * width_col;
}
}
}
}
// Host wrapper for the constellation im2col kernel.  kernel_extent_h/w are
// the precomputed spatial extents of the (irregularly dilated) kernel and
// determine the output resolution.  dilation_h/dilation_w must be
// device-resident arrays (they are dereferenced inside the kernel).
template <typename Dtype>
void im2col_gpu_constellation(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_h, const int* dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_extent_h ) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_constellation<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu_constellation<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_h, const int* dilation_w, float* data_col);
template void im2col_gpu_constellation<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_h, const int* dilation_w, double* data_col);
// Constellation im2col over a *slice* of output locations: only n_elements
// locations starting at flat offset n_processed are expanded.  The column
// buffer is laid out with n_elements as the innermost dimension, i.e.
// data_col[(c * kernel_h * kernel_w + tap) * n_elements + slice_pos].
// This allows processing the output plane in chunks with a small buffer.
template <typename Dtype>
__global__ void im2col_gpu_constellation_selected_kernel(const int n, const int n_elements,
const int n_processed,
const Dtype* data_im,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int* dilation_h, const int* dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Position within the chunked column buffer.
int ch_col_buffer = index / n_elements;
int w_col_buffer = index % n_elements;
Dtype* data_col_ptr = data_col;
data_col_ptr += ch_col_buffer * kernel_h * kernel_w * n_elements + w_col_buffer;
// Map the chunk-local position back to a global output location.
int temp_index = n_processed + w_col_buffer + ch_col_buffer * height_col * width_col;
const int h_index = temp_index / width_col;
const int h_col = h_index % height_col;
const int w_col = temp_index % width_col;
const int c_im = h_index / height_col;
// Top-left corner of the receptive field in image coordinates.
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
const Dtype* data_im_ptr = data_im;
data_im_ptr += c_im * height * width ;
for (int i = 0; i < kernel_h; ++i) {
// Per-tap offsets from the constellation tables.
const int dilation_h_ = dilation_h[i];
const int h_im = h_offset + dilation_h_;
for (int j = 0; j < kernel_w; ++j) {
const int dilation_w_ = dilation_w[j];
const int w_im = w_offset + dilation_w_;
*data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[ h_im * width + w_im] : 0;
// Successive taps are n_elements apart in the chunked layout.
data_col_ptr += n_elements;
}
}
}
}
// Host wrapper for the chunked constellation im2col: expands n_elements
// output locations (per channel) starting at offset n_processed.
// dilation_h/dilation_w must be device-resident arrays.
template <typename Dtype>
void im2col_gpu_constellation_selected(const Dtype* data_im, const int channels,
const int n_elements, const int n_processed,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_h, const int* dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_extent_h ) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
// One thread per (channel, chunk position) pair.
int num_kernels = channels * n_elements;
// return;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_constellation_selected_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, n_elements, n_processed,
data_im, height, width,
kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// EXPLICIT
template void im2col_gpu_constellation_selected<float>(const float* data_im,
const int channels,const int n_elements, const int n_processed,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_h, const int* dilation_w,
float* data_col);
template void im2col_gpu_constellation_selected<double>(const double* data_im,
const int channels,const int n_elements, const int n_processed,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_h, const int* dilation_w,
double* data_col);
///////COL2IM CONSTELLATION
///////COL2IM CONSTELLATION
// Inverse of the constellation im2col: each thread accumulates, for one image
// pixel, all column-buffer entries whose (irregular) taps touched it, then
// writes (or adds to, when accumulate != 0) data_im.
// The dilation_* lookup tables encode how to walk only the *valid* output
// columns: dilation_shift_h/w give the step to the next contributing column,
// dilation_shift_index_h/w chain to the next table entry, and
// dilation_start_index_h/w / dilation_inc_h/w locate the first valid tap for
// a given kernel-space offset.  The exact table semantics are defined by the
// host-side code that builds them (not visible here) — verify there.
template <typename Dtype>
__global__ void col2im_gpu_constellation_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_shift_h,const int* dilation_shift_w,
const int* dilation_shift_index_h,const int* dilation_shift_index_w,
const int* dilation_inc_h,const int* dilation_inc_w,
const int* dilation_start_index_h, const int* dilation_start_index_w,
const int height_col, const int width_col,
Dtype* data_im, const int accumulate) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
// Padded image coordinates of the pixel this thread owns.
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
// compute the start and end of the output
int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
// for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
// for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
// Kernel-space offset of the pixel relative to the first candidate column.
const int h_k_start = (h_im - h_col_start * stride_h);
const int w_k_start = (w_im - w_col_start * stride_w);
// Look up the first valid tap and advance the column start to match it.
const int start_index_h = dilation_start_index_h[h_k_start];
const int start_index_w = dilation_start_index_w[w_k_start];
const int h_k_desired = dilation_inc_h[start_index_h];
const int w_k_desired = dilation_inc_w[start_index_w];
const int start_shift_h = abs(h_k_desired - h_k_start) / stride_h;
const int start_shift_w = abs(w_k_desired - w_k_start) / stride_w;
h_col_start += start_shift_h;
w_col_start += start_shift_w;
// Walk only contributing columns, stepping via the shift tables.
int shift_index_h = start_index_h;
for (int h_col = h_col_start; h_col < h_col_end;
h_col += dilation_shift_h[shift_index_h], shift_index_h = dilation_shift_index_h[shift_index_h] )
{
int shift_index_w = start_index_w;
for (int w_col = w_col_start; w_col < w_col_end;
w_col += dilation_shift_w[shift_index_w], shift_index_w = dilation_shift_index_w[shift_index_w] )
{
// The table index doubles as the kernel tap index.
const int h_k = shift_index_h;
const int w_k = shift_index_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
// accumulate is 0 or 1: overwrite vs. add to the existing gradient.
data_im[index] = val + data_im[index] * accumulate;
}
}
// Host wrapper for the constellation col2im kernel (gradient w.r.t. the
// image).  One thread per image element; the dilation_* tables must be
// device-resident (they are dereferenced inside the kernel).
template <typename Dtype>
void col2im_gpu_constellation(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_shift_h,const int* dilation_shift_w,
const int* dilation_shift_index_h,const int* dilation_shift_index_w,
const int* dilation_inc_h,const int* dilation_inc_w,
const int* dilation_start_index_h, const int* dilation_start_index_w,
Dtype* data_im, const int accumulate) {
int height_col = (height + 2 * pad_h - kernel_extent_h ) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
// if(accumulate)
// std::cout << "col 22 im accumulate = " << accumulate << "\n";
// in the input space, diff will be back propagated by dilation_shift_index_h or w
hipLaunchKernelGGL(( col2im_gpu_constellation_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w,
stride_h, stride_w,
kernel_extent_h, kernel_extent_w,
dilation_shift_h, dilation_shift_w,
dilation_shift_index_h, dilation_shift_index_w,
dilation_inc_h, dilation_inc_w,
dilation_start_index_h, dilation_start_index_w,
height_col, width_col, data_im, accumulate);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu_constellation<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_shift_h,const int* dilation_shift_w,
const int* dilation_shift_index_h,const int* dilation_shift_index_w,
const int* dilation_inc_h,const int* dilation_inc_w,
const int* dilation_start_index_h, const int* dilation_start_index_w,
float* data_im, const int accumulate);
template void col2im_gpu_constellation<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int kernel_extent_h, const int kernel_extent_w,
const int* dilation_shift_h,const int* dilation_shift_w,
const int* dilation_shift_index_h,const int* dilation_shift_index_w,
const int* dilation_inc_h,const int* dilation_inc_w,
const int* dilation_start_index_h, const int* dilation_start_index_w,
double* data_im, const int accumulate);
///------------************* CONSTELLATION CODE ENDS
///------------************* CONSTELLATION CODE ENDS
// Standard (scalar-dilation) im2col over a slice of output locations:
// only n_elements locations starting at flat offset n_processed are expanded,
// with n_elements as the innermost dimension of data_col (chunked layout,
// same scheme as im2col_gpu_constellation_selected_kernel above).
template <typename Dtype>
__global__ void im2col_gpu_selected_kernel(const int n, const int n_elements,
const int n_processed,
const Dtype* data_im,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Position within the chunked column buffer.
int ch_col_buffer = index / n_elements;
int w_col_buffer = index % n_elements;
Dtype* data_col_ptr = data_col;
data_col_ptr += ch_col_buffer * kernel_h * kernel_w * n_elements + w_col_buffer;
// Map the chunk-local position back to a global output location.
int temp_index = n_processed + w_col_buffer + ch_col_buffer * height_col * width_col;
const int h_index = temp_index / width_col;
const int h_col = h_index % height_col;
const int w_col = temp_index % width_col;
const int c_im = h_index / height_col;
// Top-left corner of the receptive field (may be negative due to padding).
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
// Pointer pre-offset to the receptive field corner; the bounds check on
// h_im/w_im below keeps the actual dereference in range.
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] :0;
data_col_ptr += n_elements;
}
}
}
}
// Host wrapper for the chunked scalar-dilation im2col: expands n_elements
// output locations (per channel) starting at offset n_processed.
template <typename Dtype>
void im2col_gpu_selected(const Dtype* data_im, const int channels,
const int n_elements, const int n_processed,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
// One thread per (channel, chunk position) pair.
int num_kernels = channels * n_elements;
// return;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_selected_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, n_elements, n_processed,
data_im, height, width,
kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// EXPLICIT
template void im2col_gpu_selected<float>(const float* data_im,
const int channels,const int n_elements, const int n_processed,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
float* data_col);
template void im2col_gpu_selected<double>(const double* data_im,
const int channels,const int n_elements, const int n_processed,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
double* data_col);
// col2im restricted to a crop of the image and a slice of the column buffer:
// each thread owns one pixel inside the crop window
// [crop_min_x, crop_min_x + crop_width) x [crop_min_y, crop_min_y + crop_height)
// and accumulates only those column entries whose flat output index lies in
// [index_min, index_max] (the slice held in the chunked buffer).  The result
// is *added* into data_im (accumulating gradients across slices).
template <typename Dtype>
__global__ void col2im_gpu_selected_kernel(const int n, const Dtype* data_col,
const int height, const int width,
const int n_elements,
const int index_min, const int index_max,
const int crop_min_x, const int crop_min_y,
const int crop_width, const int crop_height,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
// Crop-local coordinates of the pixel this thread owns.
const int crop_x = index % crop_width;
const int crop_y = ( index / crop_width) % crop_height;
// const int w_im = crop_x + crop_min_x + pad_w;
// const int h_im = crop_y + crop_min_y + pad_h;
const int c_im = index / (crop_width * crop_height);
// Flat index of the pixel in the full image; reused for the final write.
const int temp_index = (c_im * height + crop_y + crop_min_y) * width + crop_x + crop_min_x;
const int w_im = temp_index % width + pad_w;
const int h_im = (temp_index / width) % height + pad_h;
// const int c_im = temp_index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int col_index = h_col * width_col + w_col;
// Only columns inside the current slice contribute.
if(col_index >= index_min && col_index <= index_max)
{
col_index -= index_min;
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
// Only exact dilation multiples correspond to real kernel taps.
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
// int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
// height_col + h_col) * width_col + w_col;
// int data_col_index = c_im * kernel_h * kernel_w * n_elements +
// h_k * kernel_w * n_elements +
// w_k * n_elements +
// col_index;
// Chunked layout: n_elements is the innermost dimension.
int data_col_index = ((c_im * kernel_h + h_k)* kernel_w + w_k) * n_elements + col_index;
val += data_col[data_col_index];
}
}
}
}
data_im[temp_index] += val;
}
}
// Host wrapper for the cropped/sliced col2im kernel: accumulates the slice
// [index_min, index_max] of the chunked column buffer into the crop window
// of data_im.  Note the result is added to (not overwriting) data_im.
template <typename Dtype>
void col2im_gpu_selected(const Dtype* data_col, const int channels, const int n_elements,
const int index_min, const int index_max,
const int crop_min_x, const int crop_min_y,
const int crop_width, const int crop_height,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
// One thread per pixel of the crop window (per channel).
int num_kernels = channels * (crop_width * crop_height);
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_selected_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, n_elements,
index_min, index_max,
crop_min_x, crop_min_y, crop_width, crop_height,
kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu_selected<float>(const float* data_col, const int channels, const int n_elements,
const int index_min, const int index_max,
const int crop_min_x, const int crop_min_y,
const int crop_width, const int crop_height,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu_selected<double>(const double* data_col, const int channels, const int n_elements,
const int index_min, const int index_max,
const int crop_min_x, const int crop_min_y,
const int crop_width, const int crop_height,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
// Standard Caffe im2col kernel (scalar dilation): one thread per
// (channel, output-row, output-col) triple copies its kernel_h * kernel_w
// receptive-field taps into the column buffer, zero-filling out-of-image taps.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Decompose the flat thread index into (channel, output row, output col).
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
// First column-buffer channel written by this thread (one per tap).
const int c_col = c_im * kernel_h * kernel_w;
// Top-left corner of the receptive field (may be negative due to padding).
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
// Pointer pre-offset to the receptive-field corner; the h_im/w_im bounds
// check below keeps the actual dereference inside the image.
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
// Consecutive taps are one full output plane apart in data_col.
data_col_ptr += height_col * width_col;
}
}
}
}
// Standard dilated im2col on the GPU: expands data_im (channels x height x
// width) into data_col, one thread per (channel, output row, output col).
// Kernel launch errors are surfaced through CUDA_POST_KERNEL_CHECK.
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    Dtype* data_col) {
  // The effective (dilated) kernel extent fixes the output resolution.
  const int ext_h = dilation_h * (kernel_h - 1) + 1;
  const int ext_w = dilation_w * (kernel_w - 1) + 1;
  const int out_h = (height + 2 * pad_h - ext_h) / stride_h + 1;
  const int out_w = (width + 2 * pad_w - ext_w) / stride_w + 1;
  // One thread per output location per channel.
  const int total_threads = channels * out_h * out_w;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(total_threads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      total_threads, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, out_h,
      out_w, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation — float and double are the only Dtype
// specializations emitted for im2col_gpu in this translation unit.
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
// Host dispatcher for the N-D im2col kernel: selects the compile-time
// num_axes specialization (1..10) matching num_spatial_axes and launches it.
//
// BUG FIX: the hipify conversion had mangled every launch in this switch —
// the `hipLaunchKernelGGL((` token was absorbed into the trailing
// `// NOLINT...` comment, leaving a bare comma-expression such as
// `im2col_nd_gpu_kernel<Dtype, 1> , dim3(...), ..., data_col);`
// that compiles (or dies) without ever launching the kernel.  The launches
// are restored below with the kernel name parenthesized so the template
// comma survives macro expansion.
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
    const int num_kernels, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  // num_axes should be smaller than block size (the kernel stages the shape
  // vectors into shared memory using the first num_axes + 1 threads).
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
  switch (num_spatial_axes) {
  case 1:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 1>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 2:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 2>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 3:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 3>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 4:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 4>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 5:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 5>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 6:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 6>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 7:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 7>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 8:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 8>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 9:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 9>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  case 10:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 10>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_col);
    break;
  default:
    LOG(FATAL) << "im2col_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation.  Note the parameter named col_size here is the
// num_kernels argument of the primary template (thread count of the launch).
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
// Standard col2im: each thread accumulates, for one image pixel, every
// column-buffer entry whose dilated kernel tap touched that pixel, then
// writes (accumulate == 0) or adds to (accumulate == 1) data_im.
// Optimization over vanilla Caffe: instead of testing divisibility by the
// dilation inside the loop, the start columns are first advanced to the
// nearest valid (dilation-aligned) tap, and the loops then step by
// lcm(dilation, stride) / stride so only valid columns are visited.
// lcm_by_stride_h/w are precomputed on the host.
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im, const int accumulate,
int lcm_by_stride_h, int lcm_by_stride_w) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
// Padded image coordinates of the pixel this thread owns.
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
int h_k_start = (h_im - h_col_start * stride_h);
int w_k_start = (w_im - w_col_start * stride_w);
// obtain the valid starting index in col to employ lcm of column and stride
// otherwise the shift will only encounter invalid locations
// if (h_k_start % dilation_h)
{
// Advance to the first column whose kernel offset is dilation-aligned.
while(h_k_start % dilation_h)
{
h_col_start++;
h_k_start = (h_im - h_col_start * stride_h);
}
}
// if (w_k_start % dilation_w)
{
while(w_k_start % dilation_w)
{
w_col_start++;
w_k_start = (w_im - w_col_start * stride_w);
}
}
// for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
// for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
// Step by lcm(dilation, stride)/stride: every visited column is valid.
for (int h_col = h_col_start; h_col < h_col_end; h_col += lcm_by_stride_h) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += lcm_by_stride_w) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
// if (h_k % dilation_h == 0 && w_k % dilation_w == 0)
{
// Divisibility is guaranteed by the aligned start + lcm stepping.
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
// accumulate is 0 or 1: overwrite vs. add to the existing gradient.
data_im[index] = val + data_im[index] * accumulate;
}
}
// Host wrapper for 2-D col2im on the GPU.  Launches one thread per bottom
// (image) element so no atomics are needed; each thread sums the column
// entries it contributed to during im2col.
// If accumulate != 0 the result is added to data_im instead of replacing it.
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    Dtype* data_im, const int accumulate) {
  // Spatial size of the column buffer (standard convolution output formula).
  int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
      stride_h + 1;
  int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
      stride_w + 1;
  int num_kernels = channels * height * width;
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  // NOLINT_NEXT_LINE(whitespace/operators)
  // if(accumulate)
  // std::cout << "col 22 im accumulate = " << accumulate << "\n";
  // in the input space diff will be back propagated at lcm of dilation and stride
  // but in the output space, locations will be divided by stride
  const int lcm_dilation_stride_h = boost::math::lcm(dilation_h, stride_h);
  const int lcm_dilation_stride_w = boost::math::lcm(dilation_w, stride_w);
  const int lcm_by_stride_h = lcm_dilation_stride_h / stride_h;
  const int lcm_by_stride_w = lcm_dilation_stride_w / stride_w;
  hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
      height_col, width_col, data_im, accumulate,
      lcm_by_stride_h, lcm_by_stride_w);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    float* data_im, const int accumulate);
template void col2im_gpu<double>(const double* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    double* data_im, const int accumulate);
// N-D col2im kernel: one thread per image element.  num_axes is a template
// parameter so the per-thread index arrays and the shared-memory staging
// buffers below can be statically sized.
// Precondition: blockDim.x > num_axes, so the parameter staging covers
// every axis (the host wrapper DCHECKs this).
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  int d_im[num_axes];  // NOLINT(runtime/arrays)
  int d_col_iter[num_axes];  // NOLINT(runtime/arrays)
  int d_col_start[num_axes];  // NOLINT(runtime/arrays)
  int d_col_end[num_axes];  // NOLINT(runtime/arrays)
  // Stage the small shape/parameter vectors in shared memory once per block
  // instead of re-reading global memory in every thread.
  __shared__ int shared_dilation[num_axes];
  __shared__ int shared_kernel_shape[num_axes];
  __shared__ int shared_pad[num_axes];
  __shared__ int shared_stride[num_axes];
  __shared__ int shared_col_shape[num_axes + 1];
  __shared__ int shared_im_shape[num_axes + 1];
  if (threadIdx.x < num_axes) {
    shared_dilation[threadIdx.x] = dilation[threadIdx.x];
    shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
    shared_pad[threadIdx.x] = pad[threadIdx.x];
    shared_stride[threadIdx.x] = stride[threadIdx.x];
  }
  if (threadIdx.x < num_axes + 1) {
    shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
    shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
  }
  __syncthreads();
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int c_im = index;
    // Calculate d_im (image dimensions), in padded coordinates.
    for (int i = num_axes - 1; i >= 0; --i) {
      d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
      c_im /= shared_im_shape[i + 1];
    }
    // Calculate col start/end indices per axis.
    bool done = false;
    for (int i = 0; i < num_axes; ++i) {
      const int kernel_extent =
          shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
      d_col_start[i] = d_col_iter[i] =
          (d_im[i] < kernel_extent) ? 0 :
          (d_im[i] - kernel_extent) / shared_stride[i] + 1;
      d_col_end[i] =
          min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
      if (d_col_start[i] >= d_col_end[i]) {
        // Skip computation if the dimension is 0 at any spatial axis --
        // final val will be 0.
        data_im[index] = 0;
        done = true;
        break;  // for (int i = 0; i < num_axes; ++i)
      }
    }
    if (done) {
      continue;  // CUDA_KERNEL_LOOP(index, n)
    }
    // Loop over the col to compute the output val.
    Dtype val = 0;
    bool incremented = true;
    bool skip = false;
    do {
      // Compute the final offset.
      int final_offset = 0;
      int kernel_shape_prod = 1;
      int kernel_index;
      for (int i = num_axes - 1; i >= 0; --i) {
        kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
        if (kernel_index % shared_dilation[i]) {
          // This col position does not line up with a dilated kernel tap.
          skip = true;
          break;
        } else {
          kernel_index /= shared_dilation[i];
          final_offset += kernel_index * kernel_shape_prod;
          kernel_shape_prod *= shared_kernel_shape[i];
        }
      }
      if (!skip) {
        final_offset += kernel_shape_prod * c_im;
        for (int i = 0; i < num_axes; ++i) {
          final_offset *= shared_col_shape[i + 1];
          final_offset += d_col_iter[i];
        }
        val += data_col[final_offset];
      }
      skip = false;
      // Odometer-style increment of d_col_iter over the per-axis col window.
      incremented = false;
      for (int i = num_axes - 1; i >= 0; --i) {
        const int d_max = d_col_end[i];
        if (d_col_iter[i] == d_max - 1) {
          d_col_iter[i] = d_col_start[i];
        } else {  // d_col_iter[i] < d_max - 1
          ++d_col_iter[i];
          incremented = true;
          break;  // for (int i = num_axes - 1; i >= 0; --i)
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);
    data_im[index] = val;
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host wrapper for N-D col2im.  The number of spatial axes must be a
// compile-time constant for the kernel (it sizes per-thread and shared
// arrays), so we dispatch over num_spatial_axes with an explicit switch.
//
// BUG FIX: the hipify-generated code had the `hipLaunchKernelGGL((` token
// swallowed by the trailing NOLINT comment on every case, turning each
// launch into a no-op comma expression — no kernel was ever launched.
// The launches below restore the macro invocation.
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  // num_axes should be smaller than block size (the kernel stages its
  // parameter arrays with the first num_axes + 1 threads of each block).
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
  switch (num_spatial_axes) {
  case 1:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 1>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 2:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 2>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 3:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 3>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 4:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 4>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 5:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 5>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 6:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 6>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 7:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 7>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 8:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 8>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 9:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 9>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  case 10:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 10>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
    break;
  default:
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation — only float and double variants are ever linked.
template void col2im_nd_gpu<float>(const float* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, double* data_im);
} // namespace caffe
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
#include "boost/math/common_factor.hpp"
namespace caffe {
// "Constellation" im2col: like the standard im2col kernel, except that
// dilation_h / dilation_w are device arrays of per-tap offsets (indexed by
// kernel row/column), so each kernel tap can sit at an arbitrary offset
// from the window origin.  One thread per column-buffer pixel per channel.
template <typename Dtype>
__global__ void im2col_gpu_kernel_constellation(const int n, const Dtype* data_im,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int* dilation_h, const int* dilation_w,
    const int height_col, const int width_col,
    Dtype* data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (channel, row, col) of the column buffer.
    const int h_index = index / width_col;
    const int h_col = h_index % height_col;
    const int w_col = index % width_col;
    const int c_im = h_index / height_col;
    const int c_col = c_im * kernel_h * kernel_w;
    // Window origin in (unpadded) image coordinates; may be negative.
    const int h_offset = h_col * stride_h - pad_h;
    const int w_offset = w_col * stride_w - pad_w;
    Dtype* data_col_ptr = data_col;
    data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
    const Dtype* data_im_ptr = data_im;
    data_im_ptr += c_im * height * width;
    for (int i = 0; i < kernel_h; ++i) {
      const int dilation_h_ = dilation_h[i];
      const int h_im = h_offset + dilation_h_;
      for (int j = 0; j < kernel_w; ++j) {
        const int dilation_w_ = dilation_w[j];
        const int w_im = w_offset + dilation_w_;
        // Taps that fall outside the image are zero-padded.
        *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
            data_im_ptr[ h_im * width + w_im] : 0;
        data_col_ptr += height_col * width_col;
      }
    }
  }
}
// Host wrapper for the constellation im2col.  kernel_extent_h/w are the
// precomputed spatial extents of the (irregular) tap set; dilation_h/w are
// device pointers to per-tap offset arrays of length kernel_h / kernel_w.
template <typename Dtype>
void im2col_gpu_constellation(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_h, const int* dilation_w,
    Dtype* data_col) {
  // We are going to launch channels * height_col * width_col kernels, each
  // kernel responsible for copying a single-channel grid.
  int height_col = (height + 2 * pad_h - kernel_extent_h ) / stride_h + 1;
  int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
  int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel_constellation<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu_constellation<float>(const float* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_h, const int* dilation_w, float* data_col);
template void im2col_gpu_constellation<double>(const double* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_h, const int* dilation_w, double* data_col);
// Constellation im2col restricted to a slice of the column buffer:
// n_elements output positions starting at flat position n_processed are
// materialized, laid out contiguously (row stride n_elements instead of
// height_col * width_col).  Used to process the column buffer in chunks.
template <typename Dtype>
__global__ void im2col_gpu_constellation_selected_kernel(const int n, const int n_elements,
    const int n_processed,
    const Dtype* data_im,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int* dilation_h, const int* dilation_w,
    const int height_col, const int width_col,
    Dtype* data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Position within the chunked column buffer.
    int ch_col_buffer = index / n_elements;
    int w_col_buffer = index % n_elements;
    Dtype* data_col_ptr = data_col;
    data_col_ptr += ch_col_buffer * kernel_h * kernel_w * n_elements + w_col_buffer;
    // Corresponding position in the full (unchunked) column buffer.
    int temp_index = n_processed + w_col_buffer + ch_col_buffer * height_col * width_col;
    const int h_index = temp_index / width_col;
    const int h_col = h_index % height_col;
    const int w_col = temp_index % width_col;
    const int c_im = h_index / height_col;
    const int h_offset = h_col * stride_h - pad_h;
    const int w_offset = w_col * stride_w - pad_w;
    const Dtype* data_im_ptr = data_im;
    data_im_ptr += c_im * height * width ;
    for (int i = 0; i < kernel_h; ++i) {
      const int dilation_h_ = dilation_h[i];
      const int h_im = h_offset + dilation_h_;
      for (int j = 0; j < kernel_w; ++j) {
        const int dilation_w_ = dilation_w[j];
        const int w_im = w_offset + dilation_w_;
        // Zero-pad taps outside the image.
        *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
            data_im_ptr[ h_im * width + w_im] : 0;
        data_col_ptr += n_elements;
      }
    }
  }
}
// Host wrapper for the chunked constellation im2col: fills n_elements column
// positions starting at flat offset n_processed (see the kernel above).
template <typename Dtype>
void im2col_gpu_constellation_selected(const Dtype* data_im, const int channels,
    const int n_elements, const int n_processed,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_h, const int* dilation_w,
    Dtype* data_col) {
  // We are going to launch channels * height_col * width_col kernels, each
  // kernel responsible for copying a single-channel grid.
  int height_col = (height + 2 * pad_h - kernel_extent_h ) / stride_h + 1;
  int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
  int num_kernels = channels * n_elements;
  // return;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_constellation_selected_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, n_elements, n_processed,
      data_im, height, width,
      kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// EXPLICIT
template void im2col_gpu_constellation_selected<float>(const float* data_im,
    const int channels,const int n_elements, const int n_processed,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_h, const int* dilation_w,
    float* data_col);
template void im2col_gpu_constellation_selected<double>(const double* data_im,
    const int channels,const int n_elements, const int n_processed,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_h, const int* dilation_w,
    double* data_col);
///////COL2IM CONSTELLATION
// col2im for the constellation (per-tap offset) layout: one thread per image
// element sums every column entry that read this pixel.
// The dilation_shift_* / dilation_shift_index_* / dilation_inc_* /
// dilation_start_index_* arrays are precomputed host-side lookup tables that
// chain from one valid (tap-aligned) column position to the next, replacing
// the per-position modulo test of the regular col2im kernel.
// NOTE(review): the exact table semantics are defined by the (unseen) host
// code that builds them — presumably indexed by kernel offset; confirm
// against the caller before modifying.
template <typename Dtype>
__global__ void col2im_gpu_constellation_kernel(const int n, const Dtype* data_col,
    const int height, const int width, const int channels,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_shift_h,const int* dilation_shift_w,
    const int* dilation_shift_index_h,const int* dilation_shift_index_w,
    const int* dilation_inc_h,const int* dilation_inc_w,
    const int* dilation_start_index_h, const int* dilation_start_index_w,
    const int height_col, const int width_col,
    Dtype* data_im, const int accumulate) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = 0;
    // Padded image coordinates of this thread's element.
    const int w_im = index % width + pad_w;
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    // compute the start and end of the output
    int w_col_start =
        (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
    const int w_col_end = min(w_im / stride_w + 1, width_col);
    int h_col_start =
        (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
    const int h_col_end = min(h_im / stride_h + 1, height_col);
    // TODO: use LCM of stride and dilation to avoid unnecessary loops
    // for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
    //   for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
    // Snap the start position to the first column location whose kernel
    // offset is an actual tap, using the precomputed tables.
    const int h_k_start = (h_im - h_col_start * stride_h);
    const int w_k_start = (w_im - w_col_start * stride_w);
    const int start_index_h = dilation_start_index_h[h_k_start];
    const int start_index_w = dilation_start_index_w[w_k_start];
    const int h_k_desired = dilation_inc_h[start_index_h];
    const int w_k_desired = dilation_inc_w[start_index_w];
    const int start_shift_h = abs(h_k_desired - h_k_start) / stride_h;
    const int start_shift_w = abs(w_k_desired - w_k_start) / stride_w;
    h_col_start += start_shift_h;
    w_col_start += start_shift_w;
    // Walk the chained shift tables: dilation_shift_* gives the column step,
    // dilation_shift_index_* gives the next tap index.
    int shift_index_h = start_index_h;
    for (int h_col = h_col_start; h_col < h_col_end;
        h_col += dilation_shift_h[shift_index_h], shift_index_h = dilation_shift_index_h[shift_index_h] )
    {
      int shift_index_w = start_index_w;
      for (int w_col = w_col_start; w_col < w_col_end;
          w_col += dilation_shift_w[shift_index_w], shift_index_w = dilation_shift_index_w[shift_index_w] )
      {
        // The shift index doubles as the kernel tap coordinate.
        const int h_k = shift_index_h;
        const int w_k = shift_index_w;
        int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
            height_col + h_col) * width_col + w_col;
        val += data_col[data_col_index];
      }
    }
    // accumulate is 0 or 1: either overwrite or add to the existing value.
    data_im[index] = val + data_im[index] * accumulate;
  }
}
// Host wrapper for constellation col2im.  All dilation_* arguments are
// device pointers to host-precomputed lookup tables (see kernel above).
// If accumulate != 0 the result is added to data_im instead of replacing it.
template <typename Dtype>
void col2im_gpu_constellation(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_shift_h,const int* dilation_shift_w,
    const int* dilation_shift_index_h,const int* dilation_shift_index_w,
    const int* dilation_inc_h,const int* dilation_inc_w,
    const int* dilation_start_index_h, const int* dilation_start_index_w,
    Dtype* data_im, const int accumulate) {
  int height_col = (height + 2 * pad_h - kernel_extent_h ) / stride_h + 1;
  int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
  int num_kernels = channels * height * width;
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  // NOLINT_NEXT_LINE(whitespace/operators)
  // if(accumulate)
  // std::cout << "col 22 im accumulate = " << accumulate << "\n";
  // in the input space, diff will be back propagated by dilation_shift_index_h or w
  col2im_gpu_constellation_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w,
      stride_h, stride_w,
      kernel_extent_h, kernel_extent_w,
      dilation_shift_h, dilation_shift_w,
      dilation_shift_index_h, dilation_shift_index_w,
      dilation_inc_h, dilation_inc_w,
      dilation_start_index_h, dilation_start_index_w,
      height_col, width_col, data_im, accumulate);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu_constellation<float>(const float* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_shift_h,const int* dilation_shift_w,
    const int* dilation_shift_index_h,const int* dilation_shift_index_w,
    const int* dilation_inc_h,const int* dilation_inc_w,
    const int* dilation_start_index_h, const int* dilation_start_index_w,
    float* data_im, const int accumulate);
template void col2im_gpu_constellation<double>(const double* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int kernel_extent_h, const int kernel_extent_w,
    const int* dilation_shift_h,const int* dilation_shift_w,
    const int* dilation_shift_index_h,const int* dilation_shift_index_w,
    const int* dilation_inc_h,const int* dilation_inc_w,
    const int* dilation_start_index_h, const int* dilation_start_index_w,
    double* data_im, const int accumulate);
///------------************* CONSTELLATION CODE ENDS
// Standard (scalar-dilation) im2col restricted to a slice of the column
// buffer: n_elements output positions starting at flat position n_processed,
// laid out contiguously with row stride n_elements.  Chunked counterpart of
// im2col_gpu_kernel.
template <typename Dtype>
__global__ void im2col_gpu_selected_kernel(const int n, const int n_elements,
    const int n_processed,
    const Dtype* data_im,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int height_col, const int width_col,
    Dtype* data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Position within the chunked column buffer.
    int ch_col_buffer = index / n_elements;
    int w_col_buffer = index % n_elements;
    Dtype* data_col_ptr = data_col;
    data_col_ptr += ch_col_buffer * kernel_h * kernel_w * n_elements + w_col_buffer;
    // Corresponding position in the full (unchunked) column buffer.
    int temp_index = n_processed + w_col_buffer + ch_col_buffer * height_col * width_col;
    const int h_index = temp_index / width_col;
    const int h_col = h_index % height_col;
    const int w_col = temp_index % width_col;
    const int c_im = h_index / height_col;
    const int h_offset = h_col * stride_h - pad_h;
    const int w_offset = w_col * stride_w - pad_w;
    const Dtype* data_im_ptr = data_im;
    // Points at the window origin; out-of-range taps are guarded below.
    data_im_ptr += (c_im * height + h_offset) * width + w_offset;
    for (int i = 0; i < kernel_h; ++i)
    {
      for (int j = 0; j < kernel_w; ++j)
      {
        int h_im = h_offset + i * dilation_h;
        int w_im = w_offset + j * dilation_w;
        // Zero-pad taps outside the image.
        *data_col_ptr =
            (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
            data_im_ptr[i * dilation_h * width + j * dilation_w] :0;
        data_col_ptr += n_elements;
      }
    }
  }
}
// Host wrapper for chunked im2col: fills n_elements column positions
// starting at flat offset n_processed (see im2col_gpu_selected_kernel).
template <typename Dtype>
void im2col_gpu_selected(const Dtype* data_im, const int channels,
    const int n_elements, const int n_processed,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    Dtype* data_col) {
  // We are going to launch channels * height_col * width_col kernels, each
  // kernel responsible for copying a single-channel grid.
  int height_col = (height + 2 * pad_h -
      (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w -
      (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
  int num_kernels = channels * n_elements;
  // return;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_selected_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, n_elements, n_processed,
      data_im, height, width,
      kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// EXPLICIT
template void im2col_gpu_selected<float>(const float* data_im,
    const int channels,const int n_elements, const int n_processed,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    float* data_col);
template void im2col_gpu_selected<double>(const double* data_im,
    const int channels,const int n_elements, const int n_processed,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    double* data_col);
// col2im over a cropped image region fed from a chunked column buffer:
// only image pixels inside the crop (crop_min_x/y, crop_width/height) are
// updated, and only column positions whose flat spatial index lies in
// [index_min, index_max] contribute (those are the positions present in the
// chunk, which has row stride n_elements).  Accumulates into data_im.
template <typename Dtype>
__global__ void col2im_gpu_selected_kernel(const int n, const Dtype* data_col,
    const int height, const int width,
    const int n_elements,
    const int index_min, const int index_max,
    const int crop_min_x, const int crop_min_y,
    const int crop_width, const int crop_height,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int height_col, const int width_col,
    Dtype* data_im) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = 0;
    // Position within the crop window.
    const int crop_x = index % crop_width;
    const int crop_y = ( index / crop_width) % crop_height;
    // const int w_im = crop_x + crop_min_x + pad_w;
    // const int h_im = crop_y + crop_min_y + pad_h;
    const int c_im = index / (crop_width * crop_height);
    // Flat index of the pixel in the full image; also the store target.
    const int temp_index = (c_im * height + crop_y + crop_min_y) * width + crop_x + crop_min_x;
    const int w_im = temp_index % width + pad_w;
    const int h_im = (temp_index / width) % height + pad_h;
    // const int c_im = temp_index / (width * height);
    int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
    int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
    // compute the start and end of the output
    const int w_col_start =
        (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
    const int w_col_end = min(w_im / stride_w + 1, width_col);
    const int h_col_start =
        (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
    const int h_col_end = min(h_im / stride_h + 1, height_col);
    // TODO: use LCM of stride and dilation to avoid unnecessary loops
    for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
      for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
        int col_index = h_col * width_col + w_col;
        // Only column positions materialized in this chunk contribute.
        if(col_index >= index_min && col_index <= index_max)
        {
          col_index -= index_min;
          int h_k = (h_im - h_col * stride_h);
          int w_k = (w_im - w_col * stride_w);
          if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
            h_k /= dilation_h;
            w_k /= dilation_w;
            // int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
            //     height_col + h_col) * width_col + w_col;
            // int data_col_index = c_im * kernel_h * kernel_w * n_elements +
            //     h_k * kernel_w * n_elements +
            //     w_k * n_elements +
            //     col_index;
            // Chunked layout: rows of length n_elements per (c, h_k, w_k).
            int data_col_index = ((c_im * kernel_h + h_k)* kernel_w + w_k) * n_elements + col_index;
            val += data_col[data_col_index];
          }
        }
      }
    }
    // Accumulate (+=) so successive chunks add their contributions.
    data_im[temp_index] += val;
  }
}
// Host wrapper for cropped/chunked col2im: one thread per pixel of the crop
// window per channel; contributions are accumulated into data_im.
template <typename Dtype>
void col2im_gpu_selected(const Dtype* data_col, const int channels, const int n_elements,
    const int index_min, const int index_max,
    const int crop_min_x, const int crop_min_y,
    const int crop_width, const int crop_height,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    Dtype* data_im) {
  int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
      stride_h + 1;
  int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
      stride_w + 1;
  int num_kernels = channels * (crop_width * crop_height);
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  // NOLINT_NEXT_LINE(whitespace/operators)
  col2im_gpu_selected_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_col, height, width, n_elements,
      index_min, index_max,
      crop_min_x, crop_min_y, crop_width, crop_height,
      kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
      height_col, width_col, data_im);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu_selected<float>(const float* data_col, const int channels, const int n_elements,
    const int index_min, const int index_max,
    const int crop_min_x, const int crop_min_y,
    const int crop_width, const int crop_height,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    float* data_im);
template void col2im_gpu_selected<double>(const double* data_col, const int channels, const int n_elements,
    const int index_min, const int index_max,
    const int crop_min_x, const int crop_min_y,
    const int crop_width, const int crop_height,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    double* data_im);
// Standard im2col: each thread materializes one column-buffer pixel for one
// input channel, writing kernel_h * kernel_w values spaced by the column
// buffer's spatial size.  Taps falling outside the image are zero-padded.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int height_col, const int width_col,
    Dtype* data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decompose the flat thread index into (channel, out_y, out_x) of the
    // column buffer.
    const int out_x = index % width_col;
    const int row_index = index / width_col;
    const int out_y = row_index % height_col;
    const int channel = row_index / height_col;
    // Top-left corner of the receptive field in image coordinates (may be
    // negative because of padding).
    const int in_y0 = out_y * stride_h - pad_h;
    const int in_x0 = out_x * stride_w - pad_w;
    // First output channel written by this thread; successive kernel taps
    // advance by one full spatial plane of the column buffer.
    Dtype* out_ptr = data_col +
        ((channel * kernel_h * kernel_w * height_col + out_y) * width_col
         + out_x);
    const Dtype* in_ptr = data_im + (channel * height + in_y0) * width + in_x0;
    for (int ky = 0; ky < kernel_h; ++ky) {
      const int in_y = in_y0 + ky * dilation_h;
      for (int kx = 0; kx < kernel_w; ++kx) {
        const int in_x = in_x0 + kx * dilation_w;
        const bool inside =
            in_y >= 0 && in_y < height && in_x >= 0 && in_x < width;
        *out_ptr = inside ?
            in_ptr[ky * dilation_h * width + kx * dilation_w] : Dtype(0);
        out_ptr += height_col * width_col;
      }
    }
  }
}
// Host wrapper for the standard im2col kernel.  Launches one thread per
// column-buffer pixel per channel (channels * height_col * width_col total).
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    Dtype* data_col) {
  // Dilated kernel extents and the resulting column-buffer spatial size
  // (standard convolution output formula).
  const int ext_h = dilation_h * (kernel_h - 1) + 1;
  const int ext_w = dilation_w * (kernel_w - 1) + 1;
  const int height_col = (height + 2 * pad_h - ext_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - ext_w) / stride_w + 1;
  const int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation — only float and double variants are ever linked.
template void im2col_gpu<float>(const float* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, double* data_col);
// N-dimensional im2col kernel. Each thread owns one element of the column
// buffer and walks the whole kernel window for it, copying matching image
// values (or 0 where the window falls into padding).
// num_axes is the number of spatial axes, fixed at compile time so the
// per-thread index arrays below have constant size.
// im_shape and col_shape carry num_axes + 1 entries (channel axis first);
// kernel_shape, pad, stride and dilation carry num_axes entries each.
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  int d_temp[num_axes];  // NOLINT(runtime/arrays)
  int d_iter[num_axes];  // NOLINT(runtime/arrays)
  // Stage the small parameter arrays in shared memory, one entry per thread.
  // Requires num_axes < blockDim.x (DCHECKed by the host wrapper).
  __shared__ int shared_dilation[num_axes];
  __shared__ int shared_kernel_shape[num_axes];
  __shared__ int shared_pad[num_axes];
  __shared__ int shared_stride[num_axes];
  __shared__ int shared_col_shape[num_axes + 1];
  __shared__ int shared_im_shape[num_axes + 1];
  if (threadIdx.x < num_axes) {
    shared_dilation[threadIdx.x] = dilation[threadIdx.x];
    shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
    shared_pad[threadIdx.x] = pad[threadIdx.x];
    shared_stride[threadIdx.x] = stride[threadIdx.x];
  }
  if (threadIdx.x < num_axes + 1) {
    shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
    shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
  }
  __syncthreads();
  int i;
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int channel_in = index;
    int channel_out = 1;
    // Decompose the flat index into per-axis column coordinates (d_temp)
    // and the input channel; channel_out accumulates the kernel volume.
    for (i = num_axes - 1; i >= 0; --i) {
      d_temp[i] = channel_in % shared_col_shape[i + 1];
      channel_in /= shared_col_shape[i + 1];
      channel_out *= shared_kernel_shape[i];
    }
    channel_out *= channel_in;
    int data_col_inc = 1;
    // Derive: base offset of this thread's output column (channel_out), the
    // window origin per axis in image space (d_temp, may be negative due to
    // padding), the base input offset (channel_in), and the distance between
    // successive kernel elements in the column buffer (data_col_inc).
    for (i = 0; i < num_axes; ++i) {
      channel_out *= shared_col_shape[i + 1];
      channel_out += d_temp[i];
      d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
      channel_in *= shared_im_shape[i + 1];
      channel_in += d_temp[i];
      data_col_inc *= shared_col_shape[i + 1];
      d_iter[i] = 0;
    }
    Dtype* data_col_ptr = data_col + channel_out;
    const Dtype* data_im_ptr = data_im + channel_in;
    bool incremented;
    // Odometer-style walk over every position of the kernel window (d_iter).
    do {
      // The position is valid only if it lands inside the image on every
      // axis; positions in the padding contribute zeros.
      bool in_range = true;
      for (i = 0; i < num_axes; ++i) {
        const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
        in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
        if (!in_range) { break; }
      }
      if (in_range) {
        // Offset of this kernel position relative to the window's origin.
        int data_im_offset = d_iter[0] * shared_dilation[0];
        for (i = 1; i < num_axes; ++i) {
          data_im_offset *= shared_im_shape[i + 1];
          data_im_offset += d_iter[i] * shared_dilation[i];
        }
        *data_col_ptr = data_im_ptr[data_im_offset];
      } else {
        *data_col_ptr = 0;
      }
      data_col_ptr += data_col_inc;
      // Advance d_iter like an odometer; once every axis has wrapped,
      // 'incremented' stays false and the walk terminates.
      incremented = false;
      for (i = num_axes - 1; i >= 0; --i) {
        const int d_max = shared_kernel_shape[i];
        if (d_iter[i] == d_max - 1) {
          d_iter[i] = 0;
        } else {  // d_iter[i] < d_max - 1
          ++d_iter[i];
          incremented = true;
          break;
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);  // do
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host-side dispatcher for the N-dimensional im2col kernel. The kernel is
// templated on the axis count (its per-thread index arrays need a
// compile-time size), so one launch is expanded per supported count (1..10).
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
    const int num_kernels, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  // The kernel stages the shape arrays in shared memory with one thread per
  // axis, so the axis count must fit inside a single block.
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
  // NOLINT_NEXT_LINE(whitespace/operators)
#define IM2COL_ND_CASE(AXES) \
  case AXES: \
    im2col_nd_gpu_kernel<Dtype, AXES> \
        <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( \
        num_kernels, data_im, im_shape, col_shape, \
        kernel_shape, pad, stride, dilation, data_col); \
    break
  switch (num_spatial_axes) {
    IM2COL_ND_CASE(1);
    IM2COL_ND_CASE(2);
    IM2COL_ND_CASE(3);
    IM2COL_ND_CASE(4);
    IM2COL_ND_CASE(5);
    IM2COL_ND_CASE(6);
    IM2COL_ND_CASE(7);
    IM2COL_ND_CASE(8);
    IM2COL_ND_CASE(9);
    IM2COL_ND_CASE(10);
    default:
      LOG(FATAL) << "im2col_nd_gpu does not support computation with "
                 << num_spatial_axes << " spatial axes";
  }
#undef IM2COL_ND_CASE
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation of the N-dimensional host wrapper for the two
// floating-point element types used by the framework.
template void im2col_nd_gpu<float>(const float* data_im,
    const int num_spatial_axes, const int col_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
    const int num_spatial_axes, const int col_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, double* data_col);
// col2im on the GPU: each thread gathers into one image element the sum of
// every column-buffer entry whose (dilated) kernel window covers it, so no
// atomics are needed.
// 'accumulate' is expected to be 0 or 1; it multiplies the previous contents
// of data_im, selecting overwrite (0) vs. add-into (1) semantics.
// lcm_by_stride_{h,w} = lcm(dilation, stride) / stride: stepping the column
// loops by this amount visits exactly the positions whose kernel offset is a
// multiple of the dilation, removing the per-iteration divisibility test.
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
    const int height, const int width, const int channels,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int height_col, const int width_col,
    Dtype* data_im, const int accumulate,
    int lcm_by_stride_h, int lcm_by_stride_w) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = 0;
    // Padded image coordinates and channel of the element this thread owns.
    const int w_im = index % width + pad_w;
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
    int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
    // compute the start and end of the output
    int w_col_start =
        (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
    const int w_col_end = min(w_im / stride_w + 1, width_col);
    int h_col_start =
        (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
    const int h_col_end = min(h_im / stride_h + 1, height_col);
    // TODO: use LCM of stride and dilation to avoid unnecessary loops
    int h_k_start = (h_im - h_col_start * stride_h);
    int w_k_start = (w_im - w_col_start * stride_w);
    // obtain the valid starting index in col to employ lcm of column and stride
    // otherwise the shift will only encounter invalid locations
    // Advance the start position to the first column whose kernel offset is
    // a multiple of the dilation; later valid positions are then reached in
    // lcm(dilation, stride)/stride steps.
    // NOTE(review): these loops terminate only if such a column exists (i.e.
    // the stride/dilation pair can reach this coordinate); otherwise
    // h_k_start / w_k_start go negative and the loop spins -- confirm the
    // callers guarantee compatible stride/dilation combinations.
    // if (h_k_start % dilation_h)
    {
      while (h_k_start % dilation_h)
      {
        h_col_start++;
        h_k_start = (h_im - h_col_start * stride_h);
      }
    }
    // if (w_k_start % dilation_w)
    {
      while (w_k_start % dilation_w)
      {
        w_col_start++;
        w_k_start = (w_im - w_col_start * stride_w);
      }
    }
    // for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
    //   for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
    for (int h_col = h_col_start; h_col < h_col_end; h_col += lcm_by_stride_h) {
      for (int w_col = w_col_start; w_col < w_col_end; w_col += lcm_by_stride_w) {
        int h_k = (h_im - h_col * stride_h);
        int w_k = (w_im - w_col * stride_w);
        // Divisibility by the dilation is guaranteed by the start/step
        // construction above, so the original check is skipped.
        // if (h_k % dilation_h == 0 && w_k % dilation_w == 0)
        {
          h_k /= dilation_h;
          w_k /= dilation_w;
          int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
              height_col + h_col) * width_col + w_col;
          val += data_col[data_col_index];
        }
      }
    }
    // accumulate==1 adds into the existing image, accumulate==0 overwrites.
    data_im[index] = val + data_im[index] * accumulate;
  }
}
// Host-side wrapper for col2im_gpu_kernel: folds a column buffer back into
// image space. One thread is launched per image element (gather), so no
// atomics are needed. 'accumulate' (expected 0 or 1) selects whether the
// result is added to or overwrites the existing contents of data_im.
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    Dtype* data_im, const int accumulate) {
  int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
      stride_h + 1;
  int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
      stride_w + 1;
  int num_kernels = channels * height * width;
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  // NOLINT_NEXT_LINE(whitespace/operators)
  // if(accumulate)
  //   std::cout << "col 22 im accumulate = " << accumulate << "\n";
  // in the input space diff will be back propagated at lcm of dilation and stride
  // but in the output space, locations will be divided by stride
  // NOTE(review): boost::math::lcm is deprecated in newer Boost releases
  // (boost::integer::lcm, or std::lcm in C++17) -- confirm the Boost version
  // this project pins before upgrading.
  const int lcm_dilation_stride_h = boost::math::lcm(dilation_h, stride_h);
  const int lcm_dilation_stride_w = boost::math::lcm(dilation_w, stride_w);
  // Column-loop step sizes used inside the kernel (see its header comment).
  const int lcm_by_stride_h = lcm_dilation_stride_h / stride_h;
  const int lcm_by_stride_w = lcm_dilation_stride_w / stride_w;
  col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
      height_col, width_col, data_im, accumulate,
      lcm_by_stride_h, lcm_by_stride_w);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation of the host wrapper for the two floating-point
// element types used by the framework.
template void col2im_gpu<float>(const float* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    float* data_im, const int accumulate);
template void col2im_gpu<double>(const double* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    double* data_im, const int accumulate);
// N-dimensional col2im kernel. Each thread owns one image element and sums
// every column-buffer entry whose (dilated) kernel window covers it; the
// gather formulation avoids atomics. num_axes is fixed at compile time so
// the per-thread index arrays below have constant size.
// im_shape and col_shape carry num_axes + 1 entries (channel axis first);
// kernel_shape, pad, stride and dilation carry num_axes entries each.
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  int d_im[num_axes];  // NOLINT(runtime/arrays)
  int d_col_iter[num_axes];  // NOLINT(runtime/arrays)
  int d_col_start[num_axes];  // NOLINT(runtime/arrays)
  int d_col_end[num_axes];  // NOLINT(runtime/arrays)
  // Stage the small parameter arrays in shared memory, one entry per thread.
  // Requires num_axes < blockDim.x (DCHECKed by the host wrapper).
  __shared__ int shared_dilation[num_axes];
  __shared__ int shared_kernel_shape[num_axes];
  __shared__ int shared_pad[num_axes];
  __shared__ int shared_stride[num_axes];
  __shared__ int shared_col_shape[num_axes + 1];
  __shared__ int shared_im_shape[num_axes + 1];
  if (threadIdx.x < num_axes) {
    shared_dilation[threadIdx.x] = dilation[threadIdx.x];
    shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
    shared_pad[threadIdx.x] = pad[threadIdx.x];
    shared_stride[threadIdx.x] = stride[threadIdx.x];
  }
  if (threadIdx.x < num_axes + 1) {
    shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
    shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
  }
  __syncthreads();
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int c_im = index;
    // Calculate d_im (padded image coordinates) and the channel c_im.
    for (int i = num_axes - 1; i >= 0; --i) {
      d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
      c_im /= shared_im_shape[i + 1];
    }
    // Calculate, per axis, the [start, end) range of column positions whose
    // kernel window can reach this image element.
    bool done = false;
    for (int i = 0; i < num_axes; ++i) {
      const int kernel_extent =
          shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
      d_col_start[i] = d_col_iter[i] =
          (d_im[i] < kernel_extent) ? 0 :
          (d_im[i] - kernel_extent) / shared_stride[i] + 1;
      d_col_end[i] =
          min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
      if (d_col_start[i] >= d_col_end[i]) {
        // Skip computation if the dimension is 0 at any spatial axis --
        // final val will be 0.
        data_im[index] = 0;
        done = true;
        break;  // for (int i = 0; i < num_axes; ++i)
      }
    }
    if (done) {
      continue;  // CUDA_KERNEL_LOOP(index, n)
    }
    // Odometer-style walk over every reachable column position, accumulating
    // the matching column-buffer entries into val.
    Dtype val = 0;
    bool incremented = true;
    bool skip = false;
    do {
      // Compute the final offset.
      int final_offset = 0;
      int kernel_shape_prod = 1;
      int kernel_index;
      for (int i = num_axes - 1; i >= 0; --i) {
        kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
        if (kernel_index % shared_dilation[i]) {
          // This column position does not align with the dilated kernel grid
          // on axis i; it contributes nothing.
          skip = true;
          break;
        } else {
          kernel_index /= shared_dilation[i];
          final_offset += kernel_index * kernel_shape_prod;
          kernel_shape_prod *= shared_kernel_shape[i];
        }
      }
      if (!skip) {
        final_offset += kernel_shape_prod * c_im;
        for (int i = 0; i < num_axes; ++i) {
          final_offset *= shared_col_shape[i + 1];
          final_offset += d_col_iter[i];
        }
        val += data_col[final_offset];
      }
      skip = false;
      // Advance d_col_iter like an odometer within [d_col_start, d_col_end).
      incremented = false;
      for (int i = num_axes - 1; i >= 0; --i) {
        const int d_max = d_col_end[i];
        if (d_col_iter[i] == d_max - 1) {
          d_col_iter[i] = d_col_start[i];
        } else {  // d_col_iter[i] < d_max - 1
          ++d_col_iter[i];
          incremented = true;
          break;  // for (int i = num_axes - 1; i >= 0; --i)
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);
    data_im[index] = val;
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host-side dispatcher for the N-dimensional col2im kernel. The kernel is
// templated on the axis count (its per-thread index arrays need a
// compile-time size), so one launch is expanded per supported count (1..10).
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  // The kernel stages the shape arrays in shared memory with one thread per
  // axis, so the axis count must fit inside a single block.
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
  // NOLINT_NEXT_LINE(whitespace/operators)
#define COL2IM_ND_CASE(AXES) \
  case AXES: \
    col2im_nd_gpu_kernel<Dtype, AXES> \
        <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( \
        im_size, data_col, im_shape, col_shape, \
        kernel_shape, pad, stride, dilation, data_im); \
    break
  switch (num_spatial_axes) {
    COL2IM_ND_CASE(1);
    COL2IM_ND_CASE(2);
    COL2IM_ND_CASE(3);
    COL2IM_ND_CASE(4);
    COL2IM_ND_CASE(5);
    COL2IM_ND_CASE(6);
    COL2IM_ND_CASE(7);
    COL2IM_ND_CASE(8);
    COL2IM_ND_CASE(9);
    COL2IM_ND_CASE(10);
    default:
      LOG(FATAL) << "col2im_nd_gpu does not support computation with "
                 << num_spatial_axes << " spatial axes";
  }
#undef COL2IM_ND_CASE
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation of the N-dimensional host wrapper for the two
// floating-point element types used by the framework.
template void col2im_nd_gpu<float>(const float* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, double* data_im);
} // namespace caffe
|
c2088264474c28445d9cb5b57228837068979b55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author : Kim, KyoungHo (rain_woo@korea.ac.kr)
Ki-Hwan Kim (wbkifun@korea.ac.kr)
Written date : 2009. 6. 11
last update :
Copyright : GNU GPL
*/
// FDTD E-field update (one leapfrog half-step). One thread per cell along
// the fastest-varying axis; idx0 offsets the global index, Nz and Nyz are
// the array strides passed in by the caller (presumably z and y*z strides of
// a z-fastest 3D layout -- confirm against the host code).
// Dynamic shared memory: the layout below requires at least
// (3*blockDim.x + 2)*sizeof(float) bytes, partitioned into staging tiles
// for Hx/Hy (each with a one-cell halo at the high end) and Hz.
__global__ void update_e( int idx0, int Nz, int Nyz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
    int tk = threadIdx.x;                          // lane within the block
    int idx = blockIdx.x*blockDim.x + tk + idx0;   // global H-array index
    int eidx = idx + Nyz;                          // E arrays are accessed one Nyz-plane ahead
    extern __shared__ float hs[];
    // Partition the dynamic shared buffer into three staging tiles.
    float* hx = (float*) hs;
    float* hy = (float*) &hx[blockDim.x+1];
    float* hz = (float*) &hy[blockDim.x+1];
    hx[tk] = Hx[idx];
    hy[tk] = Hy[idx];
    hz[tk] = Hz[idx];
    // The last thread also fetches the +1 halo cells needed by the stencil.
    if ( tk==blockDim.x-1 ) {
        hx[tk+1] = Hx[idx+1];
        hy[tk+1] = Hy[idx+1];
    }
    __syncthreads();  // tiles must be complete before neighboring lanes read them
    // Curl of H scaled by the per-cell coefficients CEx/CEy/CEz. Neighbors
    // at strides Nz and Nyz come straight from global memory; neighbors at
    // stride 1 come from the shared tiles.
    Ex[eidx] += CEx[idx]*( Hz[idx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
    Ey[eidx] += CEy[idx]*( hx[tk+1] - hx[tk] - Hz[idx+Nyz] + hz[tk] );
    Ez[eidx] += CEz[idx]*( Hy[idx+Nyz] - hy[tk] - Hx[idx+Nz] + hx[tk] );
}
// FDTD H-field update (one leapfrog half-step), counterpart of update_e.
// One thread per cell along the fastest-varying axis; idx0 offsets the
// global index, Nz and Nyz are the array strides passed in by the caller.
// Dynamic shared memory: the layout below requires at least
// (3*blockDim.x + 2)*sizeof(float) bytes, partitioned into staging tiles
// for Ex/Ey (each with a one-cell halo at the low end) and Ez.
__global__ void update_h( int idx0, int Nz, int Nyz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
    int tk = threadIdx.x;                          // lane within the block
    int idx = blockIdx.x*blockDim.x + tk + idx0;   // global H-array index
    int eidx = idx + Nyz;                          // E arrays are accessed one Nyz-plane ahead
    extern __shared__ float es[];
    // Partition the dynamic shared buffer into three staging tiles; ex/ey
    // carry a one-cell halo below the block, filled by thread 0.
    float* ex = (float*) es;
    float* ey = (float*) &ex[blockDim.x+1];
    float* ez = (float*) &ey[blockDim.x+1];
    ex[tk+1] = Ex[eidx];
    ey[tk+1] = Ey[eidx];
    ez[tk] = Ez[eidx];
    if ( tk==0 ) {
        ex[0] = Ex[eidx-1];
        ey[0] = Ey[eidx-1];
    }
    __syncthreads();  // tiles must be complete before neighboring lanes read them
    // Use float literals (0.5f): the original 0.5 promoted every update to
    // double and back. Multiplication by 0.5 is exact in both precisions,
    // so the numerical results are unchanged.
    Hx[idx] -= 0.5f*( ez[tk] - Ez[eidx-Nz] - ey[tk+1] + ey[tk] );
    Hy[idx] -= 0.5f*( ex[tk+1] - ex[tk] - ez[tk] + Ez[eidx-Nyz] );
    Hz[idx] -= 0.5f*( ey[tk+1] - Ey[eidx-Nyz] - ex[tk+1] + Ex[eidx-Nz] );
}
| c2088264474c28445d9cb5b57228837068979b55.cu | /*
Author : Kim, KyoungHo (rain_woo@korea.ac.kr)
Ki-Hwan Kim (wbkifun@korea.ac.kr)
Written date : 2009. 6. 11
last update :
Copyright : GNU GPL
*/
// FDTD E-field update (one leapfrog half-step). One thread per cell along
// the fastest-varying axis; idx0 offsets the global index, Nz and Nyz are
// the array strides passed in by the caller (presumably z and y*z strides of
// a z-fastest 3D layout -- confirm against the host code).
// Dynamic shared memory: the layout below requires at least
// (3*blockDim.x + 2)*sizeof(float) bytes, partitioned into staging tiles
// for Hx/Hy (each with a one-cell halo at the high end) and Hz.
__global__ void update_e( int idx0, int Nz, int Nyz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
    int tk = threadIdx.x;                          // lane within the block
    int idx = blockIdx.x*blockDim.x + tk + idx0;   // global H-array index
    int eidx = idx + Nyz;                          // E arrays are accessed one Nyz-plane ahead
    extern __shared__ float hs[];
    // Partition the dynamic shared buffer into three staging tiles.
    float* hx = (float*) hs;
    float* hy = (float*) &hx[blockDim.x+1];
    float* hz = (float*) &hy[blockDim.x+1];
    hx[tk] = Hx[idx];
    hy[tk] = Hy[idx];
    hz[tk] = Hz[idx];
    // The last thread also fetches the +1 halo cells needed by the stencil.
    if ( tk==blockDim.x-1 ) {
        hx[tk+1] = Hx[idx+1];
        hy[tk+1] = Hy[idx+1];
    }
    __syncthreads();  // tiles must be complete before neighboring lanes read them
    // Curl of H scaled by the per-cell coefficients CEx/CEy/CEz. Neighbors
    // at strides Nz and Nyz come straight from global memory; neighbors at
    // stride 1 come from the shared tiles.
    Ex[eidx] += CEx[idx]*( Hz[idx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
    Ey[eidx] += CEy[idx]*( hx[tk+1] - hx[tk] - Hz[idx+Nyz] + hz[tk] );
    Ez[eidx] += CEz[idx]*( Hy[idx+Nyz] - hy[tk] - Hx[idx+Nz] + hx[tk] );
}
// FDTD H-field update (one leapfrog half-step), counterpart of update_e.
// One thread per cell along the fastest-varying axis; idx0 offsets the
// global index, Nz and Nyz are the array strides passed in by the caller.
// Dynamic shared memory: the layout below requires at least
// (3*blockDim.x + 2)*sizeof(float) bytes, partitioned into staging tiles
// for Ex/Ey (each with a one-cell halo at the low end) and Ez.
__global__ void update_h( int idx0, int Nz, int Nyz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
    int tk = threadIdx.x;                          // lane within the block
    int idx = blockIdx.x*blockDim.x + tk + idx0;   // global H-array index
    int eidx = idx + Nyz;                          // E arrays are accessed one Nyz-plane ahead
    extern __shared__ float es[];
    // Partition the dynamic shared buffer into three staging tiles; ex/ey
    // carry a one-cell halo below the block, filled by thread 0.
    float* ex = (float*) es;
    float* ey = (float*) &ex[blockDim.x+1];
    float* ez = (float*) &ey[blockDim.x+1];
    ex[tk+1] = Ex[eidx];
    ey[tk+1] = Ey[eidx];
    ez[tk] = Ez[eidx];
    if ( tk==0 ) {
        ex[0] = Ex[eidx-1];
        ey[0] = Ey[eidx-1];
    }
    __syncthreads();  // tiles must be complete before neighboring lanes read them
    // Use float literals (0.5f): the original 0.5 promoted every update to
    // double and back. Multiplication by 0.5 is exact in both precisions,
    // so the numerical results are unchanged.
    Hx[idx] -= 0.5f*( ez[tk] - Ez[eidx-Nz] - ey[tk+1] + ey[tk] );
    Hy[idx] -= 0.5f*( ex[tk+1] - ex[tk] - ez[tk] + Ez[eidx-Nyz] );
    Hz[idx] -= 0.5f*( ey[tk+1] - Ey[eidx-Nyz] - ex[tk+1] + Ex[eidx-Nz] );
}
|
b5587d29bac9397dbab07a4169a30cdeb7cd9246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This module is used to copy the cover of the computational domain to
* the appropriate boundary zones. For reference;
* -UP is used when moving to the positive direction along y-axis (+y)
* -RIGHT is used when moving to the positive direction along x-axis (+x)
* -BACK is used when moving to the positive direction along z-axis (+z)
*
* Check astaroth-code/doc/boundcond.png for an illustrative picture
* (Additionally, the logic behind "per_xy_edges" etc. is such that if both
* x and y are periodic, then the edges between CZ_BOT and CZ_TOP
* can be copied. This is based on the assumption, that if z is not periodic,
* then special rules are applied when copying data to the planes at z coordinates
* CZ_BOT-BOUND_SIZE ... CZ_BOT and CZ_TOP ... CZ_TOP+BOUND_SIZE)
*/
#include "boundcond_cuda_generic.cuh"
#include "gpu/cuda/core/dconsts_core.cuh"
#include "gpu/cuda/core/errorhandler_cuda.cuh"
//Applies z-periodicity to the side planes: the three front planes of the
//computational domain are copied to the back boundary zone and the three
//back planes to the front boundary zone. Edges and corners of the boundary
//zone are handled by separate kernels.
__global__ void per_z_sides(Grid d_grid)
{
    // blockIdx.z in [0,2] handles front -> back; [3,5] handles back -> front.
    const bool front_to_back = blockIdx.z < 3;
    const int layer = front_to_back ? blockIdx.z : blockIdx.z - 3;
    const int src_z = front_to_back ? layer + d_nz_min : layer + d_nz_max - 3;
    const int dst_z = front_to_back ? layer + d_nz_max : layer;
    const int ix = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min;
    const int iy = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
    if (ix >= d_nx_max || iy >= d_ny_max)
        return; // threads of partial blocks past the domain edge do nothing
    const int src = ix + iy*d_mx + src_z*d_mx*d_my;
    const int dst = ix + iy*d_mx + dst_z*d_mx*d_my;
    for (int w = 0; w < NUM_ARRS; ++w)
        d_grid.arr[w][dst] = d_grid.arr[w][src];
}
//Applies y-periodicity to the side planes: the three bottom planes of the
//computational domain are copied to the top boundary zone and the three top
//planes to the bottom boundary zone. Edges and corners of the boundary zone
//are handled by separate kernels.
__global__ void per_y_sides(Grid d_grid)
{
    // blockIdx.z in [0,2] handles bottom -> top; [3,5] handles top -> bottom.
    const bool bottom_to_top = blockIdx.z < 3;
    const int layer = bottom_to_top ? blockIdx.z : blockIdx.z - 3;
    const int src_y = bottom_to_top ? layer + d_ny_min : layer + (d_ny_max - 3);
    const int dst_y = bottom_to_top ? layer + d_ny_max : layer;
    const int ix = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min;
    const int iz = threadIdx.y + blockIdx.y*blockDim.y + d_nz_min;
    if (ix >= d_nx_max || iz >= d_nz_max)
        return; // threads of partial blocks past the domain edge do nothing
    const int src = ix + src_y*d_mx + iz*d_mx*d_my;
    const int dst = ix + dst_y*d_mx + iz*d_mx*d_my;
    for (int w = 0; w < NUM_ARRS; ++w)
        d_grid.arr[w][dst] = d_grid.arr[w][src];
}
//Copies the left and right sides of the computational domain to an appropriate
//boundary zone (does not include the edges and corners of the boundary zone).
//threadIdx.x < 3 handles the left -> right copy, threadIdx.x in [3,5] the
//right -> left copy.
__global__ void per_x_sides(Grid d_grid)
{
    int ix, ix_bound;
    if (threadIdx.x < 3) { //Copy left of the computational domain to the boundary zone at the right
        ix = threadIdx.x + d_nx_min;
        ix_bound = threadIdx.x + d_nx_max;
    }
    else { //Copy right of the computational domain to the boundary zone at the left
        ix = (threadIdx.x-3) + (d_nx_max - 3);
        ix_bound = (d_nx_min-3) +(threadIdx.x-3);
    }
    int iy,iz;
    iz = threadIdx.z + blockIdx.z*blockDim.z + d_nz_min;//Don't add edges
    iy = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
    //Bounds guard: the launch in boundcond_cuda_generic rounds the y/z grid
    //dimensions up (ceil), so threads in partial blocks would otherwise copy
    //outside the computational domain. The commented-out reference
    //implementation below applies the same guard.
    if (iy < d_ny_max && iz < d_nz_max) {
        int grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
        int bound_idx = ix_bound + iy*d_mx + iz*d_mx*d_my;
        for (int i=0; i < NUM_ARRS; ++i)
            d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
    }
    /*//Uncommented this TODO recheck if causes issues
    //Normal periodic boundary if shearing is not included
    int sid_depth = threadIdx.x;
    int sid_y = threadIdx.y;
    if (d_LSHEAR == 0 || (d_DELTA_Y > -1.0e-30 && d_DELTA_Y < 1.0e-30) ) { //Meaning if d_DELTA_Y == 0, but reals are not that stable.
        if (iy < d_ny_max && iz < d_nz_max) {
            int grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
            int bound_idx = ix_bound + iy*d_mx + iz*d_mx*d_my;
            d_lnrho[bound_idx] = d_lnrho[grid_idx];
            d_uu_x[bound_idx] = d_uu_x[grid_idx];
            d_uu_y[bound_idx] = d_uu_y[grid_idx];
            d_uu_z[bound_idx] = d_uu_z[grid_idx];
        }
    } else {
        if (iy < d_ny_max && iz < d_nz_max) {
            int bound_idx = ix_bound + iy*d_mx + iz*d_mx*d_my;
            //Allocate shared memory for interpolation arrays
            __shared__ real s_coord_interp[INTERP_ORDER][INTERP_NPOINTS][INTERP_DEPTH];
            __shared__ real s_val_interp[INTERP_ORDER][INTERP_NPOINTS][INTERP_DEPTH];
            //Perform the interpolation in assign the values to the boundaries
            d_lnrho[bound_idx] = interp_shear(d_lnrho, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
            d_uu_x[bound_idx] = interp_shear(d_uu_x, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
            d_uu_y[bound_idx] = interp_shear(d_uu_y, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
            d_uu_z[bound_idx] = interp_shear(d_uu_z, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
        }
    }
    */
}
//Copy the edges from upper front & back and bottom front & back to
//the appropriate boundary zones
//(Requires thread dims of (32, 3, 3) and blockDims of (ceil((real) d_nx / (real)tpb.x), 1, 4)
//blockIdx.z in [0,3] selects which of the four y/z edges this block copies.
//NOTE(review): the literal 3 in the index math appears to stand for
//BOUND_SIZE -- confirm before changing the boundary width.
__global__ void per_yz_edges(Grid d_grid)
{
    int ix, iy, iz;
    int grid_idx, bound_idx;
    ix = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min; //x index skips the boundary and starts from the computational domain
    iy = threadIdx.y + d_ny_min;
    iz = threadIdx.z + d_nz_min;
    switch(blockIdx.z)
    {
        case 0: //Copy upper front edge of the computational domain to the boundary zone at bottom back
            grid_idx = ix + (iy + d_ny-3)*d_mx + iz*d_mx*d_my;
            bound_idx = ix +
                        (iy-BOUND_SIZE)*d_mx +
                        (iz+d_nz)*d_mx*d_my;
            break;
        case 1: //Copy bottom front edge of the computational domain to the boundary zone at upper back
            grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
            bound_idx = ix +
                        (iy+d_ny)*d_mx +
                        (iz+d_nz)*d_mx*d_my;
            break;
        case 2: //Copy upper back edge of the computational domain to the boundary zone at bottom front
            grid_idx = ix + (iy + d_ny-3)*d_mx + (iz + d_nz-3)*d_mx*d_my;
            bound_idx = ix +
                        (iy-BOUND_SIZE)*d_mx +
                        (iz-BOUND_SIZE)*d_mx*d_my;
            break;
        case 3: //Copy bottom back edge of the computational domain to the boundary zone at upper front
            grid_idx = ix + iy*d_mx + (iz + d_nz-3)*d_mx*d_my;
            bound_idx = ix +
                        (iy+d_ny)*d_mx +
                        (iz-BOUND_SIZE)*d_mx*d_my;
            break;
    }
    //Only x is tiled across multiple blocks, so only x needs a tail guard.
    if (ix < d_nx_max) {
        for (int i=0; i < NUM_ARRS; ++i)
            d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
    }
}
//Copy the edges from front left & right and back left & right to
//the appropriate boundary zones
//(Requires thread dims of (3, 32, 3) and blockDims of (1, ceil((real) d_ny / (real)tpb.y), 4))
//blockIdx.z in [0,3] selects which of the four x/z edges this block copies.
//NOTE(review): the literal 3 in the index math appears to stand for
//BOUND_SIZE -- confirm before changing the boundary width.
__global__ void per_xz_edges(Grid d_grid)
{
    int ix, iy, iz;
    int grid_idx, bound_idx;
    ix = threadIdx.x + d_nx_min;
    iy = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
    iz = threadIdx.z + d_nz_min;
    switch(blockIdx.z)
    {
        case 0: //Copy front left edge of the computational domain to the boundary zone at right back
            grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        iy*d_mx +
                        (iz+d_nz)*d_mx*d_my;
            break;
        case 1: //Copy right front edge of the computational domain to the boundary zone at left back
            grid_idx = (ix + d_nx-3) + iy*d_mx + iz*d_mx*d_my;
            bound_idx = (ix-BOUND_SIZE) +
                        iy*d_mx +
                        (iz+d_nz)*d_mx*d_my;
            break;
        case 2: //Copy left back edge of the computational domain to the boundary zone at right front
            grid_idx = ix + iy*d_mx + (iz + d_nz-3)*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        iy*d_mx +
                        (iz-BOUND_SIZE)*d_mx*d_my;
            break;
        case 3: //Copy right back edge of the computational domain to the boundary zone at left front
            grid_idx = (ix + d_nx-3) + iy*d_mx + (iz + d_nz-3)*d_mx*d_my;
            bound_idx = (ix-BOUND_SIZE) +
                        iy*d_mx +
                        (iz-BOUND_SIZE)*d_mx*d_my;
            break;
    }
    //Only y is tiled across multiple blocks, so only y needs a tail guard.
    if (iy < d_ny_max) {
        for (int i=0; i < NUM_ARRS; ++i)
            d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
    }
}
//Copy the edges from upper left & right and bottom left & right to
//the appropriate boundary zones
//(Requires thread dims of (3, 3, 32) and blockDims of (1, 4, ceil((real) d_nz / (real)tpb.z)))
//blockIdx.y in [0,3] selects which of the four x/y edges this block copies.
//NOTE(review): the literal 3 in the index math appears to stand for
//BOUND_SIZE -- confirm before changing the boundary width.
__global__ void per_xy_edges(Grid d_grid)
{
    int ix, iy, iz;
    int grid_idx, bound_idx;
    ix = threadIdx.x + d_nx_min;
    iy = threadIdx.y + d_ny_min;
    iz = threadIdx.z + blockIdx.z*blockDim.z + d_nz_min;
    switch(blockIdx.y)
    {
        case 0: //Copy upper left edge of the computational domain to the boundary zone at bottom right
            grid_idx = ix + (iy + d_ny-3)*d_mx + iz*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        (iy-BOUND_SIZE)*d_mx +
                        iz*d_mx*d_my;
            break;
        case 1: //Copy upper right edge of the computational domain to the boundary zone at bottom left
            grid_idx = (ix + d_nx-3) + (iy + d_ny-3)*d_mx + iz*d_mx*d_my;
            bound_idx = (ix-BOUND_SIZE) +
                        (iy-BOUND_SIZE)*d_mx +
                        iz*d_mx*d_my;
            break;
        case 2: //Copy bottom left edge of the computational domain to the boundary zone at upper right
            grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        (iy + d_ny)*d_mx +
                        iz*d_mx*d_my;
            break;
        case 3: //Copy bottom right edge of the computational domain to the boundary zone at upper left
            grid_idx = (ix + d_nx-3) + iy*d_mx + iz*d_mx*d_my;
            bound_idx = (ix-BOUND_SIZE) +
                        (iy + d_ny)*d_mx +
                        iz*d_mx*d_my;
            break;
    }
    //Only z is tiled across multiple blocks, so only z needs a tail guard.
    if (iz < d_nz_max) {
        for (int i=0; i < NUM_ARRS; ++i)
            d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
    }
}
//Copies the corners of the computational domain to appropriate boundary areas
//Uses x,y,z to determine the index inside the block and blockIdx.z to determine
//which one of the eight corners to copy.
//(Requires thread dims of (3, 3, 3) and blockDims of (1, 1, 8))
//No bounds guard is needed: every launched thread maps to exactly one cell
//of a 3x3x3 corner cube.
//NOTE(review): the literal 3 in the index math appears to stand for
//BOUND_SIZE -- confirm before changing the boundary width.
__global__ void per_xyz_corners(Grid d_grid)
{
    int ix, iy, iz;
    int grid_idx, bound_idx;
    ix = threadIdx.x + d_nx_min;
    iy = threadIdx.y + d_ny_min;
    iz = threadIdx.z + d_nz_min;
    switch(blockIdx.z)
    {
        case 0: //Copy the bottom left front corner to boundary zone at upper right back (x=0, y=0, z=0)
            grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        (iy + d_ny)*d_mx +
                        (iz + d_nz)*d_mx*d_my;
            break;
        case 1: //Copy the bottom left back corner to boundary zone at upper right front (x=0, y=0, z=1)
            grid_idx = ix + iy*d_mx + (iz+d_nz-3)*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        (iy + d_ny)*d_mx +
                        (iz - BOUND_SIZE)*d_mx*d_my;
            break;
        case 2: //Copy the upper left front corner to boundary zone at bottom right back (x=0, y=1, z=0)
            grid_idx = ix + (iy+d_ny-3)*d_mx + iz*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        (iy - BOUND_SIZE)*d_mx +
                        (iz + d_nz)*d_mx*d_my;
            break;
        case 3: //Copy the upper left back corner to boundary zone at bottom right front (x=0, y=1, z=1)
            grid_idx = ix + (iy+d_ny-3)*d_mx + (iz+d_nz-3)*d_mx*d_my;
            bound_idx = (ix + d_nx) +
                        (iy - BOUND_SIZE)*d_mx +
                        (iz - BOUND_SIZE)*d_mx*d_my;
            break;
        case 4: //Copy the bottom right front corner to boundary zone at upper left back (Do x=1, y=0, z=0)
            grid_idx = (ix+d_nx-3) + iy*d_mx + iz*d_mx*d_my;
            bound_idx = (ix - BOUND_SIZE) +
                        (iy + d_ny)*d_mx +
                        (iz + d_nz)*d_mx*d_my;
            break;
        case 5: //Copy the bottom right back corner to boundary zone at upper left front (x=1, y=0, z=1)
            grid_idx = (ix+d_nx-3) + iy*d_mx + (iz+d_nz-3)*d_mx*d_my;
            bound_idx = (ix - BOUND_SIZE) +
                        (iy + d_ny)*d_mx +
                        (iz - BOUND_SIZE)*d_mx*d_my;
            break;
        case 6: //Copy the upper right front corner to boundary zone at bottom left back (x=1, y=1, z=0)
            grid_idx = (ix+d_nx-3) + (iy+d_ny-3)*d_mx + iz*d_mx*d_my;
            bound_idx = (ix - BOUND_SIZE) +
                        (iy - BOUND_SIZE)*d_mx +
                        (iz + d_nz)*d_mx*d_my;
            break;
        case 7: //Copy the upper right back corner to boundary zone at bottom left front (x=1, y=1, z=1)
            grid_idx = (ix+d_nx-3) +
                       (iy+d_ny-3)*d_mx +
                       (iz+d_nz-3)*d_mx*d_my;
            bound_idx = (ix - BOUND_SIZE) +
                        (iy - BOUND_SIZE)*d_mx +
                        (iz - BOUND_SIZE)*d_mx*d_my;
            break;
    }
    for (int i=0; i < NUM_ARRS; ++i)
        d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
}
//Define boundcond types
#define PERIODIC_BOUNDCONDS 0
#define SHEARING_BOUNDCONDS 1
#define BOUNDCOND_TYPE_X PERIODIC_BOUNDCONDS
#define BOUNDCOND_TYPE_Y PERIODIC_BOUNDCONDS
#define BOUNDCOND_TYPE_Z PERIODIC_BOUNDCONDS
void boundcond_cuda_generic(Grid* d_grid, CParamConfig* cparams, hipStream_t stream)
{
//Quick summary:
//The point in a 3D cuboid is copied to a location, where the location index is
//offset in 1, 2 or 3 axes
//f.ex.
// -Points that are copied by adding an offset in only one axis
// (for example from the front of the computational domain to the boundary in the back)
// (Functions: per_z_sides, per_x_sides, per_y_sides)
//
// -Points that are offset in two axes, for example the top left edge (not including the corner)
// of the computational domain is copied to the boundary to the bottom right of the grid
// (Functions: per_xy_edges per_xz_edges per_yz_edges)
//
// -Points that are offset in all three axes, e.g. the corners. For example the front top right
// 3*3*3 cube of the computational domain is copied to the boundary zone in back bottom left in the grid.
// (Function: per_xyz_corners)
//
// BOUNDCOND_TYPE_X, BOUNDCOND_TYPE_Y and BOUNDCOND_TYPE_Z are used to determine how
// the boundaries in their respective axis are supposed to be copied.
//--------X BOUNDS---------------
switch (BOUNDCOND_TYPE_X) {
case PERIODIC_BOUNDCONDS: {
//Copy periodic x sides
const dim3 tpb(6, 4, 1);
const dim3 bpg(
1,
(unsigned int)ceil((real) cparams->ny / (real)tpb.y),
(unsigned int)ceil((real) cparams->nz / (real)tpb.z));
hipLaunchKernelGGL(( per_x_sides), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
//Copy periodic xy edges
if (BOUNDCOND_TYPE_Y == PERIODIC_BOUNDCONDS) {
const dim3 tpb(3, 3, 32);
const dim3 bpg(
1,
4,
(unsigned int)ceil((float) cparams->nz / tpb.z));
hipLaunchKernelGGL(( per_xy_edges), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
}
//Copy periodic xz edges
if (BOUNDCOND_TYPE_Z == PERIODIC_BOUNDCONDS) {
const dim3 tpb(3, 32, 3);
const dim3 bpg(
1,
(unsigned int)ceil((real) cparams->ny / (real)tpb.y),
4);
hipLaunchKernelGGL(( per_xz_edges), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
}
//If fully periodic, copy all corners
if ((BOUNDCOND_TYPE_Y == PERIODIC_BOUNDCONDS) && (BOUNDCOND_TYPE_Z == PERIODIC_BOUNDCONDS)) {
const dim3 tpb(3, 3, 3);
const dim3 bpg(1, 1, 8);
hipLaunchKernelGGL(( per_xyz_corners), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
}
break;
}
default:
printf("INVALID X TYPE IN BOUNDCOND_CUDA!\n");
exit(EXIT_FAILURE);
}
//--------------------------------
//--------Y BOUNDS--------------
switch (BOUNDCOND_TYPE_Y) {
//Do periodic bounds for y sides
case PERIODIC_BOUNDCONDS: {
const dim3 tpb(32, 32, 1);
const dim3 bpg(
(unsigned int)ceil((real) cparams->nx / (real)tpb.x),
(unsigned int)ceil((real) cparams->nz / (real)tpb.y),
6);
hipLaunchKernelGGL(( per_y_sides), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
//Copy periodic yz edges
if (BOUNDCOND_TYPE_Z == PERIODIC_BOUNDCONDS) {
const dim3 tpb(32, 3, 3);
const dim3 bpg(
(unsigned int)ceil((real) cparams->nx / (real)tpb.x),
1,
4);
hipLaunchKernelGGL(( per_yz_edges), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
}
break;
}
default:
printf("INVALID Y TYPE IN BOUNDCOND_CUDA!\n");
exit(EXIT_FAILURE);
}
//--------------------------------
//---------Z BOUNDS----------------
switch (BOUNDCOND_TYPE_Z) {
//Do periodic bounds for z sides
case PERIODIC_BOUNDCONDS: {
const dim3 tpb(32, 32, 1);
const dim3 bpg((unsigned int)ceil((real) cparams->nx / (real)tpb.x),
(unsigned int)ceil((real) cparams->ny / (real)tpb.y),
6);
hipLaunchKernelGGL(( per_z_sides), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
break;
}
default:
printf("INVALID Z TYPE IN BOUNDCOND_CUDA!\n");
exit(EXIT_FAILURE);
}
//--------------------------------
}
void periodic_xy_boundconds_cuda_generic(Grid* d_grid, CParamConfig* cparams, hipStream_t stream)
{
//Copy periodic x sides
{
const dim3 tpb(6, 4, 1);
const dim3 bpg(1,
(unsigned int)ceil((real) cparams->ny / tpb.y),
(unsigned int)ceil((real) cparams->nz / tpb.z));
hipLaunchKernelGGL(( per_x_sides), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
}
//Copy periodic xy edges
{
const dim3 tpb(3, 3, 32);
const dim3 bpg(1,
4,
(unsigned int)ceil((real) cparams->nz / tpb.z));
hipLaunchKernelGGL(( per_xy_edges), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
}
//Copy periodic y sides
{
const dim3 tpb(32, 32, 1);
const dim3 bpg((unsigned int)ceil((real) cparams->nx / tpb.x),
(unsigned int)ceil((real) cparams->nz / tpb.y),
6);
hipLaunchKernelGGL(( per_y_sides), dim3(bpg), dim3(tpb), 0, stream, *d_grid);
CUDA_ERRCHK_KERNEL();
}
}
| b5587d29bac9397dbab07a4169a30cdeb7cd9246.cu | /*
* This module is used to copy the cover of the computational domain to
* the appropriate boundary zones. For reference;
* -UP is used when moving to the positive direction along y-axis (+y)
* -RIGHT is used when moving to the positive direction along x-axis (+x)
* -BACK is used when moving to the positive direction along z-axis (+z)
*
* Check astaroth-code/doc/boundcond.png for an illustrative picture
* (Additionally, the logic behind "per_xy_edges" etc. is such that if both
* x and y are periodic, then the edges between CZ_BOT and CZ_TOP
* can be copied. This is based on the assumption, that if z is not periodic,
* then special rules are applied when copying data to the planes at z coordinates
* CZ_BOT-BOUND_SIZE ... CZ_BOT and CZ_TOP ... CZ_TOP+BOUND_SIZE)
*/
#include "boundcond_cuda_generic.cuh"
#include "gpu/cuda/core/dconsts_core.cuh"
#include "gpu/cuda/core/errorhandler_cuda.cuh"
//Copies the front and back of the computational domain to an appropriate
//boundary zone (does not include the edges and corners of the boundary zone)
__global__ void per_z_sides(Grid d_grid)
{
int iz, iz_bound;
if (blockIdx.z < 3) { //Copy front of the computational domain to the boundary zone at the back
iz = blockIdx.z + d_nz_min;
iz_bound = blockIdx.z + d_nz_max;
}
else { //Copy back of the computational domain to the boundary zone at the front
iz = (blockIdx.z-3) + d_nz_max - 3;
iz_bound = (blockIdx.z-3);
}
int ix,iy;
ix = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min;
iy = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
if (ix < d_nx_max && iy < d_ny_max) {
int grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
int bound_idx = ix + iy*d_mx + iz_bound*d_mx*d_my;
for (int i=0; i < NUM_ARRS; ++i)
d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
}
}
//Copies the top and bottom of the computational domain to an appropriate
//boundary zone (does not include the edges and corners of the boundary zone)
__global__ void per_y_sides(Grid d_grid)
{
int iy, iy_bound;
if (blockIdx.z < 3) { //Copy bottom of the computational domain to the boundary zone at the top
iy = blockIdx.z + d_ny_min;
iy_bound = blockIdx.z + d_ny_max;
}
else { //Copy top of the computational domain to the boundary zone at the bottom
iy = (blockIdx.z-3) + (d_ny_max - 3);
iy_bound = (blockIdx.z-3);
}
int ix,iz;
ix = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min;
iz = threadIdx.y + blockIdx.y*blockDim.y + d_nz_min;
if (ix < d_nx_max && iz < d_nz_max) {
int grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
int bound_idx = ix + iy_bound*d_mx + iz*d_mx*d_my;
for (int i=0; i < NUM_ARRS; ++i)
d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
}
}
//Copies the left and right sides of the computational domain to an appropriate
//boundary zone (does not include the edges and corners of the boundary zone)
__global__ void per_x_sides(Grid d_grid)
{
int ix, ix_bound;
if (threadIdx.x < 3) { //Copy left of the computational domain to the boundary zone at the right
ix = threadIdx.x + d_nx_min;
ix_bound = threadIdx.x + d_nx_max;
}
else { //Copy right of the computational domain to the boundary zone at the left
ix = (threadIdx.x-3) + (d_nx_max - 3);
ix_bound = (d_nx_min-3) +(threadIdx.x-3);
}
int iy,iz;
iz = threadIdx.z + blockIdx.z*blockDim.z + d_nz_min;//Don't add edges
iy = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
int grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
int bound_idx = ix_bound + iy*d_mx + iz*d_mx*d_my;
for (int i=0; i < NUM_ARRS; ++i)
d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
/*//Uncommented this TODO recheck if causes issues
//Normal periodic boundary if shearing is not included
int sid_depth = threadIdx.x;
int sid_y = threadIdx.y;
if (d_LSHEAR == 0 || (d_DELTA_Y > -1.0e-30 && d_DELTA_Y < 1.0e-30) ) { //Meanign if d_DELTA_Y == 0, but reals are not that stable.
if (iy < d_ny_max && iz < d_nz_max) {
int grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
int bound_idx = ix_bound + iy*d_mx + iz*d_mx*d_my;
d_lnrho[bound_idx] = d_lnrho[grid_idx];
d_uu_x[bound_idx] = d_uu_x[grid_idx];
d_uu_y[bound_idx] = d_uu_y[grid_idx];
d_uu_z[bound_idx] = d_uu_z[grid_idx];
}
} else {
if (iy < d_ny_max && iz < d_nz_max) {
int bound_idx = ix_bound + iy*d_mx + iz*d_mx*d_my;
//Allocate shared memory for interpolation arrays
__shared__ real s_coord_interp[INTERP_ORDER][INTERP_NPOINTS][INTERP_DEPTH];
__shared__ real s_val_interp[INTERP_ORDER][INTERP_NPOINTS][INTERP_DEPTH];
//Perform the interpolation in assign the values to the boundaries
d_lnrho[bound_idx] = interp_shear(d_lnrho, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
d_uu_x[bound_idx] = interp_shear(d_uu_x, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
d_uu_y[bound_idx] = interp_shear(d_uu_y, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
d_uu_z[bound_idx] = interp_shear(d_uu_z, ix, iy, iz, sid_depth, sid_y, s_coord_interp, s_val_interp);
}
}
*/
}
//Copy the edges from upper front & back and bottom front & back to
//the appropriate boundary zones
//(Requires thread dims of (32, 3, 3) and blockDims of (ceil((real) d_nx / (real)tpb.x), 1, 4)
__global__ void per_yz_edges(Grid d_grid)
{
int ix, iy, iz;
int grid_idx, bound_idx;
ix = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min; //x index skips the boundary and starts from the computational domain
iy = threadIdx.y + d_ny_min;
iz = threadIdx.z + d_nz_min;
switch(blockIdx.z)
{
case 0: //Copy upper front edge of the computational domain to the boundary zone at bottom back
grid_idx = ix + (iy + d_ny-3)*d_mx + iz*d_mx*d_my;
bound_idx = ix +
(iy-BOUND_SIZE)*d_mx +
(iz+d_nz)*d_mx*d_my;
break;
case 1: //Copy bottom front edge of the computational domain to the boundary zone at upper back
grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
bound_idx = ix +
(iy+d_ny)*d_mx +
(iz+d_nz)*d_mx*d_my;
break;
case 2: //Copy upper back edge of the computational domain to the boundary zone at bottom front
grid_idx = ix + (iy + d_ny-3)*d_mx + (iz + d_nz-3)*d_mx*d_my;
bound_idx = ix +
(iy-BOUND_SIZE)*d_mx +
(iz-BOUND_SIZE)*d_mx*d_my;
break;
case 3: //Copy bottom back edge of the computational domain to the boundary zone at upper front
grid_idx = ix + iy*d_mx + (iz + d_nz-3)*d_mx*d_my;
bound_idx = ix +
(iy+d_ny)*d_mx +
(iz-BOUND_SIZE)*d_mx*d_my;
break;
}
if (ix < d_nx_max) {
for (int i=0; i < NUM_ARRS; ++i)
d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
}
}
//Copy the edges from front left & right and back left & right to
//the appropriate boundary zones
//(Requires thread dims of (3, 32, 3) and blockDims of (1, ceil((real) d_ny / (real)tpb.y), 4))
__global__ void per_xz_edges(Grid d_grid)
{
int ix, iy, iz;
int grid_idx, bound_idx;
ix = threadIdx.x + d_nx_min;
iy = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
iz = threadIdx.z + d_nz_min;
switch(blockIdx.z)
{
case 0: //Copy front left edge of the computational domain to the boundary zone at right back
grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
bound_idx = (ix + d_nx) +
iy*d_mx +
(iz+d_nz)*d_mx*d_my;
break;
case 1: //Copy right front edge of the computational domain to the boundary zone at left back
grid_idx = (ix + d_nx-3) + iy*d_mx + iz*d_mx*d_my;
bound_idx = (ix-BOUND_SIZE) +
iy*d_mx +
(iz+d_nz)*d_mx*d_my;
break;
case 2: //Copy left back edge of the computational domain to the boundary zone at right front
grid_idx = ix + iy*d_mx + (iz + d_nz-3)*d_mx*d_my;
bound_idx = (ix + d_nx) +
iy*d_mx +
(iz-BOUND_SIZE)*d_mx*d_my;
break;
case 3: //Copy right back edge of the computational domain to the boundary zone at left front
grid_idx = (ix + d_nx-3) + iy*d_mx + (iz + d_nz-3)*d_mx*d_my;
bound_idx = (ix-BOUND_SIZE) +
iy*d_mx +
(iz-BOUND_SIZE)*d_mx*d_my;
break;
}
if (iy < d_ny_max) {
for (int i=0; i < NUM_ARRS; ++i)
d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
}
}
//Copy the edges from upper left & right and bottom left & right to
//the appropriate boundary zones
//(Requires thread dims of (3, 3, 32) and blockDims of (1, 4, ceil((real) d_nz / (real)tpb.z)))
__global__ void per_xy_edges(Grid d_grid)
{
int ix, iy, iz;
int grid_idx, bound_idx;
ix = threadIdx.x + d_nx_min;
iy = threadIdx.y + d_ny_min;
iz = threadIdx.z + blockIdx.z*blockDim.z + d_nz_min;
switch(blockIdx.y)
{
case 0: //Copy upper left edge of the computational domain to the boundary zone at bottom right
grid_idx = ix + (iy + d_ny-3)*d_mx + iz*d_mx*d_my;
bound_idx = (ix + d_nx) +
(iy-BOUND_SIZE)*d_mx +
iz*d_mx*d_my;
break;
case 1: //Copy upper right edge of the computational domain to the boundary zone at bottom left
grid_idx = (ix + d_nx-3) + (iy + d_ny-3)*d_mx + iz*d_mx*d_my;
bound_idx = (ix-BOUND_SIZE) +
(iy-BOUND_SIZE)*d_mx +
iz*d_mx*d_my;
break;
case 2: //Copy bottom left edge of the computational domain to the boundary zone at upper right
grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
bound_idx = (ix + d_nx) +
(iy + d_ny)*d_mx +
iz*d_mx*d_my;
break;
case 3: //Copy bottom right edge of the computational domain to the boundary zone at upper left
grid_idx = (ix + d_nx-3) + iy*d_mx + iz*d_mx*d_my;
bound_idx = (ix-BOUND_SIZE) +
(iy + d_ny)*d_mx +
iz*d_mx*d_my;
break;
}
if (iz < d_nz_max) {
for (int i=0; i < NUM_ARRS; ++i)
d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
}
}
//Copies the corners of the computational domain to appropriate boundary areas
//Uses x,y,z to determine the index inside the block and blockIdx.z to determine
//which one of the eight corners to copy.
//(Requires thread dims of (3, 3, 3) and blockDims of (1, 1, 8))
__global__ void per_xyz_corners(Grid d_grid)
{
int ix, iy, iz;
int grid_idx, bound_idx;
ix = threadIdx.x + d_nx_min;
iy = threadIdx.y + d_ny_min;
iz = threadIdx.z + d_nz_min;
switch(blockIdx.z)
{
case 0: //Copy the bottom left front corner to boundary zone at upper right back (x=0, y=0, z=0)
grid_idx = ix + iy*d_mx + iz*d_mx*d_my;
bound_idx = (ix + d_nx) +
(iy + d_ny)*d_mx +
(iz + d_nz)*d_mx*d_my;
break;
case 1: //Copy the bottom left back corner to boundary zone at upper right front (x=0, y=0, z=1)
grid_idx = ix + iy*d_mx + (iz+d_nz-3)*d_mx*d_my;
bound_idx = (ix + d_nx) +
(iy + d_ny)*d_mx +
(iz - BOUND_SIZE)*d_mx*d_my;
break;
case 2: //Copy the upper left front corner to boundary zone at bottom right back (x=0, y=1, z=0)
grid_idx = ix + (iy+d_ny-3)*d_mx + iz*d_mx*d_my;
bound_idx = (ix + d_nx) +
(iy - BOUND_SIZE)*d_mx +
(iz + d_nz)*d_mx*d_my;
break;
case 3: //Copy the upper left back corner to boundary zone at bottom right front (x=0, y=1, z=1)
grid_idx = ix + (iy+d_ny-3)*d_mx + (iz+d_nz-3)*d_mx*d_my;
bound_idx = (ix + d_nx) +
(iy - BOUND_SIZE)*d_mx +
(iz - BOUND_SIZE)*d_mx*d_my;
break;
case 4: //Copy the bottom right front corner to boundary zone at upper left back (Do x=1, y=0, z=0)
grid_idx = (ix+d_nx-3) + iy*d_mx + iz*d_mx*d_my;
bound_idx = (ix - BOUND_SIZE) +
(iy + d_ny)*d_mx +
(iz + d_nz)*d_mx*d_my;
break;
case 5: //Copy the bottom right back corner to boundary zone at upper left front (x=1, y=0, z=1)
grid_idx = (ix+d_nx-3) + iy*d_mx + (iz+d_nz-3)*d_mx*d_my;
bound_idx = (ix - BOUND_SIZE) +
(iy + d_ny)*d_mx +
(iz - BOUND_SIZE)*d_mx*d_my;
break;
case 6: //Copy the upper right front corner to boundary zone at bottom left back (x=1, y=1, z=0)
grid_idx = (ix+d_nx-3) + (iy+d_ny-3)*d_mx + iz*d_mx*d_my;
bound_idx = (ix - BOUND_SIZE) +
(iy - BOUND_SIZE)*d_mx +
(iz + d_nz)*d_mx*d_my;
break;
case 7: //Copy the upper right back corner to boundary zone at bottom left front (x=1, y=1, z=1)
grid_idx = (ix+d_nx-3) +
(iy+d_ny-3)*d_mx +
(iz+d_nz-3)*d_mx*d_my;
bound_idx = (ix - BOUND_SIZE) +
(iy - BOUND_SIZE)*d_mx +
(iz - BOUND_SIZE)*d_mx*d_my;
break;
}
for (int i=0; i < NUM_ARRS; ++i)
d_grid.arr[i][bound_idx] = d_grid.arr[i][grid_idx];
}
//Define boundcond types
#define PERIODIC_BOUNDCONDS 0
#define SHEARING_BOUNDCONDS 1
#define BOUNDCOND_TYPE_X PERIODIC_BOUNDCONDS
#define BOUNDCOND_TYPE_Y PERIODIC_BOUNDCONDS
#define BOUNDCOND_TYPE_Z PERIODIC_BOUNDCONDS
void boundcond_cuda_generic(Grid* d_grid, CParamConfig* cparams, cudaStream_t stream)
{
//Quick summary:
//The point in a 3D cuboid is copied to a location, where the location index is
//offset in 1, 2 or 3 axes
//f.ex.
// -Points that are copied by adding an offset in only one axis
// (for example from the front of the computational domain to the boundary in the back)
// (Functions: per_z_sides, per_x_sides, per_y_sides)
//
// -Points that are offset in two axes, for example the top left edge (not including the corner)
// of the computational domain is copied to the boundary to the bottom right of the grid
// (Functions: per_xy_edges per_xz_edges per_yz_edges)
//
// -Points that are offset in all three axes, e.g. the corners. For example the front top right
// 3*3*3 cube of the computational domain is copied to the boundary zone in back bottom left in the grid.
// (Function: per_xyz_corners)
//
// BOUNDCOND_TYPE_X, BOUNDCOND_TYPE_Y and BOUNDCOND_TYPE_Z are used to determine how
// the boundaries in their respective axis are supposed to be copied.
//--------X BOUNDS---------------
switch (BOUNDCOND_TYPE_X) {
case PERIODIC_BOUNDCONDS: {
//Copy periodic x sides
const dim3 tpb(6, 4, 1);
const dim3 bpg(
1,
(unsigned int)ceil((real) cparams->ny / (real)tpb.y),
(unsigned int)ceil((real) cparams->nz / (real)tpb.z));
per_x_sides<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
//Copy periodic xy edges
if (BOUNDCOND_TYPE_Y == PERIODIC_BOUNDCONDS) {
const dim3 tpb(3, 3, 32);
const dim3 bpg(
1,
4,
(unsigned int)ceil((float) cparams->nz / tpb.z));
per_xy_edges<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
}
//Copy periodic xz edges
if (BOUNDCOND_TYPE_Z == PERIODIC_BOUNDCONDS) {
const dim3 tpb(3, 32, 3);
const dim3 bpg(
1,
(unsigned int)ceil((real) cparams->ny / (real)tpb.y),
4);
per_xz_edges<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
}
//If fully periodic, copy all corners
if ((BOUNDCOND_TYPE_Y == PERIODIC_BOUNDCONDS) && (BOUNDCOND_TYPE_Z == PERIODIC_BOUNDCONDS)) {
const dim3 tpb(3, 3, 3);
const dim3 bpg(1, 1, 8);
per_xyz_corners<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
}
break;
}
default:
printf("INVALID X TYPE IN BOUNDCOND_CUDA!\n");
exit(EXIT_FAILURE);
}
//--------------------------------
//--------Y BOUNDS--------------
switch (BOUNDCOND_TYPE_Y) {
//Do periodic bounds for y sides
case PERIODIC_BOUNDCONDS: {
const dim3 tpb(32, 32, 1);
const dim3 bpg(
(unsigned int)ceil((real) cparams->nx / (real)tpb.x),
(unsigned int)ceil((real) cparams->nz / (real)tpb.y),
6);
per_y_sides<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
//Copy periodic yz edges
if (BOUNDCOND_TYPE_Z == PERIODIC_BOUNDCONDS) {
const dim3 tpb(32, 3, 3);
const dim3 bpg(
(unsigned int)ceil((real) cparams->nx / (real)tpb.x),
1,
4);
per_yz_edges<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
}
break;
}
default:
printf("INVALID Y TYPE IN BOUNDCOND_CUDA!\n");
exit(EXIT_FAILURE);
}
//--------------------------------
//---------Z BOUNDS----------------
switch (BOUNDCOND_TYPE_Z) {
//Do periodic bounds for z sides
case PERIODIC_BOUNDCONDS: {
const dim3 tpb(32, 32, 1);
const dim3 bpg((unsigned int)ceil((real) cparams->nx / (real)tpb.x),
(unsigned int)ceil((real) cparams->ny / (real)tpb.y),
6);
per_z_sides<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
break;
}
default:
printf("INVALID Z TYPE IN BOUNDCOND_CUDA!\n");
exit(EXIT_FAILURE);
}
//--------------------------------
}
void periodic_xy_boundconds_cuda_generic(Grid* d_grid, CParamConfig* cparams, cudaStream_t stream)
{
//Copy periodic x sides
{
const dim3 tpb(6, 4, 1);
const dim3 bpg(1,
(unsigned int)ceil((real) cparams->ny / tpb.y),
(unsigned int)ceil((real) cparams->nz / tpb.z));
per_x_sides<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
}
//Copy periodic xy edges
{
const dim3 tpb(3, 3, 32);
const dim3 bpg(1,
4,
(unsigned int)ceil((real) cparams->nz / tpb.z));
per_xy_edges<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
}
//Copy periodic y sides
{
const dim3 tpb(32, 32, 1);
const dim3 bpg((unsigned int)ceil((real) cparams->nx / tpb.x),
(unsigned int)ceil((real) cparams->nz / tpb.y),
6);
per_y_sides<<<bpg, tpb, 0, stream>>>(*d_grid);
CUDA_ERRCHK_KERNEL();
}
}
|
9dad09c459d3ff0260954ce796ccc9566a7d2b29.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <culibtest.h>
typedef unsigned int(*dfuncptr)(unsigned int);
// __constant__ unsigned int d_twelve = 0;
__device__ unsigned int someDeviceFunction(unsigned int N) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
return idx;
// return d_twelve;
}
__device__ dfuncptr someDeviceFunction_ptr = someDeviceFunction;
int main(int argc, char * argv[]){
hipError_t status = hipSuccess;
printf("helloWorld Example:\n");
culibtest::SomeClass obj = culibtest::SomeClass();
obj.sayHello();
obj.setPrivateInt(12);
printf("privateInt %u\n", obj.getPrivateInt());
//someDeviceFunction_ptr = &someDeviceFunction; // bad
dfuncptr h_someDeviceFunction_ptr;
status = hipMemcpyFromSymbol(&h_someDeviceFunction_ptr, someDeviceFunction_ptr, sizeof(dfuncptr));
if (status != hipSuccess) {
printf("Error, could not get devidce pointer.\n");
return 1;
}
//printf("%p\n", h_someDeviceFunction_ptr);
unsigned int sum = obj.launchRandomKernal(h_someDeviceFunction_ptr, 1024);
unsigned int expected = 523776; // 1023th trianular number
status = hipDeviceSynchronize();
if (hipSuccess != status) {
printf("cuda error %s:%d!\n\t%d:%s\n", __FILE__, __LINE__, status, hipGetErrorString(status));
}
status = hipGetLastError();
if (hipSuccess != status) {
printf("cuda error %s:%d!\n\t%d:%s\n", __FILE__, __LINE__, status, hipGetErrorString(status));
}
printf("sum: %u, expected: %u\n", sum, expected);
unsigned int l_twelve = 0;
status = hipMemcpyFromSymbol(&l_twelve, d_twelve, sizeof(unsigned int));
if (hipSuccess != status) {
printf("cuda error %s:%d!\n\t%d:%s\n", __FILE__, __LINE__, status, hipGetErrorString(status));
}
printf("l_twelve %u\n", l_twelve);
}
| 9dad09c459d3ff0260954ce796ccc9566a7d2b29.cu | #include <stdio.h>
#include <culibtest.h>
typedef unsigned int(*dfuncptr)(unsigned int);
// __constant__ unsigned int d_twelve = 0;
__device__ unsigned int someDeviceFunction(unsigned int N) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
return idx;
// return d_twelve;
}
__device__ dfuncptr someDeviceFunction_ptr = someDeviceFunction;
int main(int argc, char * argv[]){
cudaError_t status = cudaSuccess;
printf("helloWorld Example:\n");
culibtest::SomeClass obj = culibtest::SomeClass();
obj.sayHello();
obj.setPrivateInt(12);
printf("privateInt %u\n", obj.getPrivateInt());
//someDeviceFunction_ptr = &someDeviceFunction; // bad
dfuncptr h_someDeviceFunction_ptr;
status = cudaMemcpyFromSymbol(&h_someDeviceFunction_ptr, someDeviceFunction_ptr, sizeof(dfuncptr));
if (status != cudaSuccess) {
printf("Error, could not get devidce pointer.\n");
return 1;
}
//printf("%p\n", h_someDeviceFunction_ptr);
unsigned int sum = obj.launchRandomKernal(h_someDeviceFunction_ptr, 1024);
unsigned int expected = 523776; // 1023th trianular number
status = cudaDeviceSynchronize();
if (cudaSuccess != status) {
printf("cuda error %s:%d!\n\t%d:%s\n", __FILE__, __LINE__, status, cudaGetErrorString(status));
}
status = cudaGetLastError();
if (cudaSuccess != status) {
printf("cuda error %s:%d!\n\t%d:%s\n", __FILE__, __LINE__, status, cudaGetErrorString(status));
}
printf("sum: %u, expected: %u\n", sum, expected);
unsigned int l_twelve = 0;
status = cudaMemcpyFromSymbol(&l_twelve, d_twelve, sizeof(unsigned int));
if (cudaSuccess != status) {
printf("cuda error %s:%d!\n\t%d:%s\n", __FILE__, __LINE__, status, cudaGetErrorString(status));
}
printf("l_twelve %u\n", l_twelve);
}
|
601f09696dbcf5f5c6e0a944f99f7fbe9ba69e85.hip | // !!! This is a file automatically generated by hipify!!!
#include "rapidjson/document.h"
#include "rapidjson/istreamwrapper.h"
#include "camera.cuh"
#include "hittable.cuh"
#include "material.cuh"
#include "sphere_hip.cuh"
#include "triangle.cuh"
#include <stdio.h>
#include <vector>
#include <fstream>
#include "FileReader.cuh"
using namespace rapidjson;
using namespace std;
void FileReader::read_obj_file(char *dir, vector<hittable*> &vec_obj_list, material *mat_ptr) {
FILE *f;
fopen_s(&f, dir, "r");
if (f == NULL) {
exit(9);
}
vector<vec3> points, p_normals;
//vector<hittable*> vec_objs;
char line[100];
//int obj_id = 0;
if (f != NULL) {
while (fgets(line, 100, f) != NULL) {
int p = 0;
while ((line[p] >= 'a'&&line[p] <= 'z') || (line[p] >= 'A'&&line[p] <= 'Z')) p++;
/*if (line[0] == 'o') {
obj_id++;
}*/
if (line[0] == 'v'&&line[1] != 'n') {
double x, y, z;
sscanf_s(line + p, "%lf%lf%lf", &x, &y, &z);
vec3 temp = vec3(x, y, z);
//cout<<x*500<<' '<<y*500<<' '<<z*500<<endl;
points.push_back(temp);
}
else if (line[0] == 'v'&&line[1] == 'n') {
double x, y, z;
//has_normal = true;
sscanf_s(line + p, "%lf%lf%lf", &x, &y, &z);
vec3 temp = vec3(x, y, z);
//cout<<x*500<<' '<<y*500<<' '<<z*500<<endl;
p_normals.push_back(temp);
}
else if (line[0] == 'f') {
int x, y, z;
sscanf_s(line + p, "%d%d%d", &x, &y, &z);
//list[list_size++]
//if (obj_id == 1)
/*if (mat_ptr->type == type_lambertian) {
vec_objs.push_back(new triangle(points[x - 1], points[y - 1], points[z - 1],
new lambertian(((lambertian*)mat_ptr)->albedo)));
}else if (mat_ptr->type == type_metal) {
vec_objs.push_back(new triangle(points[x - 1], points[y - 1], points[z - 1],
new metal(((metal*)mat_ptr)->albedo)));
}*/
vec_obj_list.push_back(new triangle(points[x - 1], points[y - 1], points[z - 1],
mat_ptr));
}
}
/*list_size = vec_objs.size();
list = (hittable**)malloc(list_size * sizeof(hittable*));
for (int i = 0; i < list_size; i++) {
list[i] = vec_objs[i];
}*/
fclose(f);
}
}
bool FileReader::readfile_to_render(
const char *path, //
int &nx, int &ny, int &ns, //
camera *&c, //
hittable **&obj_list,
int &o_list_size, //
material **&mat_list,
int &m_list_size)
{
ifstream inputfile(path);
IStreamWrapper _TMP_ISW(inputfile);
Document json_tree;
json_tree.ParseStream(_TMP_ISW);
cerr << (nx = json_tree["nx"].GetInt()) << endl;
cerr << (ny = json_tree["ny"].GetInt()) << endl;
cerr << (ns = json_tree["ns"].GetInt()) << endl;
c = new camera(
vec3(
json_tree["camera"]["lookfrom"][0].GetDouble(),
json_tree["camera"]["lookfrom"][1].GetDouble(),
json_tree["camera"]["lookfrom"][2].GetDouble()),
vec3(
json_tree["camera"]["lookat"][0].GetDouble(),
json_tree["camera"]["lookat"][1].GetDouble(),
json_tree["camera"]["lookat"][2].GetDouble()),
vec3(
json_tree["camera"]["vup"][0].GetDouble(),
json_tree["camera"]["vup"][1].GetDouble(),
json_tree["camera"]["vup"][2].GetDouble()),
json_tree["camera"]["vfov"].GetDouble(),
json_tree["camera"]["aspect"].GetDouble(),
json_tree["camera"]["aperture"].GetDouble(),
json_tree["camera"]["focus_dist"].GetDouble());
cerr << (m_list_size = json_tree["materials"].Size()) << endl;
mat_list = new material *[m_list_size];
for (int i = 0; i < m_list_size; i++)
{
if (strcmp(json_tree["materials"][i]["type"].GetString(), "lamberian") == 0)
{
mat_list[i] = new lambertian(
vec3(
json_tree["materials"][i]["albedo"][0].GetDouble(),
json_tree["materials"][i]["albedo"][1].GetDouble(),
json_tree["materials"][i]["albedo"][2].GetDouble()));
}
else if ((strcmp(json_tree["materials"][i]["type"].GetString(), "metal") == 0))
{
mat_list[i] = new metal(
vec3(
json_tree["materials"][i]["albedo"][0].GetDouble(),
json_tree["materials"][i]["albedo"][1].GetDouble(),
json_tree["materials"][i]["albedo"][2].GetDouble()),
json_tree["materials"][i]["fuzz"].GetDouble());
}
else if (((strcmp(json_tree["materials"][i]["type"].GetString(), "dieletric") == 0)))
{
mat_list[i] = new dielectric(
vec3(
json_tree["materials"][i]["albedo"][0].GetDouble(),
json_tree["materials"][i]["albedo"][1].GetDouble(),
json_tree["materials"][i]["albedo"][2].GetDouble()),
json_tree["materials"][i]["ref_idx"].GetDouble());
}
}
int sphere_cnt = json_tree["spheres"].Size(), obj_cnt = json_tree["objfile"].Size();
//cout << (o_list_size = sphere_cnt + obj_cnt) << endl;
//obj_list = new hittable *[sphere_cnt + obj_cnt];
vector<hittable*> vec_obj_list;
for (int i = 0; i < sphere_cnt; i++)
{
//obj_list[i] =
vec_obj_list.push_back(
new sphere(
vec3(
json_tree["spheres"][i]["center"][0].GetDouble(),
json_tree["spheres"][i]["center"][1].GetDouble(),
json_tree["spheres"][i]["center"][2].GetDouble()),
json_tree["spheres"][i]["radius"].GetDouble(),
mat_list[json_tree["spheres"][i]["material"].GetInt() - 1]
)
);
}
//o_list_size = sphere_cnt;
for (int i = 0; i < obj_cnt; i++) {
hittable **tmp_list;
int tmp_list_size;
read_obj_file((char*)(json_tree["objfile"][i]["dir"].GetString()), vec_obj_list, mat_list[json_tree["objfile"][i]["material"].GetInt() - 1]);
//memcpy(obj_list + o_list_size, tmp_list, tmp_list_size * sizeof(hittable*));
//o_list_size += tmp_list_size;
}
o_list_size = vec_obj_list.size();
obj_list = new hittable*[o_list_size];
for (int i = 0; i < o_list_size; i++) {
obj_list[i] = vec_obj_list[i];
}
return 1;
} | 601f09696dbcf5f5c6e0a944f99f7fbe9ba69e85.cu | #include "rapidjson/document.h"
#include "rapidjson/istreamwrapper.h"
#include "camera.cuh"
#include "hittable.cuh"
#include "material.cuh"
#include "sphere.cuh"
#include "triangle.cuh"
#include <stdio.h>
#include <vector>
#include <fstream>
#include "FileReader.cuh"
using namespace rapidjson;
using namespace std;
void FileReader::read_obj_file(char *dir, vector<hittable*> &vec_obj_list, material *mat_ptr) {
FILE *f;
fopen_s(&f, dir, "r");
if (f == NULL) {
exit(9);
}
vector<vec3> points, p_normals;
//vector<hittable*> vec_objs;
char line[100];
//int obj_id = 0;
if (f != NULL) {
while (fgets(line, 100, f) != NULL) {
int p = 0;
while ((line[p] >= 'a'&&line[p] <= 'z') || (line[p] >= 'A'&&line[p] <= 'Z')) p++;
/*if (line[0] == 'o') {
obj_id++;
}*/
if (line[0] == 'v'&&line[1] != 'n') {
double x, y, z;
sscanf_s(line + p, "%lf%lf%lf", &x, &y, &z);
vec3 temp = vec3(x, y, z);
//cout<<x*500<<' '<<y*500<<' '<<z*500<<endl;
points.push_back(temp);
}
else if (line[0] == 'v'&&line[1] == 'n') {
double x, y, z;
//has_normal = true;
sscanf_s(line + p, "%lf%lf%lf", &x, &y, &z);
vec3 temp = vec3(x, y, z);
//cout<<x*500<<' '<<y*500<<' '<<z*500<<endl;
p_normals.push_back(temp);
}
else if (line[0] == 'f') {
int x, y, z;
sscanf_s(line + p, "%d%d%d", &x, &y, &z);
//list[list_size++]
//if (obj_id == 1)
/*if (mat_ptr->type == type_lambertian) {
vec_objs.push_back(new triangle(points[x - 1], points[y - 1], points[z - 1],
new lambertian(((lambertian*)mat_ptr)->albedo)));
}else if (mat_ptr->type == type_metal) {
vec_objs.push_back(new triangle(points[x - 1], points[y - 1], points[z - 1],
new metal(((metal*)mat_ptr)->albedo)));
}*/
vec_obj_list.push_back(new triangle(points[x - 1], points[y - 1], points[z - 1],
mat_ptr));
}
}
/*list_size = vec_objs.size();
list = (hittable**)malloc(list_size * sizeof(hittable*));
for (int i = 0; i < list_size; i++) {
list[i] = vec_objs[i];
}*/
fclose(f);
}
}
bool FileReader::readfile_to_render(
const char *path, // 配置文件的相对路径
int &nx, int &ny, int &ns, // 画布大小,采样次数
camera *&c, // 摄像机
hittable **&obj_list,
int &o_list_size, // 需要渲染的物体的数组和这个数组的长度
material **&mat_list,
int &m_list_size)
{
ifstream inputfile(path);
IStreamWrapper _TMP_ISW(inputfile);
Document json_tree;
json_tree.ParseStream(_TMP_ISW);
cerr << (nx = json_tree["nx"].GetInt()) << endl;
cerr << (ny = json_tree["ny"].GetInt()) << endl;
cerr << (ns = json_tree["ns"].GetInt()) << endl;
c = new camera(
vec3(
json_tree["camera"]["lookfrom"][0].GetDouble(),
json_tree["camera"]["lookfrom"][1].GetDouble(),
json_tree["camera"]["lookfrom"][2].GetDouble()),
vec3(
json_tree["camera"]["lookat"][0].GetDouble(),
json_tree["camera"]["lookat"][1].GetDouble(),
json_tree["camera"]["lookat"][2].GetDouble()),
vec3(
json_tree["camera"]["vup"][0].GetDouble(),
json_tree["camera"]["vup"][1].GetDouble(),
json_tree["camera"]["vup"][2].GetDouble()),
json_tree["camera"]["vfov"].GetDouble(),
json_tree["camera"]["aspect"].GetDouble(),
json_tree["camera"]["aperture"].GetDouble(),
json_tree["camera"]["focus_dist"].GetDouble());
cerr << (m_list_size = json_tree["materials"].Size()) << endl;
mat_list = new material *[m_list_size];
for (int i = 0; i < m_list_size; i++)
{
if (strcmp(json_tree["materials"][i]["type"].GetString(), "lamberian") == 0)
{
mat_list[i] = new lambertian(
vec3(
json_tree["materials"][i]["albedo"][0].GetDouble(),
json_tree["materials"][i]["albedo"][1].GetDouble(),
json_tree["materials"][i]["albedo"][2].GetDouble()));
}
else if ((strcmp(json_tree["materials"][i]["type"].GetString(), "metal") == 0))
{
mat_list[i] = new metal(
vec3(
json_tree["materials"][i]["albedo"][0].GetDouble(),
json_tree["materials"][i]["albedo"][1].GetDouble(),
json_tree["materials"][i]["albedo"][2].GetDouble()),
json_tree["materials"][i]["fuzz"].GetDouble());
}
else if (((strcmp(json_tree["materials"][i]["type"].GetString(), "dieletric") == 0)))
{
mat_list[i] = new dielectric(
vec3(
json_tree["materials"][i]["albedo"][0].GetDouble(),
json_tree["materials"][i]["albedo"][1].GetDouble(),
json_tree["materials"][i]["albedo"][2].GetDouble()),
json_tree["materials"][i]["ref_idx"].GetDouble());
}
}
int sphere_cnt = json_tree["spheres"].Size(), obj_cnt = json_tree["objfile"].Size();
//cout << (o_list_size = sphere_cnt + obj_cnt) << endl;
//obj_list = new hittable *[sphere_cnt + obj_cnt];
vector<hittable*> vec_obj_list;
for (int i = 0; i < sphere_cnt; i++)
{
//obj_list[i] =
vec_obj_list.push_back(
new sphere(
vec3(
json_tree["spheres"][i]["center"][0].GetDouble(),
json_tree["spheres"][i]["center"][1].GetDouble(),
json_tree["spheres"][i]["center"][2].GetDouble()),
json_tree["spheres"][i]["radius"].GetDouble(),
mat_list[json_tree["spheres"][i]["material"].GetInt() - 1]
)
);
}
//o_list_size = sphere_cnt;
for (int i = 0; i < obj_cnt; i++) {
hittable **tmp_list;
int tmp_list_size;
read_obj_file((char*)(json_tree["objfile"][i]["dir"].GetString()), vec_obj_list, mat_list[json_tree["objfile"][i]["material"].GetInt() - 1]);
//memcpy(obj_list + o_list_size, tmp_list, tmp_list_size * sizeof(hittable*));
//o_list_size += tmp_list_size;
}
o_list_size = vec_obj_list.size();
obj_list = new hittable*[o_list_size];
for (int i = 0; i < o_list_size; i++) {
obj_list[i] = vec_obj_list[i];
}
return 1;
} |
876962ffa3c8b6da4237cdad25c1cd90223829b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include <stdio.h>
#include<time.h>
#include <iostream>
#include <stdlib.h>
#include "GpuTimer.h"
using namespace std;
#define BLOCK_SIZE 16
#define TILE_WIDTH 16
void MatrixMultiplyCpu(float* A, float* B, float* C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns)
{
//@@ Insert Your Code Here for the CPU Function to Compute Matrix Maltiply
float sum;
for (int row = 0; row < numARows; row++) {
for (int col = 0; col < numBColumns; col++) {
sum = 0;
for (int i = 0; i < numAColumns; i++) {
sum += A[row*numAColumns + i] * B[i*numBColumns + col];
}
C[row*numBColumns + col] = sum;
}
}
}
__global__ void TiledMatrixMultiplication(float* A, float* B, float* C, int numARows, int numAColumns, int numBRows, int numBColumns , int numCRows, int numCColumns) {
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute the P element
for (int p = 0; p < (numAColumns - 1) / TILE_WIDTH + 1; ++p)
{
if (Row < numARows && p * TILE_WIDTH + tx < numAColumns)
{
ds_M[ty][tx] = A[Row * numAColumns + p * TILE_WIDTH + tx];
}
else {
ds_M[ty][tx] = 0.0;
}
if (p*TILE_WIDTH + ty < numAColumns && Col < numBColumns)
{
ds_N[ty][tx] = B[(p*TILE_WIDTH + ty) * numBColumns + Col];
}
else {
ds_N[ty][tx] = 0.0;
}
__syncthreads();
if (Row < numAColumns && Col < numBColumns)
{
for (int i = 0; i < TILE_WIDTH; ++i)
{
Pvalue += ds_M[ty][i] * ds_N[i][tx];
}
__syncthreads();
} /* end of outer for loop */
if (Row < numAColumns && Col < numBColumns)
C[Row*numBColumns + Col] = Pvalue;
}
}
int main(void)
{
int numARows = 960; // number of rows in the matrix A
int numAColumns = 640; // number of columns in the matrix A
int numBRows = 640; // number of rows in the matrix B
int numBColumns = 800; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
//@@ Insert Your Code Here to Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//Allocate the host memory for the input and output matrices
float *h_A = (float *)malloc(sizeof(float)*numARows*numAColumns);
float *h_B = (float *)malloc(sizeof(float)*numBRows*numBColumns);
float *h_C = (float *)malloc(sizeof(float)*numCRows*numCColumns);
float *h_C_CPU = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//Random Initialize Matrix A.
//There are several ways to do this, such as making functions for manual input or using random numbers.
//In this case, we simply use a for loop to fill the cells with trigonometric values of the indices:
// Set the Seed for the random number generator rand()
//srand(clock());
for (int i = 0; i < numARows; i++)
{
for (int j = 0; j < numAColumns; j++)
{
//h_A[i*numAColumns+j]=(float)rand() /(float)(RAND_MAX)*4.0;
h_A[i*numAColumns + j] = sin(i);
}
}
//Random Initialize Matrix B
for (int i = 0; i < numBRows; i++)
{
for (int j = 0; j < numBColumns; j++)
{
//h_B[i*numBColumns+j]=(float)rand() /(float)(RAND_MAX) *4.0;
h_B[i*numBColumns + j] = cos(j);
}
}
//Allocate memory on the device for input and output matrices and record the needed time
float *d_A, *d_B, *d_C;
hipError_t err = hipSuccess;
GpuTimer timer;
timer.Start();
//@@Insert Your Code Here to allocate memory for d_A, d_B, d_C
err = hipMalloc((void **)&d_A, sizeof(float)*numARows*numAColumns);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device memory for vecotr A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_B, sizeof(float)*numBRows*numBColumns);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device memory for vecotr A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_C, sizeof(float)*numCRows*numCColumns);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device memory for vecotr A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
timer.Stop();
printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed());
//Copy the input matrices A and B from the host to the device and record the needed time
GpuTimer timer1;
timer1.Start();
//@@ Insert Your Code Here to copy matrices A and B from Host to Device
hipMemcpy(d_A, h_A, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);
timer1.Stop();
printf("Time to copy the Matrix from the host to the device is: %f msecs.\n", timer1.Elapsed());
//Do the Processing on the GPU
//@@ Insert Kernel Execution Configuration Parameters
dim3 dimGrid(ceil(numBColumns / (float)BLOCK_SIZE), ceil(numAColumns / (float)BLOCK_SIZE), 1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
//Invoke the BasicMatrixMultiply kernel and record the needed time for its execution
GpuTimer timer2;
timer2.Start();
//@@ Insert Your Code Here for Kernel Invocation
hipLaunchKernelGGL(( TiledMatrixMultiplication) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_A, d_B, d_C, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
timer2.Stop();
printf("Implemented CUDA code ran in: %f msecs.\n", timer2.Elapsed());
//Copy resulting matrix from device to host and record the needed time
GpuTimer timer3;
timer3.Start();
//@@ Insert Your Code Here to Copy the resulting Matrix d_C from device to the Host h_C
hipMemcpy(h_C, d_C, sizeof(float)*numCColumns*numCRows, hipMemcpyDeviceToHost);
timer3.Stop();
printf("Time to copy the resulting Matrix from the device to the host is: %f msecs.\n", timer3.Elapsed());
//Do the Processing on the CPU
clock_t begin = clock();
//@@ Insert Your Code Here to call the CPU function MatrixMultiplyCpu where the resulting matrix is h_C_CPU
MatrixMultiplyCpu(h_A, h_B, h_C_CPU, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
printf("Implemented CPU serial code ran in: %f msecs.\n", time_spent);
//Verify Results Computed by GPU and CPU
for (int i = 0; i < numCRows; i++)
for (int j = 0; j < numCColumns; j++)
if (fabs(h_C_CPU[i*numCColumns + j] - h_C[i*numCColumns + j]) > 1e-2)
{
fprintf(stderr, "Result verification failed at element (%d,%d)!\n", i, j);
exit(EXIT_FAILURE);
}
printf("Test PASSED\n");
//Free host memory
free(h_A);
free(h_B);
free(h_C);
free(h_C_CPU);
//Free device memory
//@@ Insert Your Code Here to Free Device Memory
hipFree(d_A); hipFree(d_B); hipFree(d_C);
return 0;
} | 876962ffa3c8b6da4237cdad25c1cd90223829b4.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include <stdio.h>
#include<time.h>
#include <iostream>
#include <stdlib.h>
#include "GpuTimer.h"
using namespace std;
#define BLOCK_SIZE 16
#define TILE_WIDTH 16
void MatrixMultiplyCpu(float* A, float* B, float* C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns)
{
//@@ Insert Your Code Here for the CPU Function to Compute Matrix Maltiply
float sum;
for (int row = 0; row < numARows; row++) {
for (int col = 0; col < numBColumns; col++) {
sum = 0;
for (int i = 0; i < numAColumns; i++) {
sum += A[row*numAColumns + i] * B[i*numBColumns + col];
}
C[row*numBColumns + col] = sum;
}
}
}
__global__ void TiledMatrixMultiplication(float* A, float* B, float* C, int numARows, int numAColumns, int numBRows, int numBColumns , int numCRows, int numCColumns) {
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute the P element
for (int p = 0; p < (numAColumns - 1) / TILE_WIDTH + 1; ++p)
{
if (Row < numARows && p * TILE_WIDTH + tx < numAColumns)
{
ds_M[ty][tx] = A[Row * numAColumns + p * TILE_WIDTH + tx];
}
else {
ds_M[ty][tx] = 0.0;
}
if (p*TILE_WIDTH + ty < numAColumns && Col < numBColumns)
{
ds_N[ty][tx] = B[(p*TILE_WIDTH + ty) * numBColumns + Col];
}
else {
ds_N[ty][tx] = 0.0;
}
__syncthreads();
if (Row < numAColumns && Col < numBColumns)
{
for (int i = 0; i < TILE_WIDTH; ++i)
{
Pvalue += ds_M[ty][i] * ds_N[i][tx];
}
__syncthreads();
} /* end of outer for loop */
if (Row < numAColumns && Col < numBColumns)
C[Row*numBColumns + Col] = Pvalue;
}
}
int main(void)
{
int numARows = 960; // number of rows in the matrix A
int numAColumns = 640; // number of columns in the matrix A
int numBRows = 640; // number of rows in the matrix B
int numBColumns = 800; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
//@@ Insert Your Code Here to Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//Allocate the host memory for the input and output matrices
float *h_A = (float *)malloc(sizeof(float)*numARows*numAColumns);
float *h_B = (float *)malloc(sizeof(float)*numBRows*numBColumns);
float *h_C = (float *)malloc(sizeof(float)*numCRows*numCColumns);
float *h_C_CPU = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//Random Initialize Matrix A.
//There are several ways to do this, such as making functions for manual input or using random numbers.
//In this case, we simply use a for loop to fill the cells with trigonometric values of the indices:
// Set the Seed for the random number generator rand()
//srand(clock());
for (int i = 0; i < numARows; i++)
{
for (int j = 0; j < numAColumns; j++)
{
//h_A[i*numAColumns+j]=(float)rand() /(float)(RAND_MAX)*4.0;
h_A[i*numAColumns + j] = sin(i);
}
}
//Random Initialize Matrix B
for (int i = 0; i < numBRows; i++)
{
for (int j = 0; j < numBColumns; j++)
{
//h_B[i*numBColumns+j]=(float)rand() /(float)(RAND_MAX) *4.0;
h_B[i*numBColumns + j] = cos(j);
}
}
//Allocate memory on the device for input and output matrices and record the needed time
float *d_A, *d_B, *d_C;
cudaError_t err = cudaSuccess;
GpuTimer timer;
timer.Start();
//@@Insert Your Code Here to allocate memory for d_A, d_B, d_C
err = cudaMalloc((void **)&d_A, sizeof(float)*numARows*numAColumns);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device memory for vecotr A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_B, sizeof(float)*numBRows*numBColumns);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device memory for vecotr A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_C, sizeof(float)*numCRows*numCColumns);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device memory for vecotr A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
timer.Stop();
printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed());
//Copy the input matrices A and B from the host to the device and record the needed time
GpuTimer timer1;
timer1.Start();
//@@ Insert Your Code Here to copy matrices A and B from Host to Device
cudaMemcpy(d_A, h_A, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
timer1.Stop();
printf("Time to copy the Matrix from the host to the device is: %f msecs.\n", timer1.Elapsed());
//Do the Processing on the GPU
//@@ Insert Kernel Execution Configuration Parameters
dim3 dimGrid(ceil(numBColumns / (float)BLOCK_SIZE), ceil(numAColumns / (float)BLOCK_SIZE), 1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
//Invoke the BasicMatrixMultiply kernel and record the needed time for its execution
GpuTimer timer2;
timer2.Start();
//@@ Insert Your Code Here for Kernel Invocation
TiledMatrixMultiplication <<<dimGrid, dimBlock >>> (d_A, d_B, d_C, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
timer2.Stop();
printf("Implemented CUDA code ran in: %f msecs.\n", timer2.Elapsed());
//Copy resulting matrix from device to host and record the needed time
GpuTimer timer3;
timer3.Start();
//@@ Insert Your Code Here to Copy the resulting Matrix d_C from device to the Host h_C
cudaMemcpy(h_C, d_C, sizeof(float)*numCColumns*numCRows, cudaMemcpyDeviceToHost);
timer3.Stop();
printf("Time to copy the resulting Matrix from the device to the host is: %f msecs.\n", timer3.Elapsed());
//Do the Processing on the CPU
clock_t begin = clock();
//@@ Insert Your Code Here to call the CPU function MatrixMultiplyCpu where the resulting matrix is h_C_CPU
MatrixMultiplyCpu(h_A, h_B, h_C_CPU, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
printf("Implemented CPU serial code ran in: %f msecs.\n", time_spent);
//Verify Results Computed by GPU and CPU
for (int i = 0; i < numCRows; i++)
for (int j = 0; j < numCColumns; j++)
if (fabs(h_C_CPU[i*numCColumns + j] - h_C[i*numCColumns + j]) > 1e-2)
{
fprintf(stderr, "Result verification failed at element (%d,%d)!\n", i, j);
exit(EXIT_FAILURE);
}
printf("Test PASSED\n");
//Free host memory
free(h_A);
free(h_B);
free(h_C);
free(h_C_CPU);
//Free device memory
//@@ Insert Your Code Here to Free Device Memory
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
return 0;
} |
73718559fdefc38f03aa4d047844e9f058abe02a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,int var_7,int var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp > -1.0797E35f / +1.6495E36f - ldexpf((var_1 + cosf(+1.7200E-44f)), 2)) {
if (comp > (var_2 + var_3)) {
if (comp > var_4 - var_5 + var_6) {
comp += (var_9 * asinf(atanf(var_10 * -0.0f - (-1.8797E-42f / var_11))));
comp = (+1.6700E36f - -1.0115E-13f * +0.0f / (var_12 / (var_13 + +1.0467E36f)));
if (comp < +1.6480E-43f / (+0.0f * var_14)) {
comp += (var_15 - +1.8852E14f + var_16);
comp = (var_17 - (+0.0f - (var_18 + var_19 * +1.6983E-5f)));
float tmp_1 = (+0.0f - cosf((+1.9261E35f + tanhf(fabsf(var_20 + var_21 * +1.1241E36f)))));
comp = tmp_1 - +0.0f + (var_22 + var_23);
}
for (int i=0; i < var_7; ++i) {
comp = var_24 * (-1.2225E-44f - (-0.0f + (var_25 / +1.4149E-43f)));
}
for (int i=0; i < var_8; ++i) {
comp += var_26 * var_27;
float tmp_2 = +1.0493E-44f;
comp += tmp_2 * var_28 * (-1.1704E-36f + fabsf(var_29 * (-1.0898E10f * (-1.4185E-37f - +1.9937E25f * -1.7456E36f * var_30))));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
int tmp_8 = atoi(argv[8]);
int tmp_9 = atoi(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31);
hipDeviceSynchronize();
return 0;
}
| 73718559fdefc38f03aa4d047844e9f058abe02a.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,int var_7,int var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp > -1.0797E35f / +1.6495E36f - ldexpf((var_1 + cosf(+1.7200E-44f)), 2)) {
if (comp > (var_2 + var_3)) {
if (comp > var_4 - var_5 + var_6) {
comp += (var_9 * asinf(atanf(var_10 * -0.0f - (-1.8797E-42f / var_11))));
comp = (+1.6700E36f - -1.0115E-13f * +0.0f / (var_12 / (var_13 + +1.0467E36f)));
if (comp < +1.6480E-43f / (+0.0f * var_14)) {
comp += (var_15 - +1.8852E14f + var_16);
comp = (var_17 - (+0.0f - (var_18 + var_19 * +1.6983E-5f)));
float tmp_1 = (+0.0f - cosf((+1.9261E35f + tanhf(fabsf(var_20 + var_21 * +1.1241E36f)))));
comp = tmp_1 - +0.0f + (var_22 + var_23);
}
for (int i=0; i < var_7; ++i) {
comp = var_24 * (-1.2225E-44f - (-0.0f + (var_25 / +1.4149E-43f)));
}
for (int i=0; i < var_8; ++i) {
comp += var_26 * var_27;
float tmp_2 = +1.0493E-44f;
comp += tmp_2 * var_28 * (-1.1704E-36f + fabsf(var_29 * (-1.0898E10f * (-1.4185E-37f - +1.9937E25f * -1.7456E36f * var_30))));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
int tmp_8 = atoi(argv[8]);
int tmp_9 = atoi(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31);
cudaDeviceSynchronize();
return 0;
}
|
8dd4e649e4c8960b2a7399d9f2493a3d5b026e33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 512 // Falla a partir de 2048 porque 2048/2 es el nmero mximo de hebras por bloque
// Realiza la suma secuencial de los valores del vector
float sumaSecuencial(float *vector)
{
float fResultado = 0.0;
for (int iPos = 0; iPos < N; iPos++)
fResultado += vector[iPos];
return fResultado;
}
// Kernell CUDA para la suma de los valores del vector
__global__ void sumaParalela(float *vector, int n)
{
__shared__ float vectorComp[N];
vectorComp[threadIdx.x] = vector[threadIdx.x];
if (threadIdx.x + blockDim.x < n)
vectorComp[threadIdx.x + blockDim.x] = vector[threadIdx.x + blockDim.x];
__syncthreads();
for (unsigned int iPos = n >> 1; iPos >= 1; iPos = iPos >> 1)
{
if (threadIdx.x < iPos)
vectorComp[threadIdx.x] += vectorComp[threadIdx.x + iPos];
__syncthreads();
}
if (threadIdx.x == 0){
vector[0] = vectorComp[0];
}
}
int main(void)
{
float host_v[N];
float fResultadoParalelo, fResultadoSecuencial;
float *dev_v;
// Se llena de forma aleatoria el vector sobre el que se realiza la suma
srand((unsigned) time(NULL));
for (int i = 0; i < N; i++)
host_v[i] = floorf(100*(rand()/(float)RAND_MAX));
// Pedir memoria en el Device para el vector a sumar (dev_v)
/* COMPLETAR */
hipMalloc((void **)&dev_v, N*sizeof(float));
// Transferir el vector del Host al Device
/* COMPLETAR */
hipMemcpy(dev_v, host_v, N*sizeof(float), hipMemcpyHostToDevice);
int threads = (N / 2) + N % 2;
// Llamar al kernell CUDA
/* COMPLETAR */
hipLaunchKernelGGL(( sumaParalela), dim3(1), dim3(threads), 0, 0, dev_v, N);
// Copiar el resultado de la operacin del Device al Host
/* COMPLETAR */
hipMemcpy(&fResultadoParalelo, dev_v, sizeof(float), hipMemcpyDeviceToHost);
// Se comprueba que el resultado es correcto y se muestra un mensaje
fResultadoSecuencial = sumaSecuencial(host_v);
if (fResultadoParalelo == fResultadoSecuencial)
printf("Operacion correcta\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
else
printf("Operacion INCORRECTA\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
// Librerar la memoria solicitada en el Device
/* COMPLETAR */
hipFree(dev_v);
return 0;
} | 8dd4e649e4c8960b2a7399d9f2493a3d5b026e33.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 512 // Falla a partir de 2048 porque 2048/2 es el número máximo de hebras por bloque
// Realiza la suma secuencial de los valores del vector
float sumaSecuencial(float *vector)
{
float fResultado = 0.0;
for (int iPos = 0; iPos < N; iPos++)
fResultado += vector[iPos];
return fResultado;
}
// Kernell CUDA para la suma de los valores del vector
__global__ void sumaParalela(float *vector, int n)
{
__shared__ float vectorComp[N];
vectorComp[threadIdx.x] = vector[threadIdx.x];
if (threadIdx.x + blockDim.x < n)
vectorComp[threadIdx.x + blockDim.x] = vector[threadIdx.x + blockDim.x];
__syncthreads();
for (unsigned int iPos = n >> 1; iPos >= 1; iPos = iPos >> 1)
{
if (threadIdx.x < iPos)
vectorComp[threadIdx.x] += vectorComp[threadIdx.x + iPos];
__syncthreads();
}
if (threadIdx.x == 0){
vector[0] = vectorComp[0];
}
}
int main(void)
{
float host_v[N];
float fResultadoParalelo, fResultadoSecuencial;
float *dev_v;
// Se llena de forma aleatoria el vector sobre el que se realiza la suma
srand((unsigned) time(NULL));
for (int i = 0; i < N; i++)
host_v[i] = floorf(100*(rand()/(float)RAND_MAX));
// Pedir memoria en el Device para el vector a sumar (dev_v)
/* COMPLETAR */
cudaMalloc((void **)&dev_v, N*sizeof(float));
// Transferir el vector del Host al Device
/* COMPLETAR */
cudaMemcpy(dev_v, host_v, N*sizeof(float), cudaMemcpyHostToDevice);
int threads = (N / 2) + N % 2;
// Llamar al kernell CUDA
/* COMPLETAR */
sumaParalela<<< 1, threads>>>(dev_v, N);
// Copiar el resultado de la operación del Device al Host
/* COMPLETAR */
cudaMemcpy(&fResultadoParalelo, dev_v, sizeof(float), cudaMemcpyDeviceToHost);
// Se comprueba que el resultado es correcto y se muestra un mensaje
fResultadoSecuencial = sumaSecuencial(host_v);
if (fResultadoParalelo == fResultadoSecuencial)
printf("Operacion correcta\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
else
printf("Operacion INCORRECTA\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
// Librerar la memoria solicitada en el Device
/* COMPLETAR */
cudaFree(dev_v);
return 0;
} |
210e55d045debb11cbc8239f705a828e4ae405eb.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@google.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "basicHeads.h"
#include "Global.h"
namespace cytonLib
{
cudnnDataType_t cudnnDataType =CUDNN_DATA_FLOAT;
// Define some error checking macros.
hipError_t checkError_(hipError_t stat, const char *file, int line)
{
if (stat != hipSuccess)
{
string tErr=hipGetErrorString(stat);
if(tErr!="driver shutting down")
{
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
assert(false);
exit(1);
}
else
{
}
}
return stat;
}
cudnnStatus_t checkError_(cudnnStatus_t stat, const char *file, int line)
{
if (stat != CUDNN_STATUS_SUCCESS)
{
fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line);
assert(false);
exit(1);
}
return stat;
}
hipblasStatus_t checkError_(hipblasStatus_t stat, const char *file, int line)
{
if (stat != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "cublas Error: %d %s %d\n", stat, file, line);
assert(false);
exit(1);
}
return stat;
}
hiprandStatus_t checkError_(hiprandStatus_t stat, const char *file, int line)
{
if (stat != HIPRAND_STATUS_SUCCESS)
{
fprintf(stderr, "hiprand Error: %s %d\n", file, line);
assert(false);
exit(1);
}
return stat;
}
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
cusolverStatus_t checkError_(cusolverStatus_t stat, const char *file, int line)
{
if(CUSOLVER_STATUS_SUCCESS != stat) {
fprintf(stderr, "cusolver error: %s %d, error %d %s\n", file, line,
stat, _cusolverGetErrorEnum(stat));
assert(0);
}
return stat;
}
void checkFile(ifstream& f, const string t)
{
string line;
while(getline(f, line))
{
if(!line.empty())
{
break;
}
}
bool right=line==t;
assert(right);
}
}
| 210e55d045debb11cbc8239f705a828e4ae405eb.cu | /*
Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@google.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "basicHeads.h"
#include "Global.h"
namespace cytonLib
{
cudnnDataType_t cudnnDataType =CUDNN_DATA_FLOAT;
// Define some error checking macros.
cudaError_t checkError_(cudaError_t stat, const char *file, int line)
{
if (stat != cudaSuccess)
{
string tErr=cudaGetErrorString(stat);
if(tErr!="driver shutting down")
{
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
assert(false);
exit(1);
}
else
{
}
}
return stat;
}
cudnnStatus_t checkError_(cudnnStatus_t stat, const char *file, int line)
{
if (stat != CUDNN_STATUS_SUCCESS)
{
fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line);
assert(false);
exit(1);
}
return stat;
}
cublasStatus_t checkError_(cublasStatus_t stat, const char *file, int line)
{
if (stat != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "cublas Error: %d %s %d\n", stat, file, line);
assert(false);
exit(1);
}
return stat;
}
curandStatus_t checkError_(curandStatus_t stat, const char *file, int line)
{
if (stat != CURAND_STATUS_SUCCESS)
{
fprintf(stderr, "curand Error: %s %d\n", file, line);
assert(false);
exit(1);
}
return stat;
}
// Translates a cusolverStatus_t into a printable identifier.
// Any code not listed below maps to "<unknown>".
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
	switch (error)
	{
	case CUSOLVER_STATUS_SUCCESS:          return "CUSOLVER_SUCCESS";
	case CUSOLVER_STATUS_NOT_INITIALIZED:  return "CUSOLVER_STATUS_NOT_INITIALIZED";
	case CUSOLVER_STATUS_ALLOC_FAILED:     return "CUSOLVER_STATUS_ALLOC_FAILED";
	case CUSOLVER_STATUS_INVALID_VALUE:    return "CUSOLVER_STATUS_INVALID_VALUE";
	case CUSOLVER_STATUS_ARCH_MISMATCH:    return "CUSOLVER_STATUS_ARCH_MISMATCH";
	case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED";
	case CUSOLVER_STATUS_INTERNAL_ERROR:   return "CUSOLVER_STATUS_INTERNAL_ERROR";
	case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
		return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
	default:
		return "<unknown>";
	}
}
// Aborts on any cusolver failure, printing both the numeric status and its
// symbolic name via _cusolverGetErrorEnum; returns the status otherwise.
cusolverStatus_t checkError_(cusolverStatus_t stat, const char *file, int line)
{
	if(CUSOLVER_STATUS_SUCCESS != stat) {
		fprintf(stderr, "cusolver error: %s %d, error %d %s\n", file, line,
				stat, _cusolverGetErrorEnum(stat));
		assert(0);
	}
	return stat;
}
// Consumes leading blank lines of f and asserts that the first non-empty
// line matches the expected string t.
void checkFile(ifstream& f, const string t)
{
	string got;
	for (;;)
	{
		if (!getline(f, got))
			break;          // EOF/stream failure: got is left empty
		if (!got.empty())
			break;          // first line with content found
	}
	assert(got == t);
}
}
|
1eb3baf66a31bc7bdfe0de94f16c8ced4bf3141e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <rocblas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
/* Returns true if the last cuBLAS call failed, printing the numeric status.
 * Uses the legacy cublas API's global error state. */
inline bool check_cublas_error() {
    cublasStatus status = hipblasGetError();
    if (status != HIPBLAS_STATUS_SUCCESS)
        printf("cublas error: %d\n", status) ;
    return status != HIPBLAS_STATUS_SUCCESS;
}
/* Returns true if the runtime has a pending error, printing its description.
 * Note hipGetLastError also clears the pending error state. */
inline bool checkCUDAError() {
    hipError_t err = hipGetLastError();
    if (hipSuccess != err)
        printf("%s\n", hipGetErrorString( err));
    return hipSuccess != err;
}
/* Returns the description of the last runtime error (and clears it). */
extern const char* get_last_cuda_error() {
    hipError_t err = hipGetLastError();
    return hipGetErrorString( err);
}
/* Initializes the (legacy) cuBLAS library.
 * Returns 0 on success, CUBLAS_ERROR otherwise. */
extern int cublas_init() {
    hipblasInit();
    if (check_cublas_error())
        return CUBLAS_ERROR;
    else
        return 0;
}
/* Shuts down cuBLAS and resets the device.  Always reports success. */
extern int cublas_shutdown() {
    hipblasShutdown();
    hipDeviceReset();
    return 0;
}
/* Selects the active GPU for this host thread.
 * Returns 0 on success, CUDA_ERROR otherwise. */
extern int cuda_set_device(int deviceId) {
    hipSetDevice(deviceId);
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Loads NUM_RND_STREAMS random-stream multiplier constants from the text
 * file at cudamatpath, uploads them to the device, and seeds the per-stream
 * RNG state with kSeedRandom.
 * Returns 0 on success, ERROR_GENERIC if the constants file cannot be read,
 * or CUDA_ERROR on a device failure.
 * Fixes vs. original: fopen result was never checked (NULL deref in fscanf
 * on a bad path), fscanf return was ignored (silent garbage on a truncated
 * file), and host_mults was leaked. */
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
    unsigned int * host_mults;
    host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
    if (!host_mults)
        return ERROR_GENERIC;
    FILE * pFile;
    pFile = fopen (cudamatpath,"r");
    if (!pFile) {
        free(host_mults);
        return ERROR_GENERIC;
    }
    for (int i = 0; i < NUM_RND_STREAMS; i++) {
        if (fscanf (pFile, "%u", &host_mults[i]) != 1) {
            // Truncated or malformed constants file.
            fclose (pFile);
            free(host_mults);
            return ERROR_GENERIC;
        }
    }
    fclose (pFile);
    hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
    hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
    hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
    free(host_mults);  // was leaked in the original
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed);
    hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
/* Leading dimension honours the transpose flag: stored rows for a normal
 * matrix, stored columns for a transposed one. */
extern int get_leading_dimension(cudamat* mat) {
    return mat->is_trans ? mat->size[1] : mat->size[0];
}
/* The other logical dimension, again honouring the transpose flag. */
extern int get_nonleading_dimension(cudamat* mat) {
    return mat->is_trans ? mat->size[0] : mat->size[1];
}
/* Marks the matrix as (not) transposed; no data is moved. */
extern void set_transpose(cudamat* mat, int is_trans) {
    mat->is_trans = is_trans;
}
/* Transpose flag in the single-character form used by legacy hipblasSgemm. */
inline char get_transpose_char(cudamat* mat) {
    return mat->is_trans ? 't' : 'n';
}
/* Blocks the host until all previously launched device work has finished. */
extern void cuda_sync_threads() {
    hipDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
/* Allocates device storage sized to the matrix and marks it device-resident.
 * Host data, if any, is not copied. */
extern int allocate_device_memory(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    cublasStatus stat;
    stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
    if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();  // also clear any pending runtime error
        return CUBLAS_ERROR;
    }
    mat->on_device = 1;
    return 0;
}
/* Downloads the device contents into the existing host buffer.
 * NOTE(review): assumes data_host is already allocated -- confirm callers. */
extern int copy_to_host(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    if (mat->on_device) {
        hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
        if (check_cublas_error())
            return CUBLAS_ERROR;
    } else
        return ERROR_NOT_ON_DEVICE;
    return 0;
}
/* Uploads the host buffer to the device, allocating device memory first if
 * the matrix is not resident there yet. */
extern int copy_to_device(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    int err_code = 0;
    //if (!mat->owns_data)
    //    return VIEW_ERROR;
    if (!mat->on_device) {
        err_code = allocate_device_memory(mat);
        if (err_code)
            return err_code;
    }
    hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
/* Device-to-device copy of mat1 into mat2; shapes must match exactly.
 * NOTE(review): on_device is not validated here -- confirm callers. */
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
    int len = mat1->size[0]*mat1->size[1];
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    else
        return 0;
}
/* Copies rows [start, end) of source (all columns) into target. */
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = source->size[0];
    int width = source->size[1];
    if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // Grid tiled in 32-wide chunks over the row slice and the columns.
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Writes source into rows [start, end) of target (inverse of get_row_slice). */
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = target->size[0];
    int width = target->size[1];
    if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    hipLaunchKernelGGL(( kSetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Writes the transpose of source into target via the kTranspose kernel.
 * target must be pre-shaped as the transpose of source.
 * Fix vs. original: the SYNC_THREADS-guarded hipDeviceSynchronize() that
 * every sibling kernel wrapper performs before checkCUDAError() was missing,
 * so execution errors (not just launch errors) went undetected here. */
extern int copy_transpose(cudamat* source, cudamat* target) {
    unsigned int height = source->size[0];
    unsigned int width = source->size[1];
    if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // setup execution parameters: one COPY_BLOCK_SIZE^2 tile per block,
    // rounding the grid up so partial tiles at the edges are covered
    unsigned int grid_x = height / COPY_BLOCK_SIZE;
    if (height % COPY_BLOCK_SIZE)
        grid_x++;
    unsigned int grid_y = width / COPY_BLOCK_SIZE;
    if (width % COPY_BLOCK_SIZE)
        grid_y++;
    dim3 grid(grid_x, grid_y, 1);
    dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
    hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Releases device storage iff this matrix owns it and it is resident on the
 * device; views (owns_data == 0) are left alone. */
extern int free_device_memory(cudamat* mat) {
    if (mat->owns_data && mat->on_device) {
        cublasStatus stat;
        stat = hipblasFree(mat->data_device);
        mat->on_device = 0;  // cleared even if the free itself failed
        if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
            return CUBLAS_ERROR;
    }
    return 0;
}
/* Relabels mat as an m x n matrix in place.  The underlying buffer is
 * untouched, so the total element count must stay the same. */
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
    unsigned int current = (unsigned int)(mat->size[0] * mat->size[1]);
    unsigned int requested = m * n;
    if (current != requested)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
/* Makes target a zero-copy view of columns [first_col, last_col) of source.
 * Columns are contiguous in the device buffer, so the view is just a pointer
 * offset.  The view does not own the data (owns_data = 0). */
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (last_col > source->size[1] || (first_col >= last_col))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_col * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->size[0] = source->size[0];
    target->size[1] = last_col - first_col;
    target->is_trans = 0;
    target->owns_data = 0;
    return 0;
}
/* Same idea for a vector (a single row or single column): target becomes a
 * non-owning view of elements [first_ind, last_ind), shaped to match the
 * source's orientation. */
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
    // source must be a vector
    if (source->size[0] > 1 && source->size[1] > 1)
        return ERROR_GENERIC;
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (first_ind >= last_ind)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_ind * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->is_trans = 0;
    target->owns_data = 0;
    if (source->size[0] > 1) {
        // column vector: slice along rows
        if (last_ind > source->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = last_ind - first_ind;
        target->size[1] = 1;
    } else {
        // row vector (or 1x1): slice along columns
        if (last_ind > source->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = 1;
        target->size[1] = last_ind - first_ind;
    }
    return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
/* Wraps an existing host buffer in a cudamat descriptor (m rows, n cols).
 * No device memory is allocated or copied here; call copy_to_device for
 * that.  The descriptor claims ownership of the buffer (owns_data = 1). */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
    mat->data_host = data;
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_device = 0;
    mat->on_host = 1;
    mat->is_trans = 0;
    mat->owns_data = 1;
}
/* Creates an uninitialized m x n matrix directly on the device (no host
 * buffer is attached).  Returns allocate_device_memory's error code. */
extern int init_empty(cudamat* mat, int m, int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_device = 0;
    mat->on_host = 0;
    mat->is_trans = 0;
    mat->owns_data = 1;
    return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
/* Fills mat in place with uniform random samples drawn from the streams
 * seeded by init_random (exact range defined by kRandomUniform). */
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Fills mat in place with Gaussian random samples (kRandomGaussian). */
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
/* target = mat + vec with the h x 1 column vector broadcast across every
 * column (kAddColVector).
 * NOTE(review): this family of wrappers does not validate
 * target->on_device -- confirm callers guarantee it. */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kAddColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError()) {
        return CUDA_ERROR;
    }
    return 0;
}
/* target = mat + mult * vec, column vector broadcast across columns
 * (kAddColMult). */
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kAddColMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat + vec with the 1 x w row vector broadcast down every row
 * (kAddRowVector). */
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat * vec elementwise, column vector broadcast across columns
 * (kMultByColVector). */
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat * vec elementwise, row vector broadcast down rows
 * (kMultByRowVector). */
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise comparison mat1 < mat2 written into target (kLessThan).
 * Fix vs. original: target->on_device was not checked, unlike the sibling
 * greater_than/equals wrappers, so a host-only target reached the kernel. */
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kLessThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise comparison mat < val written into target (kLessThanScalar). */
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise comparison mat1 > mat2 written into target (kGreaterThan). */
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise comparison mat > val written into target (kGreaterThanScalar). */
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise comparison mat1 == mat2 written into target (kEquals).
 * Fix vs. original: the post-launch hipDeviceSynchronize() was
 * unconditional; every other kernel wrapper in this file guards it with
 * SYNC_THREADS, so this one now follows the same convention. */
extern int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kEquals), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise comparison mat == val written into target (kEqualsScalar).
 * Fix vs. original: hipDeviceSynchronize() was unconditional here; it is
 * now guarded by SYNC_THREADS like every other wrapper in this file. */
extern int equals_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kEqualsScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Column-wise maximum of mat into the 1 x w vector target.  Only axis == 0
 * is implemented; other axes return ERROR_UNSUPPORTED.  Launch shape: one
 * 32-thread block per column. */
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kMaxColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
        if (SYNC_THREADS)
            hipDeviceSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise sign of mat written into target (kSign). */
extern int sign(cudamat* mat, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Unary elementwise wrappers: each applies its kernel to mat and writes
 * the result into target (mat and target must have identical shapes). */
/* Elementwise sigmoid (kApplySigmoid). */
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise tanh (kApplyTanh). */
extern int apply_tanh(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise absolute value (kApplyAbs). */
extern int apply_abs(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise log(1 + exp(x)), i.e. softplus (kApplyLog1PlusExp). */
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise natural logarithm (kLog). */
extern int apply_log(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise exponential (kExp). */
extern int apply_exp(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise square root (kSqrt). */
extern int apply_sqrt(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise power with a scalar exponent (kPow). */
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise power with a per-element exponent matrix (kPowMatrix). */
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise reciprocal 1/x (kReciprocal). */
extern int reciprocal(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = alpha * mat1 * mat2 + beta * target via legacy hipblasSgemm.
 * Transposed operands are handled through their is_trans flags; leading
 * dimensions passed to gemm are the stored (untransposed) row counts.
 * Fix vs. original: the dimension-mismatch diagnostic said "WTF HAPPENED!!"
 * -- replaced with a message that names the function and the problem. */
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
        get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
        get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
        printf("dot: incompatible matrix dimensions\n");
        printf("mat1 dimensions: (%d x %d): \n", get_leading_dimension(mat1), get_nonleading_dimension(mat1));
        printf("mat2 dimensions: (%d x %d): \n", get_leading_dimension(mat2), get_nonleading_dimension(mat2));
        printf("target dimensions: (%d x %d): \n", get_leading_dimension(target), get_nonleading_dimension(target));
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }
    int m = get_leading_dimension(mat1),
        k = get_leading_dimension(mat2),
        n = get_nonleading_dimension(mat2);
    hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
                 m, n, k,
                 alpha, mat1->data_device, mat1->size[0],
                 mat2->data_device, mat2->size[0],
                 beta, target->data_device, target->size[0]);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return 0;
}
/* Inner product of the two matrices viewed as flat vectors (hipblasSdot).
 * On failure *err_code is set and the float return value is meaningless.
 * Fix vs. original: the not-on-device path returned the int error constant
 * AS the float result and left *err_code unset, so callers checking
 * *err_code saw success with a garbage value.  All error paths now report
 * through *err_code, matching the other branches of this function. */
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
    int len = mat1->size[0]*mat1->size[1];
    float res;
    if (!mat1->on_device || !mat2->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return 0;
    }
    if (mat1->is_trans != mat2->is_trans) {
        *err_code = ERROR_TRANSPOSEDNESS;
        return 0;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
        *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
        return 0;
    }
    res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}
/* Perform the operation mat1 = mat1 + alpha * mat2 (in place, via saxpy).
   mat1 and mat2 must have the same transposedness. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
/* target = mat1 + mat2 elementwise.  The in-place case (target == mat1)
 * is routed through saxpy with alpha = 1 instead of the kAdd kernel. */
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat1 == target) {
        hipblasSaxpy(len, 1, mat2->data_device, 1, mat1->data_device, 1);
        if (check_cublas_error())
            return CUBLAS_ERROR;
    } else {
        hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
        if (SYNC_THREADS)
            hipDeviceSynchronize();
        if (checkCUDAError())
            return CUDA_ERROR;
    }
    return 0;
}
/* target = mat1 - mat2 elementwise (kSubtract). */
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat1 / mat2 elementwise (kDivide). */
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise multiplication of 2 matrices: target = mat1 * mat2 (kMult). */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Sets every element of mat to alpha in place (kAssignScalar). */
extern int assign_scalar(cudamat* mat, float alpha) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    hipLaunchKernelGGL(( kAssignScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = alpha * mat elementwise.  The in-place case (target == mat) is
 * routed through cublas sscal instead of the kMultScalar kernel. */
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat == target) {
        hipblasSscal(len, alpha, mat->data_device, 1);
        if (check_cublas_error())
            return CUBLAS_ERROR;
    } else {
        hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
        if (SYNC_THREADS)
            hipDeviceSynchronize();
        if (checkCUDAError())
            return CUDA_ERROR;
    }
    return 0;
}
/* target = mat / alpha elementwise (kDivideScalar). */
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat + alpha elementwise (kAddScalar). */
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Compute the Euclidean (L2) norm over all elements of mat.
 * On success sets *err_code = 0 and returns the norm; on CUBLAS failure
 * sets *err_code = CUBLAS_ERROR and returns -1. */
extern float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
/* Bug fix: validate device residency BEFORE handing data_device to
 * CUBLAS — the original called hipblasSnrm2 first, touching a possibly
 * unallocated device pointer, and left *err_code unset on this path. */
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return ERROR_NOT_ON_DEVICE;
}
float res = hipblasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Gather rows of source (row indices given by the 1 x n matrix `indices`,
 * stored as floats) into target. One warp-sized block slice per 32 rows.
 * NOTE(review): unlike most routines here, on_device is not validated —
 * callers must guarantee all three matrices are on the device. */
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Scatter rows of source into target at the row indices given by the
 * 1 x n matrix `indices` (inverse of selectRows). */
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
}
| 1eb3baf66a31bc7bdfe0de94f16c8ced4bf3141e.cu | #include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
/* Returns true (and prints the status) if the last CUBLAS call failed. */
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
if (status != CUBLAS_STATUS_SUCCESS)
printf("cublas error: %d\n", status) ;
return status != CUBLAS_STATUS_SUCCESS;
}
/* Returns true (and prints the message) if a CUDA error is pending.
 * cudaGetLastError clears the sticky error state as a side effect. */
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
/* Returns the message for the last CUDA error (also clears it). */
extern const char* get_last_cuda_error() {
cudaError_t err = cudaGetLastError();
return cudaGetErrorString( err);
}
/* Initialize CUBLAS. Returns 0 on success, CUBLAS_ERROR otherwise. */
extern int cublas_init() {
cublasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
/* Tear down CUBLAS and the CUDA context for this host thread. */
extern int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
/* Bind this host thread to the given CUDA device. */
extern int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Seed the on-device RNG streams. Reads NUM_RND_STREAMS unsigned multiplier
 * constants from the text file at cudamatpath, uploads them, and runs the
 * kSeedRandom kernel. Returns 0 on success, ERROR_GENERIC on file/alloc
 * problems, CUDA_ERROR on kernel failure. */
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
if (!host_mults)
return ERROR_GENERIC;
FILE * pFile;
pFile = fopen (cudamatpath,"r");
/* Bug fix: the original never checked fopen, so a bad path crashed inside
 * fscanf; a short/garbled file also went undetected. */
if (!pFile) {
free(host_mults);
return ERROR_GENERIC;
}
for (int i = 0; i < NUM_RND_STREAMS; i++) {
if (fscanf (pFile, "%u", &host_mults[i]) != 1) {
fclose(pFile);
free(host_mults);
return ERROR_GENERIC;
}
}
fclose (pFile);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
/* Fix leak: the host-side copy is no longer needed after the upload. */
free(host_mults);
//cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
/* Leading dimension as stored: rows normally, columns when transposed. */
extern int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
/* The other (non-leading) dimension of the stored layout. */
extern int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
/* Flag the matrix as logically transposed; no data movement happens. */
extern void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
/* CUBLAS op character for this matrix: 't' or 'n'. */
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
/* Block the host until all queued device work has finished. */
extern void cuda_sync_threads() {
cudaThreadSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
/* Allocate device storage for mat (size taken from mat->size) and mark it
 * resident. Returns 0 on success, CUBLAS_ERROR on allocation failure. */
extern int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
/* Clear any pending CUDA error so later calls start clean. */
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
/* Download device data into mat->data_host. Requires device residency. */
extern int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
/* Upload mat->data_host to the device, allocating device storage first
 * if the matrix is not yet resident. */
extern int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
/* Device-to-device copy: mat2 = mat1. Shapes must match exactly. */
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
/* Robustness fix: every sibling routine validates device residency before
 * touching data_device; this one previously did not. */
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
/* Copy rows [start, end) of source into target (target must already be
 * (end-start) x width). Indices are checked before launching.
 * NOTE(review): `end > height` compares unsigned vs int — fine while
 * height is non-negative, which size[0] should always be. */
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Inverse of get_row_slice: write source into rows [start, end) of target. */
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* target = transpose(source), via the tiled kTranspose kernel.
 * target must be width x height when source is height x width. */
extern int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters: ceil-divide both dimensions by the tile size
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
/* Consistency fix: this was the only kernel wrapper that never
 * synchronized before checking for errors; honor SYNC_THREADS like
 * every other routine so execution errors are actually surfaced. */
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Release device storage if this matrix owns it; views (owns_data == 0)
 * are left untouched. Always clears on_device for owners. */
extern int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
/* Change the logical shape without moving data; element count must match. */
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
/* Wrap an existing host array (not copied) as an m x n cudamat. */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
/* Create an m x n matrix with device storage only (contents undefined). */
extern int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
/* Fill mat with uniform random samples using the seeded RNG streams. */
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Fill mat with Gaussian random samples using the seeded RNG streams. */
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
/* target = mat + vec broadcast across columns (vec is h x 1).
 * NOTE(review): target->on_device is not checked in this family of
 * vector-broadcast routines — callers must ensure it. */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
/* target = mat + mult * vec broadcast across columns (vec is h x 1). */
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target = mat + vec broadcast across rows (vec is 1 x w). */
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target = mat * vec broadcast across columns (vec is h x 1). */
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target = mat * vec broadcast across rows (vec is 1 x w). */
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = (mat1[i] < mat2[i]) ? 1 : 0, elementwise. */
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
/* Consistency fix: greater_than/equals also validate target->on_device;
 * this routine previously skipped it. */
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = (mat[i] < val) ? 1 : 0, elementwise. */
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = (mat1[i] > mat2[i]) ? 1 : 0, elementwise. */
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = (mat[i] > val) ? 1 : 0, elementwise. */
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = (mat1[i] == mat2[i]) ? 1 : 0, elementwise (exact float
 * comparison, as the kernel defines it). */
extern int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEquals<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
/* Consistency fix: honor SYNC_THREADS like every other kernel wrapper
 * instead of synchronizing unconditionally. */
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = (mat[i] == val) ? 1 : 0, elementwise. */
extern int equals_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEqualsScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
/* Consistency fix: honor SYNC_THREADS like every other kernel wrapper
 * instead of synchronizing unconditionally. */
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Column-wise max: target (1 x w) gets the max of each column of mat.
 * Only axis == 0 is implemented; anything else returns ERROR_UNSUPPORTED. */
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
/* One 32-thread block per column. */
kMaxColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = sign(mat[i]), elementwise, as defined by the kSign kernel. */
extern int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = sigmoid(mat[i]), elementwise. target may alias mat. */
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = tanh(mat[i]), elementwise. */
extern int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = |mat[i]|, elementwise. */
extern int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = log(1 + exp(mat[i])) (softplus), elementwise. */
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = log(mat[i]), elementwise. */
extern int apply_log(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = exp(mat[i]), elementwise. */
extern int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = sqrt(mat[i]), elementwise. */
extern int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = mat[i] ** pow, elementwise (scalar exponent). */
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = mat[i] ** pow[i], elementwise (matrix of exponents). */
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = 1 / mat[i], elementwise. */
extern int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target = alpha * op(mat1) * op(mat2) + beta * target via cublasSgemm,
 * where op() transposes a matrix whose is_trans flag is set. CUBLAS is
 * column-major, matching cudamat's storage, so size[0] is the leading
 * dimension of every operand. */
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
/* Fixed diagnostic: the previous "WTF HAPPENED!!" message gave no hint
 * of which routine failed or why. */
printf("dot: incompatible matrix dimensions\n");
printf("mat1 dimensions: (%d x %d): \n", get_leading_dimension(mat1), get_nonleading_dimension(mat1));
printf("mat2 dimensions: (%d x %d): \n", get_leading_dimension(mat2), get_nonleading_dimension(mat2));
printf("target dimensions: (%d x %d): \n", get_leading_dimension(target), get_nonleading_dimension(target));
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
if (SYNC_THREADS)
cudaThreadSynchronize();
return 0;
}
/* Vector dot product of all elements of mat1 and mat2 (treated as flat
 * arrays). On success sets *err_code = 0 and returns the dot product; on
 * any failure sets *err_code and returns a sentinel (0 or -1). */
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
/* Bug fix: this path used to return ERROR_NOT_ON_DEVICE cast to float and
 * left *err_code unset (stale); report through err_code like the other
 * error paths in this function. */
if (!mat1->on_device || !mat2->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
/* mat1 += alpha * mat2, in place via cublasSaxpy. Shapes and
 * transposedness flags must match. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
/* target = mat1 + mat2, elementwise. When target aliases mat1 the BLAS
 * in-place axpy (alpha = 1) is used instead of launching a kernel. */
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat1 == target) {
cublasSaxpy(len, 1, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
/* target = mat1 - mat2, elementwise. */
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target = mat1 / mat2, elementwise. No zero-divisor check here. */
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Set every element of mat to alpha, in place on the device. */
extern int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kAssignScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target = mat * alpha; uses in-place cublasSscal when target aliases mat. */
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat == target) {
cublasSscal(len, alpha, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
kMultScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
/* target = mat / alpha, elementwise; alpha == 0 is not rejected. */
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivideScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target = mat + alpha, elementwise. target may alias mat. */
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Compute the Euclidean (L2) norm over all elements of mat.
 * On success sets *err_code = 0 and returns the norm; on CUBLAS failure
 * sets *err_code = CUBLAS_ERROR and returns -1. */
extern float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
/* Bug fix: validate device residency BEFORE handing data_device to
 * CUBLAS — the original called cublasSnrm2 first, touching a possibly
 * unallocated device pointer, and left *err_code unset on this path. */
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return ERROR_NOT_ON_DEVICE;
}
float res = cublasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Gather rows of source (row indices given by the 1 x n matrix `indices`,
 * stored as floats) into target. One 32-thread block per 32 rows.
 * NOTE(review): on_device is not validated here — callers must guarantee
 * all three matrices are device-resident. */
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Scatter rows of source into target at the rows named by `indices`
 * (inverse of selectRows). */
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
}
|
691e23f20bef143e26b748390487de3a970e28d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if __CUDACC_VER_MAJOR__ >= 8
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Demand_GPUToHost"
static void cpu_write(char *ptr, const size_t n, const size_t stride) {
for (size_t i = 0; i < n; i += stride) {
benchmark::DoNotOptimize(ptr[i] = 0);
}
}
template <bool NOOP = false>
__global__ void gpu_write(char *ptr, const size_t count, const size_t stride) {
if (NOOP) {
return;
}
// global ID
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
// lane ID 0-31
const size_t lx = gx & 31;
// warp ID
size_t wx = gx / 32;
const size_t numWarps = (gridDim.x * blockDim.x + 32 - 1) / 32;
if (0 == lx) {
for (size_t i = wx * stride; i < count; i += numWarps * stride) {
ptr[i] = 0;
}
}
}
auto Comm_UM_Demand_GPUToHost = [](benchmark::State &state, const int numa_id,
const int cuda_id) {
const size_t pageSize = page_size();
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
numa::ScopedBind binder(numa_id);
if (PRINT_IF_ERROR(scope::cuda_reset_device(cuda_id))) {
state.SkipWithError(NAME " failed to reset device");
return;
}
if (PRINT_IF_ERROR(hipSetDevice(cuda_id))) {
state.SkipWithError(NAME " failed to set CUDA device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(hipMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform hipMallocManaged");
return;
}
defer(hipFree(ptr));
if (PRINT_IF_ERROR(hipMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform hipMemset");
return;
}
for (auto _ : state) {
state.PauseTiming();
hipError_t err = hipMemPrefetchAsync(ptr, bytes, cuda_id);
if (hipErrorInvalidDevice == err) {
hipLaunchKernelGGL(( gpu_write), dim3(256), dim3(256), 0, 0, ptr, bytes, pageSize);
}
if (PRINT_IF_ERROR(hipDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
state.ResumeTiming();
cpu_write(ptr, bytes, pageSize);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["cuda_id"] = cuda_id;
state.counters["numa_id"] = numa_id;
};
static void registerer() {
for (int cuda_id : scope::system::cuda_devices()) {
for (auto numa_id : numa::mems()) {
std::string name = std::string(NAME) + "/" + std::to_string(numa_id) +
"/" + std::to_string(cuda_id);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Demand_GPUToHost,
numa_id, cuda_id)
->SMALL_ARGS();
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8
| 691e23f20bef143e26b748390487de3a970e28d1.cu | #if __CUDACC_VER_MAJOR__ >= 8
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Demand_GPUToHost"
static void cpu_write(char *ptr, const size_t n, const size_t stride) {
for (size_t i = 0; i < n; i += stride) {
benchmark::DoNotOptimize(ptr[i] = 0);
}
}
template <bool NOOP = false>
__global__ void gpu_write(char *ptr, const size_t count, const size_t stride) {
if (NOOP) {
return;
}
// global ID
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
// lane ID 0-31
const size_t lx = gx & 31;
// warp ID
size_t wx = gx / 32;
const size_t numWarps = (gridDim.x * blockDim.x + 32 - 1) / 32;
if (0 == lx) {
for (size_t i = wx * stride; i < count; i += numWarps * stride) {
ptr[i] = 0;
}
}
}
auto Comm_UM_Demand_GPUToHost = [](benchmark::State &state, const int numa_id,
const int cuda_id) {
const size_t pageSize = page_size();
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
numa::ScopedBind binder(numa_id);
if (PRINT_IF_ERROR(scope::cuda_reset_device(cuda_id))) {
state.SkipWithError(NAME " failed to reset device");
return;
}
if (PRINT_IF_ERROR(cudaSetDevice(cuda_id))) {
state.SkipWithError(NAME " failed to set CUDA device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(cudaMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMallocManaged");
return;
}
defer(cudaFree(ptr));
if (PRINT_IF_ERROR(cudaMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMemset");
return;
}
for (auto _ : state) {
state.PauseTiming();
cudaError_t err = cudaMemPrefetchAsync(ptr, bytes, cuda_id);
if (cudaErrorInvalidDevice == err) {
gpu_write<<<256, 256>>>(ptr, bytes, pageSize);
}
if (PRINT_IF_ERROR(cudaDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
state.ResumeTiming();
cpu_write(ptr, bytes, pageSize);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["cuda_id"] = cuda_id;
state.counters["numa_id"] = numa_id;
};
static void registerer() {
for (int cuda_id : scope::system::cuda_devices()) {
for (auto numa_id : numa::mems()) {
std::string name = std::string(NAME) + "/" + std::to_string(numa_id) +
"/" + std::to_string(cuda_id);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Demand_GPUToHost,
numa_id, cuda_id)
->SMALL_ARGS();
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8
|
f370217daabfd42f31cb8c9289d0a6257964a99a.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc SeayJohnnyHW5.cu -o SeayJohnnyHW5 -lglut -lGL -lm
#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include "../../headers/arrays.h"
#include "../../headers/drawing.h"
#include "../../headers/density.cuh"
#define DIM 1024
#define NODES 20
float2 *nodes = (float2*)malloc((NODES)*sizeof(float2));
__constant__ float2 c_nodes[NODES];
float *pixels;
float *buffer;
int trigger = 1;
float rnd(float x)
{
return(x*rand() / RAND_MAX);
}
void display()
{
glClear(GL_COLOR_BUFFER_BIT);
int b = 32;
drawDensity(nodes, NODES, b, 1.0);
drawPoints(nodes, NODES, 5.0, NULL);
drawGrid(2.0/b, 2.0/b, 1.0);
float circleColor[] = {0.88, 0.61, 0.0};
drawCircle(make_float2(0.0, 0.0), 0.5, 100, 2.5, circleColor);
glFlush();
}
int main(int argc, char** argv)
{
srand( time(NULL) );
for(int i = 0; i < NODES; i++)
{
nodes[i].x = rnd(2.0) - 1.0;
nodes[i].y = rnd(2.0) - 1.0;
}
hipMemcpyToSymbol(c_nodes, nodes, sizeof(float2)*NODES);
pixels = (float*)malloc(DIM*DIM*3*sizeof(float));
hipMalloc(&buffer, DIM*DIM*3*sizeof(float));
// Initialize OpenGL
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE | GLUT_MULTISAMPLE);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("GPU | Time to render:\t---");
glutDisplayFunc(display);
glClearColor(0.0, 0.0, 0.0, 0.1);
glEnable(GL_MULTISAMPLE_ARB);
glEnable(GL_POINT_SMOOTH);
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
glEnable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
glutMainLoop();
return(0);
} | f370217daabfd42f31cb8c9289d0a6257964a99a.cu | //nvcc SeayJohnnyHW5.cu -o SeayJohnnyHW5 -lglut -lGL -lm
#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include "../../headers/arrays.h"
#include "../../headers/drawing.h"
#include "../../headers/density.cuh"
#define DIM 1024
#define NODES 20
float2 *nodes = (float2*)malloc((NODES)*sizeof(float2));
__constant__ float2 c_nodes[NODES];
float *pixels;
float *buffer;
int trigger = 1;
float rnd(float x)
{
return(x*rand() / RAND_MAX);
}
void display()
{
glClear(GL_COLOR_BUFFER_BIT);
int b = 32;
drawDensity(nodes, NODES, b, 1.0);
drawPoints(nodes, NODES, 5.0, NULL);
drawGrid(2.0/b, 2.0/b, 1.0);
float circleColor[] = {0.88, 0.61, 0.0};
drawCircle(make_float2(0.0, 0.0), 0.5, 100, 2.5, circleColor);
glFlush();
}
int main(int argc, char** argv)
{
srand( time(NULL) );
for(int i = 0; i < NODES; i++)
{
nodes[i].x = rnd(2.0) - 1.0;
nodes[i].y = rnd(2.0) - 1.0;
}
cudaMemcpyToSymbol(c_nodes, nodes, sizeof(float2)*NODES);
pixels = (float*)malloc(DIM*DIM*3*sizeof(float));
cudaMalloc(&buffer, DIM*DIM*3*sizeof(float));
// Initialize OpenGL
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE | GLUT_MULTISAMPLE);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("GPU | Time to render:\t---");
glutDisplayFunc(display);
glClearColor(0.0, 0.0, 0.0, 0.1);
glEnable(GL_MULTISAMPLE_ARB);
glEnable(GL_POINT_SMOOTH);
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
glEnable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
glutMainLoop();
return(0);
} |
a1f05f8325bbd34d5a308afef001d82ba2a483b1.hip | // !!! This is a file automatically generated by hipify!!!
//CUDA
//PCCUDA
#include <stdio.h>
#include <memory>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
extern "C" void DeviceInfo(void)
{
int deviceCount;
hipError_t error_id = hipGetDeviceCount(&deviceCount); //CUDA
if (error_id != hipSuccess)
{
printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("There are no available device(s) that support CUDA\n");
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
printf("\nTest PASSED\n");
}
| a1f05f8325bbd34d5a308afef001d82ba2a483b1.cu | //采用新版本的CUDA函数编写
//此函数是测试运行的PC机是否具有CUDA编程的能力
#include <stdio.h>
#include <memory>
#include <cuda_runtime.h>
#include <helper_cuda.h>
extern "C" void DeviceInfo(void)
{
int deviceCount;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount); //CUDA
if (error_id != cudaSuccess)
{
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("There are no available device(s) that support CUDA\n");
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
printf("\nTest PASSED\n");
}
|
573cee5ae8d5e4b3e723dc01cfa327d448218186.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <string>
#include "hipcub/hipcub.hpp"
#include "paddle/fluid/operators/math.h"
#include "paddle/fluid/operators/nll_loss_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static const int NTHREADS = 32;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename T>
__global__ void GPUNLLLossForward1D_no_reduce(T* out_data, const T* x_data,
const int64_t* label_data,
const T* weight_data,
const int64_t batch_size,
const int64_t n_classes,
const int64_t ignore_index) {
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int64_t cur_label = label_data[i];
if (cur_label == ignore_index) {
out_data[i] = 0;
continue;
}
const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
out_data[i] = -x_data[i * n_classes + cur_label] * cur_weight;
}
}
template <typename T>
__global__ void GPUNLLLossForward1D_with_reduce(
T* out_data, T* total_weight_data, const T* x_data,
const int64_t* label_data, const T* weight_data, const int64_t batch_size,
const int64_t n_classes, const int64_t size_average,
const int64_t ignore_index) {
__shared__ T sharedInputs[NTHREADS], sharedWeights[NTHREADS];
sharedInputs[threadIdx.x] = 0;
sharedWeights[threadIdx.x] = 0;
int i;
for (i = threadIdx.x; i < batch_size; i += NTHREADS) {
const auto cur_label = label_data[i];
if (cur_label != ignore_index) {
const auto cur_weight = weight_data ? weight_data[cur_label] : (T)1;
sharedInputs[threadIdx.x] -=
x_data[i * n_classes + cur_label] * cur_weight;
sharedWeights[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
*out_data = *total_weight_data = 0;
T output_val = 0;
T total_weight_val = 0;
for (i = 0; i < NTHREADS; ++i) {
output_val += sharedInputs[i];
total_weight_val += sharedWeights[i];
}
*total_weight_data = total_weight_val;
*out_data = output_val;
if (size_average && *total_weight_data != 0) {
*out_data = output_val / total_weight_val;
}
}
}
// Reduce N values concurrently, i.e. suppose N = 2, and there are 4 threads:
// (1, 2), (3, 4), (5, 6), (7, 8), then the return in threadVals for thread 0
// is (1 + 3 + 5 + 7, 2 + 4 + 6 + 8) = (16, 20)
//
// If smem is not used again, there is no need to __syncthreads before this
// call. However, if smem will be used, e.g., this function is called in a loop,
// then __syncthreads is needed either before or afterwards to prevent non-0
// threads overriding smem in the next loop before num-0 thread reads from it.
template <typename T, typename ReduceOp, int N>
__device__ void reduceNValuesInBlock(T* smem, T threadVals[N],
const unsigned int numVals,
ReduceOp reduceOp, T init) {
if (numVals == 0) {
#pragma unroll
for (int i = 0; i < N; ++i) {
threadVals[i] = init;
}
return;
}
// We store each of the N values contiguously, so if N = 2, all values for
// the first threadVal for each thread in the block are stored followed by
// all of the values for the second threadVal for each thread in the block
if (threadIdx.x < numVals) {
#pragma unroll
for (int i = 0; i < N; ++i) {
smem[i * numVals + threadIdx.x] = threadVals[i];
}
}
__syncthreads();
// Number of lanes in the final reduction --> this is used to determine
// where to put the outputs of each of the n things we are reducing. If
// nLP = 32, then we have the 32 outputs for the first threadVal,
// followed by the 32 outputs for the second threadVal, etc.
const unsigned int numLanesParticipating = min(numVals, warpSize);
if (numVals > warpSize && ((threadIdx.x / warpSize) == 0)) {
#pragma unroll
for (int i = 0; i < N; ++i) {
threadVals[i] = threadIdx.x < numVals ? threadVals[i] : init;
}
for (int i = warpSize + threadIdx.x; i < numVals; i += warpSize) {
#pragma unroll
for (int j = 0; j < N; ++j) {
threadVals[j] = reduceOp(threadVals[j], smem[j * numVals + i]);
}
}
#pragma unroll
for (int i = 0; i < N; ++i) {
smem[i * numLanesParticipating + threadIdx.x] = threadVals[i];
}
}
__syncthreads();
if (threadIdx.x == 0) {
if (numLanesParticipating == 32) {
#pragma unroll
for (int i = 0; i < N; ++i) {
#pragma unroll
for (int j = 1; j < 32; ++j) {
threadVals[i] = reduceOp(threadVals[i], smem[i * 32 + j]);
}
}
} else {
#pragma unroll
for (int i = 0; i < N; ++i) {
for (int j = 1; j < numLanesParticipating; ++j) {
threadVals[i] = reduceOp(threadVals[i], smem[i * numVals + j]);
}
}
}
}
}
// Block-wide reduction in shared memory helper; only threadIdx.x == 0 will
// return the reduced value
//
// If smem is not used again, there is no need to __syncthreads before this
// call. However, if smem will be used, e.g., this function is called in a loop,
// then __syncthreads is needed either before or afterwards to prevent non-0
// threads overriding smem in the next loop before num-0 thread reads from it.
template <typename T, typename ReduceOp>
__device__ T reduceBlock(T* smem, const unsigned int numVals, T threadVal,
ReduceOp reduceOp, T init) {
reduceNValuesInBlock<T, ReduceOp, 1>(smem, &threadVal, numVals, reduceOp,
init);
return threadVal;
}
template <typename T>
__global__ void GPUNLLLossForward2D_no_reduce(
T* out_data, const T* x_data, const int64_t* label_data,
const T* weight_data, const int64_t batch_size, const int64_t n_classes,
const int64_t in_dim2, const int64_t in_dim3, const int64_t ignore_index) {
const int64_t map_size = in_dim2 * in_dim3;
const int64_t sample_size = n_classes * map_size;
const int64_t out_numel = batch_size * map_size;
CUDA_1D_KERNEL_LOOP(i, out_numel) {
const int64_t b = i % batch_size;
const int64_t h = (i / batch_size) % in_dim2;
const int64_t w = (i / (batch_size * in_dim2)) % in_dim3;
const int64_t index = b * map_size + h * in_dim3 + w;
const int64_t cur_label = label_data[index];
if (cur_label == ignore_index) {
out_data[index] = 0;
continue;
}
const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
out_data[index] =
-x_data[b * sample_size + cur_label * map_size + h * in_dim3 + w] *
cur_weight;
}
}
template <typename T>
__global__ void GPUNLLLossForward2D_with_reduce(
T* out_data, T* total_weight_data, const T* x_data,
const int64_t* label_data, const T* weight_data, const int64_t batch_size,
const int64_t n_classes, const int64_t map_nelem,
const int64_t blocks_per_sample, const int64_t ignore_index) {
__shared__ T partial_sums[kNumCUDAThreads];
int64_t i;
T input_sum = 0;
T acc_weight = 0;
*out_data = 0;
*total_weight_data = 0;
int64_t sample = blockIdx.x / blocks_per_sample;
int64_t toffset = sample * map_nelem;
int64_t ioffset = sample * map_nelem * n_classes;
int64_t step = blockDim.x * blocks_per_sample;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem; i += step) {
const int64_t cur_label = label_data[toffset + i];
if (cur_label != ignore_index) {
const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
input_sum -= x_data[ioffset + i + map_nelem * cur_label] * cur_weight;
acc_weight += cur_weight;
}
}
input_sum =
reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<T>(), (T)0);
__syncthreads();
acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight,
thrust::plus<T>(), (T)0);
if (threadIdx.x == 0) {
paddle::platform::CudaAtomicAdd(total_weight_data, acc_weight);
paddle::platform::CudaAtomicAdd(out_data, input_sum);
}
}
template <typename T>
__global__ void GPUNLLLossForward2D_size_average(T* out_data,
T* total_weight_data) {
if (*total_weight_data != 0) {
*out_data /= *total_weight_data;
}
}
template <typename T>
__global__ void GPUNLLLossBackward1D_no_reduce(
T* dx_data, const int64_t* label_data, const T* weight_data,
const T* dout_data, const int64_t batch_size, const int64_t n_classes,
const int64_t ignore_index) {
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int64_t cur_label = label_data[i];
if (cur_label == ignore_index) {
continue;
}
const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
dx_data[i * n_classes + cur_label] = -dout_data[i] * cur_weight;
}
}
template <typename T>
__global__ void GPUNLLLossBackward1D_with_reduce(
T* dx_data, const T* total_weight_data, const int64_t* label_data,
const T* weight_data, const T* dout_data, const int64_t batch_size,
const int64_t n_classes, const int64_t size_average,
const int64_t ignore_index) {
if (*total_weight_data <= 0) {
return;
}
int i;
const T norm = size_average ? (T)(1 / *total_weight_data) : (T)1;
for (i = threadIdx.x; i < batch_size; i += NTHREADS) {
const int64_t cur_label = label_data[i];
if (cur_label != ignore_index) {
const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
dx_data[i * n_classes + cur_label] = -cur_weight * dout_data[0] * norm;
}
}
}
template <typename T>
__global__ void GPUNLLLossBackward2D_no_reduce(
T* dx_data, const int64_t* label_data, const T* weight_data,
const T* dout_data, const int64_t batch_size, const int64_t n_classes,
const int64_t in_dim2, const int64_t in_dim3, const int64_t ignore_index) {
const int64_t map_size = in_dim2 * in_dim3;
const int64_t sample_size = n_classes * map_size;
const int64_t out_numel = batch_size * map_size;
CUDA_1D_KERNEL_LOOP(i, out_numel) {
const int64_t b = i % batch_size;
const int64_t h = (i / batch_size) % in_dim2;
const int64_t w = (i / (batch_size * in_dim2)) % in_dim3;
const int64_t index = b * map_size + h * in_dim3 + w;
const int64_t cur_label = label_data[index];
if (cur_label == ignore_index) {
continue;
}
const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
dx_data[b * sample_size + cur_label * map_size + h * in_dim3 + w] =
-dout_data[index] * cur_weight;
}
}
template <typename T>
__global__ void GPUNLLLossBackward2D_with_reduce(
T* dx_data, const T* total_weight_data, const int64_t* label_data,
const T* weight_data, const T* dout_data, const int64_t batch_size,
const int64_t n_classes, const int64_t map_nelem,
const int64_t blocks_per_sample, const int64_t size_average,
const int64_t ignore_index) {
if (*total_weight_data <= 0) {
return;
}
int64_t i;
const T norm = size_average ? (T)(1 / *total_weight_data) : (T)1;
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem; i += step) {
const int64_t cur_label = label_data[toffset + i];
if (cur_label != ignore_index) {
dx_data[ioffset + i + map_nelem * cur_label] =
-(weight_data ? weight_data[cur_label] : (T)1) * norm * dout_data[0];
}
}
}
template <typename DeviceContext, typename T>
class NLLLossCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* labels = ctx.Input<Tensor>("Label");
auto* weight = ctx.Input<Tensor>("Weight");
auto* out = ctx.Output<Tensor>("Out");
auto* total_weight = ctx.Output<Tensor>("Total_weight");
auto ignore_index = ctx.Attr<int64_t>("ignore_index");
auto reduction = ctx.Attr<std::string>("reduction");
auto x_data = x->data<T>();
auto out_data = out->mutable_data<T>(ctx.GetPlace());
auto total_weight_data = total_weight->mutable_data<T>(ctx.GetPlace());
auto label_data = labels->data<int64_t>();
auto weight_data = weight ? weight->data<T>() : nullptr;
hipMemset(total_weight_data, 0, sizeof(T));
auto x_dims = x->dims();
auto batch_size = x_dims[0];
auto n_classes = x_dims[1];
int64_t size_average = (int64_t)(reduction == "mean");
if (x_dims.size() == 2) {
int blocks = NumBlocks(batch_size);
int threads = kNumCUDAThreads;
auto& dev_ctx = ctx.cuda_device_context();
if (reduction == "none") {
hipLaunchKernelGGL(( GPUNLLLossForward1D_no_reduce<
T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
out_data, x_data, label_data, weight_data, batch_size, n_classes,
ignore_index);
} else {
hipLaunchKernelGGL(( GPUNLLLossForward1D_with_reduce<
T>), dim3(1), dim3(NTHREADS), 0, dev_ctx.stream(),
out_data, total_weight_data, x_data, label_data, weight_data,
batch_size, n_classes, size_average, ignore_index);
}
} else if (x_dims.size() == 4) {
const auto in_dim2 = x_dims[2];
const auto in_dim3 = x_dims[3];
const auto map_size = in_dim2 * in_dim3;
const auto out_numel = batch_size * in_dim2 * in_dim3;
int blocks = NumBlocks(out_numel);
int threads = kNumCUDAThreads;
auto& dev_ctx = ctx.cuda_device_context();
if (reduction == "none") {
hipLaunchKernelGGL(( GPUNLLLossForward2D_no_reduce<
T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
out_data, x_data, label_data, weight_data, batch_size, n_classes,
in_dim2, in_dim3, ignore_index);
} else {
int blocks_per_sample = NumBlocks(map_size) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
hipLaunchKernelGGL(( GPUNLLLossForward2D_with_reduce<
T>), dim3(total_blocks), dim3(threads), 0, dev_ctx.stream(),
out_data, total_weight_data, x_data, label_data, weight_data,
batch_size, n_classes, map_size, blocks_per_sample, ignore_index);
if (size_average) {
hipLaunchKernelGGL(( GPUNLLLossForward2D_size_average<T>), dim3(1), dim3(1), 0, dev_ctx.stream(),
out_data, total_weight_data);
}
}
}
}
};
template <typename DeviceContext, typename T>
class NLLLossGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* labels = ctx.Input<Tensor>("Label");
auto* weight = ctx.Input<Tensor>("Weight");
auto* total_weight = ctx.Input<Tensor>("Total_weight");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dx_data = dx->mutable_data<T>(ctx.GetPlace());
auto dout_data = dout->data<T>();
auto label_data = labels->data<int64_t>();
auto weight_data = weight ? weight->data<T>() : nullptr;
auto total_weight_data = total_weight->data<T>();
auto ignore_index = ctx.Attr<int64_t>("ignore_index");
auto reduction = ctx.Attr<std::string>("reduction");
hipMemset(dx_data, 0, dx->numel() * sizeof(T));
int64_t size_average = (int64_t)(reduction == "mean");
auto x_dims = x->dims();
auto batch_size = x_dims[0];
auto n_classes = x_dims[1];
if (x_dims.size() == 2) {
int blocks = NumBlocks(batch_size);
int threads = kNumCUDAThreads;
auto& dev_ctx = ctx.cuda_device_context();
if (reduction == "none") {
hipLaunchKernelGGL(( GPUNLLLossBackward1D_no_reduce<
T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
dx_data, label_data, weight_data, dout_data, batch_size, n_classes,
ignore_index);
} else {
hipLaunchKernelGGL(( GPUNLLLossBackward1D_with_reduce<
T>), dim3(1), dim3(NTHREADS), 0, dev_ctx.stream(),
dx_data, total_weight_data, label_data, weight_data, dout_data,
batch_size, n_classes, size_average, ignore_index);
}
} else if (x_dims.size() == 4) {
const auto in_dim2 = x_dims[2];
const auto in_dim3 = x_dims[3];
const auto map_size = in_dim2 * in_dim3;
const auto out_numel = batch_size * in_dim2 * in_dim3;
int blocks = NumBlocks(out_numel);
int threads = kNumCUDAThreads;
auto& dev_ctx = ctx.cuda_device_context();
if (reduction == "none") {
hipLaunchKernelGGL(( GPUNLLLossBackward2D_no_reduce<
T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
dx_data, label_data, weight_data, dout_data, batch_size, n_classes,
in_dim2, in_dim3, ignore_index);
} else {
int blocks_per_sample = NumBlocks(map_size) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
hipLaunchKernelGGL(( GPUNLLLossBackward2D_with_reduce<
T>), dim3(total_blocks), dim3(threads), 0, dev_ctx.stream(),
dx_data, total_weight_data, label_data, weight_data, dout_data,
batch_size, n_classes, map_size, blocks_per_sample, size_average,
ignore_index);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
nll_loss,
ops::NLLLossCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::NLLLossCUDAKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
nll_loss_grad,
ops::NLLLossGradCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::NLLLossGradCUDAKernel<paddle::platform::CUDADeviceContext, double>);
| 573cee5ae8d5e4b3e723dc01cfa327d448218186.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <string>
#include "cub/cub.cuh"
#include "paddle/fluid/operators/math.h"
#include "paddle/fluid/operators/nll_loss_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
// Number of threads per block used by the elementwise kernels in this file.
static constexpr int kNumCUDAThreads = 512;
// Cap on the grid size; larger inputs are covered by the grid-stride loop in
// CUDA_1D_KERNEL_LOOP. (Renamed from the misspelled "kNumMaxinumNumBlocks";
// the constant is referenced only inside this block.)
static constexpr int kNumMaximumNumBlocks = 4096;
// Block size used by the single-block <<<1, NTHREADS>>> reduction kernels.
static const int NTHREADS = 32;

// Returns the number of kNumCUDAThreads-sized blocks needed to cover N
// elements, capped at kNumMaximumNumBlocks.
static inline int NumBlocks(const int N) {
  return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
                  kNumMaximumNumBlocks);
}
// Grid-stride loop: iterates `i` over [0, n) across all launched threads, so
// a kernel remains correct for any grid size (including the capped grids
// produced by NumBlocks above).
#define CUDA_1D_KERNEL_LOOP(i, n)                                 \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);    \
       i += blockDim.x * gridDim.x)
// Unreduced 1D NLL loss: out[i] = -weight[label[i]] * x[i][label[i]], or 0
// when label[i] == ignore_index. Produces one output element per sample.
template <typename T>
__global__ void GPUNLLLossForward1D_no_reduce(T* out_data, const T* x_data,
                                              const int64_t* label_data,
                                              const T* weight_data,
                                              const int64_t batch_size,
                                              const int64_t n_classes,
                                              const int64_t ignore_index) {
  CUDA_1D_KERNEL_LOOP(i, batch_size) {
    const int64_t label = label_data[i];
    if (label == ignore_index) {
      // Ignored samples contribute zero loss.
      out_data[i] = 0;
    } else {
      // Unit weight when no per-class weight tensor was supplied.
      const T w = (weight_data != nullptr) ? weight_data[label]
                                           : static_cast<T>(1);
      out_data[i] = -x_data[i * n_classes + label] * w;
    }
  }
}
// Reduced 1D NLL loss. Launched as <<<1, NTHREADS>>> (see the Compute method
// below): a single block strides over the whole batch, accumulates per-thread
// partial sums in shared memory, and thread 0 writes the final scalar loss
// and total weight. When size_average is set the loss is divided by the
// total weight ("mean" reduction).
template <typename T>
__global__ void GPUNLLLossForward1D_with_reduce(
    T* out_data, T* total_weight_data, const T* x_data,
    const int64_t* label_data, const T* weight_data, const int64_t batch_size,
    const int64_t n_classes, const int64_t size_average,
    const int64_t ignore_index) {
  // Per-thread partial loss / partial weight accumulators.
  __shared__ T sharedInputs[NTHREADS], sharedWeights[NTHREADS];
  sharedInputs[threadIdx.x] = 0;
  sharedWeights[threadIdx.x] = 0;
  int i;
  // Stride over the batch with all NTHREADS threads of the single block.
  for (i = threadIdx.x; i < batch_size; i += NTHREADS) {
    const auto cur_label = label_data[i];
    if (cur_label != ignore_index) {
      const auto cur_weight = weight_data ? weight_data[cur_label] : (T)1;
      sharedInputs[threadIdx.x] -=
          x_data[i * n_classes + cur_label] * cur_weight;
      sharedWeights[threadIdx.x] += cur_weight;
    }
  }
  // All partials must be visible before thread 0 sums them.
  __syncthreads();
  if (threadIdx.x == 0) {
    *out_data = *total_weight_data = 0;
    T output_val = 0;
    T total_weight_val = 0;
    // Serial reduction over the NTHREADS partials.
    for (i = 0; i < NTHREADS; ++i) {
      output_val += sharedInputs[i];
      total_weight_val += sharedWeights[i];
    }
    *total_weight_data = total_weight_val;
    *out_data = output_val;
    // "mean" reduction: normalize by the accumulated weight (skip if zero,
    // i.e. every sample was ignored).
    if (size_average && *total_weight_data != 0) {
      *out_data = output_val / total_weight_val;
    }
  }
}
// Reduce N values concurrently, i.e. suppose N = 2, and there are 4 threads:
// (1, 2), (3, 4), (5, 6), (7, 8), then the return in threadVals for thread 0
// is (1 + 3 + 5 + 7, 2 + 4 + 6 + 8) = (16, 20)
//
// If smem is not used again, there is no need to __syncthreads before this
// call. However, if smem will be used, e.g., this function is called in a loop,
// then __syncthreads is needed either before or afterwards to prevent non-0
// threads overwriting smem in the next loop before thread 0 reads from it.
// Reduces N values per thread across the block using shared memory `smem`
// (which must hold at least N * numVals elements). On return, thread 0's
// threadVals[] holds the reduced results; other threads' values are
// unspecified. `numVals` is the number of participating threads.
template <typename T, typename ReduceOp, int N>
__device__ void reduceNValuesInBlock(T* smem, T threadVals[N],
                                     const unsigned int numVals,
                                     ReduceOp reduceOp, T init) {
  if (numVals == 0) {
#pragma unroll
    for (int i = 0; i < N; ++i) {
      threadVals[i] = init;
    }
    return;
  }
  // We store each of the N values contiguously, so if N = 2, all values for
  // the first threadVal for each thread in the block are stored followed by
  // all of the values for the second threadVal for each thread in the block
  if (threadIdx.x < numVals) {
#pragma unroll
    for (int i = 0; i < N; ++i) {
      smem[i * numVals + threadIdx.x] = threadVals[i];
    }
  }
  __syncthreads();
  // Number of lanes in the final reduction --> this is used to determine
  // where to put the outputs of each of the n things we are reducing. If
  // nLP = 32, then we have the 32 outputs for the first threadVal,
  // followed by the 32 outputs for the second threadVal, etc.
  const unsigned int numLanesParticipating = min(numVals, warpSize);
  // First warp folds everything beyond warpSize down into warpSize lanes.
  if (numVals > warpSize && ((threadIdx.x / warpSize) == 0)) {
#pragma unroll
    for (int i = 0; i < N; ++i) {
      threadVals[i] = threadIdx.x < numVals ? threadVals[i] : init;
    }
    for (int i = warpSize + threadIdx.x; i < numVals; i += warpSize) {
#pragma unroll
      for (int j = 0; j < N; ++j) {
        threadVals[j] = reduceOp(threadVals[j], smem[j * numVals + i]);
      }
    }
#pragma unroll
    for (int i = 0; i < N; ++i) {
      smem[i * numLanesParticipating + threadIdx.x] = threadVals[i];
    }
  }
  __syncthreads();
  // Thread 0 serially combines the surviving lane values.
  if (threadIdx.x == 0) {
    if (numLanesParticipating == 32) {
#pragma unroll
      for (int i = 0; i < N; ++i) {
#pragma unroll
        for (int j = 1; j < 32; ++j) {
          threadVals[i] = reduceOp(threadVals[i], smem[i * 32 + j]);
        }
      }
    } else {
#pragma unroll
      for (int i = 0; i < N; ++i) {
        for (int j = 1; j < numLanesParticipating; ++j) {
          threadVals[i] = reduceOp(threadVals[i], smem[i * numVals + j]);
        }
      }
    }
  }
}
// Block-wide reduction in shared memory helper; only threadIdx.x == 0 will
// return the reduced value
//
// If smem is not used again, there is no need to __syncthreads before this
// call. However, if smem will be used, e.g., this function is called in a loop,
// then __syncthreads is needed either before or afterwards to prevent non-0
// threads overwriting smem in the next loop before thread 0 reads from it.
// Block-wide reduction of one value per thread; only thread 0's return value
// is meaningful. Thin adapter over reduceNValuesInBlock with N == 1.
template <typename T, typename ReduceOp>
__device__ T reduceBlock(T* smem, const unsigned int numVals, T threadVal,
                         ReduceOp reduceOp, T init) {
  T vals[1] = {threadVal};
  reduceNValuesInBlock<T, ReduceOp, 1>(smem, vals, numVals, reduceOp, init);
  return vals[0];
}
// Unreduced 2D NLL loss over NCHW input: for each (b, h, w) cell,
// out[b][h][w] = -weight[label] * x[b][label][h][w], or 0 when the label is
// ignore_index. The flat loop index is decomposed with batch varying
// fastest, then h, then w.
template <typename T>
__global__ void GPUNLLLossForward2D_no_reduce(
    T* out_data, const T* x_data, const int64_t* label_data,
    const T* weight_data, const int64_t batch_size, const int64_t n_classes,
    const int64_t in_dim2, const int64_t in_dim3, const int64_t ignore_index) {
  const int64_t map_size = in_dim2 * in_dim3;
  const int64_t sample_size = n_classes * map_size;
  const int64_t out_numel = batch_size * map_size;
  CUDA_1D_KERNEL_LOOP(i, out_numel) {
    // Decompose the flat index: b fastest, then h, then w.
    const int64_t b = i % batch_size;
    const int64_t h = (i / batch_size) % in_dim2;
    const int64_t w = (i / (batch_size * in_dim2)) % in_dim3;
    const int64_t out_index = b * map_size + h * in_dim3 + w;
    const int64_t label = label_data[out_index];
    if (label == ignore_index) {
      out_data[out_index] = 0;
    } else {
      const T class_weight =
          (weight_data != nullptr) ? weight_data[label] : static_cast<T>(1);
      out_data[out_index] =
          -x_data[b * sample_size + label * map_size + h * in_dim3 + w] *
          class_weight;
    }
  }
}
// Reduced 2D NLL loss. Each sample is covered by `blocks_per_sample` blocks;
// every block reduces its partial loss/weight with reduceBlock and thread 0
// atomically accumulates into the scalar outputs. Normalization for "mean"
// reduction is done afterwards by GPUNLLLossForward2D_size_average.
// NOTE(review): every thread of every block writes *out_data = 0 and
// *total_weight_data = 0 below, while other blocks may already be doing
// their CudaAtomicAdd — this looks like a cross-block race; the host also
// cudaMemset()s total_weight_data before the launch. Confirm whether the
// in-kernel zeroing can clobber another block's accumulated contribution.
template <typename T>
__global__ void GPUNLLLossForward2D_with_reduce(
    T* out_data, T* total_weight_data, const T* x_data,
    const int64_t* label_data, const T* weight_data, const int64_t batch_size,
    const int64_t n_classes, const int64_t map_nelem,
    const int64_t blocks_per_sample, const int64_t ignore_index) {
  __shared__ T partial_sums[kNumCUDAThreads];
  int64_t i;
  T input_sum = 0;
  T acc_weight = 0;
  *out_data = 0;
  *total_weight_data = 0;
  // Which sample this block works on, and this block's slice of its map.
  int64_t sample = blockIdx.x / blocks_per_sample;
  int64_t toffset = sample * map_nelem;            // label/map offset
  int64_t ioffset = sample * map_nelem * n_classes;  // input offset
  int64_t step = blockDim.x * blocks_per_sample;
  for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
       i < map_nelem; i += step) {
    const int64_t cur_label = label_data[toffset + i];
    if (cur_label != ignore_index) {
      const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
      input_sum -= x_data[ioffset + i + map_nelem * cur_label] * cur_weight;
      acc_weight += cur_weight;
    }
  }
  // Block-level reductions; barrier between them because both reuse
  // partial_sums.
  input_sum =
      reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<T>(), (T)0);
  __syncthreads();
  acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight,
                           thrust::plus<T>(), (T)0);
  // One atomic per block into the scalar outputs.
  if (threadIdx.x == 0) {
    paddle::platform::CudaAtomicAdd(total_weight_data, acc_weight);
    paddle::platform::CudaAtomicAdd(out_data, input_sum);
  }
}
// Final normalization step for "mean" reduction: divides the accumulated
// loss by the accumulated weight. Launched with a single thread (<<<1, 1>>>).
template <typename T>
__global__ void GPUNLLLossForward2D_size_average(T* out_data,
                                                 T* total_weight_data) {
  const T total_weight = *total_weight_data;
  if (total_weight == 0) {
    return;  // every sample was ignored; leave the summed loss untouched
  }
  *out_data /= total_weight;
}
// Gradient of the unreduced 1D loss: scatters -dout[i] * weight[label[i]]
// into dx[i][label[i]]. dx is zeroed by the caller (cudaMemset in the grad
// kernel's Compute), so ignored samples are simply skipped.
template <typename T>
__global__ void GPUNLLLossBackward1D_no_reduce(
    T* dx_data, const int64_t* label_data, const T* weight_data,
    const T* dout_data, const int64_t batch_size, const int64_t n_classes,
    const int64_t ignore_index) {
  CUDA_1D_KERNEL_LOOP(i, batch_size) {
    const int64_t label = label_data[i];
    if (label != ignore_index) {
      const T w = (weight_data != nullptr) ? weight_data[label]
                                           : static_cast<T>(1);
      dx_data[i * n_classes + label] = -dout_data[i] * w;
    }
  }
}
// Gradient of the reduced 1D loss. Launched as <<<1, NTHREADS>>>; each
// labeled entry of dx receives -weight * dout[0], scaled by 1/total_weight
// when size_average ("mean" reduction) is set. dx is pre-zeroed by the
// caller.
template <typename T>
__global__ void GPUNLLLossBackward1D_with_reduce(
    T* dx_data, const T* total_weight_data, const int64_t* label_data,
    const T* weight_data, const T* dout_data, const int64_t batch_size,
    const int64_t n_classes, const int64_t size_average,
    const int64_t ignore_index) {
  // No contributing samples in the forward pass -> zero gradient everywhere.
  if (*total_weight_data <= 0) {
    return;
  }
  int i;
  // norm = 1/total_weight for "mean", 1 otherwise.
  const T norm = size_average ? (T)(1 / *total_weight_data) : (T)1;
  // Stride over the batch with the single block's NTHREADS threads.
  for (i = threadIdx.x; i < batch_size; i += NTHREADS) {
    const int64_t cur_label = label_data[i];
    if (cur_label != ignore_index) {
      const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
      dx_data[i * n_classes + cur_label] = -cur_weight * dout_data[0] * norm;
    }
  }
}
// Gradient of the unreduced 2D loss (NCHW): for each (b, h, w) cell,
// dx[b][label][h][w] = -dout[b][h][w] * weight[label], skipping
// ignore_index cells. dx is pre-zeroed by the caller. The flat index is
// decomposed exactly like the forward 2D no-reduce kernel (batch fastest).
template <typename T>
__global__ void GPUNLLLossBackward2D_no_reduce(
    T* dx_data, const int64_t* label_data, const T* weight_data,
    const T* dout_data, const int64_t batch_size, const int64_t n_classes,
    const int64_t in_dim2, const int64_t in_dim3, const int64_t ignore_index) {
  const int64_t map_size = in_dim2 * in_dim3;
  const int64_t sample_size = n_classes * map_size;
  const int64_t out_numel = batch_size * map_size;
  CUDA_1D_KERNEL_LOOP(i, out_numel) {
    // Decompose the flat index: b fastest, then h, then w.
    const int64_t b = i % batch_size;
    const int64_t h = (i / batch_size) % in_dim2;
    const int64_t w = (i / (batch_size * in_dim2)) % in_dim3;
    const int64_t index = b * map_size + h * in_dim3 + w;
    const int64_t cur_label = label_data[index];
    if (cur_label == ignore_index) {
      continue;
    }
    const T cur_weight = weight_data ? weight_data[cur_label] : (T)1;
    dx_data[b * sample_size + cur_label * map_size + h * in_dim3 + w] =
        -dout_data[index] * cur_weight;
  }
}
// Gradient of the reduced 2D loss. Mirrors GPUNLLLossForward2D_with_reduce:
// `blocks_per_sample` blocks cover each sample's map, and every labeled cell
// receives -weight * dout[0] * norm, where norm = 1/total_weight for "mean"
// reduction. dx is pre-zeroed by the caller.
template <typename T>
__global__ void GPUNLLLossBackward2D_with_reduce(
    T* dx_data, const T* total_weight_data, const int64_t* label_data,
    const T* weight_data, const T* dout_data, const int64_t batch_size,
    const int64_t n_classes, const int64_t map_nelem,
    const int64_t blocks_per_sample, const int64_t size_average,
    const int64_t ignore_index) {
  // No contributing samples in the forward pass -> zero gradient everywhere.
  if (*total_weight_data <= 0) {
    return;
  }
  int64_t i;
  const T norm = size_average ? (T)(1 / *total_weight_data) : (T)1;
  // Fix: use int64_t for the derived offsets. The forward counterpart
  // already does; the previous `int` locals truncated the int64 products
  // sample * map_nelem (and * n_classes) for large inputs.
  int64_t sample = blockIdx.x / blocks_per_sample;
  int64_t step = blockDim.x * blocks_per_sample;
  int64_t toffset = sample * map_nelem;              // label/map offset
  int64_t ioffset = sample * map_nelem * n_classes;  // dx offset
  for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
       i < map_nelem; i += step) {
    const int64_t cur_label = label_data[toffset + i];
    if (cur_label != ignore_index) {
      dx_data[ioffset + i + map_nelem * cur_label] =
          -(weight_data ? weight_data[cur_label] : (T)1) * norm * dout_data[0];
    }
  }
}
// Forward CUDA kernel for the nll_loss op. Dispatches on input rank:
// 2-D [N, C] inputs use the 1D kernels, 4-D [N, C, H, W] inputs use the 2D
// kernels; "none" reduction uses the no-reduce variants, "mean"/"sum" use
// the with-reduce variants.
template <typename DeviceContext, typename T>
class NLLLossCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* labels = ctx.Input<Tensor>("Label");
    auto* weight = ctx.Input<Tensor>("Weight");  // optional per-class weights
    auto* out = ctx.Output<Tensor>("Out");
    auto* total_weight = ctx.Output<Tensor>("Total_weight");
    auto ignore_index = ctx.Attr<int64_t>("ignore_index");
    auto reduction = ctx.Attr<std::string>("reduction");
    auto x_data = x->data<T>();
    auto out_data = out->mutable_data<T>(ctx.GetPlace());
    auto total_weight_data = total_weight->mutable_data<T>(ctx.GetPlace());
    auto label_data = labels->data<int64_t>();
    auto weight_data = weight ? weight->data<T>() : nullptr;
    // Zero the scalar accumulator before the kernels atomically add into it.
    // NOTE(review): return code of cudaMemset is ignored — confirm whether
    // surrounding framework error handling covers this.
    cudaMemset(total_weight_data, 0, sizeof(T));
    auto x_dims = x->dims();
    auto batch_size = x_dims[0];
    auto n_classes = x_dims[1];
    int64_t size_average = (int64_t)(reduction == "mean");
    if (x_dims.size() == 2) {
      // 1D case: one loss value per sample (or one scalar when reduced).
      int blocks = NumBlocks(batch_size);
      int threads = kNumCUDAThreads;
      auto& dev_ctx = ctx.cuda_device_context();
      if (reduction == "none") {
        GPUNLLLossForward1D_no_reduce<
            T><<<blocks, threads, 0, dev_ctx.stream()>>>(
            out_data, x_data, label_data, weight_data, batch_size, n_classes,
            ignore_index);
      } else {
        // Single-block reduction kernel.
        GPUNLLLossForward1D_with_reduce<
            T><<<1, NTHREADS, 0, dev_ctx.stream()>>>(
            out_data, total_weight_data, x_data, label_data, weight_data,
            batch_size, n_classes, size_average, ignore_index);
      }
    } else if (x_dims.size() == 4) {
      // 2D case: NCHW input with a spatial loss map.
      const auto in_dim2 = x_dims[2];
      const auto in_dim3 = x_dims[3];
      const auto map_size = in_dim2 * in_dim3;
      const auto out_numel = batch_size * in_dim2 * in_dim3;
      int blocks = NumBlocks(out_numel);
      int threads = kNumCUDAThreads;
      auto& dev_ctx = ctx.cuda_device_context();
      if (reduction == "none") {
        GPUNLLLossForward2D_no_reduce<
            T><<<blocks, threads, 0, dev_ctx.stream()>>>(
            out_data, x_data, label_data, weight_data, batch_size, n_classes,
            in_dim2, in_dim3, ignore_index);
      } else {
        // Split each sample's map across several blocks, then reduce.
        int blocks_per_sample = NumBlocks(map_size) / 128;
        blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
        int total_blocks = blocks_per_sample * batch_size;
        GPUNLLLossForward2D_with_reduce<
            T><<<total_blocks, threads, 0, dev_ctx.stream()>>>(
            out_data, total_weight_data, x_data, label_data, weight_data,
            batch_size, n_classes, map_size, blocks_per_sample, ignore_index);
        // "mean": normalize the summed loss by the summed weight.
        if (size_average) {
          GPUNLLLossForward2D_size_average<T><<<1, 1, 0, dev_ctx.stream()>>>(
              out_data, total_weight_data);
        }
      }
    }
  }
};
// Backward CUDA kernel for the nll_loss op. Zeroes dX, then scatters the
// gradient with the backward kernel matching the rank/reduction combination
// used in the forward pass.
template <typename DeviceContext, typename T>
class NLLLossGradCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* labels = ctx.Input<Tensor>("Label");
    auto* weight = ctx.Input<Tensor>("Weight");  // optional per-class weights
    auto* total_weight = ctx.Input<Tensor>("Total_weight");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto dx_data = dx->mutable_data<T>(ctx.GetPlace());
    auto dout_data = dout->data<T>();
    auto label_data = labels->data<int64_t>();
    auto weight_data = weight ? weight->data<T>() : nullptr;
    auto total_weight_data = total_weight->data<T>();
    auto ignore_index = ctx.Attr<int64_t>("ignore_index");
    auto reduction = ctx.Attr<std::string>("reduction");
    // The backward kernels only write labeled entries, so dX must start out
    // zeroed. NOTE(review): cudaMemset return code is ignored.
    cudaMemset(dx_data, 0, dx->numel() * sizeof(T));
    int64_t size_average = (int64_t)(reduction == "mean");
    auto x_dims = x->dims();
    auto batch_size = x_dims[0];
    auto n_classes = x_dims[1];
    if (x_dims.size() == 2) {
      // 1D case, mirroring the forward dispatch.
      int blocks = NumBlocks(batch_size);
      int threads = kNumCUDAThreads;
      auto& dev_ctx = ctx.cuda_device_context();
      if (reduction == "none") {
        GPUNLLLossBackward1D_no_reduce<
            T><<<blocks, threads, 0, dev_ctx.stream()>>>(
            dx_data, label_data, weight_data, dout_data, batch_size, n_classes,
            ignore_index);
      } else {
        GPUNLLLossBackward1D_with_reduce<
            T><<<1, NTHREADS, 0, dev_ctx.stream()>>>(
            dx_data, total_weight_data, label_data, weight_data, dout_data,
            batch_size, n_classes, size_average, ignore_index);
      }
    } else if (x_dims.size() == 4) {
      // 2D case (NCHW).
      const auto in_dim2 = x_dims[2];
      const auto in_dim3 = x_dims[3];
      const auto map_size = in_dim2 * in_dim3;
      const auto out_numel = batch_size * in_dim2 * in_dim3;
      int blocks = NumBlocks(out_numel);
      int threads = kNumCUDAThreads;
      auto& dev_ctx = ctx.cuda_device_context();
      if (reduction == "none") {
        GPUNLLLossBackward2D_no_reduce<
            T><<<blocks, threads, 0, dev_ctx.stream()>>>(
            dx_data, label_data, weight_data, dout_data, batch_size, n_classes,
            in_dim2, in_dim3, ignore_index);
      } else {
        // Same block-per-sample split as the forward reduced kernel.
        int blocks_per_sample = NumBlocks(map_size) / 128;
        blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
        int total_blocks = blocks_per_sample * batch_size;
        GPUNLLLossBackward2D_with_reduce<
            T><<<total_blocks, threads, 0, dev_ctx.stream()>>>(
            dx_data, total_weight_data, label_data, weight_data, dout_data,
            batch_size, n_classes, map_size, blocks_per_sample, size_average,
            ignore_index);
      }
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register float and double CUDA kernels for the forward and backward ops.
REGISTER_OP_CUDA_KERNEL(
    nll_loss,
    ops::NLLLossCUDAKernel<paddle::platform::CUDADeviceContext, float>,
    ops::NLLLossCUDAKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    nll_loss_grad,
    ops::NLLLossGradCUDAKernel<paddle::platform::CUDADeviceContext, float>,
    ops::NLLLossGradCUDAKernel<paddle::platform::CUDADeviceContext, double>);
|
4edd7b6029cbba64b356d9e0c04b0e8aab0a4a9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
// One thread prints one "iteration": threadIdx.x replaces the old serial
// loop index (valid because the launch below uses a single block).
__global__ void loop()
{
  printf("This is iteration number %d\n", threadIdx.x);
}
int main()
{
  /*
   * When refactoring `loop` to launch as a kernel, be sure
   * to use the execution configuration to control how many
   * "iterations" to perform.
   *
   * For this exercise, only use 1 block of threads.
   */
  int N = 10;
  // HIP-portable launch macro: grid of 1 block, N threads, 0 bytes of
  // dynamic shared memory, default stream, no kernel arguments.
  hipLaunchKernelGGL(( loop), dim3(1), dim3(N), 0, 0, );
  // Wait for the kernel so its printf output is flushed before exit.
  hipDeviceSynchronize();
}
| 4edd7b6029cbba64b356d9e0c04b0e8aab0a4a9f.cu | #include <stdio.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
// One thread prints one "iteration": threadIdx.x replaces the old serial
// loop index (valid because the launch below uses a single block).
__global__ void loop()
{
  printf("This is iteration number %d\n", threadIdx.x);
}
int main()
{
  // Execution configuration controls how many "iterations" run: a single
  // block of N threads, each performing one iteration of the old loop.
  const int N = 10;
  loop<<<1, N>>>();

  // Block the host until the kernel finishes so its printf output appears
  // before the program exits.
  cudaDeviceSynchronize();
}
|
600f12a4e11097b14bb373e23f0cabca4fe7f7e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
//
// FILE: burrows_wheeler_encoder.cu
// DESCRIPTION: uses bitonic sort to encode a string with BWT
// AUTHOR: Dan Fabian
// DATE: 4/5/2020
#include <iostream>
#include <stdio.h>
using std::cout; using std::endl;
// constants, MUST BE A POWER OF 2 IN LENGTH
const int SIZE = 8;
const char STRING[] = "^BANANA|";
// kernal func prototype
__global__ void bitonic_sort(char *string, int *indices);
////////////////////////////////////////////////////////////////////////////////
//
// MAIN
int main()
{
  // create array of vals
  char *string_d;
  int *indices = new int[SIZE], *indices_d;  // NOTE(review): never delete[]d
                                             // (reclaimed at process exit)
  // copy string to device memory and allocate mem
  int stringMem = sizeof(char) * SIZE, indexMem = sizeof(int) * SIZE;
  hipMalloc((void**)&string_d, stringMem);
  hipMalloc((void**)&indices_d, indexMem);
  hipMemcpy(string_d, STRING, stringMem, hipMemcpyHostToDevice);
  // NOTE(review): this uploads the *uninitialized* host `indices`; the
  // kernel overwrites the device array anyway, so the copy looks redundant.
  hipMemcpy(indices_d, indices, indexMem, hipMemcpyHostToDevice);
  // sort: one block of SIZE threads runs the bitonic network
  hipLaunchKernelGGL(( bitonic_sort), dim3(1), dim3(SIZE), 0, 0, string_d, indices_d);
  // copy device memory back to host (blocking copy, so it also waits for
  // the kernel on the same stream)
  hipMemcpy(indices, indices_d, indexMem, hipMemcpyDeviceToHost);
  // print out encoded string: the BWT emits, for each sorted rotation, the
  // character preceding its start (wrapping to the last character for 0)
  for (int i = 0; i < SIZE; ++i)
    if (indices[i] != 0)
      cout << STRING[indices[i] - 1] << ' ';
    else
      cout << STRING[SIZE - 1] << ' ';
  cout << endl;
  // free all device memory
  hipFree(indices_d); hipFree(string_d);
  hipDeviceSynchronize();
}
////////////////////////////////////////////////////////////////////////////////
//
// KERNEL function
////////////////////////////////////////
// compare strings
// Lexicographically compares two rotations of `string` starting at pos1 and
// pos2, wrapping modulo `size` (all accesses stay in bounds thanks to the
// modulo, even when i reaches size). Equal rotations compare as not-less.
__device__ bool lessThan(char *string, const int& pos1, const int& pos2, const int &size)
{
  int i = 0;
  // Advance past the common prefix of the two rotations.
  while (string[(pos1 + i) % size] == string[(pos2 + i) % size] && i < size) ++i;
  if (i == size) return false;  // identical rotations
  return string[(pos1 + i) % size] < string[(pos2 + i) % size];
}
////////////////////////////////////////
// gpu sort func
// Bitonic sorting network over the SIZE rotations of the input string; the
// sorted rotation start indices are written to `indices`. Expects a single
// block of SIZE threads (SIZE a power of two).
// NOTE(review): indices_s[elemIdx1/elemIdx2] is read *before* the
// `elemIdx2 < size` bounds check below — for idx in the upper half at large
// j this reads past the end of the shared arrays. There is also no
// __syncthreads() between populating the shared arrays and the first
// compare step. Both should be confirmed/fixed against the CUDA copy.
__global__ void bitonic_sort(char *string, int *indices)
{
  const int size = SIZE;
  // create shared arrays
  static __shared__ char string_s[size]; // holds original string
  static __shared__ int indices_s[size]; // holds char indices of sorted array
  // thread idx
  int idx = threadIdx.x;
  // load 1 elem in each array per index
  string_s[idx] = string[idx];
  indices_s[idx] = idx;
  // bitonic sort alg
  int tmp, elemIdx1, elemIdx2, strIdx1, strIdx2;
  bool max; // if max then put max elem in higher index
  for (int i = 2; i <= size; i *= 2)
  {
    // bitonic merge of size i
    max = (idx % i) < (i / 2);
    for (int j = i / 2; j > 0; j /= 2)
    {
      // get element indices to compare
      elemIdx1 = (idx / j) * (j * 2) + idx % j;
      elemIdx2 = elemIdx1 + j;
      strIdx1 = indices_s[elemIdx1];
      strIdx2 = indices_s[elemIdx2];
      // check if swap is needed
      if ((elemIdx2 < size) &&
          ((max && lessThan(string_s, strIdx2, strIdx1, size)) ||
           (!max && lessThan(string_s, strIdx1, strIdx2, size))))
      {
        // swap indices
        tmp = indices_s[elemIdx1];
        indices_s[elemIdx1] = indices_s[elemIdx2];
        indices_s[elemIdx2] = tmp;
      }
      // need to sync before next step
      __syncthreads();
    }
  }
  // transfer memory to global
  indices[idx] = indices_s[idx];
} | 600f12a4e11097b14bb373e23f0cabca4fe7f7e9.cu | ////////////////////////////////////////////////////////////////////////////////
//
// FILE: burrows_wheeler_encoder.cu
// DESCRIPTION: uses bitonic sort to encode a string with BWT
// AUTHOR: Dan Fabian
// DATE: 4/5/2020
#include <iostream>
#include <stdio.h>
using std::cout; using std::endl;
// constants, MUST BE A POWER OF 2 IN LENGTH
const int SIZE = 8;
const char STRING[] = "^BANANA|";
// kernal func prototype
__global__ void bitonic_sort(char *string, int *indices);
////////////////////////////////////////////////////////////////////////////////
//
// MAIN
int main()
{
  // create array of vals
  char *string_d;
  int *indices = new int[SIZE], *indices_d;  // NOTE(review): never delete[]d
                                             // (reclaimed at process exit)
  // copy string to device memory and allocate mem
  int stringMem = sizeof(char) * SIZE, indexMem = sizeof(int) * SIZE;
  cudaMalloc((void**)&string_d, stringMem);
  cudaMalloc((void**)&indices_d, indexMem);
  cudaMemcpy(string_d, STRING, stringMem, cudaMemcpyHostToDevice);
  // NOTE(review): this uploads the *uninitialized* host `indices`; the
  // kernel overwrites the device array anyway, so the copy looks redundant.
  cudaMemcpy(indices_d, indices, indexMem, cudaMemcpyHostToDevice);
  // sort: one block of SIZE threads runs the bitonic network
  bitonic_sort<<<1, SIZE>>>(string_d, indices_d);
  // copy device memory back to host (blocking copy, so it also waits for
  // the kernel on the same stream)
  cudaMemcpy(indices, indices_d, indexMem, cudaMemcpyDeviceToHost);
  // print out encoded string: the BWT emits, for each sorted rotation, the
  // character preceding its start (wrapping to the last character for 0)
  for (int i = 0; i < SIZE; ++i)
    if (indices[i] != 0)
      cout << STRING[indices[i] - 1] << ' ';
    else
      cout << STRING[SIZE - 1] << ' ';
  cout << endl;
  // free all device memory
  cudaFree(indices_d); cudaFree(string_d);
  cudaDeviceSynchronize();
}
////////////////////////////////////////////////////////////////////////////////
//
// KERNEL function
////////////////////////////////////////
// compare strings
// Lexicographically compares two rotations of `string` starting at pos1 and
// pos2, wrapping modulo `size`. Returns true iff rotation pos1 sorts before
// rotation pos2; identical rotations compare as not-less.
__device__ bool lessThan(char *string, const int& pos1, const int& pos2, const int &size)
{
  for (int offset = 0; offset < size; ++offset)
  {
    const char a = string[(pos1 + offset) % size];
    const char b = string[(pos2 + offset) % size];
    if (a != b) return a < b;  // first differing character decides
  }
  // Every character matched: the rotations are equal.
  return false;
}
////////////////////////////////////////
// gpu sort func
// Sorts the SIZE rotations of the input string with a bitonic sorting
// network and writes the sorted rotation start indices to `indices`.
// Must be launched as exactly one block of SIZE threads (SIZE a power of
// two); thread idx owns element idx of the shared index array.
__global__ void bitonic_sort(char *string, int *indices)
{
  const int size = SIZE;

  // create shared arrays
  static __shared__ char string_s[size];  // holds original string
  static __shared__ int indices_s[size];  // holds char indices of sorted array

  // thread idx
  int idx = threadIdx.x;

  // load 1 elem in each array per index
  string_s[idx] = string[idx];
  indices_s[idx] = idx;
  // Fix: barrier so every thread sees the fully-populated shared arrays
  // before the first compare step reads its partner's entries.
  __syncthreads();

  // bitonic sort network
  for (int i = 2; i <= size; i *= 2)
  {
    // bitonic merge of size i; "max" lanes push the larger element upward
    const bool max = (idx % i) < (i / 2);
    for (int j = i / 2; j > 0; j /= 2)
    {
      // element indices this thread compares at this step
      const int elemIdx1 = (idx / j) * (j * 2) + idx % j;
      const int elemIdx2 = elemIdx1 + j;

      // Fix: only touch shared memory when the pair is in range. The
      // original read indices_s[elemIdx1]/[elemIdx2] before checking the
      // bound, reading past the end of the shared arrays for upper-half
      // threads.
      if (elemIdx2 < size)
      {
        const int strIdx1 = indices_s[elemIdx1];
        const int strIdx2 = indices_s[elemIdx2];
        // swap when the pair is out of order for this lane's direction
        if ((max && lessThan(string_s, strIdx2, strIdx1, size)) ||
            (!max && lessThan(string_s, strIdx1, strIdx2, size)))
        {
          indices_s[elemIdx1] = strIdx2;
          indices_s[elemIdx2] = strIdx1;
        }
      }

      // need to sync before next step; outside the if so every thread
      // reaches the barrier
      __syncthreads();
    }
  }

  // transfer memory to global
  indices[idx] = indices_s[idx];
} |
7bc5d6394df4a962089cc42bd3f45b5cb5768e1c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math_constants.h>
#define OP_SUM 0
#define OP_MULT 1
const int B = %(B)s; // blockDim.x
/**
* Helper function to sum across a block.
* Assume pS_data is already in shared memory
* Only the first thread returns a value in pSum
*/
/**
 * Block-wide reduction over pSdata (shared memory, one slot per thread,
 * assumed already populated by the caller). `op` selects the combiner:
 * OP_SUM adds, OP_MULT multiplies; any other value leaves the data
 * unchanged. Only the thread with flat index 0 writes the result to pSum.
 * Requires blockDim.x * blockDim.y to be a power of two.
 */
__device__ void reduceBlock( float pSdata[B], float* pSum, int op )
{
    int idx = threadIdx.x * blockDim.y + threadIdx.y;

    // Sync all threads across the block so every pSdata slot is visible.
    __syncthreads();

    // Tree reduction: each round folds the upper half onto the lower half.
    // Fix: the loop (and the barrier inside it) must be executed by EVERY
    // thread of the block. The original wrapped the loop in
    // `if (idx < half)`, so threads in the upper half never reached the
    // __syncthreads() inside — a divergent barrier (undefined behavior,
    // potential hang).
    int half = (blockDim.x*blockDim.y) / 2;
    while( half > 0 )
    {
        if(idx < half)
        {
            switch(op)
            {
                case OP_SUM:
                    pSdata[idx] = pSdata[idx] + pSdata[idx + half];
                    break;
                case OP_MULT:
                    pSdata[idx] = pSdata[idx] * pSdata[idx + half];
                    break;
                default:
                    // unknown op: leave the value unchanged (identity)
                    break;
            }
        }
        half = half / 2;
        __syncthreads();
    }

    // Thread 0 publishes the reduced value to global/local memory.
    if (idx == 0)
    {
        pSum[0] = pSdata[0];
    }
}
/**
* Compute the mean of each component in a Gaussian mixture model
* ((N/B)xK) grid of (Bx1x1) blocks
*/
/**
 * Partial per-component sum of the d-th coordinate of the data points
 * assigned to component k. Grid is ((N/B) x K) of (B x 1 x 1) blocks:
 * blockIdx.y selects the component, blockIdx.x tiles the N points. Each
 * block reduces its tile and writes one partial sum into
 * pXMeanSum[k * gridDim.x + blockIdx.x] (final sum is taken elsewhere).
 */
__global__ void computeXSum(int N,
                            int* pC,
                            int D,
                            int d,
                            float* pX,
                            float* pXMeanSum)
{
    int k = blockIdx.y;
    int x = threadIdx.x;
    int n = blockIdx.x * blockDim.x + x;
    __shared__ float xsum[B];
    xsum[x] = 0.0;  // zero so out-of-range / other-component slots are inert
    if (n<N)
    {
        if (pC[n]==k)
        {
            // pX is laid out dimension-major: pX[d*N + n]
            xsum[x] = pX[d*N+n];
        }
    }
    // Sum xsum for this block
    // the output array is KxGridDim.x in size
    float blockXsum = 0.0;
    reduceBlock(xsum, &blockXsum, OP_SUM);
    if (x==0)
    {
        pXMeanSum[k*gridDim.x+blockIdx.x] = blockXsum;
    }
}
/**
* Compute the mean of each component in a Gaussian mixture model
* ((N/B)xK) grid of (Bx1x1) blocks
*
* pXvarsum is a KxDxD array
*/
/**
 * Partial (d1, d2) covariance sum for component k: accumulates
 * (x[d1] - mean[d1]) * (x[d2] - mean[d2]) over the points assigned to k.
 * Same ((N/B) x K) grid layout as computeXSum; each block writes one
 * partial sum into pXCovarSum[k * gridDim.x + blockIdx.x].
 */
__global__ void computeXVarSum(int N,
                               int D,
                               int* pC,
                               int d1,
                               int d2,
                               float* pX,
                               float* pXmean,
                               float* pXCovarSum)
{
    int k = blockIdx.y;
    int x = threadIdx.x;
    int n = blockIdx.x * blockDim.x + x;
    __shared__ float xvarsum[B];
    xvarsum[x] = 0.0;  // inert value for out-of-range / other-component slots
    // Component means for the two requested dimensions (pXmean is K x D).
    float xmean1 = pXmean[k*D+d1];
    float xmean2 = pXmean[k*D+d2];
    if (n<N)
    {
        if (pC[n]==k)
        {
            xvarsum[x] = (pX[d1*N+n]-xmean1)*(pX[d2*N+n]-xmean2);
        }
    }
    // Sum xsum for this block
    // the output array is KxGridDim.x in size
    float blockXvarsum = 0.0;
    reduceBlock(xvarsum, &blockXvarsum, OP_SUM);
    if (x==0)
    {
        pXCovarSum[k*gridDim.x+blockIdx.x] = blockXvarsum;
    }
}
/**
* Compute the per-spike contributions to the probability of the n-th spike
* occuring on process k. Spike n's process identity affects the parenting
* spike Z[n] as well as any child spikes n' such that Z[n']=n.
*/
/**
 * Per-spike contributions to the log probability that spike n occurs on
 * process cn (cn = blockIdx.y). Each thread handles one spike m and adds its
 * term — as parent of n, as n itself (background rate + Gaussian spatial
 * prior), or as a child parented by n — then blocks reduce and write
 * partials to pPrCn[cn * gridDim.x + blockIdx.x].
 */
__global__ void computePerSpikePrCn(int n,
                                    int N,
                                    int K,
                                    int D,
                                    float* pX,
                                    float* pMuX,
                                    float* pLamX,
                                    int* pZ,
                                    int* pC,
                                    bool* pA,
                                    float* pW,
                                    float* pLam,
                                    float* pPrCn)
{
    int cn = blockIdx.y;
    int x = threadIdx.x;
    int m = blockIdx.x * blockDim.x + x; // this thread represents the m-th spike
    __shared__ float logprcn[B];
    logprcn[x] = 0.0;
    if (m<N)
    {
        // Get the m-th spike's process identity
        int cm = pC[m];
        if (m<n && pZ[n]==m)
        {
            // if the m-th spike is the parent of spike n, calculate its contribution
            logprcn[x] += logf(pA[cm*K+cn]) + logf(pW[cm*K+cn]);
        }
        else if (m==n)
        {
            // if m==n then calculate the prior pr given mu and the pr if it is parented
            // by the background
            if (pZ[n]==-1)
            {
                // TODO: UPDATE PYTHON CODE FOR NON-HOMOGENEOUS BACKGROUND RATES
                logprcn[x] += logf(pLam[cn*N+n]);
            }
            // Calculate the prior probability given mu and sigma
            // This involves a vector-matrix-vector multiply for the Gaussian logprob
            // (quadratic form (x - mu)^T * Lam * (x - mu); pLamX is K x D x D)
            for (int d1=0; d1<D; d1++)
            {
                for (int d2=0; d2<D; d2++)
                {
                    float xn1 = pX[d1*N+n];
                    float xn2 = pX[d2*N+n];
                    logprcn[x] += -1*(xn1-pMuX[cn*D+d1])*pLamX[cn*D*D+d1*D+d2]*(xn2-pMuX[cn*D+d2]);
                }
            }
        }
        else if (m>n && pZ[m]==n)
        {
            // if the m-th spike is parented by spike n calculate its contribution
            logprcn[x] += logf(pA[cn*K+cm]) + logf(pW[cn*K+cm]);
        }
    }
    // Sum the result for each block
    // the output array is KxGridDim.x in size
    float blockLogPrCnSum = 0.0;
    reduceBlock(logprcn, &blockLogPrCnSum, OP_SUM);
    if (x==0)
    {
        pPrCn[cn*gridDim.x+blockIdx.x] = blockLogPrCnSum;
    }
}
/**
* Compute the log Q-ratio for accepting a MH proposal made by the
* MetaProcessId model. The proposal is to change all spikes affiliated with
* underlying process k to a new meta-process m, while simultaneously updating
* the parent assignments Z. The log probability of accepting such a proposal is
* proportional to a sum over terms for every spike that either occurs on the
* underlying process k, or may have a parent spike on underlying process k.
*/
/**
 * Log Q-ratio for the Metropolis-Hastings proposal that moves all spikes on
 * underlying process k to meta-process m. Each thread computes the
 * log(num) - log(den) term for one spike n, summing the background rate
 * under the new vs. old meta-process plus the parent terms from the sparse
 * (CSC: pColPtrs/pRowIndices) candidate-parent structure; blocks reduce and
 * write partials into pLogQRatio[blockIdx.x].
 */
__global__ void computeLogQratio(int k,
                                 int m,
                                 int N,
                                 int M,
                                 int* pY,
                                 int* pC,
                                 float* pLam,
                                 bool* pA,
                                 float* pW,
                                 float* pGS,
                                 int* pColPtrs,
                                 int* pRowIndices,
                                 float* pLogQRatio
                                 )
{
    int x = threadIdx.x;
    int n = x + blockIdx.x*blockDim.x;
    __shared__ float logQratio[B];
    logQratio[x] = 0.0;
    if (n < N)
    {
        // the ratio is equal to log(num)-log(den), where each term consists
        // of a sum over spike children
        float num = 0;
        float den = 0;
        int cn = pC[n];
        int yn_old = pY[cn];
        // Under the proposal, only process k changes meta-process to m.
        int yn_new = (cn==k) ? m : yn_old;
        num += pLam[yn_new*N+n];
        den += pLam[yn_old*N+n];
        // Walk spike n's candidate parents (CSC column n).
        for (int pa_off=pColPtrs[n]; pa_off<pColPtrs[n+1]; pa_off++)
        {
            int n_pa = pRowIndices[pa_off];
            int c_pa = pC[n_pa];
            int y_pa_old = pY[c_pa];
            int y_pa_new = (c_pa==k) ? m : y_pa_old;
            num += pA[y_pa_new*M+yn_new] * pW[y_pa_new*M+yn_new] * pGS[pa_off];
            den += pA[y_pa_old*M+yn_old] * pW[y_pa_old*M+yn_old] * pGS[pa_off];
        }
        // Update this spikes contribution to log Q-ratio
        logQratio[x] = logf(num)-logf(den);
    }
    // Sum over all the spikes in this block
    float logQratioSum = 0.0;
    reduceBlock(logQratio, &logQratioSum, OP_SUM);
    if (x==0)
    {
        pLogQRatio[blockIdx.x] = logQratioSum;
    }
} | 7bc5d6394df4a962089cc42bd3f45b5cb5768e1c.cu | #include <cuda.h>
#include <math_constants.h>
#define OP_SUM 0
#define OP_MULT 1
const int B = %(B)s; // blockDim.x
/**
* Helper function to sum across a block.
* Assume pS_data is already in shared memory
* Only the first thread returns a value in pSum
*/
/**
 * Block-wide reduction over pSdata (shared memory, one slot per thread,
 * assumed already populated by the caller). `op` selects the combiner:
 * OP_SUM adds, OP_MULT multiplies; any other value leaves the data
 * unchanged. Only the thread with flat index 0 writes the result to pSum.
 * Requires blockDim.x * blockDim.y to be a power of two.
 */
__device__ void reduceBlock( float pSdata[B], float* pSum, int op )
{
    int idx = threadIdx.x * blockDim.y + threadIdx.y;

    // Sync all threads across the block so every pSdata slot is visible.
    __syncthreads();

    // Tree reduction: each round folds the upper half onto the lower half.
    // Fix: the loop (and the barrier inside it) must be executed by EVERY
    // thread of the block. The original wrapped the loop in
    // `if (idx < half)`, so threads in the upper half never reached the
    // __syncthreads() inside — a divergent barrier (undefined behavior,
    // potential hang).
    int half = (blockDim.x*blockDim.y) / 2;
    while( half > 0 )
    {
        if(idx < half)
        {
            switch(op)
            {
                case OP_SUM:
                    pSdata[idx] = pSdata[idx] + pSdata[idx + half];
                    break;
                case OP_MULT:
                    pSdata[idx] = pSdata[idx] * pSdata[idx + half];
                    break;
                default:
                    // unknown op: leave the value unchanged (identity)
                    break;
            }
        }
        half = half / 2;
        __syncthreads();
    }

    // Thread 0 publishes the reduced value.
    if (idx == 0)
    {
        pSum[0] = pSdata[0];
    }
}
/**
* Compute the mean of each component in a Gaussian mixture model
* ((N/B)xK) grid of (Bx1x1) blocks
*/
/**
 * Partial per-component sum of the d-th coordinate of the data points
 * assigned to component k. Grid is ((N/B) x K) of (B x 1 x 1) blocks:
 * blockIdx.y selects the component, blockIdx.x tiles the N points. Each
 * block reduces its tile and writes one partial sum into
 * pXMeanSum[k * gridDim.x + blockIdx.x] (final sum is taken elsewhere).
 */
__global__ void computeXSum(int N,
                            int* pC,
                            int D,
                            int d,
                            float* pX,
                            float* pXMeanSum)
{
    int k = blockIdx.y;
    int x = threadIdx.x;
    int n = blockIdx.x * blockDim.x + x;
    __shared__ float xsum[B];
    xsum[x] = 0.0;  // zero so out-of-range / other-component slots are inert
    if (n<N)
    {
        if (pC[n]==k)
        {
            // pX is laid out dimension-major: pX[d*N + n]
            xsum[x] = pX[d*N+n];
        }
    }
    // Sum xsum for this block
    // the output array is KxGridDim.x in size
    float blockXsum = 0.0;
    reduceBlock(xsum, &blockXsum, OP_SUM);
    if (x==0)
    {
        pXMeanSum[k*gridDim.x+blockIdx.x] = blockXsum;
    }
}
/**
* Compute the mean of each component in a Gaussian mixture model
* ((N/B)xK) grid of (Bx1x1) blocks
*
* pXvarsum is a KxDxD array
*/
/**
 * Partial (d1, d2) covariance sum for component k: accumulates
 * (x[d1] - mean[d1]) * (x[d2] - mean[d2]) over the points assigned to k.
 * Same ((N/B) x K) grid layout as computeXSum; each block writes one
 * partial sum into pXCovarSum[k * gridDim.x + blockIdx.x].
 */
__global__ void computeXVarSum(int N,
                               int D,
                               int* pC,
                               int d1,
                               int d2,
                               float* pX,
                               float* pXmean,
                               float* pXCovarSum)
{
    int k = blockIdx.y;
    int x = threadIdx.x;
    int n = blockIdx.x * blockDim.x + x;
    __shared__ float xvarsum[B];
    xvarsum[x] = 0.0;  // inert value for out-of-range / other-component slots
    // Component means for the two requested dimensions (pXmean is K x D).
    float xmean1 = pXmean[k*D+d1];
    float xmean2 = pXmean[k*D+d2];
    if (n<N)
    {
        if (pC[n]==k)
        {
            xvarsum[x] = (pX[d1*N+n]-xmean1)*(pX[d2*N+n]-xmean2);
        }
    }
    // Sum xsum for this block
    // the output array is KxGridDim.x in size
    float blockXvarsum = 0.0;
    reduceBlock(xvarsum, &blockXvarsum, OP_SUM);
    if (x==0)
    {
        pXCovarSum[k*gridDim.x+blockIdx.x] = blockXvarsum;
    }
}
/**
* Compute the per-spike contributions to the probability of the n-th spike
* occuring on process k. Spike n's process identity affects the parenting
* spike Z[n] as well as any child spikes n' such that Z[n']=n.
*/
/**
 * Per-spike contributions to the log probability that spike n occurs on
 * process cn (cn = blockIdx.y). Each thread handles one spike m and adds its
 * term — as parent of n, as n itself (background rate + Gaussian spatial
 * prior), or as a child parented by n — then blocks reduce and write
 * partials to pPrCn[cn * gridDim.x + blockIdx.x].
 */
__global__ void computePerSpikePrCn(int n,
                                    int N,
                                    int K,
                                    int D,
                                    float* pX,
                                    float* pMuX,
                                    float* pLamX,
                                    int* pZ,
                                    int* pC,
                                    bool* pA,
                                    float* pW,
                                    float* pLam,
                                    float* pPrCn)
{
    int cn = blockIdx.y;
    int x = threadIdx.x;
    int m = blockIdx.x * blockDim.x + x; // this thread represents the m-th spike
    __shared__ float logprcn[B];
    logprcn[x] = 0.0;
    if (m<N)
    {
        // Get the m-th spike's process identity
        int cm = pC[m];
        if (m<n && pZ[n]==m)
        {
            // if the m-th spike is the parent of spike n, calculate its contribution
            logprcn[x] += logf(pA[cm*K+cn]) + logf(pW[cm*K+cn]);
        }
        else if (m==n)
        {
            // if m==n then calculate the prior pr given mu and the pr if it is parented
            // by the background
            if (pZ[n]==-1)
            {
                // TODO: UPDATE PYTHON CODE FOR NON-HOMOGENEOUS BACKGROUND RATES
                logprcn[x] += logf(pLam[cn*N+n]);
            }
            // Calculate the prior probability given mu and sigma
            // This involves a vector-matrix-vector multiply for the Gaussian logprob
            // (quadratic form (x - mu)^T * Lam * (x - mu); pLamX is K x D x D)
            for (int d1=0; d1<D; d1++)
            {
                for (int d2=0; d2<D; d2++)
                {
                    float xn1 = pX[d1*N+n];
                    float xn2 = pX[d2*N+n];
                    logprcn[x] += -1*(xn1-pMuX[cn*D+d1])*pLamX[cn*D*D+d1*D+d2]*(xn2-pMuX[cn*D+d2]);
                }
            }
        }
        else if (m>n && pZ[m]==n)
        {
            // if the m-th spike is parented by spike n calculate its contribution
            logprcn[x] += logf(pA[cn*K+cm]) + logf(pW[cn*K+cm]);
        }
    }
    // Sum the result for each block
    // the output array is KxGridDim.x in size
    float blockLogPrCnSum = 0.0;
    reduceBlock(logprcn, &blockLogPrCnSum, OP_SUM);
    if (x==0)
    {
        pPrCn[cn*gridDim.x+blockIdx.x] = blockLogPrCnSum;
    }
}
/**
* Compute the log Q-ratio for accepting a MH proposal made by the
* MetaProcessId model. The proposal is to change all spikes affiliated with
* underlying process k to a new meta-process m, while simultaneously updating
* the parent assignments Z. The log probability of accepting such a proposal is
* proportional to a sum over terms for every spike that either occurs on the
* underlying process k, or may have a parent spike on underlying process k.
*/
__global__ void computeLogQratio(int k,
int m,
int N,
int M,
int* pY,
int* pC,
float* pLam,
bool* pA,
float* pW,
float* pGS,
int* pColPtrs,
int* pRowIndices,
float* pLogQRatio
)
{
int x = threadIdx.x;
int n = x + blockIdx.x*blockDim.x;
__shared__ float logQratio[B];
logQratio[x] = 0.0;
if (n < N)
{
// the ratio is equal to log(num)-log(den), where each term consists
// of a sum over spike children
float num = 0;
float den = 0;
int cn = pC[n];
int yn_old = pY[cn];
int yn_new = (cn==k) ? m : yn_old;
num += pLam[yn_new*N+n];
den += pLam[yn_old*N+n];
for (int pa_off=pColPtrs[n]; pa_off<pColPtrs[n+1]; pa_off++)
{
int n_pa = pRowIndices[pa_off];
int c_pa = pC[n_pa];
int y_pa_old = pY[c_pa];
int y_pa_new = (c_pa==k) ? m : y_pa_old;
num += pA[y_pa_new*M+yn_new] * pW[y_pa_new*M+yn_new] * pGS[pa_off];
den += pA[y_pa_old*M+yn_old] * pW[y_pa_old*M+yn_old] * pGS[pa_off];
}
// Update this spikes contribution to log Q-ratio
logQratio[x] = logf(num)-logf(den);
}
// Sum over all the spikes in this block
float logQratioSum = 0.0;
reduceBlock(logQratio, &logQratioSum, OP_SUM);
if (x==0)
{
pLogQRatio[blockIdx.x] = logQratioSum;
}
} |
4875450df8e3dbed6aaea27f3698d7b2a751f348.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2017, 2019 ETH Zrich, Thomas Schps
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "libvis/cuda/patch_match_stereo.cuh"
#include <math_constants.h>
#include "libvis/cuda/cuda_auto_tuner.h"
#include "libvis/cuda/cuda_unprojection_lookup.cuh"
#include "libvis/cuda/cuda_util.cuh"
#include "libvis/cuda/cuda_util.h"
#include "libvis/cuda/patch_match_stereo_cost.cuh"
namespace vis {
__global__ void MedianFilterDepthMap3x3CUDAKernel(
int context_radius,
CUDABuffer_<float> inv_depth_map,
CUDABuffer_<float> inv_depth_map_out,
CUDABuffer_<float> costs,
CUDABuffer_<float> costs_out,
CUDABuffer_<float> second_best_costs,
CUDABuffer_<float> second_best_costs_out) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const float kInvalidInvDepth = 0; // TODO: De-duplicate with above
if (x >= context_radius && y >= context_radius &&
x < inv_depth_map.width() - context_radius && y < inv_depth_map.height() - context_radius) {
// Collect valid depth values of 3x3 neighborhood
int count = 1;
float inv_depths[9];
float cost[9];
float second_best_cost[9];
inv_depths[0] = inv_depth_map(y, x);
if (inv_depths[0] == kInvalidInvDepth) {
inv_depth_map_out(y, x) = kInvalidInvDepth;
costs_out(y, x) = CUDART_NAN_F;
second_best_costs_out(y, x) = CUDART_NAN_F;
return;
}
cost[0] = costs(y, x);
second_best_cost[0] = second_best_costs(y, x);
#pragma unroll
for (int dy = -1; dy <= 1; ++ dy) {
if (y + dy < context_radius || y + dy >= inv_depth_map.height() - context_radius) {
continue;
}
#pragma unroll
for (int dx = -1; dx <= 1; ++ dx) {
if (dy == 0 && dx == 0) {
continue;
}
if (x + dx < context_radius || x + dx >= inv_depth_map.width() - context_radius) {
continue;
}
float inv_depth = inv_depth_map(y + dy, x + dx);
if (inv_depth != kInvalidInvDepth) {
inv_depths[count] = inv_depth;
cost[count] = costs(y + dy, x + dx);
second_best_cost[count] = second_best_costs(y + dy, x + dx);
++ count;
}
}
}
// Sort depth values up to the middle of the maximum count
for (int i = 0; i <= 4; ++ i) {
for (int k = i + 1; k < 9; ++ k) {
if (k < count && inv_depths[i] > inv_depths[k]) {
// Swap.
float temp = inv_depths[i];
inv_depths[i] = inv_depths[k];
inv_depths[k] = temp;
temp = cost[i];
cost[i] = cost[k];
cost[k] = temp;
temp = second_best_cost[i];
second_best_cost[i] = second_best_cost[k];
second_best_cost[k] = temp;
}
}
}
// Assign the median
if (count % 2 == 1) {
inv_depth_map_out(y, x) = inv_depths[count / 2];
costs_out(y, x) = cost[count / 2];
second_best_costs_out(y, x) = second_best_cost[count / 2];
} else {
// For disambiguation in the even-count case, use the value which is
// closer to the average of the two middle values.
float average = 0.5f * (inv_depths[count / 2 - 1] + inv_depths[count / 2]);
if (fabs(average - inv_depths[count / 2 - 1]) <
fabs(average - inv_depths[count / 2])) {
inv_depth_map_out(y, x) = inv_depths[count / 2 - 1];
costs_out(y, x) = cost[count / 2 - 1];
second_best_costs_out(y, x) = second_best_cost[count / 2 - 1];
} else {
inv_depth_map_out(y, x) = inv_depths[count / 2];
costs_out(y, x) = cost[count / 2];
second_best_costs_out(y, x) = second_best_cost[count / 2];
}
}
} else if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) {
inv_depth_map_out(y, x) = kInvalidInvDepth;
costs_out(y, x) = CUDART_NAN_F;
second_best_costs_out(y, x) = CUDART_NAN_F;
}
}
void MedianFilterDepthMap3x3CUDA(
hipStream_t stream,
int context_radius,
CUDABuffer_<float>* inv_depth_map,
CUDABuffer_<float>* inv_depth_map_out,
CUDABuffer_<float>* costs,
CUDABuffer_<float>* costs_out,
CUDABuffer_<float>* second_best_costs,
CUDABuffer_<float>* second_best_costs_out) {
CHECK_CUDA_NO_ERROR();
CUDA_AUTO_TUNE_2D(
MedianFilterDepthMap3x3CUDAKernel,
32, 32,
inv_depth_map->width(), inv_depth_map->height(),
0, stream,
/* kernel parameters */
context_radius,
*inv_depth_map,
*inv_depth_map_out,
*costs,
*costs_out,
*second_best_costs,
*second_best_costs_out);
CHECK_CUDA_NO_ERROR();
}
__global__ void BilateralFilterCUDAKernel(
float denom_xy,
float denom_value,
int radius,
int radius_squared,
CUDABuffer_<float> inv_depth_map,
CUDABuffer_<float> inv_depth_map_out) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const float kInvalidInvDepth = 0; // TODO: De-duplicate with above
if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) {
const float center_value = inv_depth_map(y, x);
if (center_value == kInvalidInvDepth) {
inv_depth_map_out(y, x) = kInvalidInvDepth;
return;
}
// Bilateral filtering.
float sum = 0;
float weight = 0;
const int min_y = max(static_cast<int>(0), static_cast<int>(y - radius));
const int max_y = min(static_cast<int>(inv_depth_map_out.height() - 1), static_cast<int>(y + radius));
for (int sample_y = min_y; sample_y <= max_y; ++ sample_y) {
const int dy = sample_y - y;
const int min_x = max(static_cast<int>(0), static_cast<int>(x - radius));
const int max_x = min(static_cast<int>(inv_depth_map_out.width() - 1), static_cast<int>(x + radius));
for (int sample_x = min_x; sample_x <= max_x; ++ sample_x) {
const int dx = sample_x - x;
const int grid_distance_squared = dx * dx + dy * dy;
if (grid_distance_squared > radius_squared) {
continue;
}
const float sample = inv_depth_map(sample_y, sample_x);
if (sample == kInvalidInvDepth) {
continue;
}
float value_distance_squared = center_value - sample;
value_distance_squared *= value_distance_squared;
float w = exp(-grid_distance_squared / denom_xy + -value_distance_squared / denom_value);
sum += w * sample;
weight += w;
}
}
inv_depth_map_out(y, x) = (weight == 0) ? kInvalidInvDepth : (sum / weight);
}
}
void BilateralFilterCUDA(
hipStream_t stream,
float sigma_xy,
float sigma_value,
float radius_factor,
const CUDABuffer_<float>& inv_depth_map,
CUDABuffer_<float>* inv_depth_map_out) {
CHECK_CUDA_NO_ERROR();
int radius = radius_factor * sigma_xy + 0.5f;
CUDA_AUTO_TUNE_2D(
BilateralFilterCUDAKernel,
32, 32,
inv_depth_map_out->width(), inv_depth_map_out->height(),
0, stream,
/* kernel parameters */
2.0f * sigma_xy * sigma_xy,
2.0f * sigma_value * sigma_value,
radius,
radius * radius,
inv_depth_map,
*inv_depth_map_out);
CHECK_CUDA_NO_ERROR();
}
__global__ void FillHolesCUDAKernel(
CUDABuffer_<float> inv_depth_map,
CUDABuffer_<float> inv_depth_map_out) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const float kInvalidInvDepth = 0; // TODO: De-duplicate with above
if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) {
const float center_inv_depth = inv_depth_map(y, x);
if (center_inv_depth != kInvalidInvDepth ||
x < 1 ||
y < 1 ||
x >= inv_depth_map.width() - 1 ||
y >= inv_depth_map.height() - 1) {
inv_depth_map_out(y, x) = center_inv_depth;
return;
}
// Get the average depth of the neighbor pixels.
float sum = 0;
int count = 0;
#pragma unroll
for (int dy = -1; dy <= 1; ++ dy) {
#pragma unroll
for (int dx = -1; dx <= 1; ++ dx) {
if (dx == 0 && dy == 0) {
continue;
}
float inv_depth = inv_depth_map(y + dy, x + dx);
if (inv_depth != kInvalidInvDepth) {
sum += inv_depth;
++ count;
}
}
}
float avg_inv_depth = sum / count;
// Fill in this pixel if there are at least a minimum number of valid
// neighbor pixels nearby which have similar depth.
constexpr float kSimilarDepthFactorThreshold = 1.01f; // TODO: Make parameter
constexpr int kMinSimilarPixelsForFillIn = 6; // TODO: Make parameter
sum = 0;
count = 0;
#pragma unroll
for (int dy = -1; dy <= 1; ++ dy) {
#pragma unroll
for (int dx = -1; dx <= 1; ++ dx) {
if (dx == 0 && dy == 0) {
continue;
}
float inv_depth = inv_depth_map(y + dy, x + dx);
if (inv_depth != kInvalidInvDepth) {
float factor = inv_depth / avg_inv_depth;
if (factor < 1) {
factor = 1 / factor;
}
if (factor <= kSimilarDepthFactorThreshold) {
sum += inv_depth;
++ count;
}
}
}
}
inv_depth_map_out(y, x) = (count >= kMinSimilarPixelsForFillIn) ? (sum / count) : kInvalidInvDepth;
}
}
void FillHolesCUDA(
hipStream_t stream,
const CUDABuffer_<float>& inv_depth_map,
CUDABuffer_<float>* inv_depth_map_out) {
CHECK_CUDA_NO_ERROR();
CUDA_AUTO_TUNE_2D(
FillHolesCUDAKernel,
32, 32,
inv_depth_map_out->width(), inv_depth_map_out->height(),
0, stream,
/* kernel parameters */
inv_depth_map,
*inv_depth_map_out);
CHECK_CUDA_NO_ERROR();
}
}
| 4875450df8e3dbed6aaea27f3698d7b2a751f348.cu | // Copyright 2017, 2019 ETH Zürich, Thomas Schöps
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "libvis/cuda/patch_match_stereo.cuh"
#include <math_constants.h>
#include "libvis/cuda/cuda_auto_tuner.h"
#include "libvis/cuda/cuda_unprojection_lookup.cuh"
#include "libvis/cuda/cuda_util.cuh"
#include "libvis/cuda/cuda_util.h"
#include "libvis/cuda/patch_match_stereo_cost.cuh"
namespace vis {
__global__ void MedianFilterDepthMap3x3CUDAKernel(
int context_radius,
CUDABuffer_<float> inv_depth_map,
CUDABuffer_<float> inv_depth_map_out,
CUDABuffer_<float> costs,
CUDABuffer_<float> costs_out,
CUDABuffer_<float> second_best_costs,
CUDABuffer_<float> second_best_costs_out) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const float kInvalidInvDepth = 0; // TODO: De-duplicate with above
if (x >= context_radius && y >= context_radius &&
x < inv_depth_map.width() - context_radius && y < inv_depth_map.height() - context_radius) {
// Collect valid depth values of 3x3 neighborhood
int count = 1;
float inv_depths[9];
float cost[9];
float second_best_cost[9];
inv_depths[0] = inv_depth_map(y, x);
if (inv_depths[0] == kInvalidInvDepth) {
inv_depth_map_out(y, x) = kInvalidInvDepth;
costs_out(y, x) = CUDART_NAN_F;
second_best_costs_out(y, x) = CUDART_NAN_F;
return;
}
cost[0] = costs(y, x);
second_best_cost[0] = second_best_costs(y, x);
#pragma unroll
for (int dy = -1; dy <= 1; ++ dy) {
if (y + dy < context_radius || y + dy >= inv_depth_map.height() - context_radius) {
continue;
}
#pragma unroll
for (int dx = -1; dx <= 1; ++ dx) {
if (dy == 0 && dx == 0) {
continue;
}
if (x + dx < context_radius || x + dx >= inv_depth_map.width() - context_radius) {
continue;
}
float inv_depth = inv_depth_map(y + dy, x + dx);
if (inv_depth != kInvalidInvDepth) {
inv_depths[count] = inv_depth;
cost[count] = costs(y + dy, x + dx);
second_best_cost[count] = second_best_costs(y + dy, x + dx);
++ count;
}
}
}
// Sort depth values up to the middle of the maximum count
for (int i = 0; i <= 4; ++ i) {
for (int k = i + 1; k < 9; ++ k) {
if (k < count && inv_depths[i] > inv_depths[k]) {
// Swap.
float temp = inv_depths[i];
inv_depths[i] = inv_depths[k];
inv_depths[k] = temp;
temp = cost[i];
cost[i] = cost[k];
cost[k] = temp;
temp = second_best_cost[i];
second_best_cost[i] = second_best_cost[k];
second_best_cost[k] = temp;
}
}
}
// Assign the median
if (count % 2 == 1) {
inv_depth_map_out(y, x) = inv_depths[count / 2];
costs_out(y, x) = cost[count / 2];
second_best_costs_out(y, x) = second_best_cost[count / 2];
} else {
// For disambiguation in the even-count case, use the value which is
// closer to the average of the two middle values.
float average = 0.5f * (inv_depths[count / 2 - 1] + inv_depths[count / 2]);
if (fabs(average - inv_depths[count / 2 - 1]) <
fabs(average - inv_depths[count / 2])) {
inv_depth_map_out(y, x) = inv_depths[count / 2 - 1];
costs_out(y, x) = cost[count / 2 - 1];
second_best_costs_out(y, x) = second_best_cost[count / 2 - 1];
} else {
inv_depth_map_out(y, x) = inv_depths[count / 2];
costs_out(y, x) = cost[count / 2];
second_best_costs_out(y, x) = second_best_cost[count / 2];
}
}
} else if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) {
inv_depth_map_out(y, x) = kInvalidInvDepth;
costs_out(y, x) = CUDART_NAN_F;
second_best_costs_out(y, x) = CUDART_NAN_F;
}
}
void MedianFilterDepthMap3x3CUDA(
cudaStream_t stream,
int context_radius,
CUDABuffer_<float>* inv_depth_map,
CUDABuffer_<float>* inv_depth_map_out,
CUDABuffer_<float>* costs,
CUDABuffer_<float>* costs_out,
CUDABuffer_<float>* second_best_costs,
CUDABuffer_<float>* second_best_costs_out) {
CHECK_CUDA_NO_ERROR();
CUDA_AUTO_TUNE_2D(
MedianFilterDepthMap3x3CUDAKernel,
32, 32,
inv_depth_map->width(), inv_depth_map->height(),
0, stream,
/* kernel parameters */
context_radius,
*inv_depth_map,
*inv_depth_map_out,
*costs,
*costs_out,
*second_best_costs,
*second_best_costs_out);
CHECK_CUDA_NO_ERROR();
}
__global__ void BilateralFilterCUDAKernel(
float denom_xy,
float denom_value,
int radius,
int radius_squared,
CUDABuffer_<float> inv_depth_map,
CUDABuffer_<float> inv_depth_map_out) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const float kInvalidInvDepth = 0; // TODO: De-duplicate with above
if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) {
const float center_value = inv_depth_map(y, x);
if (center_value == kInvalidInvDepth) {
inv_depth_map_out(y, x) = kInvalidInvDepth;
return;
}
// Bilateral filtering.
float sum = 0;
float weight = 0;
const int min_y = max(static_cast<int>(0), static_cast<int>(y - radius));
const int max_y = min(static_cast<int>(inv_depth_map_out.height() - 1), static_cast<int>(y + radius));
for (int sample_y = min_y; sample_y <= max_y; ++ sample_y) {
const int dy = sample_y - y;
const int min_x = max(static_cast<int>(0), static_cast<int>(x - radius));
const int max_x = min(static_cast<int>(inv_depth_map_out.width() - 1), static_cast<int>(x + radius));
for (int sample_x = min_x; sample_x <= max_x; ++ sample_x) {
const int dx = sample_x - x;
const int grid_distance_squared = dx * dx + dy * dy;
if (grid_distance_squared > radius_squared) {
continue;
}
const float sample = inv_depth_map(sample_y, sample_x);
if (sample == kInvalidInvDepth) {
continue;
}
float value_distance_squared = center_value - sample;
value_distance_squared *= value_distance_squared;
float w = exp(-grid_distance_squared / denom_xy + -value_distance_squared / denom_value);
sum += w * sample;
weight += w;
}
}
inv_depth_map_out(y, x) = (weight == 0) ? kInvalidInvDepth : (sum / weight);
}
}
void BilateralFilterCUDA(
cudaStream_t stream,
float sigma_xy,
float sigma_value,
float radius_factor,
const CUDABuffer_<float>& inv_depth_map,
CUDABuffer_<float>* inv_depth_map_out) {
CHECK_CUDA_NO_ERROR();
int radius = radius_factor * sigma_xy + 0.5f;
CUDA_AUTO_TUNE_2D(
BilateralFilterCUDAKernel,
32, 32,
inv_depth_map_out->width(), inv_depth_map_out->height(),
0, stream,
/* kernel parameters */
2.0f * sigma_xy * sigma_xy,
2.0f * sigma_value * sigma_value,
radius,
radius * radius,
inv_depth_map,
*inv_depth_map_out);
CHECK_CUDA_NO_ERROR();
}
__global__ void FillHolesCUDAKernel(
CUDABuffer_<float> inv_depth_map,
CUDABuffer_<float> inv_depth_map_out) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const float kInvalidInvDepth = 0; // TODO: De-duplicate with above
if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) {
const float center_inv_depth = inv_depth_map(y, x);
if (center_inv_depth != kInvalidInvDepth ||
x < 1 ||
y < 1 ||
x >= inv_depth_map.width() - 1 ||
y >= inv_depth_map.height() - 1) {
inv_depth_map_out(y, x) = center_inv_depth;
return;
}
// Get the average depth of the neighbor pixels.
float sum = 0;
int count = 0;
#pragma unroll
for (int dy = -1; dy <= 1; ++ dy) {
#pragma unroll
for (int dx = -1; dx <= 1; ++ dx) {
if (dx == 0 && dy == 0) {
continue;
}
float inv_depth = inv_depth_map(y + dy, x + dx);
if (inv_depth != kInvalidInvDepth) {
sum += inv_depth;
++ count;
}
}
}
float avg_inv_depth = sum / count;
// Fill in this pixel if there are at least a minimum number of valid
// neighbor pixels nearby which have similar depth.
constexpr float kSimilarDepthFactorThreshold = 1.01f; // TODO: Make parameter
constexpr int kMinSimilarPixelsForFillIn = 6; // TODO: Make parameter
sum = 0;
count = 0;
#pragma unroll
for (int dy = -1; dy <= 1; ++ dy) {
#pragma unroll
for (int dx = -1; dx <= 1; ++ dx) {
if (dx == 0 && dy == 0) {
continue;
}
float inv_depth = inv_depth_map(y + dy, x + dx);
if (inv_depth != kInvalidInvDepth) {
float factor = inv_depth / avg_inv_depth;
if (factor < 1) {
factor = 1 / factor;
}
if (factor <= kSimilarDepthFactorThreshold) {
sum += inv_depth;
++ count;
}
}
}
}
inv_depth_map_out(y, x) = (count >= kMinSimilarPixelsForFillIn) ? (sum / count) : kInvalidInvDepth;
}
}
void FillHolesCUDA(
cudaStream_t stream,
const CUDABuffer_<float>& inv_depth_map,
CUDABuffer_<float>* inv_depth_map_out) {
CHECK_CUDA_NO_ERROR();
CUDA_AUTO_TUNE_2D(
FillHolesCUDAKernel,
32, 32,
inv_depth_map_out->width(), inv_depth_map_out->height(),
0, stream,
/* kernel parameters */
inv_depth_map,
*inv_depth_map_out);
CHECK_CUDA_NO_ERROR();
}
}
|
c87b9d1bd6e7c466039a11a42171a28e7ca9a46e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
__global__ void stencil_1d_simple(int *in, int *out)
{
// compute this thread's global index
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x + RADIUS;
int alpha = 1;
int beta = 1;
if(i < NUM_ELEMENTS + RADIUS ){
/* FIX ME #1 */
}
}
__global__ void stencil_1d_improved(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE]; /* FIXME #2*/
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) ; /* FIXME #3*/
int lindex = threadIdx.x ; /* FIXME #4 */
// Read input elements into shared memory
temp[lindex] = in[gindex];
//Load ghost cells (halos)
if (threadIdx.x < RADIUS)
{
/* FIXME #5 */
}
// Make sure all threads get to this point before proceeding!
/* FIXME #6 */
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
}
int main()
{
unsigned int i;
int N = NUM_ELEMENTS + 2 * RADIUS;
int h_in[N], h_out[N];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (N); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
hipMalloc( &d_in, N * sizeof(int)) ;
hipMalloc( &d_out, N * sizeof(int)) ;
// Copy input data to device
hipMemcpy( d_in, h_in, N * sizeof(int), hipMemcpyHostToDevice) ;
hipLaunchKernelGGL(( stencil_1d_simple), dim3((NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out);
//stencil_1d_improved<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
hipMemcpy( h_out, d_out, N * sizeof(int), hipMemcpyDeviceToHost) ;
// Verify every out value is 7
for( i = RADIUS; i < NUM_ELEMENTS+RADIUS; ++i )
if (h_out[i] != RADIUS*2+1)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS+RADIUS)
printf("SUCCESS!\n");
// Free out memory
hipFree(d_in);
hipFree(d_out);
return 0;
}
| c87b9d1bd6e7c466039a11a42171a28e7ca9a46e.cu | #include <stdio.h>
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
__global__ void stencil_1d_simple(int *in, int *out)
{
// compute this thread's global index
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x + RADIUS;
int alpha = 1;
int beta = 1;
if(i < NUM_ELEMENTS + RADIUS ){
/* FIX ME #1 */
}
}
__global__ void stencil_1d_improved(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE]; /* FIXME #2*/
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) ; /* FIXME #3*/
int lindex = threadIdx.x ; /* FIXME #4 */
// Read input elements into shared memory
temp[lindex] = in[gindex];
//Load ghost cells (halos)
if (threadIdx.x < RADIUS)
{
/* FIXME #5 */
}
// Make sure all threads get to this point before proceeding!
/* FIXME #6 */
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
}
int main()
{
unsigned int i;
int N = NUM_ELEMENTS + 2 * RADIUS;
int h_in[N], h_out[N];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (N); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
cudaMalloc( &d_in, N * sizeof(int)) ;
cudaMalloc( &d_out, N * sizeof(int)) ;
// Copy input data to device
cudaMemcpy( d_in, h_in, N * sizeof(int), cudaMemcpyHostToDevice) ;
stencil_1d_simple<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
//stencil_1d_improved<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
cudaMemcpy( h_out, d_out, N * sizeof(int), cudaMemcpyDeviceToHost) ;
// Verify every out value is 7
for( i = RADIUS; i < NUM_ELEMENTS+RADIUS; ++i )
if (h_out[i] != RADIUS*2+1)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS+RADIUS)
printf("SUCCESS!\n");
// Free out memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
e24ecaa96ddf107aa1c859b2e9c54359bf296b1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "flamegpu/runtime/messaging/MessageArray2D.h"
#include "flamegpu/model/AgentDescription.h" // Used by Move-Assign
#include "flamegpu/simulation/detail/CUDAMessage.h"
#include "flamegpu/simulation/detail/CUDAScatter.cuh"
#include "flamegpu/runtime/messaging/MessageArray2D/MessageArray2DHost.h"
// #include "flamegpu/runtime/messaging/MessageArray2D/MessageArray2DDevice.cuh"
#include "flamegpu/detail/cuda.cuh"
namespace flamegpu {
/**
* Constructor
* Allocates memory on device for message list length
* @param a Parent CUDAMessage, used to access message settings, data ptrs etc
*/
MessageArray2D::CUDAModelHandler::CUDAModelHandler(detail::CUDAMessage &a)
: MessageSpecialisationHandler()
, d_metadata(nullptr)
, sim_message(a)
, d_write_flag(nullptr)
, d_write_flag_len(0) {
const Data& d = static_cast<const Data &>(a.getMessageData());
memcpy(&hd_metadata.dimensions, d.dimensions.data(), d.dimensions.size() * sizeof(unsigned int));
hd_metadata.length = d.dimensions[0] * d.dimensions[1];
}
void MessageArray2D::CUDAModelHandler::init(detail::CUDAScatter &scatter, unsigned int streamId, hipStream_t stream) {
allocateMetaDataDevicePtr(stream);
// Allocate messages
this->sim_message.resize(hd_metadata.length, scatter, stream, streamId);
this->sim_message.setMessageCount(hd_metadata.length);
// Zero the output arrays
auto &read_list = this->sim_message.getReadList();
auto &write_list = this->sim_message.getWriteList();
for (auto &var : this->sim_message.getMessageData().variables) {
// Elements is harmless, futureproof for arrays support
// hd_metadata.length is used, as message array can be longer than message count
gpuErrchk(hipMemsetAsync(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
gpuErrchk(hipMemsetAsync(read_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
}
gpuErrchk(hipStreamSynchronize(stream));
}
void MessageArray2D::CUDAModelHandler::allocateMetaDataDevicePtr(hipStream_t stream) {
if (d_metadata == nullptr) {
gpuErrchk(hipMalloc(&d_metadata, sizeof(MetaData)));
gpuErrchk(hipMemcpyAsync(d_metadata, &hd_metadata, sizeof(MetaData), hipMemcpyHostToDevice));
gpuErrchk(hipStreamSynchronize(stream));
}
}
void MessageArray2D::CUDAModelHandler::freeMetaDataDevicePtr() {
if (d_metadata != nullptr) {
gpuErrchk(flamegpu::detail::cuda::hipFree(d_metadata));
}
d_metadata = nullptr;
if (d_write_flag) {
gpuErrchk(flamegpu::detail::cuda::hipFree(d_write_flag));
}
d_write_flag = nullptr;
d_write_flag_len = 0;
}
void MessageArray2D::CUDAModelHandler::buildIndex(detail::CUDAScatter &scatter, unsigned int streamId, hipStream_t stream) {
const unsigned int MESSAGE_COUNT = this->sim_message.getMessageCount();
// Zero the output arrays
auto &read_list = this->sim_message.getReadList();
auto &write_list = this->sim_message.getWriteList();
for (auto &var : this->sim_message.getMessageData().variables) {
// Elements is harmless, futureproof for arrays support
// hd_metadata.length is used, as message array can be longer than message count
gpuErrchk(hipMemsetAsync(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length, stream));
}
// Reorder messages
unsigned int *t_d_write_flag = nullptr;
if (MESSAGE_COUNT > hd_metadata.length) {
// Use internal memory for d_write_flag
if (d_write_flag_len < MESSAGE_COUNT) {
// Increase length
if (d_write_flag) {
gpuErrchk(flamegpu::detail::cuda::hipFree(d_write_flag));
}
d_write_flag_len = static_cast<unsigned int>(MESSAGE_COUNT * 1.1f);
gpuErrchk(hipMalloc(&d_write_flag, sizeof(unsigned int) * d_write_flag_len));
}
t_d_write_flag = d_write_flag;
}
scatter.arrayMessageReorder(streamId, stream, this->sim_message.getMessageData().variables, read_list, write_list, MESSAGE_COUNT, hd_metadata.length, t_d_write_flag);
this->sim_message.swap();
// Reset message count back to full array length
// Array message exposes not output messages as 0
if (MESSAGE_COUNT != hd_metadata.length)
this->sim_message.setMessageCount(hd_metadata.length);
// Detect errors
// TODO
gpuErrchk(hipStreamSynchronize(stream)); // Redundant: Array msg reorder has a sync
}
/// <summary>
/// CDescription
/// </summary>
MessageArray2D::CDescription::CDescription(std::shared_ptr<Data> data)
: MessageBruteForce::CDescription(std::move(std::static_pointer_cast<MessageBruteForce::Data>(data))) { }
MessageArray2D::CDescription::CDescription(std::shared_ptr<const Data> data)
: CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
bool MessageArray2D::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
}
bool MessageArray2D::CDescription::operator!=(const CDescription& rhs) const {
return !(*this == rhs);
}
/**
* Const accessors
*/
std::array<flamegpu::size_type, 2> MessageArray2D::CDescription::getDimensions() const {
return std::static_pointer_cast<Data>(message)->dimensions;
}
flamegpu::size_type MessageArray2D::CDescription::getDimX() const {
return std::static_pointer_cast<Data>(message)->dimensions[0];
}
flamegpu::size_type MessageArray2D::CDescription::getDimY() const {
return std::static_pointer_cast<Data>(message)->dimensions[1];
}
/// <summary>
/// Description
/// Mutable handle; extends CDescription with setters.
/// </summary>
MessageArray2D::Description::Description(std::shared_ptr<Data> data)
    : CDescription(data) { }
/**
 * Accessors
 */
// Convenience overload: forwards the two sizes as a std::array.
void MessageArray2D::Description::setDimensions(const size_type len_x, const size_type len_y) {
    setDimensions({ len_x , len_y });
}
// Sets both dimensions of the message array.
// Throws exception::InvalidArgument if either dimension is zero.
void MessageArray2D::Description::setDimensions(const std::array<size_type, 2>& dims) {
    if (dims[0] == 0 || dims[1] == 0) {
        THROW exception::InvalidArgument("All dimensions must be above zero in array2D message.\n");
    }
    std::static_pointer_cast<Data>(message)->dimensions = dims;
}
/// <summary>
/// Data
/// Internal storage for an Array2D message type's configuration.
/// </summary>
// Fresh-construction: dimensions start at {0, 0} (unset) and an internal
// "___INDEX" variable is registered to hold each message's array slot.
MessageArray2D::Data::Data(std::shared_ptr<const ModelData> model, const std::string &message_name)
    : MessageBruteForce::Data(model, message_name)
    , dimensions({ 0, 0 }) {
    variables.emplace("___INDEX", Variable(1, size_type()));
}
// Copy-construction (used when cloning a model); validates that dimensions
// were set, as a zero dimension means setDimensions() was never called.
MessageArray2D::Data::Data(std::shared_ptr<const ModelData> model, const Data &other)
    : MessageBruteForce::Data(model, other)
    , dimensions(other.dimensions) {
    if (dimensions[0] == 0 || dimensions[1] == 0) {
        THROW exception::InvalidMessage("All dimensions must be ABOVE zero in array2D message '%s'\n", other.name.c_str());
    }
}
// Polymorphic copy under a new parent model.
MessageArray2D::Data *MessageArray2D::Data::clone(const std::shared_ptr<const ModelData> &newParent) {
    return new Data(newParent, *this);
}
// Factory for the runtime handler that manages this message type on the GPU.
std::unique_ptr<MessageSpecialisationHandler> MessageArray2D::Data::getSpecialisationHander(detail::CUDAMessage &owner) const {
    return std::unique_ptr<MessageSpecialisationHandler>(new CUDAModelHandler(owner));
}
// RTTI tag used to identify this message type.
std::type_index MessageArray2D::Data::getType() const { return std::type_index(typeid(MessageArray2D)); }
} // namespace flamegpu
| e24ecaa96ddf107aa1c859b2e9c54359bf296b1a.cu | #include "flamegpu/runtime/messaging/MessageArray2D.h"
#include "flamegpu/model/AgentDescription.h" // Used by Move-Assign
#include "flamegpu/simulation/detail/CUDAMessage.h"
#include "flamegpu/simulation/detail/CUDAScatter.cuh"
#include "flamegpu/runtime/messaging/MessageArray2D/MessageArray2DHost.h"
// #include "flamegpu/runtime/messaging/MessageArray2D/MessageArray2DDevice.cuh"
#include "flamegpu/detail/cuda.cuh"
namespace flamegpu {
/**
 * Constructor
 * Allocates memory on device for message list length
 * @param a Parent CUDAMessage, used to access message settings, data ptrs etc
 * @note No device allocation happens here; d_metadata/d_write_flag start null
 *       and are allocated lazily in init()/buildIndex().
 */
MessageArray2D::CUDAModelHandler::CUDAModelHandler(detail::CUDAMessage &a)
    : MessageSpecialisationHandler()
    , d_metadata(nullptr)
    , sim_message(a)
    , d_write_flag(nullptr)
    , d_write_flag_len(0) {
    const Data& d = static_cast<const Data &>(a.getMessageData());
    // NOTE(review): raw memcpy assumes size_type is the same width as
    // unsigned int (the MetaData::dimensions element type) — confirm.
    memcpy(&hd_metadata.dimensions, d.dimensions.data(), d.dimensions.size() * sizeof(unsigned int));
    // Total array capacity = x * y; messages occupy fixed slots in this range.
    hd_metadata.length = d.dimensions[0] * d.dimensions[1];
}
/**
 * Initialises the message list for the configured array dimensions.
 * Uploads metadata, resizes the message buffers to the full array length,
 * and zero-fills both read and write variable buffers.
 *
 * @param scatter  Scatter/compaction helper owned by the simulation
 * @param streamId Index of the stream within the simulation (for scatter internals)
 * @param stream   CUDA stream all async work is enqueued on
 */
void MessageArray2D::CUDAModelHandler::init(detail::CUDAScatter &scatter, unsigned int streamId, cudaStream_t stream) {
    allocateMetaDataDevicePtr(stream);
    // Allocate messages
    this->sim_message.resize(hd_metadata.length, scatter, stream, streamId);
    this->sim_message.setMessageCount(hd_metadata.length);
    // Zero the output arrays
    auto &read_list = this->sim_message.getReadList();
    auto &write_list = this->sim_message.getWriteList();
    for (auto &var : this->sim_message.getMessageData().variables) {
        // Elements is harmless, futureproof for arrays support
        // hd_metadata.length is used, as message array can be longer than message count
        // Fix: pass `stream` so the memsets are ordered on the same stream that is
        // synchronised below (matches buildIndex()); previously they were enqueued
        // on the default stream, which is unsafe under per-thread default stream.
        gpuErrchk(cudaMemsetAsync(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length, stream));
        gpuErrchk(cudaMemsetAsync(read_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length, stream));
    }
    gpuErrchk(cudaStreamSynchronize(stream));
}
/**
 * Lazily allocates and uploads the device-side MetaData struct.
 * No-op if d_metadata has already been allocated.
 *
 * @param stream CUDA stream used for the async upload (synchronised before return,
 *               so hd_metadata is safe to reuse afterwards)
 */
void MessageArray2D::CUDAModelHandler::allocateMetaDataDevicePtr(cudaStream_t stream) {
    if (d_metadata == nullptr) {
        gpuErrchk(cudaMalloc(&d_metadata, sizeof(MetaData)));
        // Fix: pass `stream` so the copy is ordered on the stream synchronised below;
        // previously the copy went to the default stream, which cudaStreamSynchronize(stream)
        // does not cover under per-thread default stream semantics.
        gpuErrchk(cudaMemcpyAsync(d_metadata, &hd_metadata, sizeof(MetaData), cudaMemcpyHostToDevice, stream));
        gpuErrchk(cudaStreamSynchronize(stream));
    }
}
/**
 * Releases all device allocations owned by this handler.
 * Safe to call when nothing has been allocated yet.
 */
void MessageArray2D::CUDAModelHandler::freeMetaDataDevicePtr() {
    // Release the device-side metadata struct, if present.
    if (d_metadata) {
        gpuErrchk(flamegpu::detail::cuda::cudaFree(d_metadata));
    }
    d_metadata = nullptr;
    // Release the scratch write-flag buffer grown by buildIndex(), if present.
    if (d_write_flag != nullptr) {
        gpuErrchk(flamegpu::detail::cuda::cudaFree(d_write_flag));
    }
    d_write_flag = nullptr;
    d_write_flag_len = 0;
}
/**
 * Scatters newly output messages into their array slots.
 * Zeroes the write buffers, reorders messages by their ___INDEX via the
 * scatter helper, swaps read/write lists, then restores the message count
 * to the full array length.
 *
 * @param scatter  Scatter/compaction helper owned by the simulation
 * @param streamId Index of the stream within the simulation
 * @param stream   CUDA stream the work is enqueued on
 */
void MessageArray2D::CUDAModelHandler::buildIndex(detail::CUDAScatter &scatter, unsigned int streamId, cudaStream_t stream) {
    const unsigned int MESSAGE_COUNT = this->sim_message.getMessageCount();
    // Zero the output arrays
    auto &read_list = this->sim_message.getReadList();
    auto &write_list = this->sim_message.getWriteList();
    for (auto &var : this->sim_message.getMessageData().variables) {
        // Elements is harmless, futureproof for arrays support
        // hd_metadata.length is used, as message array can be longer than message count
        gpuErrchk(cudaMemsetAsync(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length, stream));
    }
    // Reorder messages
    // NOTE(review): the scratch flag buffer is only supplied when more messages
    // were output than there are array slots — presumably so the reorder can
    // detect colliding writes; confirm against CUDAScatter::arrayMessageReorder.
    unsigned int *t_d_write_flag = nullptr;
    if (MESSAGE_COUNT > hd_metadata.length) {
        // Use internal memory for d_write_flag
        if (d_write_flag_len < MESSAGE_COUNT) {
            // Increase length
            if (d_write_flag) {
                gpuErrchk(flamegpu::detail::cuda::cudaFree(d_write_flag));
            }
            // Grow by 10% over the required size to amortise future reallocations.
            d_write_flag_len = static_cast<unsigned int>(MESSAGE_COUNT * 1.1f);
            gpuErrchk(cudaMalloc(&d_write_flag, sizeof(unsigned int) * d_write_flag_len));
        }
        t_d_write_flag = d_write_flag;
    }
    scatter.arrayMessageReorder(streamId, stream, this->sim_message.getMessageData().variables, read_list, write_list, MESSAGE_COUNT, hd_metadata.length, t_d_write_flag);
    this->sim_message.swap();
    // Reset message count back to full array length
    // Array message exposes non-output messages as 0
    if (MESSAGE_COUNT != hd_metadata.length)
        this->sim_message.setMessageCount(hd_metadata.length);
    // Detect errors
    // TODO
    gpuErrchk(cudaStreamSynchronize(stream));  // Redundant: Array msg reorder has a sync
}
/// <summary>
/// CDescription
/// Const (read-only) handle wrapping a shared MessageArray2D::Data.
/// </summary>
// Forward the (non-const) data pointer to the MessageBruteForce base handle.
MessageArray2D::CDescription::CDescription(std::shared_ptr<Data> data)
    : MessageBruteForce::CDescription(std::move(std::static_pointer_cast<MessageBruteForce::Data>(data))) { }
// Const overload: strips const and delegates to the non-const constructor.
MessageArray2D::CDescription::CDescription(std::shared_ptr<const Data> data)
    : CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
// Equality compares the pointed-to message data, not the handle identity.
bool MessageArray2D::CDescription::operator==(const CDescription& rhs) const {
    return *this->message == *rhs.message;  // Compare content is functionally the same
}
bool MessageArray2D::CDescription::operator!=(const CDescription& rhs) const {
    return !(*this == rhs);
}
/**
 * Const accessors
 * Read the 2D array dimensions stored in the underlying Data.
 */
// Returns both dimensions {x, y} of the message array.
std::array<flamegpu::size_type, 2> MessageArray2D::CDescription::getDimensions() const {
    return std::static_pointer_cast<Data>(message)->dimensions;
}
// Returns the first (x) dimension of the message array.
flamegpu::size_type MessageArray2D::CDescription::getDimX() const {
    return std::static_pointer_cast<Data>(message)->dimensions[0];
}
// Returns the second (y) dimension of the message array.
flamegpu::size_type MessageArray2D::CDescription::getDimY() const {
    return std::static_pointer_cast<Data>(message)->dimensions[1];
}
/// <summary>
/// Description
/// Mutable handle; extends CDescription with setters.
/// </summary>
MessageArray2D::Description::Description(std::shared_ptr<Data> data)
    : CDescription(data) { }
/**
 * Accessors
 */
// Convenience overload: forwards the two sizes as a std::array.
void MessageArray2D::Description::setDimensions(const size_type len_x, const size_type len_y) {
    setDimensions({ len_x , len_y });
}
// Sets both dimensions of the message array.
// Throws exception::InvalidArgument if either dimension is zero.
void MessageArray2D::Description::setDimensions(const std::array<size_type, 2>& dims) {
    if (dims[0] == 0 || dims[1] == 0) {
        THROW exception::InvalidArgument("All dimensions must be above zero in array2D message.\n");
    }
    std::static_pointer_cast<Data>(message)->dimensions = dims;
}
/// <summary>
/// Data
/// Internal storage for an Array2D message type's configuration.
/// </summary>
// Fresh-construction: dimensions start at {0, 0} (unset) and an internal
// "___INDEX" variable is registered to hold each message's array slot.
MessageArray2D::Data::Data(std::shared_ptr<const ModelData> model, const std::string &message_name)
    : MessageBruteForce::Data(model, message_name)
    , dimensions({ 0, 0 }) {
    variables.emplace("___INDEX", Variable(1, size_type()));
}
// Copy-construction (used when cloning a model); validates that dimensions
// were set, as a zero dimension means setDimensions() was never called.
MessageArray2D::Data::Data(std::shared_ptr<const ModelData> model, const Data &other)
    : MessageBruteForce::Data(model, other)
    , dimensions(other.dimensions) {
    if (dimensions[0] == 0 || dimensions[1] == 0) {
        THROW exception::InvalidMessage("All dimensions must be ABOVE zero in array2D message '%s'\n", other.name.c_str());
    }
}
// Polymorphic copy under a new parent model.
MessageArray2D::Data *MessageArray2D::Data::clone(const std::shared_ptr<const ModelData> &newParent) {
    return new Data(newParent, *this);
}
// Factory for the runtime handler that manages this message type on the GPU.
std::unique_ptr<MessageSpecialisationHandler> MessageArray2D::Data::getSpecialisationHander(detail::CUDAMessage &owner) const {
    return std::unique_ptr<MessageSpecialisationHandler>(new CUDAModelHandler(owner));
}
// RTTI tag used to identify this message type.
std::type_index MessageArray2D::Data::getType() const { return std::type_index(typeid(MessageArray2D)); }
} // namespace flamegpu
|
45a32d38b9f9645b0a389f1808eea3031b4e7cb5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_2d_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../max_subsampling_layer.h"
#include "../nn_types.h"
// Packed work-item descriptor: window_x in the high 16 bits, output x in the
// low 16 bits. Both values must fit in 16 bits (see the shift/mask packing).
struct __align__(4) window_x_x_config
{
    window_x_x_config(int window_x, int x)
    {
        this->window_x_x_pair = (((unsigned int)window_x) << 16) | (unsigned int)x;
    }
    unsigned int window_x_x_pair;
};
// Packed work-item descriptor: output y in the high 16 bits, base feature map
// id in the low 16 bits. Both values must fit in 16 bits.
struct __align__(4) y_feature_map_config
{
    y_feature_map_config(int y, int feature_map_id)
    {
        this->y_feature_map_id_pair = (((unsigned int)y) << 16) | (unsigned int)feature_map_id;
    }
    unsigned int y_feature_map_id_pair;
};
// Dynamically sized shared memory (3rd launch argument); holds one partial
// maximum per thread per feature map so threads covering the same output pixel
// can be combined across the window x positions.
extern __shared__ float arr_sh[];
// Each thread processes this many consecutive feature maps at once.
#define FEATURE_MAP_BLOCK_SIZE 4
// 2D max-subsampling kernel.
// Thread mapping: x = (window_x, output_x) pair index; y = (output_y, base
// feature map) pair index; z = entry index. Each thread reduces one column of
// the pooling window (all window rows at one window_x); the per-window_x
// partials are then combined through shared memory by the window_x==0 thread,
// which relies on the window_x positions for one output pixel being
// consecutive threads in the block (guaranteed by the launch configuration,
// which rounds blockDim.x to a multiple of subsampling_width).
__global__ void max_subsampling_2d_tex_kernel(
    float * __restrict output,
    const float * __restrict input,
    const window_x_x_config * __restrict window_x_x_config_list,
    const y_feature_map_config * __restrict y_feature_map_config_list,
    int subsampling_width,
    int subsampling_height,
    int input_width,
    int input_height,
    int output_width,
    int output_height,
    int feature_map_count,
    int entry_count,
    int window_x_x_config_count,
    int y_feature_map_config_count,
    int input_neuron_count,
    int output_neuron_count,
    int input_neuron_count_per_feature_map,
    int output_neuron_count_per_feature_map,
    int threadblock_size)
{
    int window_x_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
    int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
    int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
    // Flat index of this thread within the block; indexes the shared staging area.
    int local_thread_id = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
    float * vals = arr_sh;
    bool in_bounds = (entry_id < entry_count) && (window_x_x_config_id < window_x_x_config_count) && (feature_map_config_id < y_feature_map_config_count);
    float res[FEATURE_MAP_BLOCK_SIZE];
    int window_x;
    int output_x;
    int output_y;
    int base_feature_map_id;
    // item_valid[i-1]: whether feature map (base + i) exists (tail block may be partial).
    bool item_valid[FEATURE_MAP_BLOCK_SIZE - 1];
    if (in_bounds)
    {
        #pragma unroll
        for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
            res[i] = -1.0e37F;  // sentinel "minus infinity" for the max reduction
        // Unpack the work-item descriptors (see struct definitions above).
        window_x_x_config wxx = window_x_x_config_list[window_x_x_config_id];
        output_x = wxx.window_x_x_pair & 0xFFFF;
        window_x = wxx.window_x_x_pair >> 16;
        y_feature_map_config yfm = y_feature_map_config_list[feature_map_config_id];
        base_feature_map_id = yfm.y_feature_map_id_pair & 0xFFFF;
        output_y = yfm.y_feature_map_id_pair >> 16;
        #pragma unroll
        for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
            item_valid[i - 1] = (base_feature_map_id + i < feature_map_count);
        int input_x = output_x * subsampling_width + window_x;
        int input_y = output_y * subsampling_height;
        // Per-feature-map input offsets for the top row of this window column.
        int current_input_elem_id[FEATURE_MAP_BLOCK_SIZE];
        current_input_elem_id[0] = entry_id * input_neuron_count + base_feature_map_id * input_neuron_count_per_feature_map + input_y * input_width + input_x;
        #pragma unroll
        for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
            current_input_elem_id[i] = current_input_elem_id[i - 1] + input_neuron_count_per_feature_map;
        res[0] = input[current_input_elem_id[0]];
        #pragma unroll
        for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
            if (item_valid[i - 1])
                res[i] = input[current_input_elem_id[i]];
        // Reduce down the remaining rows of the window column.
        for(int j = 1; j < subsampling_height; ++j)
        {
            #pragma unroll
            for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
                current_input_elem_id[i] += input_width;
            float new_val[FEATURE_MAP_BLOCK_SIZE];
            new_val[0] = input[current_input_elem_id[0]];
            #pragma unroll
            for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
                if (item_valid[i - 1])
                    new_val[i] = input[current_input_elem_id[i]];
            // NOTE(review): for invalid feature maps new_val[i] is read
            // uninitialized below; the resulting res[i] is never stored, so
            // output is unaffected, but the read is technically undefined.
            #pragma unroll
            for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
                res[i] = max(res[i], new_val[i]);
        }
        // Stage this thread's column maxima for the cross-window_x combine.
        #pragma unroll
        for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
            vals[local_thread_id + threadblock_size * i] = res[i];
    }
    __syncthreads();
    // The window_x == 0 thread of each output pixel gathers the partials of the
    // following (subsampling_width - 1) threads and writes the final maxima.
    if (in_bounds && (window_x == 0))
    {
        for(int j = 1; j < subsampling_width; ++j)
        {
            local_thread_id++;
            #pragma unroll
            for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
                res[i] = max(res[i], vals[local_thread_id + threadblock_size * i]);
        }
        int output_offset = entry_id * output_neuron_count + base_feature_map_id * output_neuron_count_per_feature_map + output_y * output_width + output_x;
        output[output_offset] = res[0];
        #pragma unroll
        for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
        {
            output_offset += output_neuron_count_per_feature_map;
            if (item_valid[i - 1])
                output[output_offset] = res[i];
        }
    }
}
namespace nnforge
{
namespace cuda
{
		// Default constructor; configuration happens later via tester_configured().
		max_subsampling_2d_layer_tester_cuda::max_subsampling_2d_layer_tester_cuda()
		{
		}
		// No owned resources to release; buffers are managed by the framework.
		max_subsampling_2d_layer_tester_cuda::~max_subsampling_2d_layer_tester_cuda()
		{
		}
		// Enqueues the forward (test) pass for max subsampling on the given stream.
		// additional_buffers layout: [0] output, [1] window_x/x work-item list,
		// [2] y/feature-map work-item list (filled by fill_additional_buffers()).
		// Dynamic shared memory: FEATURE_MAP_BLOCK_SIZE floats per thread.
		void max_subsampling_2d_layer_tester_cuda::enqueue_test(
			hipStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			const float * input = *input_buffer;
			float * output = *additional_buffers[0];
			int window_x_x_config_count = subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0];
			const window_x_x_config * window_x_x_config_list = static_cast<const window_x_x_config *>((const void *)*additional_buffers[1]);
			int y_feature_map_config_count = output_configuration_specific.dimension_sizes[1] * feature_map_block_count;
			const y_feature_map_config * y_feature_map_config_list = static_cast<const y_feature_map_config *>((const void *)*additional_buffers[2]);
			// The x block dimension is aligned to subsampling_sizes[0] so all
			// window_x positions of one output pixel land in the same block.
			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				window_x_x_config_count,
				y_feature_map_config_count,
				entry_count,
				subsampling_sizes[0]);
			int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
			int smem_size = threadblock_size * sizeof(float) * FEATURE_MAP_BLOCK_SIZE;
			hipLaunchKernelGGL(( max_subsampling_2d_tex_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
				output,
				input,
				window_x_x_config_list,
				y_feature_map_config_list,
				subsampling_sizes[0],
				subsampling_sizes[1],
				input_configuration_specific.dimension_sizes[0],
				input_configuration_specific.dimension_sizes[1],
				output_configuration_specific.dimension_sizes[0],
				output_configuration_specific.dimension_sizes[1],
				output_configuration_specific.feature_map_count,
				entry_count,
				window_x_x_config_count,
				y_feature_map_config_count,
				input_elem_count_per_entry,
				output_elem_count_per_entry,
				input_elem_count_per_feature_map,
				output_elem_count_per_feature_map,
				threadblock_size);
		}
std::vector<size_t> max_subsampling_2d_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
		// The layer writes its result into additional_buffers[0] (allocated via
		// get_sizes_of_additional_buffers_per_entry), so expose that as the output.
		cuda_linear_buffer_device_smart_ptr max_subsampling_2d_layer_tester_cuda::get_output_buffer(
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
		{
			return additional_buffers[0];
		}
		// Called once the layer schema/configuration is known: caches the
		// subsampling window sizes and the number of 4-wide feature-map blocks
		// (ceil division by FEATURE_MAP_BLOCK_SIZE) used by the kernel mapping.
		void max_subsampling_2d_layer_tester_cuda::tester_configured()
		{
			nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);
			subsampling_sizes = layer_derived->subsampling_sizes;
			feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
		}
std::vector<size_t> max_subsampling_2d_layer_tester_cuda::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(window_x_x_config) * subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]);
res.push_back(sizeof(y_feature_map_config) * output_configuration_specific.dimension_sizes[1] * feature_map_block_count);
return res;
}
void max_subsampling_2d_layer_tester_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<window_x_x_config> task_list;
for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x)
for(int window_x = 0; window_x < subsampling_sizes[0]; ++window_x)
task_list.push_back(window_x_x_config(window_x, x));
cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(window_x_x_config) * task_list.size(), hipMemcpyHostToDevice));
}
{
std::vector<y_feature_map_config> task_list;
for(int feature_map_block_id = 0; feature_map_block_id < feature_map_block_count; ++feature_map_block_id)
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
task_list.push_back(y_feature_map_config(y, feature_map_block_id * FEATURE_MAP_BLOCK_SIZE));
cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(y_feature_map_config) * task_list.size(), hipMemcpyHostToDevice));
}
}
}
}
| 45a32d38b9f9645b0a389f1808eea3031b4e7cb5.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_2d_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../max_subsampling_layer.h"
#include "../nn_types.h"
// Packed work-item descriptor: window_x in the high 16 bits, output x in the
// low 16 bits. Both values must fit in 16 bits (see the shift/mask packing).
struct __align__(4) window_x_x_config
{
    window_x_x_config(int window_x, int x)
    {
        this->window_x_x_pair = (((unsigned int)window_x) << 16) | (unsigned int)x;
    }
    unsigned int window_x_x_pair;
};
// Packed work-item descriptor: output y in the high 16 bits, base feature map
// id in the low 16 bits. Both values must fit in 16 bits.
struct __align__(4) y_feature_map_config
{
    y_feature_map_config(int y, int feature_map_id)
    {
        this->y_feature_map_id_pair = (((unsigned int)y) << 16) | (unsigned int)feature_map_id;
    }
    unsigned int y_feature_map_id_pair;
};
// Dynamically sized shared memory (3rd launch argument); stages one partial
// maximum per thread per feature map for the cross-window_x combine.
extern __shared__ float arr_sh[];
// Each thread processes this many consecutive feature maps at once.
#define FEATURE_MAP_BLOCK_SIZE 4
// 2D max-subsampling kernel. Thread mapping: x = (window_x, output_x) pair,
// y = (output_y, base feature map) pair, z = entry. Each thread reduces one
// column of the pooling window; the window_x==0 thread of each output pixel
// combines the per-column partials via shared memory (relies on the window_x
// positions of one pixel being consecutive threads in the block).
__global__ void max_subsampling_2d_tex_kernel(
	float * __restrict output,
	const float * __restrict input,
	const window_x_x_config * __restrict window_x_x_config_list,
	const y_feature_map_config * __restrict y_feature_map_config_list,
	int subsampling_width,
	int subsampling_height,
	int input_width,
	int input_height,
	int output_width,
	int output_height,
	int feature_map_count,
	int entry_count,
	int window_x_x_config_count,
	int y_feature_map_config_count,
	int input_neuron_count,
	int output_neuron_count,
	int input_neuron_count_per_feature_map,
	int output_neuron_count_per_feature_map,
	int threadblock_size)
{
	int window_x_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	// Flat thread index within the block; indexes the shared staging area.
	int local_thread_id = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
	float * vals = arr_sh;
	bool in_bounds = (entry_id < entry_count) && (window_x_x_config_id < window_x_x_config_count) && (feature_map_config_id < y_feature_map_config_count);
	float res[FEATURE_MAP_BLOCK_SIZE];
	int window_x;
	int output_x;
	int output_y;
	int base_feature_map_id;
	// item_valid[i-1]: whether feature map (base + i) exists (tail block may be partial).
	bool item_valid[FEATURE_MAP_BLOCK_SIZE - 1];
	if (in_bounds)
	{
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			res[i] = -1.0e37F;  // sentinel "minus infinity" for the max reduction
		// Unpack the work-item descriptors (see struct definitions above).
		window_x_x_config wxx = window_x_x_config_list[window_x_x_config_id];
		output_x = wxx.window_x_x_pair & 0xFFFF;
		window_x = wxx.window_x_x_pair >> 16;
		y_feature_map_config yfm = y_feature_map_config_list[feature_map_config_id];
		base_feature_map_id = yfm.y_feature_map_id_pair & 0xFFFF;
		output_y = yfm.y_feature_map_id_pair >> 16;
		#pragma unroll
		for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			item_valid[i - 1] = (base_feature_map_id + i < feature_map_count);
		int input_x = output_x * subsampling_width + window_x;
		int input_y = output_y * subsampling_height;
		// Per-feature-map input offsets for the top row of this window column.
		int current_input_elem_id[FEATURE_MAP_BLOCK_SIZE];
		current_input_elem_id[0] = entry_id * input_neuron_count + base_feature_map_id * input_neuron_count_per_feature_map + input_y * input_width + input_x;
		#pragma unroll
		for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			current_input_elem_id[i] = current_input_elem_id[i - 1] + input_neuron_count_per_feature_map;
		res[0] = input[current_input_elem_id[0]];
		#pragma unroll
		for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			if (item_valid[i - 1])
				res[i] = input[current_input_elem_id[i]];
		// Reduce down the remaining rows of the window column.
		for(int j = 1; j < subsampling_height; ++j)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
				current_input_elem_id[i] += input_width;
			float new_val[FEATURE_MAP_BLOCK_SIZE];
			new_val[0] = input[current_input_elem_id[0]];
			#pragma unroll
			for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
				if (item_valid[i - 1])
					new_val[i] = input[current_input_elem_id[i]];
			// NOTE(review): for invalid feature maps new_val[i] is read
			// uninitialized below; the result is never stored, but the read
			// is technically undefined behaviour.
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
				res[i] = max(res[i], new_val[i]);
		}
		// Stage this thread's column maxima for the cross-window_x combine.
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			vals[local_thread_id + threadblock_size * i] = res[i];
	}
	__syncthreads();
	// The window_x == 0 thread of each output pixel gathers the partials of the
	// following (subsampling_width - 1) threads and writes the final maxima.
	if (in_bounds && (window_x == 0))
	{
		for(int j = 1; j < subsampling_width; ++j)
		{
			local_thread_id++;
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
				res[i] = max(res[i], vals[local_thread_id + threadblock_size * i]);
		}
		int output_offset = entry_id * output_neuron_count + base_feature_map_id * output_neuron_count_per_feature_map + output_y * output_width + output_x;
		output[output_offset] = res[0];
		#pragma unroll
		for(int i = 1; i < FEATURE_MAP_BLOCK_SIZE; ++i)
		{
			output_offset += output_neuron_count_per_feature_map;
			if (item_valid[i - 1])
				output[output_offset] = res[i];
		}
	}
}
namespace nnforge
{
namespace cuda
{
		// Default constructor; configuration happens later via tester_configured().
		max_subsampling_2d_layer_tester_cuda::max_subsampling_2d_layer_tester_cuda()
		{
		}
		// No owned resources to release; buffers are managed by the framework.
		max_subsampling_2d_layer_tester_cuda::~max_subsampling_2d_layer_tester_cuda()
		{
		}
		// Enqueues the forward (test) pass for max subsampling on the given stream.
		// additional_buffers layout: [0] output, [1] window_x/x work-item list,
		// [2] y/feature-map work-item list (filled by fill_additional_buffers()).
		// Dynamic shared memory: FEATURE_MAP_BLOCK_SIZE floats per thread.
		void max_subsampling_2d_layer_tester_cuda::enqueue_test(
			cudaStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			const float * input = *input_buffer;
			float * output = *additional_buffers[0];
			int window_x_x_config_count = subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0];
			const window_x_x_config * window_x_x_config_list = static_cast<const window_x_x_config *>((const void *)*additional_buffers[1]);
			int y_feature_map_config_count = output_configuration_specific.dimension_sizes[1] * feature_map_block_count;
			const y_feature_map_config * y_feature_map_config_list = static_cast<const y_feature_map_config *>((const void *)*additional_buffers[2]);
			// The x block dimension is aligned to subsampling_sizes[0] so all
			// window_x positions of one output pixel land in the same block.
			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				window_x_x_config_count,
				y_feature_map_config_count,
				entry_count,
				subsampling_sizes[0]);
			int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
			int smem_size = threadblock_size * sizeof(float) * FEATURE_MAP_BLOCK_SIZE;
			max_subsampling_2d_tex_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
				output,
				input,
				window_x_x_config_list,
				y_feature_map_config_list,
				subsampling_sizes[0],
				subsampling_sizes[1],
				input_configuration_specific.dimension_sizes[0],
				input_configuration_specific.dimension_sizes[1],
				output_configuration_specific.dimension_sizes[0],
				output_configuration_specific.dimension_sizes[1],
				output_configuration_specific.feature_map_count,
				entry_count,
				window_x_x_config_count,
				y_feature_map_config_count,
				input_elem_count_per_entry,
				output_elem_count_per_entry,
				input_elem_count_per_feature_map,
				output_elem_count_per_feature_map,
				threadblock_size);
		}
std::vector<size_t> max_subsampling_2d_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
		// The layer writes its result into additional_buffers[0] (allocated via
		// get_sizes_of_additional_buffers_per_entry), so expose that as the output.
		cuda_linear_buffer_device_smart_ptr max_subsampling_2d_layer_tester_cuda::get_output_buffer(
			cuda_linear_buffer_device_smart_ptr input_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
		{
			return additional_buffers[0];
		}
		// Called once the layer schema/configuration is known: caches the
		// subsampling window sizes and the number of 4-wide feature-map blocks
		// (ceil division by FEATURE_MAP_BLOCK_SIZE) used by the kernel mapping.
		void max_subsampling_2d_layer_tester_cuda::tester_configured()
		{
			nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);
			subsampling_sizes = layer_derived->subsampling_sizes;
			feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
		}
std::vector<size_t> max_subsampling_2d_layer_tester_cuda::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(window_x_x_config) * subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]);
res.push_back(sizeof(y_feature_map_config) * output_configuration_specific.dimension_sizes[1] * feature_map_block_count);
return res;
}
void max_subsampling_2d_layer_tester_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<window_x_x_config> task_list;
for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x)
for(int window_x = 0; window_x < subsampling_sizes[0]; ++window_x)
task_list.push_back(window_x_x_config(window_x, x));
cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(window_x_x_config) * task_list.size(), cudaMemcpyHostToDevice));
}
{
std::vector<y_feature_map_config> task_list;
for(int feature_map_block_id = 0; feature_map_block_id < feature_map_block_count; ++feature_map_block_id)
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
task_list.push_back(y_feature_map_config(y, feature_map_block_id * FEATURE_MAP_BLOCK_SIZE));
cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(y_feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
}
}
}
}
|
21763383bc73dc9b1807d2b9b8fe3200bc28d30d.hip | // !!! This is a file automatically generated by hipify!!!
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <cmeansMPI.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <cmeansMPI_kernel.cu>
#include "MDL.h"
// Print the most recent HIP runtime error to stdout (and clear it).
// Silent when no error is pending.
void printCudaError() {
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess)
        return;
    printf("%s\n", hipGetErrorString(status));
}
// GPU event-based timer. `et` accumulates elapsed milliseconds across multiple
// start/stop cycles; it is heap-allocated (see createTimer) so the struct can
// be passed by value while still updating the shared accumulator.
typedef struct {
    hipEvent_t start;   // event recorded by startTimer
    hipEvent_t stop;    // event recorded by stopTimer
    float* et;          // accumulated elapsed time, in milliseconds
} cudaTimer_t;
// Initialise a timer: create the two GPU events and zero the accumulator.
// Serialised with an OpenMP critical section, as timers are created from the
// per-GPU OpenMP threads in main().
// NOTE(review): the malloc result is not checked — TODO confirm acceptable.
void createTimer(cudaTimer_t* timer) {
    #pragma omp critical (create_timer)
    {
        hipEventCreate(&(timer->start));
        hipEventCreate(&(timer->stop));
        timer->et = (float*) malloc(sizeof(float));
        *(timer->et) = 0.0f;
    }
}
// Destroy a timer's events and free its accumulator (pair of createTimer).
// Serialised with an OpenMP critical section, mirroring createTimer.
void deleteTimer(cudaTimer_t timer) {
    #pragma omp critical (delete_timer)
    {
        hipEventDestroy(timer.start);
        hipEventDestroy(timer.stop);
        free(timer.et);
    }
}
// Record the start event on the default stream (asynchronous).
void startTimer(cudaTimer_t timer) {
    hipEventRecord(timer.start,0);
}
// Record the stop event, wait for it to complete, and add the elapsed
// start->stop interval (milliseconds) to the timer's accumulator.
void stopTimer(cudaTimer_t timer) {
    hipEventRecord(timer.stop,0);
    hipEventSynchronize(timer.stop);
    float tmp;
    hipEventElapsedTime(&tmp,timer.start,timer.stop);
    *(timer.et) += tmp;
}
// Return the total accumulated time in milliseconds.
float getTimerValue(cudaTimer_t timer) {
    return *(timer.et);
}
/************************************************************************/
/* C-means Main */
/************************************************************************/
int main(int argc, char* argv[])
{
int rank, num_nodes, len, provided;
char name[MPI_MAX_PROCESSOR_NAME];
MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE,&provided);
MPI_Comm_size(MPI_COMM_WORLD,&num_nodes);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Get_processor_name(name, &len);
printf("Hello world from node %d of %d on %s\n",rank,num_nodes,name);
unsigned int timer_io; // Timer for I/O, such as reading FCS file and outputting result files
unsigned int timer_total; // Total time
unsigned int timer_main_cpu; // Total time
cutCreateTimer(&timer_io);
cutCreateTimer(&timer_total);
cutCreateTimer(&timer_main_cpu);
// determine the number of CUDA capable GPUs
int num_gpus = 0; // number of CUDA GPUs
hipGetDeviceCount(&num_gpus);
if(num_gpus < 1)
{
printf("no CUDA capable devices were detected\n");
return 1;
}
// display CPU and GPU configuration
printf("number of host CPUs:\t%d\n", omp_get_num_procs());
printf("number of CUDA devices:\t%d\n", num_gpus);
for(int i = 0; i < num_gpus; i++)
{
hipDeviceProp_t dprop;
hipGetDeviceProperties(&dprop, i);
printf(" %d: %s\n", i, dprop.name);
}
printf("---------------------------\n");
int total_num_gpus = num_gpus * num_nodes;
cutStartTimer(timer_total);
// [program name] [data file]
if(argc != 2){
printf("Usage Error: must supply data file. e.g. programe_name @opt(flags) file.in\n");
return 1;
}
cutStartTimer(timer_io);
float* myEvents;
int elements_per_node, elements_being_sent;
elements_per_node = NUM_EVENTS / total_num_gpus * num_gpus * NUM_DIMENSIONS;
// Root reads input from file and distributes to each node
if(rank == 0) {
myEvents = ParseSampleInput(argv[1]);
MPI_Request* requests = (MPI_Request*) malloc(sizeof(MPI_Request)*num_nodes);
MPI_Status s;
// Send everything asynchronously
for(int i=1; i < num_nodes; i++) {
elements_being_sent = elements_per_node;
if(i == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Isend(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&requests[i]);
//MPI_Send(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD);
}
// Wait for the Isends to complete
for(int i=1; i < num_nodes; i++) {
MPI_Wait(&requests[i],&s);
}
free(requests);
elements_being_sent = elements_per_node; // so that its set properly for the root
} else {
myEvents = (float*) malloc(sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS);
elements_being_sent = elements_per_node;
if(rank == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Status s;
MPI_Recv(&(myEvents[elements_per_node*rank]),elements_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD,&s);
}
MPI_Barrier(MPI_COMM_WORLD);
cutStopTimer(timer_io);
cutStartTimer(timer_main_cpu);
//srand((unsigned)(time(0)));
srand(2012);
// Allocate arrays for the cluster centers
float* myClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
float* newClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
// Select random cluster centers
// double t1,t2;
generateInitialClusters(myClusters, myEvents);
// Create an array of arrays for temporary cluster centers from each GPU
float** tempClusters = (float**) malloc(sizeof(float*)*num_gpus);
float** tempDenominators = (float**) malloc(sizeof(float*)*num_gpus);
for(int i=0; i < num_gpus; i++) {
tempClusters[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
tempDenominators[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS);
memcpy(tempClusters[i],myClusters,sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
}
// Create an array of arrays for temporary Q matrix pieces from each GPU
float** q_matrices = (float**) malloc(sizeof(float*)*num_gpus);
// Create an array for the final Q matrix
float* q_matrix = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_CLUSTERS);
float diff; // used to track difference in cluster centers between iterations
// Transpose the events matrix
float* transposedEvents = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
for(int i=0; i<NUM_EVENTS; i++) {
for(int j=0; j<NUM_DIMENSIONS; j++) {
transposedEvents[j*NUM_EVENTS+i] = myEvents[i*NUM_DIMENSIONS+j];
}
}
float* memberships = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_EVENTS);
int* finalClusterConfig;
cutStopTimer(timer_main_cpu);
////////////////////////////////////////////////////////////////
// run as many CPU threads as there are CUDA devices
// num_gpus = 1;
// omp_set_num_threads(num_gpus); // create as many CPU threads as there are CUDA devices
#pragma omp parallel shared(myClusters,diff,tempClusters,tempDenominators,memberships,finalClusterConfig)
{
cudaTimer_t timer_memcpy; // Timer for GPU <---> CPU memory copying
cudaTimer_t timer_cpu; // Timer for processing on CPU
cudaTimer_t timer_gpu; // Timer for kernels on the GPU
cudaTimer_t timer_mpi; // Timer for MPI
unsigned int tid = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
int gpu_num = rank*num_gpus+tid;
printf("hello from thread %d of %d\n",tid,num_cpu_threads);
// set and check the CUDA device for this CPU thread
int gpu_id = -1;
hipSetDevice(tid % num_gpus); // "% num_gpus" allows more CPU threads than GPU devices
hipGetDevice(&gpu_id);
#pragma omp barrier
createTimer(&timer_memcpy);
createTimer(&timer_cpu);
createTimer(&timer_gpu);
createTimer(&timer_mpi);
printf("CPU thread %d (of %d) uses CUDA device %d\n", tid, num_cpu_threads, gpu_id);
// Compute starting/finishing indexes for the events for each gpu
int events_per_gpu = NUM_EVENTS / total_num_gpus;
int my_num_events = events_per_gpu;
if(gpu_num == (total_num_gpus-1)) {
my_num_events += NUM_EVENTS % total_num_gpus;
}
startTimer(timer_memcpy);
float* d_distanceMatrix;
CUDA_SAFE_CALL(hipMalloc((void**)&d_distanceMatrix, sizeof(float)*my_num_events*NUM_CLUSTERS));
#if !LINEAR
float* d_memberships;
CUDA_SAFE_CALL(hipMalloc((void**)&d_memberships, sizeof(float)*my_num_events*NUM_CLUSTERS));
#endif
float* d_E;
CUDA_SAFE_CALL(hipMalloc((void**)&d_E, sizeof(float)*my_num_events*NUM_DIMENSIONS));
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**)&d_C, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_nC;
CUDA_SAFE_CALL(hipMalloc((void**)&d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_denoms;
CUDA_SAFE_CALL(hipMalloc((void**)&d_denoms, sizeof(float)*NUM_CLUSTERS));
int size = sizeof(float)*NUM_DIMENSIONS*my_num_events;
// Copying the transposed data is trickier since it's not all contigious for the relavant events
float* temp_fcs_data = (float*) malloc(size);
for(int d=0; d < NUM_DIMENSIONS; d++) {
memcpy(&temp_fcs_data[d*my_num_events],&transposedEvents[d*NUM_EVENTS + gpu_num*events_per_gpu],sizeof(float)*my_num_events);
}
CUDA_SAFE_CALL(hipMemcpy( d_E, temp_fcs_data, size,hipMemcpyHostToDevice) );
hipDeviceSynchronize();
free(temp_fcs_data);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
stopTimer(timer_memcpy);
printf("Starting C-means\n");
int iterations = 0;
int num_blocks_distance = my_num_events / NUM_THREADS_DISTANCE;
if(my_num_events % NUM_THREADS_DISTANCE) {
num_blocks_distance++;
}
int num_blocks_membership = my_num_events / NUM_THREADS_MEMBERSHIP;
if(my_num_events % NUM_THREADS_DISTANCE) {
num_blocks_membership++;
}
int num_blocks_update = NUM_CLUSTERS / NUM_CLUSTERS_PER_BLOCK;
if(NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_blocks_update++;
}
do{
cudaTimer_t timer;
createTimer(&timer);
startTimer(timer);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
// Copy the cluster centers to the GPU
startTimer(timer_memcpy);
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
stopTimer(timer_memcpy);
startTimer(timer_gpu);
DEBUG("Launching ComputeDistanceMatrix kernel\n");
hipLaunchKernelGGL(( ComputeDistanceMatrix), dim3(dim3(num_blocks_distance,NUM_CLUSTERS)), dim3(NUM_THREADS_DISTANCE) , 0, 0, d_C, d_E, d_distanceMatrix, my_num_events);
#if LINEAR
// O(M) membership kernel
DEBUG("Launching ComputeMembershipMatrixLinear kernel\n");
hipLaunchKernelGGL(( ComputeMembershipMatrixLinear), dim3(num_blocks_membership), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
hipLaunchKernelGGL(( UpdateClusterCentersGPU3), dim3(dim3(NUM_DIMENSIONS,num_blocks_update)), dim3(NUM_THREADS_UPDATE) , 0, 0, d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_distanceMatrix, d_denoms, my_num_events);
#else
// O(M^2) membership kernel
DEBUG("Launching ComputeMembershipMatrix kernel\n");
hipLaunchKernelGGL(( ComputeMembershipMatrix), dim3(dim3(num_blocks_membership,NUM_CLUSTERS)), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, d_memberships, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, my_num_events);
hipLaunchKernelGGL(( UpdateClusterCentersGPU3), dim3(dim3(NUM_DIMENSIONS,num_blocks_update)), dim3(NUM_THREADS_UPDATE) , 0, 0, d_C, d_E, d_nC, d_memberships, my_num_events);
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_memberships, d_denoms, my_num_events );
#endif
hipDeviceSynchronize();
printCudaError();
stopTimer(timer_gpu);
// Copy partial centers and denominators to host
startTimer(timer_memcpy);
hipMemcpy(tempClusters[tid], d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS, hipMemcpyDeviceToHost);
hipMemcpy(tempDenominators[tid], d_denoms, sizeof(float)*NUM_CLUSTERS, hipMemcpyDeviceToHost);
printCudaError();
stopTimer(timer_memcpy);
stopTimer(timer);
float thisTime = getTimerValue(timer);
DEBUG("Processing time for GPU %d: %f (ms) \n", tid, thisTime);
deleteTimer(timer);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// Sum up the partial cluster centers (numerators)
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] += tempClusters[i][c*NUM_DIMENSIONS+d];
}
}
}
// Sum up the denominator for each cluster
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
tempDenominators[0][c] += tempDenominators[i][c];
}
}
stopTimer(timer_cpu);
DEBUG("Reducing cluster values\n");
startTimer(timer_mpi);
if(rank == 0) {
MPI_Reduce(MPI_IN_PLACE,tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempClusters[0],tempClusters[1],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempClusters[0],tempClusters[1],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
MPI_Reduce(MPI_IN_PLACE,tempDenominators[0],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempDenominators[0],tempDenominators[1],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempDenominators[0],tempDenominators[1],sizeof(float)*NUM_CLUSTERS);
} else {
MPI_Reduce(tempClusters[0],0,NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce(tempDenominators[0],0,NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD); // not sure if neccesary...
stopTimer(timer_mpi);
startTimer(timer_cpu);
// Divide to get the final clusters
if(rank == 0) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] /= tempDenominators[0][c];
}
}
}
stopTimer(timer_cpu);
startTimer(timer_mpi);
DEBUG("Broadcasting Cluster Values\n");
MPI_Bcast(tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,0,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
startTimer(timer_cpu);
diff = 0.0;
for(int i=0; i < NUM_CLUSTERS; i++){
DEBUG("GPU %d, Cluster %d: ",tid,i);
for(int k = 0; k < NUM_DIMENSIONS; k++){
DEBUG("%f ",tempClusters[tid][i*NUM_DIMENSIONS + k]);
diff += fabs(myClusters[i*NUM_DIMENSIONS + k] - tempClusters[tid][i*NUM_DIMENSIONS + k]);
}
DEBUG("\n");
}
memcpy(myClusters,tempClusters[tid],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
DEBUG("Diff = %f\n", diff);
DEBUG("Done with iteration #%d\n", iterations);
stopTimer(timer_cpu);
}
#pragma omp barrier
iterations++;
DEBUG("\n");
} while(iterations < MIN_ITERS || (abs(diff) > THRESHOLD && iterations < MAX_ITERS));
#pragma omp master
{
if(rank == 0) {
printf("Iterations: %d\n",iterations);
}
}
#if ENABLE_OUTPUT
// Compute final membership vaues
startTimer(timer_gpu);
#if LINEAR
// O(M)
hipLaunchKernelGGL(( ComputeDistanceMatrix), dim3(dim3(num_blocks_distance,NUM_CLUSTERS)), dim3(NUM_THREADS_DISTANCE) , 0, 0, d_C, d_E, d_distanceMatrix, my_num_events);
hipLaunchKernelGGL(( ComputeNormalizedMembershipMatrixLinear), dim3(num_blocks_membership), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix,my_num_events);
#else
// O(M^2)
hipLaunchKernelGGL(( ComputeNormalizedMembershipMatrix), dim3(dim3(num_blocks_membership,NUM_CLUSTERS)), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, d_memberships, my_num_events);
#endif
stopTimer(timer_gpu);
// Copy memberships from the GPU
float* temp_memberships = (float*) malloc(sizeof(float)*my_num_events*NUM_CLUSTERS);
startTimer(timer_memcpy);
#if LINEAR
hipMemcpy(temp_memberships,d_distanceMatrix,sizeof(float)*my_num_events*NUM_CLUSTERS,hipMemcpyDeviceToHost);
#else
hipMemcpy(temp_memberships,d_memberships,sizeof(float)*my_num_events*NUM_CLUSTERS,hipMemcpyDeviceToHost);
#endif
stopTimer(timer_memcpy);
startTimer(timer_cpu);
for(int c=0; c < NUM_CLUSTERS; c++) {
memcpy(&(memberships[c*NUM_EVENTS+gpu_num*events_per_gpu]),&(temp_memberships[c*my_num_events]),sizeof(float)*my_num_events);
}
stopTimer(timer_cpu);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// First transpose the memberships, makes it easier to gather the results between nodes
float* temp = (float*) malloc(sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
temp[e*NUM_CLUSTERS+c] = memberships[c*NUM_EVENTS+e];
}
}
memcpy(memberships,temp,sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
stopTimer(timer_cpu);
// Gather memberships on root
startTimer(timer_mpi);
int memberships_being_sent, memberships_per_node;
memberships_per_node = events_per_gpu*num_gpus*NUM_CLUSTERS;
if(rank == 0) {
for(int i=1; i < num_nodes; i++) {
memberships_being_sent = memberships_per_node;
if(i == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Status s;
MPI_Recv(&(temp[memberships_per_node*i]),memberships_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&s);
}
} else {
memberships_being_sent = memberships_per_node;
if(rank == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Send(&(memberships[memberships_per_node*rank]),memberships_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
// Tranpose the memberships again to get original ordering
startTimer(timer_cpu);
if(rank == 0) {
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c<NUM_CLUSTERS; c++) {
memberships[c*NUM_EVENTS+e] = temp[e*NUM_CLUSTERS+c];
}
}
}
free(temp);
stopTimer(timer_cpu);
}
#pragma omp barrier
free(temp_memberships);
#endif // #if ENABLE_OUTPUT
if(tid == 0) {
if(abs(diff) > THRESHOLD){
PRINT("Warning: c-means did not converge to the %f threshold provided\n", THRESHOLD);
}
PRINT("C-means complete\n");
}
#pragma omp barrier // sync threads
#if !ENABLE_MDL
if(tid == 0) {
// Don't attempt MDL, save all clusters
finalClusterConfig = (int*) malloc(sizeof(int)*NUM_CLUSTERS);
memset(finalClusterConfig,1,sizeof(int)*NUM_CLUSTERS);
}
#else
PRINT("Calculating Q Matrix Section %d\n",tid);
// Copy the latest clusters to the device
// (the current ones on the device are 1 iteration old)
startTimer(timer_memcpy);
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
stopTimer(timer_memcpy);
// Build Q matrix, each gpu handles NUM_DIMENSIONS/num_gpus rows of the matrix
q_matrices[tid] = BuildQGPU(d_E, d_C, d_distanceMatrix, &mdlTime, tid, num_gpus, my_num_events);
#pragma omp barrier // sync threads
if(tid == 0) {
// Combine the partial matrices
int num_matrix_elements = NUM_CLUSTERS*(NUM_CLUSTERS/num_gpus);
for(int i=0; i < num_gpus; i++) {
float* q_matrix_ptr = (float*) q_matrix+i*num_matrix_elements;
float* q_matrices_ptr = (float*) q_matrices[i]+i*num_matrix_elements;
memcpy(q_matrix_ptr,q_matrices_ptr,sizeof(float)*num_matrix_elements);
free(q_matrices[i]);
}
startTimer(timer_cpu);
DEBUG("Searching for optimal configuration...\n");
finalClusterConfig = TabuSearch(q_matrix, argv[1]);
stopTimer(timer_cpu);
DEBUG("Q Matrix:\n");
for(int row=0; row < NUM_CLUSTERS; row++) {
for(int col=0; col < NUM_CLUSTERS; col++) {
DEBUG("%.2e ",q_matrix[row*NUM_CLUSTERS+col]);
}
DEBUG("\n");
}
free(q_matrix);
}
mdlTime /= 1000.0; // CUDA timer returns time in milliseconds, normalize to seconds
#endif
fflush(stdout);
#pragma omp barrier
#pragma omp master
{
printf("\n\n");
printf("Node %d: Thread %d: GPU memcpy Time (ms): %f\n",rank,tid,getTimerValue(timer_memcpy));
printf("Node %d: Thread %d: CPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_cpu));
printf("Node %d: Thread %d: GPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_gpu));
printf("Node %d: Thread %d: MPI Time (ms): %f\n",rank,tid,getTimerValue(timer_mpi));
}
#if !CPU_ONLY
CUDA_SAFE_CALL(hipFree(d_E));
CUDA_SAFE_CALL(hipFree(d_C));
CUDA_SAFE_CALL(hipFree(d_nC));
#endif
#pragma omp barrier
DEBUG("Thread %d done.\n",tid);
} // end of omp_parallel block
cutStartTimer(timer_io);
if(rank == 0) {
PRINT("Final Clusters are:\n");
int newCount = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
if(finalClusterConfig[i]){
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[newCount * NUM_DIMENSIONS + j] = myClusters[i*NUM_DIMENSIONS + j];
PRINT("%.3f\t", myClusters[i*NUM_DIMENSIONS + j]);
}
newCount++;
PRINT("\n");
}
}
#if ENABLE_OUTPUT
ReportSummary(newClusters, newCount, argv[1]);
ReportResults(myEvents, memberships, newCount, argv[1]);
#endif
}
cutStopTimer(timer_io);
cutStopTimer(timer_total);
if(rank == 0) {
printf("Total Time (ms): %f\n",cutGetTimerValue(timer_total));
printf("I/O Time (ms): %f\n",cutGetTimerValue(timer_io));
printf("Main Thread CPU Time (ms): %f\n",cutGetTimerValue(timer_main_cpu));
printf("\n\n");
}
free(newClusters);
free(myClusters);
free(myEvents);
free(transposedEvents);
MPI_Finalize();
return 0;
}
// Seeds each cluster center by copying the coordinates of a randomly chosen
// event. rand() must be seeded by the caller before this is invoked; note
// that the same event may be picked for more than one cluster.
void generateInitialClusters(float* clusters, float* events){
	for(int i = 0; i < NUM_CLUSTERS; i++){
		//seed = i * NUM_EVENTS / NUM_CLUSTERS;
		int seed = rand() % NUM_EVENTS;
		// Copy all NUM_DIMENSIONS coordinates of the chosen event at once.
		memcpy(&clusters[i*NUM_DIMENSIONS],
		       &events[seed*NUM_DIMENSIONS],
		       sizeof(float)*NUM_DIMENSIONS);
	}
}
// Reads a binary event file laid out as two 32-bit int header fields
// (number of events, number of dimensions) followed by events*dims floats.
// Returns a malloc'd row-major array the caller must free, or NULL if the
// file cannot be opened or the header is truncated (the CSV reader already
// returns NULL on failure, so callers get a consistent contract).
float* readBIN(char* f) {
	FILE* fin = fopen(f,"rb");
	if(fin == NULL) { // previously dereferenced a NULL FILE* on bad paths
		printf("Error: unable to open binary file '%s'\n",f);
		return NULL;
	}
	int nevents,ndims;
	// Use sizeof(int) instead of the magic constant 4 for the header reads,
	// and actually check that both fields were read.
	if(fread(&nevents,sizeof(int),1,fin) != 1 ||
	   fread(&ndims,sizeof(int),1,fin) != 1) {
		printf("Error: truncated header in binary file '%s'\n",f);
		fclose(fin);
		return NULL;
	}
	int num_elements = ndims*nevents;
	printf("Number of rows: %d\n",nevents);
	printf("Number of cols: %d\n",ndims);
	float* data = (float*) malloc(sizeof(float)*num_elements);
	if(data == NULL) {
		fclose(fin);
		return NULL;
	}
	size_t got = fread(data,sizeof(float),num_elements,fin);
	if(got != (size_t)num_elements) { // warn instead of silently returning junk
		printf("Warning: expected %d elements but read %d\n",
		       num_elements,(int)got);
	}
	fclose(fin);
	return data;
}
// Parses a delimited text file of NUM_EVENTS rows x NUM_DIMENSIONS columns
// into a malloc'd row-major float array (caller frees). Returns NULL if the
// file cannot be opened. When LINE_LABELS is set, the first line is a header
// and is discarded.
float* readCSV(char* filename) {
	FILE* myfile = fopen(filename, "r");
	if(myfile == NULL){
		printf("Error: File DNE\n");
		return NULL;
	}
	// NOTE: the original opened the file a second time into the same
	// variable, leaking the first FILE* handle; a single open suffices.
	char myline[1024];
	float* retVal = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
	if(retVal == NULL) {
		fclose(myfile);
		return NULL;
	}
#if LINE_LABELS
	fgets(myline, 1024, myfile); // discard the header/label line
#endif
	// Single parse loop replaces the duplicated #if/#else bodies; guard
	// against short files and short rows instead of dereferencing the NULL
	// that fgets/strtok return in those cases.
	for(int i = 0; i < NUM_EVENTS; i++){
		if(fgets(myline, 1024, myfile) == NULL) {
			break; // fewer rows than expected; remaining entries stay unset
		}
		char* tok = strtok(myline, DELIMITER);
		for(int j = 0; j < NUM_DIMENSIONS && tok != NULL; j++){
			retVal[i*NUM_DIMENSIONS + j] = (float)atof(tok);
			tok = strtok(NULL, DELIMITER);
		}
	}
	fclose(myfile);
	return retVal;
}
// Dispatches on the filename suffix: names ending in "bin" go to the binary
// reader, everything else is treated as delimited text. Returns whatever the
// chosen reader returns (NULL on failure).
float* ParseSampleInput(char* f){
	int length = strlen(f);
	// Guard before peeking at f+length-3: the original read out of bounds
	// for names shorter than 3 characters.
	if(length >= 3) {
		printf("File Extension: %s\n",f+length-3);
		if(strcmp(f+length-3,"bin") == 0) {
			return readBIN(f);
		}
	}
	return readCSV(f);
}
// Releases a device-side matrix previously allocated with hipMalloc;
// CUDA_SAFE_CALL aborts/reports on a failed free.
void FreeMatrix(float* d_matrix){
	CUDA_SAFE_CALL(hipFree(d_matrix));
}
// Computes this GPU's share of the NUM_CLUSTERS x NUM_CLUSTERS Q matrix on
// the device and copies the full-size buffer back to a freshly malloc'd host
// array (caller frees it; only the rows this GPU computed are meaningful).
// Each GPU handles NUM_CLUSTERS/num_gpus rows starting at start_row.
//   d_events, d_clusters, distanceMatrix: device pointers set up by caller
//   mdlTime: out-param, receives the kernel execution time in milliseconds
float* BuildQGPU(float* d_events, float* d_clusters, float* distanceMatrix, float* mdlTime, int gpu_id, int num_gpus, int my_num_events){
	float* d_matrix;
	int size = sizeof(float) * NUM_CLUSTERS*NUM_CLUSTERS;
	cudaTimer_t timer_gpu;    // kernel time, reported through mdlTime
	cudaTimer_t timer_memcpy; // device alloc + device->host copy time
	createTimer(&timer_gpu);
	createTimer(&timer_memcpy);
	startTimer(timer_memcpy);
	hipMalloc((void**)&d_matrix, size);
	printCudaError();
	stopTimer(timer_memcpy);
	startTimer(timer_gpu);
	// One block per (column-chunk, row) pair of this GPU's slice.
	dim3 grid(NUM_CLUSTERS / num_gpus, NUM_CLUSTERS);
	int start_row = gpu_id*(NUM_CLUSTERS/num_gpus);
	printf("GPU %d: Starting row for Q Matrix: %d\n",gpu_id,start_row);
	printf("Launching Q Matrix Kernel\n");
	hipLaunchKernelGGL(( CalculateQMatrixGPUUpgrade), dim3(grid), dim3(Q_THREADS), 0, 0, d_events, d_clusters, d_matrix, distanceMatrix, start_row, my_num_events);
	hipDeviceSynchronize();
	printCudaError();
	stopTimer(timer_gpu);
	startTimer(timer_memcpy);
	float* matrix = (float*)malloc(size);
	printf("Copying results to CPU\n");
	// FIX: the hipMemcpy result was stored in an unused local; the error is
	// already surfaced via hipGetLastError() below.
	hipMemcpy(matrix, d_matrix, size, hipMemcpyDeviceToHost);
	hipDeviceSynchronize();
	// FIX: never pass a runtime string as the printf format argument
	// (format-string bug if the error text ever contains '%').
	printf("%s\n", hipGetErrorString(hipGetLastError()));
	stopTimer(timer_memcpy);
	// FIX: removed a second stopTimer(timer_gpu) here that had no matching
	// startTimer; it double-counted the kernel time (plus the memcpy window)
	// into mdlTime.
	*mdlTime = getTimerValue(timer_gpu);
	printf("Processing time for MDL GPU: %f (ms) \n", *mdlTime);
	printf("Memcpy time for MDL GPU: %f (ms) \n", getTimerValue(timer_memcpy));
	deleteTimer(timer_gpu);
	deleteTimer(timer_memcpy);
	printCudaError();
	FreeMatrix(d_matrix);
	return matrix;
}
| 21763383bc73dc9b1807d2b9b8fe3200bc28d30d.cu | #include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include <cmeansMPI.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <cmeansMPI_kernel.cu>
#include "MDL.h"
// Prints the most recent CUDA runtime error (and clears the sticky error
// state via cudaGetLastError); silent when no error is pending.
void printCudaError() {
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) {
        return;
    }
    printf("%s\n", cudaGetErrorString(status));
}
// Accumulating GPU timer built on CUDA events. `et` holds the running total
// of elapsed milliseconds; it is heap-allocated so the struct can be passed
// by value (startTimer/stopTimer take it by value) while every copy still
// updates the same accumulator.
typedef struct {
    cudaEvent_t start;
    cudaEvent_t stop;
    float* et;
} cudaTimer_t;
// Initializes a timer: creates its start/stop CUDA events and allocates the
// shared elapsed-time accumulator, zeroed. Creation is serialized across
// OpenMP threads via a named critical section.
void createTimer(cudaTimer_t* t) {
    #pragma omp critical (create_timer)
    {
        cudaEventCreate(&t->start);
        cudaEventCreate(&t->stop);
        float* total = (float*) malloc(sizeof(float));
        *total = 0.0f;
        t->et = total;
    }
}
// Tears down a timer created by createTimer(): destroys both CUDA events and
// frees the shared accumulator, serialized the same way creation is.
void deleteTimer(cudaTimer_t t) {
    #pragma omp critical (delete_timer)
    {
        cudaEventDestroy(t.start);
        cudaEventDestroy(t.stop);
        free(t.et);
    }
}
// Records the start event on the default stream; pair with stopTimer() to
// add the interval to the timer's running total.
void startTimer(cudaTimer_t timer) {
    cudaEventRecord(timer.start,0);
}
// Records the stop event, waits for it to complete, and folds the elapsed
// time (ms) since the matching startTimer() into the timer's accumulator.
void stopTimer(cudaTimer_t t) {
    cudaEventRecord(t.stop, 0);
    cudaEventSynchronize(t.stop);
    float elapsed = 0.0f;
    cudaEventElapsedTime(&elapsed, t.start, t.stop);
    *(t.et) += elapsed;
}
// Returns the total elapsed milliseconds accumulated across all
// startTimer/stopTimer pairs since createTimer().
float getTimerValue(cudaTimer_t timer) {
    return *(timer.et);
}
/************************************************************************/
/* C-means Main */
/************************************************************************/
int main(int argc, char* argv[])
{
int rank, num_nodes, len, provided;
char name[MPI_MAX_PROCESSOR_NAME];
MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE,&provided);
MPI_Comm_size(MPI_COMM_WORLD,&num_nodes);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Get_processor_name(name, &len);
printf("Hello world from node %d of %d on %s\n",rank,num_nodes,name);
unsigned int timer_io; // Timer for I/O, such as reading FCS file and outputting result files
unsigned int timer_total; // Total time
unsigned int timer_main_cpu; // Total time
cutCreateTimer(&timer_io);
cutCreateTimer(&timer_total);
cutCreateTimer(&timer_main_cpu);
// determine the number of CUDA capable GPUs
int num_gpus = 0; // number of CUDA GPUs
cudaGetDeviceCount(&num_gpus);
if(num_gpus < 1)
{
printf("no CUDA capable devices were detected\n");
return 1;
}
// display CPU and GPU configuration
printf("number of host CPUs:\t%d\n", omp_get_num_procs());
printf("number of CUDA devices:\t%d\n", num_gpus);
for(int i = 0; i < num_gpus; i++)
{
cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop, i);
printf(" %d: %s\n", i, dprop.name);
}
printf("---------------------------\n");
int total_num_gpus = num_gpus * num_nodes;
cutStartTimer(timer_total);
// [program name] [data file]
if(argc != 2){
printf("Usage Error: must supply data file. e.g. programe_name @opt(flags) file.in\n");
return 1;
}
cutStartTimer(timer_io);
float* myEvents;
int elements_per_node, elements_being_sent;
elements_per_node = NUM_EVENTS / total_num_gpus * num_gpus * NUM_DIMENSIONS;
// Root reads input from file and distributes to each node
if(rank == 0) {
myEvents = ParseSampleInput(argv[1]);
MPI_Request* requests = (MPI_Request*) malloc(sizeof(MPI_Request)*num_nodes);
MPI_Status s;
// Send everything asynchronously
for(int i=1; i < num_nodes; i++) {
elements_being_sent = elements_per_node;
if(i == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Isend(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&requests[i]);
//MPI_Send(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD);
}
// Wait for the Isends to complete
for(int i=1; i < num_nodes; i++) {
MPI_Wait(&requests[i],&s);
}
free(requests);
elements_being_sent = elements_per_node; // so that its set properly for the root
} else {
myEvents = (float*) malloc(sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS);
elements_being_sent = elements_per_node;
if(rank == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Status s;
MPI_Recv(&(myEvents[elements_per_node*rank]),elements_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD,&s);
}
MPI_Barrier(MPI_COMM_WORLD);
cutStopTimer(timer_io);
cutStartTimer(timer_main_cpu);
//srand((unsigned)(time(0)));
srand(2012);
// Allocate arrays for the cluster centers
float* myClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
float* newClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
// Select random cluster centers
// double t1,t2;
generateInitialClusters(myClusters, myEvents);
// Create an array of arrays for temporary cluster centers from each GPU
float** tempClusters = (float**) malloc(sizeof(float*)*num_gpus);
float** tempDenominators = (float**) malloc(sizeof(float*)*num_gpus);
for(int i=0; i < num_gpus; i++) {
tempClusters[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
tempDenominators[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS);
memcpy(tempClusters[i],myClusters,sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
}
// Create an array of arrays for temporary Q matrix pieces from each GPU
float** q_matrices = (float**) malloc(sizeof(float*)*num_gpus);
// Create an array for the final Q matrix
float* q_matrix = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_CLUSTERS);
float diff; // used to track difference in cluster centers between iterations
// Transpose the events matrix
float* transposedEvents = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
for(int i=0; i<NUM_EVENTS; i++) {
for(int j=0; j<NUM_DIMENSIONS; j++) {
transposedEvents[j*NUM_EVENTS+i] = myEvents[i*NUM_DIMENSIONS+j];
}
}
float* memberships = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_EVENTS);
int* finalClusterConfig;
cutStopTimer(timer_main_cpu);
////////////////////////////////////////////////////////////////
// run as many CPU threads as there are CUDA devices
// num_gpus = 1;
// omp_set_num_threads(num_gpus); // create as many CPU threads as there are CUDA devices
#pragma omp parallel shared(myClusters,diff,tempClusters,tempDenominators,memberships,finalClusterConfig)
{
cudaTimer_t timer_memcpy; // Timer for GPU <---> CPU memory copying
cudaTimer_t timer_cpu; // Timer for processing on CPU
cudaTimer_t timer_gpu; // Timer for kernels on the GPU
cudaTimer_t timer_mpi; // Timer for MPI
unsigned int tid = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
int gpu_num = rank*num_gpus+tid;
printf("hello from thread %d of %d\n",tid,num_cpu_threads);
// set and check the CUDA device for this CPU thread
int gpu_id = -1;
cudaSetDevice(tid % num_gpus); // "% num_gpus" allows more CPU threads than GPU devices
cudaGetDevice(&gpu_id);
#pragma omp barrier
createTimer(&timer_memcpy);
createTimer(&timer_cpu);
createTimer(&timer_gpu);
createTimer(&timer_mpi);
printf("CPU thread %d (of %d) uses CUDA device %d\n", tid, num_cpu_threads, gpu_id);
// Compute starting/finishing indexes for the events for each gpu
int events_per_gpu = NUM_EVENTS / total_num_gpus;
int my_num_events = events_per_gpu;
if(gpu_num == (total_num_gpus-1)) {
my_num_events += NUM_EVENTS % total_num_gpus;
}
startTimer(timer_memcpy);
float* d_distanceMatrix;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_distanceMatrix, sizeof(float)*my_num_events*NUM_CLUSTERS));
#if !LINEAR
float* d_memberships;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_memberships, sizeof(float)*my_num_events*NUM_CLUSTERS));
#endif
float* d_E;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_E, sizeof(float)*my_num_events*NUM_DIMENSIONS));
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_C, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_nC;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_denoms;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_denoms, sizeof(float)*NUM_CLUSTERS));
int size = sizeof(float)*NUM_DIMENSIONS*my_num_events;
// Copying the transposed data is trickier since it's not all contigious for the relavant events
float* temp_fcs_data = (float*) malloc(size);
for(int d=0; d < NUM_DIMENSIONS; d++) {
memcpy(&temp_fcs_data[d*my_num_events],&transposedEvents[d*NUM_EVENTS + gpu_num*events_per_gpu],sizeof(float)*my_num_events);
}
CUDA_SAFE_CALL(cudaMemcpy( d_E, temp_fcs_data, size,cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
free(temp_fcs_data);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
stopTimer(timer_memcpy);
printf("Starting C-means\n");
int iterations = 0;
int num_blocks_distance = my_num_events / NUM_THREADS_DISTANCE;
if(my_num_events % NUM_THREADS_DISTANCE) {
num_blocks_distance++;
}
int num_blocks_membership = my_num_events / NUM_THREADS_MEMBERSHIP;
if(my_num_events % NUM_THREADS_DISTANCE) {
num_blocks_membership++;
}
int num_blocks_update = NUM_CLUSTERS / NUM_CLUSTERS_PER_BLOCK;
if(NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_blocks_update++;
}
do{
cudaTimer_t timer;
createTimer(&timer);
startTimer(timer);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
// Copy the cluster centers to the GPU
startTimer(timer_memcpy);
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
stopTimer(timer_memcpy);
startTimer(timer_gpu);
DEBUG("Launching ComputeDistanceMatrix kernel\n");
ComputeDistanceMatrix<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix, my_num_events);
#if LINEAR
// O(M) membership kernel
DEBUG("Launching ComputeMembershipMatrixLinear kernel\n");
ComputeMembershipMatrixLinear<<< num_blocks_membership, NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
UpdateClusterCentersGPU3<<< dim3(NUM_DIMENSIONS,num_blocks_update), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_distanceMatrix, d_denoms, my_num_events);
#else
// O(M^2) membership kernel
DEBUG("Launching ComputeMembershipMatrix kernel\n");
ComputeMembershipMatrix<<< dim3(num_blocks_membership,NUM_CLUSTERS), NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, d_memberships, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, my_num_events);
UpdateClusterCentersGPU3<<< dim3(NUM_DIMENSIONS,num_blocks_update), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, my_num_events);
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_memberships, d_denoms, my_num_events );
#endif
cudaThreadSynchronize();
printCudaError();
stopTimer(timer_gpu);
// Copy partial centers and denominators to host
startTimer(timer_memcpy);
cudaMemcpy(tempClusters[tid], d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS, cudaMemcpyDeviceToHost);
cudaMemcpy(tempDenominators[tid], d_denoms, sizeof(float)*NUM_CLUSTERS, cudaMemcpyDeviceToHost);
printCudaError();
stopTimer(timer_memcpy);
stopTimer(timer);
float thisTime = getTimerValue(timer);
DEBUG("Processing time for GPU %d: %f (ms) \n", tid, thisTime);
deleteTimer(timer);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// Sum up the partial cluster centers (numerators)
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] += tempClusters[i][c*NUM_DIMENSIONS+d];
}
}
}
// Sum up the denominator for each cluster
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
tempDenominators[0][c] += tempDenominators[i][c];
}
}
stopTimer(timer_cpu);
DEBUG("Reducing cluster values\n");
startTimer(timer_mpi);
if(rank == 0) {
MPI_Reduce(MPI_IN_PLACE,tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempClusters[0],tempClusters[1],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempClusters[0],tempClusters[1],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
MPI_Reduce(MPI_IN_PLACE,tempDenominators[0],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempDenominators[0],tempDenominators[1],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempDenominators[0],tempDenominators[1],sizeof(float)*NUM_CLUSTERS);
} else {
MPI_Reduce(tempClusters[0],0,NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce(tempDenominators[0],0,NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD); // not sure if neccesary...
stopTimer(timer_mpi);
startTimer(timer_cpu);
// Divide to get the final clusters
if(rank == 0) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] /= tempDenominators[0][c];
}
}
}
stopTimer(timer_cpu);
startTimer(timer_mpi);
DEBUG("Broadcasting Cluster Values\n");
MPI_Bcast(tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,0,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
startTimer(timer_cpu);
diff = 0.0;
for(int i=0; i < NUM_CLUSTERS; i++){
DEBUG("GPU %d, Cluster %d: ",tid,i);
for(int k = 0; k < NUM_DIMENSIONS; k++){
DEBUG("%f ",tempClusters[tid][i*NUM_DIMENSIONS + k]);
diff += fabs(myClusters[i*NUM_DIMENSIONS + k] - tempClusters[tid][i*NUM_DIMENSIONS + k]);
}
DEBUG("\n");
}
memcpy(myClusters,tempClusters[tid],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
DEBUG("Diff = %f\n", diff);
DEBUG("Done with iteration #%d\n", iterations);
stopTimer(timer_cpu);
}
#pragma omp barrier
iterations++;
DEBUG("\n");
} while(iterations < MIN_ITERS || (abs(diff) > THRESHOLD && iterations < MAX_ITERS));
#pragma omp master
{
if(rank == 0) {
printf("Iterations: %d\n",iterations);
}
}
#if ENABLE_OUTPUT
// Compute final membership vaues
startTimer(timer_gpu);
#if LINEAR
// O(M)
ComputeDistanceMatrix<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix, my_num_events);
ComputeNormalizedMembershipMatrixLinear<<< num_blocks_membership, NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix,my_num_events);
#else
// O(M^2)
ComputeNormalizedMembershipMatrix<<< dim3(num_blocks_membership,NUM_CLUSTERS), NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, d_memberships, my_num_events);
#endif
stopTimer(timer_gpu);
// Copy memberships from the GPU
float* temp_memberships = (float*) malloc(sizeof(float)*my_num_events*NUM_CLUSTERS);
startTimer(timer_memcpy);
#if LINEAR
cudaMemcpy(temp_memberships,d_distanceMatrix,sizeof(float)*my_num_events*NUM_CLUSTERS,cudaMemcpyDeviceToHost);
#else
cudaMemcpy(temp_memberships,d_memberships,sizeof(float)*my_num_events*NUM_CLUSTERS,cudaMemcpyDeviceToHost);
#endif
stopTimer(timer_memcpy);
startTimer(timer_cpu);
for(int c=0; c < NUM_CLUSTERS; c++) {
memcpy(&(memberships[c*NUM_EVENTS+gpu_num*events_per_gpu]),&(temp_memberships[c*my_num_events]),sizeof(float)*my_num_events);
}
stopTimer(timer_cpu);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// First transpose the memberships, makes it easier to gather the results between nodes
float* temp = (float*) malloc(sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
temp[e*NUM_CLUSTERS+c] = memberships[c*NUM_EVENTS+e];
}
}
memcpy(memberships,temp,sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
stopTimer(timer_cpu);
// Gather memberships on root
startTimer(timer_mpi);
int memberships_being_sent, memberships_per_node;
memberships_per_node = events_per_gpu*num_gpus*NUM_CLUSTERS;
if(rank == 0) {
for(int i=1; i < num_nodes; i++) {
memberships_being_sent = memberships_per_node;
if(i == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Status s;
MPI_Recv(&(temp[memberships_per_node*i]),memberships_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&s);
}
} else {
memberships_being_sent = memberships_per_node;
if(rank == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Send(&(memberships[memberships_per_node*rank]),memberships_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
// Tranpose the memberships again to get original ordering
startTimer(timer_cpu);
if(rank == 0) {
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c<NUM_CLUSTERS; c++) {
memberships[c*NUM_EVENTS+e] = temp[e*NUM_CLUSTERS+c];
}
}
}
free(temp);
stopTimer(timer_cpu);
}
#pragma omp barrier
free(temp_memberships);
#endif // #if ENABLE_OUTPUT
if(tid == 0) {
if(abs(diff) > THRESHOLD){
PRINT("Warning: c-means did not converge to the %f threshold provided\n", THRESHOLD);
}
PRINT("C-means complete\n");
}
#pragma omp barrier // sync threads
#if !ENABLE_MDL
if(tid == 0) {
// Don't attempt MDL, save all clusters
finalClusterConfig = (int*) malloc(sizeof(int)*NUM_CLUSTERS);
memset(finalClusterConfig,1,sizeof(int)*NUM_CLUSTERS);
}
#else
PRINT("Calculating Q Matrix Section %d\n",tid);
// Copy the latest clusters to the device
// (the current ones on the device are 1 iteration old)
startTimer(timer_memcpy);
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
stopTimer(timer_memcpy);
// Build Q matrix, each gpu handles NUM_DIMENSIONS/num_gpus rows of the matrix
q_matrices[tid] = BuildQGPU(d_E, d_C, d_distanceMatrix, &mdlTime, tid, num_gpus, my_num_events);
#pragma omp barrier // sync threads
if(tid == 0) {
// Combine the partial matrices
int num_matrix_elements = NUM_CLUSTERS*(NUM_CLUSTERS/num_gpus);
for(int i=0; i < num_gpus; i++) {
float* q_matrix_ptr = (float*) q_matrix+i*num_matrix_elements;
float* q_matrices_ptr = (float*) q_matrices[i]+i*num_matrix_elements;
memcpy(q_matrix_ptr,q_matrices_ptr,sizeof(float)*num_matrix_elements);
free(q_matrices[i]);
}
startTimer(timer_cpu);
DEBUG("Searching for optimal configuration...\n");
finalClusterConfig = TabuSearch(q_matrix, argv[1]);
stopTimer(timer_cpu);
DEBUG("Q Matrix:\n");
for(int row=0; row < NUM_CLUSTERS; row++) {
for(int col=0; col < NUM_CLUSTERS; col++) {
DEBUG("%.2e ",q_matrix[row*NUM_CLUSTERS+col]);
}
DEBUG("\n");
}
free(q_matrix);
}
mdlTime /= 1000.0; // CUDA timer returns time in milliseconds, normalize to seconds
#endif
fflush(stdout);
#pragma omp barrier
#pragma omp master
{
printf("\n\n");
printf("Node %d: Thread %d: GPU memcpy Time (ms): %f\n",rank,tid,getTimerValue(timer_memcpy));
printf("Node %d: Thread %d: CPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_cpu));
printf("Node %d: Thread %d: GPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_gpu));
printf("Node %d: Thread %d: MPI Time (ms): %f\n",rank,tid,getTimerValue(timer_mpi));
}
#if !CPU_ONLY
CUDA_SAFE_CALL(cudaFree(d_E));
CUDA_SAFE_CALL(cudaFree(d_C));
CUDA_SAFE_CALL(cudaFree(d_nC));
#endif
#pragma omp barrier
DEBUG("Thread %d done.\n",tid);
} // end of omp_parallel block
cutStartTimer(timer_io);
if(rank == 0) {
PRINT("Final Clusters are:\n");
int newCount = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
if(finalClusterConfig[i]){
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[newCount * NUM_DIMENSIONS + j] = myClusters[i*NUM_DIMENSIONS + j];
PRINT("%.3f\t", myClusters[i*NUM_DIMENSIONS + j]);
}
newCount++;
PRINT("\n");
}
}
#if ENABLE_OUTPUT
ReportSummary(newClusters, newCount, argv[1]);
ReportResults(myEvents, memberships, newCount, argv[1]);
#endif
}
cutStopTimer(timer_io);
cutStopTimer(timer_total);
if(rank == 0) {
printf("Total Time (ms): %f\n",cutGetTimerValue(timer_total));
printf("I/O Time (ms): %f\n",cutGetTimerValue(timer_io));
printf("Main Thread CPU Time (ms): %f\n",cutGetTimerValue(timer_main_cpu));
printf("\n\n");
}
free(newClusters);
free(myClusters);
free(myEvents);
free(transposedEvents);
MPI_Finalize();
return 0;
}
/**
 * Initialize cluster centers by sampling events at random.
 * Each of the NUM_CLUSTERS centroids is seeded with the full
 * NUM_DIMENSIONS coordinate vector of one event chosen uniformly at
 * random (the same event may seed more than one cluster).
 */
void generateInitialClusters(float* clusters, float* events){
    for(int c = 0; c < NUM_CLUSTERS; c++){
        // Pick a random event to serve as this cluster's initial center.
        int pick = rand() % NUM_EVENTS;
        float* dst = clusters + c * NUM_DIMENSIONS;
        float* src = events + pick * NUM_DIMENSIONS;
        for(int d = 0; d < NUM_DIMENSIONS; d++){
            dst[d] = src[d];
        }
    }
}
/**
 * Read an event matrix from a binary file.
 * Layout: a 4-byte row count, a 4-byte column count, then rows*cols
 * float32 values in row-major order.
 *
 * @param f path to the .bin file
 * @return malloc'd array of rows*cols floats (caller frees), or NULL if the
 *         file cannot be opened, the header is invalid, or the data is short.
 */
float* readBIN(char* f) {
    FILE* fin = fopen(f,"rb");
    if(fin == NULL) {
        printf("Error: unable to open %s\n",f);
        return NULL;
    }
    int nevents,ndims;
    // Header: two 4-byte integers (row count, then column count).
    if(fread(&nevents,4,1,fin) != 1 || fread(&ndims,4,1,fin) != 1) {
        printf("Error: truncated header in %s\n",f);
        fclose(fin);
        return NULL;
    }
    if(nevents <= 0 || ndims <= 0) {
        printf("Error: invalid dimensions %d x %d in %s\n",nevents,ndims,f);
        fclose(fin);
        return NULL;
    }
    int num_elements = (ndims)*(nevents);
    printf("Number of rows: %d\n",nevents);
    printf("Number of cols: %d\n",ndims);
    float* data = (float*) malloc(sizeof(float)*num_elements);
    if(data == NULL) {
        fclose(fin);
        return NULL;
    }
    // A short read here means the file body does not match its header.
    if(fread(data,sizeof(float),num_elements,fin) != (size_t)num_elements) {
        printf("Error: truncated data in %s\n",f);
        free(data);
        fclose(fin);
        return NULL;
    }
    fclose(fin);
    return data;
}
/**
 * Read an event matrix from a delimited text file.
 * Parses NUM_EVENTS rows of NUM_DIMENSIONS values separated by DELIMITER.
 * When LINE_LABELS is set, the first line is treated as a header and skipped.
 *
 * @param filename path to the CSV file
 * @return malloc'd NUM_EVENTS*NUM_DIMENSIONS float array (caller frees),
 *         or NULL if the file cannot be opened or memory allocation fails.
 */
float* readCSV(char* filename) {
    // Open the file exactly once; the original implementation opened it a
    // second time below and leaked the first FILE* handle.
    FILE* myfile = fopen(filename, "r");
    if(myfile == NULL){
        printf("Error: File DNE\n");
        return NULL;
    }
    char myline[1024];
    float* retVal = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
    if(retVal == NULL){
        fclose(myfile);
        return NULL;
    }
    #if LINE_LABELS
    // Discard the header row of column labels.
    fgets(myline, 1024, myfile);
    #endif
    for(int i = 0; i < NUM_EVENTS; i++){
        fgets(myline, 1024, myfile);
        retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
        for(int j = 1; j < NUM_DIMENSIONS; j++){
            retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
        }
    }
    fclose(myfile);
    return retVal;
}
/**
 * Parse an input sample file, dispatching on its extension.
 * Paths ending in "bin" use the binary reader; everything else is parsed
 * as delimited text.
 *
 * @param f path to the input file
 * @return malloc'd event matrix (caller frees), or NULL on failure
 */
float* ParseSampleInput(char* f){
    int length = strlen(f);
    // Guard: names shorter than 3 characters cannot carry an extension, and
    // f+length-3 would otherwise point before the start of the string (UB).
    if(length >= 3) {
        printf("File Extension: %s\n",f+length-3);
        if(strcmp(f+length-3,"bin") == 0) {
            return readBIN(f);
        }
    }
    return readCSV(f);
}
// Release a device-memory matrix previously allocated with cudaMalloc.
// CUDA_SAFE_CALL is a project macro -- presumably it validates the returned
// cudaError_t and reports/aborts on failure; defined outside this file.
void FreeMatrix(float* d_matrix){
CUDA_SAFE_CALL(cudaFree(d_matrix));
}
/**
 * Build this GPU's band of the Q matrix used by MDL cluster selection.
 * Launches CalculateQMatrixGPUUpgrade over a (NUM_CLUSTERS/num_gpus) x
 * NUM_CLUSTERS grid; this GPU computes rows starting at
 * gpu_id*(NUM_CLUSTERS/num_gpus).
 *
 * @param d_events        device pointer to the event data
 * @param d_clusters      device pointer to the current cluster centers
 * @param distanceMatrix  device scratch distance matrix
 * @param mdlTime         out: GPU processing time in milliseconds
 * @param gpu_id          index of this GPU among num_gpus
 * @param num_gpus        number of GPUs splitting the rows
 * @param my_num_events   number of events owned by this GPU
 * @return malloc'd NUM_CLUSTERS*NUM_CLUSTERS float array holding this GPU's
 *         portion of Q (caller frees), or NULL if host allocation fails.
 */
float* BuildQGPU(float* d_events, float* d_clusters, float* distanceMatrix, float* mdlTime, int gpu_id, int num_gpus, int my_num_events){
    float* d_matrix;
    int size = sizeof(float) * NUM_CLUSTERS*NUM_CLUSTERS;
    cudaTimer_t timer_gpu;
    cudaTimer_t timer_memcpy;
    createTimer(&timer_gpu);
    createTimer(&timer_memcpy);

    startTimer(timer_memcpy);
    cudaMalloc((void**)&d_matrix, size);
    printCudaError();
    stopTimer(timer_memcpy);

    startTimer(timer_gpu);
    // Grid x-dim covers this GPU's share of rows, y-dim spans all clusters.
    dim3 grid(NUM_CLUSTERS / num_gpus, NUM_CLUSTERS);
    int start_row = gpu_id*(NUM_CLUSTERS/num_gpus);
    printf("GPU %d: Starting row for Q Matrix: %d\n",gpu_id,start_row);
    printf("Launching Q Matrix Kernel\n");
    CalculateQMatrixGPUUpgrade<<<grid, Q_THREADS>>>(d_events, d_clusters, d_matrix, distanceMatrix, start_row, my_num_events);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // modern equivalent and also surfaces asynchronous kernel errors.
    cudaDeviceSynchronize();
    printCudaError();
    stopTimer(timer_gpu);

    startTimer(timer_memcpy);
    float* matrix = (float*)malloc(size);
    if(matrix == NULL){
        printf("Unable to allocate host memory for Q matrix\n");
        FreeMatrix(d_matrix);
        deleteTimer(timer_gpu);
        deleteTimer(timer_memcpy);
        return NULL;
    }
    printf("Copying results to CPU\n");
    cudaMemcpy(matrix, d_matrix, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Print the last CUDA status through an explicit format string; passing
    // the message itself as the format would misbehave if it contained '%'.
    printf("%s\n", cudaGetErrorString(cudaGetLastError()));
    stopTimer(timer_memcpy);
    // NOTE(review): timer_gpu is stopped a second time here without a
    // matching restart, exactly as in the original code -- confirm the
    // project timer semantics before changing this.
    stopTimer(timer_gpu);

    *mdlTime = getTimerValue(timer_gpu);
    printf("Processing time for MDL GPU: %f (ms) \n", *mdlTime);
    printf("Memcpy time for MDL GPU: %f (ms) \n", getTimerValue(timer_memcpy));
    deleteTimer(timer_gpu);
    deleteTimer(timer_memcpy);
    printCudaError();
    FreeMatrix(d_matrix);
    return matrix;
}
|
a9ddec0a39868b9e3be5403cc83ba5300ff5edd6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaComputeYGradient.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *y_gradient = NULL;
hipMalloc(&y_gradient, XSIZE*YSIZE);
unsigned char *channel = NULL;
hipMalloc(&channel, XSIZE*YSIZE);
int image_width = XSIZE;
int image_height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cudaComputeYGradient), dim3(gridBlock),dim3(threadBlock), 0, 0, y_gradient,channel,image_width,image_height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cudaComputeYGradient), dim3(gridBlock),dim3(threadBlock), 0, 0, y_gradient,channel,image_width,image_height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cudaComputeYGradient), dim3(gridBlock),dim3(threadBlock), 0, 0, y_gradient,channel,image_width,image_height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a9ddec0a39868b9e3be5403cc83ba5300ff5edd6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaComputeYGradient.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *y_gradient = NULL;
cudaMalloc(&y_gradient, XSIZE*YSIZE);
unsigned char *channel = NULL;
cudaMalloc(&channel, XSIZE*YSIZE);
int image_width = XSIZE;
int image_height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaComputeYGradient<<<gridBlock,threadBlock>>>(y_gradient,channel,image_width,image_height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaComputeYGradient<<<gridBlock,threadBlock>>>(y_gradient,channel,image_width,image_height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaComputeYGradient<<<gridBlock,threadBlock>>>(y_gradient,channel,image_width,image_height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f3258bc5dfda0496cf48e421a85b7092ad482248.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "optimizer.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
LegionRuntime::Logger::Category log_optimizer("optimizer");
// SGD parameter update kernel with L2 weight decay and optional (Nesterov)
// momentum, applied element-wise over `count` parameters.
//   WGrad : gradient of the loss w.r.t. W (read-only)
//   V     : momentum buffer, updated in place (unused when momentum == 0)
//   W     : parameters, updated in place
// NOTE(review): CUDA_KERNEL_LOOP (from cuda_helper.h) presumably iterates
// `i` over [0, count) grid-stride style -- confirm against the macro.
__global__
void sgd_update(int count, float lr, float weight_decay,
float momentum, bool nesterov,
const float* WGrad, float* V, float* W)
{
// Reference: https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html#SGD
CUDA_KERNEL_LOOP(i, count)
{
// Effective gradient with the L2 weight-decay term folded in.
float gt = WGrad[i] + weight_decay * W[i];
if (momentum > 0.0f) {
// Accumulate the running momentum buffer.
V[i] = V[i] * momentum + gt;
if (nesterov)
gt = gt + momentum * V[i];
else
gt = V[i];
}
W[i] -= lr * gt;
}
}
__host__
void SGDOptimizer::update_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const SGDOptimizer* op = (SGDOptimizer*) task->args;
if (op->momentum > 0.0f) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
} else {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
}
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL;
size_t size = 0, num_replicas = 0;
switch(domain.get_dim()) {
case 1:
{
TensorAccessorR<float, 1> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 1> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim(); i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 1> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
case 2:
{
TensorAccessorR<float, 2> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim(); i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 2> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
default:
{
// Unsupported dims
assert(false);
}
}
// Step 1: gather gradients in the first replica
for (int i = 1; i < num_replicas; i++) {
const float* src = w_grad_ptr + i * size;
hipLaunchKernelGGL(( apply_add_with_scale), dim3(GET_BLOCKS(size)), dim3(CUDA_NUM_THREADS), 0, 0,
(float*) w_grad_ptr, src, size, 1.0f);
}
// Step 2: SGD update
hipLaunchKernelGGL(( sgd_update), dim3(GET_BLOCKS(size)), dim3(CUDA_NUM_THREADS), 0, 0,
size, op->lr, op->weight_decay, op->momentum, op->nesterov,
w_grad_ptr, v_ptr, w_ptr);
checkCUDA(hipDeviceSynchronize());
}
| f3258bc5dfda0496cf48e421a85b7092ad482248.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "optimizer.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
LegionRuntime::Logger::Category log_optimizer("optimizer");
// SGD parameter update kernel with L2 weight decay and optional (Nesterov)
// momentum, applied element-wise over `count` parameters.
//   WGrad : gradient of the loss w.r.t. W (read-only)
//   V     : momentum buffer, updated in place (unused when momentum == 0)
//   W     : parameters, updated in place
// NOTE(review): CUDA_KERNEL_LOOP (from cuda_helper.h) presumably iterates
// `i` over [0, count) grid-stride style -- confirm against the macro.
__global__
void sgd_update(int count, float lr, float weight_decay,
float momentum, bool nesterov,
const float* WGrad, float* V, float* W)
{
// Reference: https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html#SGD
CUDA_KERNEL_LOOP(i, count)
{
// Effective gradient with the L2 weight-decay term folded in.
float gt = WGrad[i] + weight_decay * W[i];
if (momentum > 0.0f) {
// Accumulate the running momentum buffer.
V[i] = V[i] * momentum + gt;
if (nesterov)
gt = gt + momentum * V[i];
else
gt = V[i];
}
W[i] -= lr * gt;
}
}
__host__
void SGDOptimizer::update_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const SGDOptimizer* op = (SGDOptimizer*) task->args;
if (op->momentum > 0.0f) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
} else {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
}
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL;
size_t size = 0, num_replicas = 0;
switch(domain.get_dim()) {
case 1:
{
TensorAccessorR<float, 1> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 1> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim(); i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 1> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
case 2:
{
TensorAccessorR<float, 2> accWGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accW(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
for (int i = 0; i < domain.get_dim(); i++) {
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]);
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]);
}
size = accW.rect.volume();
assert(accWGrad.rect.volume() % accW.rect.volume() == 0);
num_replicas = accWGrad.rect.volume() / accW.rect.volume();
w_grad_ptr = accWGrad.ptr;
w_ptr = accW.ptr;
if (op->momentum > 0.0f) {
TensorAccessorW<float, 2> accV(
regions[2], task->regions[2], FID_DATA, ctx, runtime,
true/*readOutput*/);
assert(accW.rect == accV.rect);
v_ptr = accV.ptr;
}
break;
}
default:
{
// Unsupported dims
assert(false);
}
}
// Step 1: gather gradients in the first replica
for (int i = 1; i < num_replicas; i++) {
const float* src = w_grad_ptr + i * size;
apply_add_with_scale<<<GET_BLOCKS(size), CUDA_NUM_THREADS>>>(
(float*) w_grad_ptr, src, size, 1.0f);
}
// Step 2: SGD update
sgd_update<<<GET_BLOCKS(size), CUDA_NUM_THREADS>>>(
size, op->lr, op->weight_decay, op->momentum, op->nesterov,
w_grad_ptr, v_ptr, w_ptr);
checkCUDA(cudaDeviceSynchronize());
}
|
061be652fd8ce75d850b607127307abd499a7dda.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file
* @author Wayne Madden <s3197676@student.rmit.edu.au>
* @version 0.3
*
* @section LICENSE
* Free to re-use and reference from within code as long as the original owner
* is referenced as per GNU standards
*
* @section DESCRIPTION
* Basic matrix solver. Accepts two input matrices A and B in the form of
* equation 'A . X = B'. Uses gaussian elimination.
*/
#include "SolverMain.h"
/**
* Main method of matrix solver program
*
* @param argc Number of items to be stored
* @param argsv[] Size to be used for each item by the Memory Manager
*/
int main(int argc, char* argv[])
{
float *hA, *dA;
float *hX, *dX;
float *hB, *dB;
int i, j;
int matrixSide;
int status;
FILE* fp;
char line[BUFFER_SIZE];
char* token;
//LARGE_INTEGER start, end, freq;
int n, gridCount, blockCount, threads;
/*validate arguments*/
if (argc != 4)
{
cout << "Invalid arguments! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*necesary for microsecond accuracy*/
//QueryPerformanceFrequency(&freq);
/*1. init*/
//QueryPerformanceCounter(&start);
/*set device*/
status = hipSetDevice(0);
if (status != hipSuccess)
{
cout << "No valid device found! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*read command line input*/
matrixSide = atoi(argv[1]);
/*allocate host memory*/
hA = new float[matrixSide * matrixSide];
hX = new float[matrixSide];
hB = new float[matrixSide];
/*check host memory*/
if (hA == NULL || hX == NULL || hB == NULL)
{
cout << "Unable to allocate host memory! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*allocate device memory*/
status = hipSuccess;
status += hipMalloc((void**) &dA, sizeof(float) * matrixSide * matrixSide);
status += hipMalloc((void**) &dX, sizeof(float) * matrixSide);
status += hipMalloc((void**) &dB, sizeof(float) * matrixSide);
/*check device memory*/
if (status != hipSuccess)
{
cout << "Unable to allocate host memory! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*read matrix a into memory*/
fp = fopen(argv[2], "r");
i = 0;
while (i < matrixSide && fgets(line, 1000000, fp) != NULL)
{
j = 0;
token = strtok(line, " ,");
do
{
hA[i * matrixSide + j] = atof(token);
j++;
} while (j < matrixSide && (token = strtok(NULL, " ,")) != NULL);
i++;
}
fclose(fp);
/*read matrix b into memory*/
fp = fopen(argv[3], "r");
i = 0;
while (i < matrixSide && fgets(line, 1000, fp) != NULL)
{
hB[i] = atof(line);
i++;
}
fclose(fp);
/*copy host memory to device*/
hipMemcpy(dA, hA, sizeof(float) * matrixSide * matrixSide,
hipMemcpyHostToDevice);
hipMemcpy(dB, hB, sizeof(float) * matrixSide, hipMemcpyHostToDevice);
//QueryPerformanceCounter(&end);
//cout << (double)(end.QuadPart - start.QuadPart) / freq.QuadPart << ",";
/*first display*/
displayEquation(hA, NULL, hB, matrixSide);
/*calculate threads per block per grid. blockDim is optimally a multiple of
32 (warp size) to keep sm busy (better, 64 as two warps can be interleaved)*/
n = 2;
threads = (matrixSide / (WARP_SIZE * n) + 1) * (WARP_SIZE * n);
do
{
gridCount = threads / (WARP_SIZE * n);
blockCount = threads / gridCount;
n += 2;
} while (gridCount > GRID_MAX);
if (blockCount >= 768)
{
cout << "Matrix is too large for this solver! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*2. solve matrix*/
//QueryPerformanceCounter(&start);
gaussianElimination(dA, dX, dB, matrixSide, gridCount, blockCount);
//QueryPerformanceCounter(&end);
//cout << (double)(end.QuadPart - start.QuadPart) / freq.QuadPart << ",";
/*copy device memory to host*/
hipMemcpy(hX, dX, sizeof(float) * matrixSide, hipMemcpyDeviceToHost);
/*display the solved values of matrix X*/
displayEquation(hA, hX, hB, matrixSide);
/*4. cleanup*/
//QueryPerformanceCounter(&start);
/*free host memory*/
delete[] hA;
delete[] hX;
delete[] hB;
/*free device memory*/
hipFree(dA);
hipFree(dX);
hipFree(dB);
/*reset device for profiling tool traces*/
hipDeviceReset();
//QueryPerformanceCounter(&end);
//cout << (double)(end.QuadPart - start.QuadPart) / freq.QuadPart;
/*prompt to continue - to allow the user to read output before exiting*/
cout << "Press enter to continue...";
cin.ignore(1);
exit(EXIT_SUCCESS);
}
| 061be652fd8ce75d850b607127307abd499a7dda.cu | /**
* @file
* @author Wayne Madden <s3197676@student.rmit.edu.au>
* @version 0.3
*
* @section LICENSE
* Free to re-use and reference from within code as long as the original owner
* is referenced as per GNU standards
*
* @section DESCRIPTION
* Basic matrix solver. Accepts two input matrices A and B in the form of
* equation 'A . X = B'. Uses gaussian elimination.
*/
#include "SolverMain.h"
/**
* Main method of matrix solver program
*
* @param argc Number of items to be stored
* @param argsv[] Size to be used for each item by the Memory Manager
*/
int main(int argc, char* argv[])
{
float *hA, *dA;
float *hX, *dX;
float *hB, *dB;
int i, j;
int matrixSide;
int status;
FILE* fp;
char line[BUFFER_SIZE];
char* token;
//LARGE_INTEGER start, end, freq;
int n, gridCount, blockCount, threads;
/*validate arguments*/
if (argc != 4)
{
cout << "Invalid arguments! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*necesary for microsecond accuracy*/
//QueryPerformanceFrequency(&freq);
/*1. init*/
//QueryPerformanceCounter(&start);
/*set device*/
status = cudaSetDevice(0);
if (status != cudaSuccess)
{
cout << "No valid device found! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*read command line input*/
matrixSide = atoi(argv[1]);
/*allocate host memory*/
hA = new float[matrixSide * matrixSide];
hX = new float[matrixSide];
hB = new float[matrixSide];
/*check host memory*/
if (hA == NULL || hX == NULL || hB == NULL)
{
cout << "Unable to allocate host memory! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*allocate device memory*/
status = cudaSuccess;
status += cudaMalloc((void**) &dA, sizeof(float) * matrixSide * matrixSide);
status += cudaMalloc((void**) &dX, sizeof(float) * matrixSide);
status += cudaMalloc((void**) &dB, sizeof(float) * matrixSide);
/*check device memory*/
if (status != cudaSuccess)
{
cout << "Unable to allocate host memory! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*read matrix a into memory*/
fp = fopen(argv[2], "r");
i = 0;
while (i < matrixSide && fgets(line, 1000000, fp) != NULL)
{
j = 0;
token = strtok(line, " ,");
do
{
hA[i * matrixSide + j] = atof(token);
j++;
} while (j < matrixSide && (token = strtok(NULL, " ,")) != NULL);
i++;
}
fclose(fp);
/*read matrix b into memory*/
fp = fopen(argv[3], "r");
i = 0;
while (i < matrixSide && fgets(line, 1000, fp) != NULL)
{
hB[i] = atof(line);
i++;
}
fclose(fp);
/*copy host memory to device*/
cudaMemcpy(dA, hA, sizeof(float) * matrixSide * matrixSide,
cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, sizeof(float) * matrixSide, cudaMemcpyHostToDevice);
//QueryPerformanceCounter(&end);
//cout << (double)(end.QuadPart - start.QuadPart) / freq.QuadPart << ",";
/*first display*/
displayEquation(hA, NULL, hB, matrixSide);
/*calculate threads per block per grid. blockDim is optimally a multiple of
32 (warp size) to keep sm busy (better, 64 as two warps can be interleaved)*/
n = 2;
threads = (matrixSide / (WARP_SIZE * n) + 1) * (WARP_SIZE * n);
do
{
gridCount = threads / (WARP_SIZE * n);
blockCount = threads / gridCount;
n += 2;
} while (gridCount > GRID_MAX);
if (blockCount >= 768)
{
cout << "Matrix is too large for this solver! Press enter to continue...";
cin.ignore(1);
exit(EXIT_FAILURE);
}
/*2. solve matrix*/
//QueryPerformanceCounter(&start);
gaussianElimination(dA, dX, dB, matrixSide, gridCount, blockCount);
//QueryPerformanceCounter(&end);
//cout << (double)(end.QuadPart - start.QuadPart) / freq.QuadPart << ",";
/*copy device memory to host*/
cudaMemcpy(hX, dX, sizeof(float) * matrixSide, cudaMemcpyDeviceToHost);
/*display the solved values of matrix X*/
displayEquation(hA, hX, hB, matrixSide);
/*4. cleanup*/
//QueryPerformanceCounter(&start);
/*free host memory*/
delete[] hA;
delete[] hX;
delete[] hB;
/*free device memory*/
cudaFree(dA);
cudaFree(dX);
cudaFree(dB);
/*reset device for profiling tool traces*/
cudaDeviceReset();
//QueryPerformanceCounter(&end);
//cout << (double)(end.QuadPart - start.QuadPart) / freq.QuadPart;
/*prompt to continue - to allow the user to read output before exiting*/
cout << "Press enter to continue...";
cin.ignore(1);
exit(EXIT_SUCCESS);
}
|
5950e45b9209b95da3f4fa6eb84999400d2ec430.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TRWP.h"
#include "commonCUDA.cuh"
#ifdef __cplusplus
extern "C" {
#endif
__global__ void CostAggregateKernelBack(const Param param,
const uint n_thread_required,
float* dcost_final_ptr,
float* dunary,
float* dmsg_ptr) {
// cost_final=unary+sum{msg_update}
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n_thread_required) return;
float dcost_final_value = dcost_final_ptr[tid];
dunary[tid] = dcost_final_value;
for (uint dir = 0; dir < param.n_dir; ++dir)
dmsg_ptr[dir * n_thread_required + tid] = dcost_final_value;
__syncthreads();
}
// Backward of unary_update = rho*(unary + sum{msg} - msg_dir) - msg_dir_inv.
// Accumulates the gradient of unary_update into dunary and into each
// direction's dmsg slice (positive rho-scaled term for the sum, corrections
// for the subtracted msg_dir and msg_dir_inv terms).
// One thread per element of (batch, n_cv, h, w, n_disp).
__global__ void UpdateUnaryKernelBack(const Param param,
                                      const uint n_thread_required,
                                      float* dunary_update_ptr,
                                      float* dunary_ptr,
                                      float* dmsg_ptr) {
  uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*n_cv*h*w*n_disp
  if (tid >= n_thread_required) return;
  uint dir = param.dir, dir_inv = param.dir_inv, n_dir = param.n_dir;
  float rho = param.rho;
  float dunary_update_value = dunary_update_ptr[tid];
  float dunary_update_value_rho = rho * dunary_update_value;
  // BUG FIX (readability/fragility): the original loop counter was also
  // named `dir`, shadowing `param.dir` above; renamed to `d` so the
  // correction lines below unambiguously use the direction from `param`.
  for (uint d = 0; d < n_dir; ++d)
    atomicAdd(&dmsg_ptr[d * n_thread_required + tid], dunary_update_value_rho);
  atomicAdd(&dunary_ptr[tid], dunary_update_value_rho);
  // Remove the rho-scaled contribution wrongly added to msg_dir by the loop,
  // and subtract the unscaled gradient for the msg_dir_inv term.
  atomicAdd(&dmsg_ptr[dir * n_thread_required + tid], -dunary_update_value_rho);
  atomicAdd(&dmsg_ptr[dir_inv * n_thread_required + tid], -dunary_update_value);
  // The trailing __syncthreads() was removed: it followed a divergent early
  // return (undefined behavior) and nothing is read across threads.
}
// Backward pass over horizontal trees (directions 0 and 1): each tree is one
// image row; n_thread_a_tree threads serve one tree, one thread per disparity
// label, walking the row node by node in the w_step direction. Gradients are
// scattered with atomics into dmsg, dunary_update, dcontext and dedge_weights
// because different trees can touch the same context/edge cells.
__global__ void HorizontalKernelBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const uchar* msg_min_index,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
// Block-wide scratch holding one node's dmsg column (one float per label).
// NOTE(review): being block-wide, this presumably assumes all threads of a
// block process the same tree node at a time — confirm against the launch
// configuration in BackwardCUDA.
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
float rho = param.rho;
int w_step = param.w_step;
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
uint current_d = threadIdx.x % n_thread_a_tree;
// n_disp == 21 switches to the full pairwise context table (presumably a
// 21-class segmentation configuration — confirm).
bool enable_seg = (n_disp == 21);
// NOTE(review): these early returns diverge before the __syncthreads() calls
// below; this is only well-defined if whole blocks take the same path —
// worth verifying with the launch geometry.
if (tid >= n_thread_required) return;
if (current_d >= n_disp) return;
// Base offsets into the (batch,n_cv,h,w,n_disp) gradient volume and the
// (batch,n_cv,h,w) edge-weight plane for this thread's sample.
uint unary_base = tid / (n_trees * n_thread_a_tree) * height * width * n_disp;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int h_start = tree_id, w_start = (w_step > 0) ? 0 : (width - 1);
uint edge_base = tid / (n_thread_a_tree * n_trees) * height * width;
// The front node is in accordance with forward pass, use + *_step
// msg_min_index(batch,n_cv,h,w,n_disp)
for (uint i = 0; i < width; ++i) {
int current_node_h = h_start;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_w && current_node_w < width &&
0 <= front_node_w && front_node_w < width) {
uint msg_offset_base = unary_base + current_node_h * width * n_disp + current_node_w * n_disp;
uint msg_offset = msg_offset_base + current_d;
uint msg_index_offset = tid / n_thread_a_tree * width + current_node_w;
uint edge_weight_offset = edge_base + current_node_h * width + current_node_w;
// Stage this node's dmsg column, then undo the forward normalization.
dmsg_update_shared[current_d] = dmsg[msg_offset];
__syncthreads();
MsgNormNaiveBack(param.n_disp, current_d, msg_norm_index[msg_index_offset], dmsg_update_shared);
// Route the gradient to the label the forward min-selection picked here.
uint front_d = uint(msg_min_index[msg_offset]);
float value = dmsg_update_shared[current_d];
float value_rho = rho * value;
uint offset = unary_base + front_node_h * width * n_disp + front_node_w * n_disp + front_d;
uint context_offset = 0;
if (enable_seg)
// Full pairwise table, addressed by the (min,max) label pair.
context_offset = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
// Stereo-style context indexed by absolute label difference.
context_offset = std::abs(int(current_d) - int(front_d));
atomicAdd(&dunary_update[offset], value);
atomicAdd(&dmsg[offset], value_rho);
// Product rule: d(edge_weight * context) contributes to both factors.
atomicAdd(&dedge_weights[edge_weight_offset], context[context_offset] * value);
atomicAdd(&dcontext[context_offset], edge_weights[edge_weight_offset] * value);
__syncthreads();
}
}
}
// Backward pass over "narrow" diagonal trees (|h_step| > |w_step|): chains
// step more than one row per column, so start coordinates are derived from a
// modulus/ratio decomposition of the tree id. One thread per disparity label
// per tree; gradients are scattered with atomics as in HorizontalKernelBack.
__global__ void DiagonalKernelNarrowBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const uchar* msg_min_index,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
// Block-wide scratch for one node's dmsg column (one float per label).
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
float rho = param.rho;
int h_step = param.h_step, w_step = param.w_step;
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*n_trees*n_thread_a_tree
uint current_d = threadIdx.x % n_thread_a_tree;
uint h_step_abs = std::abs(h_step);
// n_disp == 21 selects the full pairwise context table (presumably a
// segmentation configuration — confirm).
bool enable_seg = (n_disp == 21);
// NOTE(review): early returns before the __syncthreads() below are only
// well-defined if whole blocks take the same path — worth verifying.
if (tid >= n_thread_required) return;
if (current_d >= n_disp) return;
uint unary_base = tid / (n_trees * n_thread_a_tree) * height * width * n_disp;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
// Shift tree ids so trees entering from the top/side map to valid starts.
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
int common1 = tree_id_shift % h_step_abs;
float common2 = float(tree_id_shift) / float(h_step_abs); // This must be float NOT int, will affect ceilf and floorf
int h_start = 0, w_start = 0;
uint edge_base = tid / (n_thread_a_tree * n_trees) * height * width;
// Use a common mode to calculate start points for shortest chains, read my notes for clarity
if (w_step > 0) {
h_start = (h_step_abs - common1) % h_step_abs;
w_start = ceilf(common2);
} else {
h_start = common1;
w_start = floorf(common2);
}
// Mirror the start row for upward sweeps.
if (h_step < 0) h_start = height - 1 - h_start;
// Maximum number of h_step-sized hops that fit in the image height.
uint roll_step = (height - 1) / h_step_abs;
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width) {
uint msg_offset_base = unary_base + current_node_h * width * n_disp + current_node_w * n_disp;
uint msg_offset = msg_offset_base + current_d;
uint msg_index_offset = tid / (n_thread_a_tree * n_trees) * height * width + current_node_h * width + current_node_w;
uint edge_weight_offset = edge_base + current_node_h * width + current_node_w;
// Stage this node's dmsg column, then undo the forward normalization.
dmsg_update_shared[current_d] = dmsg[msg_offset];
__syncthreads();
MsgNormNaiveBack(param.n_disp, current_d, msg_norm_index[msg_index_offset], dmsg_update_shared);
// Route the gradient to the label the forward min-selection picked here.
uint front_d = uint(msg_min_index[msg_offset]);
float value = dmsg_update_shared[current_d];
float value_rho = rho * value;
uint offset = unary_base + front_node_h * width * n_disp + front_node_w * n_disp + front_d;
uint context_offset = 0;
if (enable_seg)
context_offset = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
context_offset = std::abs(int(current_d) - int(front_d));
atomicAdd(&dunary_update[offset], value);
atomicAdd(&dmsg[offset], value_rho);
// Product rule: d(edge_weight * context) contributes to both factors.
atomicAdd(&dedge_weights[edge_weight_offset], context[context_offset] * value);
atomicAdd(&dcontext[context_offset], edge_weights[edge_weight_offset] * value);
__syncthreads();
}
}
}
// Backward pass over "wide" diagonal trees (|h_step| <= |w_step|) — also
// reused by BackwardCUDA for the purely vertical directions (dir 2 and 3).
// Start rows are fixed at the top/bottom edge; start columns are the shifted
// tree id. One thread per disparity label per tree; gradients are scattered
// with atomics as in HorizontalKernelBack.
__global__ void DiagonalKernelWideBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const uchar* msg_min_index,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
// Block-wide scratch for one node's dmsg column (one float per label).
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
float rho = param.rho;
int h_step = param.h_step, w_step = param.w_step;
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
uint current_d = threadIdx.x % n_thread_a_tree;
// n_disp == 21 selects the full pairwise context table (presumably a
// segmentation configuration — confirm).
bool enable_seg = (n_disp == 21);
// NOTE(review): early returns before the __syncthreads() below are only
// well-defined if whole blocks take the same path — worth verifying.
if (tid >= n_thread_required) return;
if (current_d >= n_disp) return;
uint unary_base = tid / (n_trees * n_thread_a_tree) * height * width * n_disp;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
// Shift tree ids so trees entering from the side map to valid start columns.
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
uint h_step_abs = std::abs(h_step), roll_step = (height - 1) / h_step_abs;
int h_start = (h_step > 0) ? 0 : (height - 1), w_start = tree_id_shift;
uint edge_base = tid / (n_thread_a_tree * n_trees) * height * width;
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width) {
uint msg_offset_base = unary_base + current_node_h * width * n_disp + current_node_w * n_disp;
uint msg_offset = msg_offset_base + current_d;
uint msg_index_offset = tid / (n_thread_a_tree * n_trees) * height * width + current_node_h * width + current_node_w;
uint edge_weight_offset = edge_base + current_node_h * width + current_node_w;
// Stage this node's dmsg column, then undo the forward normalization.
dmsg_update_shared[current_d] = dmsg[msg_offset];
__syncthreads();
MsgNormNaiveBack(param.n_disp, current_d, msg_norm_index[msg_index_offset], dmsg_update_shared);
// Route the gradient to the label the forward min-selection picked here.
uint front_d = uint(msg_min_index[msg_offset]);
float value = dmsg_update_shared[current_d];
float value_rho = rho * value;
uint offset = unary_base + front_node_h * width * n_disp + front_node_w * n_disp + front_d;
uint context_offset = 0;
if (enable_seg)
context_offset = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
context_offset = std::abs(int(current_d) - int(front_d));
atomicAdd(&dunary_update[offset], value);
atomicAdd(&dmsg[offset], value_rho);
// Product rule: d(edge_weight * context) contributes to both factors.
atomicAdd(&dedge_weights[edge_weight_offset], context[context_offset] * value);
atomicAdd(&dcontext[context_offset], edge_weights[edge_weight_offset] * value);
__syncthreads();
}
}
}
// Backward pass of TRWP message passing (HIP build).
// Replays the forward schedule in reverse — iterations n_iter-1..0, and
// within each iteration directions n_dir-1..0. For each direction it runs
// the matching tree kernel (horizontal / vertical / diagonal) to
// back-propagate through that sweep, folds dunary_update back into
// dunary/dmsg via UpdateUnaryKernelBack, then clears the scratch buffers.
// All tensors are expected to be contiguous device tensors;
// msg_min_index has shape (n_iter, n_dir, batch, n_cv, h, w, n_disp) and
// drives all size computations.
void BackwardCUDA(const float rho,
const at::Tensor context,
const at::Tensor edge_weights,
const at::Tensor dcost_final,
const at::Tensor msg_min_index,
const at::Tensor msg_norm_index,
at::Tensor dunary,
at::Tensor dcontext,
at::Tensor dedge_weights,
at::Tensor dmsg,
at::Tensor dunary_update) {
const uint n_iter = msg_min_index.size(0);
const uint n_dir = msg_min_index.size(1);
const uint batch = msg_min_index.size(2);
const uint n_cv = msg_min_index.size(3);
const uint height = msg_min_index.size(4);
const uint width = msg_min_index.size(5);
const uint n_disp = msg_min_index.size(6);
float* context_ptr = context.data<float>();
float* edge_weight_ptr = edge_weights.data<float>();
float* dcost_final_ptr = dcost_final.data<float>();
uchar* msg_min_index_ptr = msg_min_index.data<uchar>(); // (n_iter,n_dir,batch,n_cv,h,w,n_disp)
uchar* msg_norm_index_ptr = msg_norm_index.data<uchar>(); // (n_iter,n_dir,batch,n_cv,h,w)
float* dunary_ptr = dunary.data<float>(); // (batch,n_cv,h,w,n_disp)
float* dcontext_ptr = dcontext.data<float>();
float* dedge_weight_ptr = dedge_weights.data<float>();
float* dmsg_ptr = dmsg.data<float>();
float* dunary_update_ptr = dunary_update.data<float>();
uint n_thread_a_tree = GetNumThreadATree(n_disp, WARP_SIZE);
bool is_backward = true, is_training = true;
// Non-owning per-direction views into the packed device buffers.
std::vector<float*> dmsg_address(n_dir), edge_weight_address(n_dir), dedge_weight_address(n_dir);
std::vector<uchar*> msg_min_index_address(n_dir), msg_norm_index_address(n_dir);
std::vector<Param> param_list;
uint msg_min_size = batch * n_cv * height * width * n_disp; // elems per direction slice
uint msg_min_index_size = n_dir * msg_min_size;
uint msg_norm_size = msg_min_size / n_disp; // one entry per pixel per direction
uint msg_norm_index_size = n_dir * msg_norm_size;
uint n_thread_unary = min(MAX_THREADS_PER_BLOCK, msg_min_size);
uint n_block_unary = (msg_min_size + n_thread_unary - 1) / n_thread_unary; // ceil-div
// (removed unused local `n_thread_msg_norm` from the original)
for (int dir = 0; dir < n_dir; ++dir) {
edge_weight_address[dir] = edge_weight_ptr + dir * msg_norm_size;
dedge_weight_address[dir] = dedge_weight_ptr + dir * msg_norm_size;
dmsg_address[dir] = dmsg_ptr + dir * msg_min_size;
Param param(n_dir, batch, n_cv, height, width, n_disp, dir, rho, is_backward, is_training);
UpdateParam(&param); // BUG FIX: was the mojibake `UpdateParam(¶m);`
param_list.push_back(param);
}
// Seed the gradients: dcost_final fans out to dunary and every dmsg slice.
hipLaunchKernelGGL(( CostAggregateKernelBack), dim3(n_block_unary), dim3(n_thread_unary), 0, 0, param_list[0],
msg_min_size,
dcost_final_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
for (int iter = n_iter - 1; iter >= 0; --iter) {
for (int dir = n_dir - 1; dir >= 0; --dir) {
msg_min_index_address[dir] = msg_min_index_ptr + iter * msg_min_index_size + dir * msg_min_size;
msg_norm_index_address[dir] = msg_norm_index_ptr + iter * msg_norm_index_size + dir * msg_norm_size;
uint n_threads = batch * n_cv * param_list[dir].n_trees * n_thread_a_tree;
uint n_blocks = GetNumBlock(n_threads, n_thread_a_tree);
// Diagonal directions: pick the narrow/wide variant by step aspect ratio.
if (4 <= dir) {
uint h_step_abs = std::abs(param_list[dir].h_step);
uint w_step_abs = std::abs(param_list[dir].w_step);
if (h_step_abs > w_step_abs) {
hipLaunchKernelGGL(( DiagonalKernelNarrowBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
} else {
hipLaunchKernelGGL(( DiagonalKernelWideBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
}
// Vertical directions reuse the wide-diagonal kernel.
if ((2 <= dir) && (dir < 4)) {
hipLaunchKernelGGL(( DiagonalKernelWideBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
// Horizontal directions.
if (dir < 2) {
hipLaunchKernelGGL(( HorizontalKernelBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
// Fold dunary_update back into dunary and all dmsg slices.
hipLaunchKernelGGL(( UpdateUnaryKernelBack), dim3(n_block_unary), dim3(n_thread_unary), 0, 0, param_list[dir],
msg_min_size,
dunary_update_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
// Clear scratch for the next (earlier) direction of the reverse sweep.
hipMemset(dunary_update_ptr, 0, msg_min_size * sizeof(float));
hipMemset(dmsg_address[dir], 0, msg_min_size * sizeof(float));
}
}
// The address vectors are non-owning views; nulling them is purely cosmetic
// (kept from the original for parity — nothing is freed here).
for (uint dir = 0; dir < n_dir; ++dir) {
if (dmsg_address[dir] != nullptr) dmsg_address[dir] = nullptr;
if (msg_min_index_address[dir] != nullptr) msg_min_index_address[dir] = nullptr;
if (msg_norm_index_address[dir] != nullptr) msg_norm_index_address[dir] = nullptr;
if (edge_weight_address[dir] != nullptr) edge_weight_address[dir] = nullptr;
if (dedge_weight_address[dir] != nullptr) dedge_weight_address[dir] = nullptr;
}
}
#ifdef __cplusplus
}
#endif
| 5950e45b9209b95da3f4fa6eb84999400d2ec430.cu | #include "TRWP.h"
#include "commonCUDA.cuh"
#ifdef __cplusplus
extern "C" {
#endif
// Backward of cost_final = unary + sum{msg_update}: the incoming gradient
// dcost_final fans out unchanged to the unary gradient and to every
// direction's message-gradient slice.
// One thread per element of the (batch, n_cv, h, w, n_disp) volume;
// n_thread_required is that element count.
__global__ void CostAggregateKernelBack(const Param param,
                                        const uint n_thread_required,
                                        float* dcost_final_ptr,
                                        float* dunary,
                                        float* dmsg_ptr) {
  uint tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n_thread_required) return;
  float dcost_final_value = dcost_final_ptr[tid];
  dunary[tid] = dcost_final_value;
  // dmsg is packed as (n_dir, batch*n_cv*h*w*n_disp).
  for (uint dir = 0; dir < param.n_dir; ++dir)
    dmsg_ptr[dir * n_thread_required + tid] = dcost_final_value;
  // BUG FIX: the original ended with __syncthreads() placed after a
  // divergent early return (undefined behavior) — removed; each thread
  // only writes its own slots, so no barrier is needed.
}
// Backward of unary_update = rho*(unary + sum{msg} - msg_dir) - msg_dir_inv.
// Accumulates the gradient of unary_update into dunary and into each
// direction's dmsg slice (positive rho-scaled term for the sum, corrections
// for the subtracted msg_dir and msg_dir_inv terms).
// One thread per element of (batch, n_cv, h, w, n_disp).
__global__ void UpdateUnaryKernelBack(const Param param,
                                      const uint n_thread_required,
                                      float* dunary_update_ptr,
                                      float* dunary_ptr,
                                      float* dmsg_ptr) {
  uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*n_cv*h*w*n_disp
  if (tid >= n_thread_required) return;
  uint dir = param.dir, dir_inv = param.dir_inv, n_dir = param.n_dir;
  float rho = param.rho;
  float dunary_update_value = dunary_update_ptr[tid];
  float dunary_update_value_rho = rho * dunary_update_value;
  // BUG FIX (readability/fragility): the original loop counter was also
  // named `dir`, shadowing `param.dir` above; renamed to `d` so the
  // correction lines below unambiguously use the direction from `param`.
  for (uint d = 0; d < n_dir; ++d)
    atomicAdd(&dmsg_ptr[d * n_thread_required + tid], dunary_update_value_rho);
  atomicAdd(&dunary_ptr[tid], dunary_update_value_rho);
  // Remove the rho-scaled contribution wrongly added to msg_dir by the loop,
  // and subtract the unscaled gradient for the msg_dir_inv term.
  atomicAdd(&dmsg_ptr[dir * n_thread_required + tid], -dunary_update_value_rho);
  atomicAdd(&dmsg_ptr[dir_inv * n_thread_required + tid], -dunary_update_value);
  // The trailing __syncthreads() was removed: it followed a divergent early
  // return (undefined behavior) and nothing is read across threads.
}
// Backward pass over horizontal trees (directions 0 and 1): each tree is one
// image row; n_thread_a_tree threads serve one tree, one thread per disparity
// label, walking the row node by node in the w_step direction. Gradients are
// scattered with atomics into dmsg, dunary_update, dcontext and dedge_weights
// because different trees can touch the same context/edge cells.
__global__ void HorizontalKernelBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const uchar* msg_min_index,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
// Block-wide scratch holding one node's dmsg column (one float per label).
// NOTE(review): being block-wide, this presumably assumes all threads of a
// block process the same tree node at a time — confirm against the launch
// configuration in BackwardCUDA.
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
float rho = param.rho;
int w_step = param.w_step;
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
uint current_d = threadIdx.x % n_thread_a_tree;
// n_disp == 21 switches to the full pairwise context table (presumably a
// 21-class segmentation configuration — confirm).
bool enable_seg = (n_disp == 21);
// NOTE(review): these early returns diverge before the __syncthreads() calls
// below; this is only well-defined if whole blocks take the same path —
// worth verifying with the launch geometry.
if (tid >= n_thread_required) return;
if (current_d >= n_disp) return;
// Base offsets into the (batch,n_cv,h,w,n_disp) gradient volume and the
// (batch,n_cv,h,w) edge-weight plane for this thread's sample.
uint unary_base = tid / (n_trees * n_thread_a_tree) * height * width * n_disp;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int h_start = tree_id, w_start = (w_step > 0) ? 0 : (width - 1);
uint edge_base = tid / (n_thread_a_tree * n_trees) * height * width;
// The front node is in accordance with forward pass, use + *_step
// msg_min_index(batch,n_cv,h,w,n_disp)
for (uint i = 0; i < width; ++i) {
int current_node_h = h_start;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_w && current_node_w < width &&
0 <= front_node_w && front_node_w < width) {
uint msg_offset_base = unary_base + current_node_h * width * n_disp + current_node_w * n_disp;
uint msg_offset = msg_offset_base + current_d;
uint msg_index_offset = tid / n_thread_a_tree * width + current_node_w;
uint edge_weight_offset = edge_base + current_node_h * width + current_node_w;
// Stage this node's dmsg column, then undo the forward normalization.
dmsg_update_shared[current_d] = dmsg[msg_offset];
__syncthreads();
MsgNormNaiveBack(param.n_disp, current_d, msg_norm_index[msg_index_offset], dmsg_update_shared);
// Route the gradient to the label the forward min-selection picked here.
uint front_d = uint(msg_min_index[msg_offset]);
float value = dmsg_update_shared[current_d];
float value_rho = rho * value;
uint offset = unary_base + front_node_h * width * n_disp + front_node_w * n_disp + front_d;
uint context_offset = 0;
if (enable_seg)
// Full pairwise table, addressed by the (min,max) label pair.
context_offset = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
// Stereo-style context indexed by absolute label difference.
context_offset = std::abs(int(current_d) - int(front_d));
atomicAdd(&dunary_update[offset], value);
atomicAdd(&dmsg[offset], value_rho);
// Product rule: d(edge_weight * context) contributes to both factors.
atomicAdd(&dedge_weights[edge_weight_offset], context[context_offset] * value);
atomicAdd(&dcontext[context_offset], edge_weights[edge_weight_offset] * value);
__syncthreads();
}
}
}
// Backward pass over "narrow" diagonal trees (|h_step| > |w_step|): chains
// step more than one row per column, so start coordinates are derived from a
// modulus/ratio decomposition of the tree id. One thread per disparity label
// per tree; gradients are scattered with atomics as in HorizontalKernelBack.
__global__ void DiagonalKernelNarrowBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const uchar* msg_min_index,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
// Block-wide scratch for one node's dmsg column (one float per label).
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
float rho = param.rho;
int h_step = param.h_step, w_step = param.w_step;
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*n_trees*n_thread_a_tree
uint current_d = threadIdx.x % n_thread_a_tree;
uint h_step_abs = std::abs(h_step);
// n_disp == 21 selects the full pairwise context table (presumably a
// segmentation configuration — confirm).
bool enable_seg = (n_disp == 21);
// NOTE(review): early returns before the __syncthreads() below are only
// well-defined if whole blocks take the same path — worth verifying.
if (tid >= n_thread_required) return;
if (current_d >= n_disp) return;
uint unary_base = tid / (n_trees * n_thread_a_tree) * height * width * n_disp;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
// Shift tree ids so trees entering from the top/side map to valid starts.
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
int common1 = tree_id_shift % h_step_abs;
float common2 = float(tree_id_shift) / float(h_step_abs); // This must be float NOT int, will affect ceilf and floorf
int h_start = 0, w_start = 0;
uint edge_base = tid / (n_thread_a_tree * n_trees) * height * width;
// Use a common mode to calculate start points for shortest chains, read my notes for clarity
if (w_step > 0) {
h_start = (h_step_abs - common1) % h_step_abs;
w_start = ceilf(common2);
} else {
h_start = common1;
w_start = floorf(common2);
}
// Mirror the start row for upward sweeps.
if (h_step < 0) h_start = height - 1 - h_start;
// Maximum number of h_step-sized hops that fit in the image height.
uint roll_step = (height - 1) / h_step_abs;
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width) {
uint msg_offset_base = unary_base + current_node_h * width * n_disp + current_node_w * n_disp;
uint msg_offset = msg_offset_base + current_d;
uint msg_index_offset = tid / (n_thread_a_tree * n_trees) * height * width + current_node_h * width + current_node_w;
uint edge_weight_offset = edge_base + current_node_h * width + current_node_w;
// Stage this node's dmsg column, then undo the forward normalization.
dmsg_update_shared[current_d] = dmsg[msg_offset];
__syncthreads();
MsgNormNaiveBack(param.n_disp, current_d, msg_norm_index[msg_index_offset], dmsg_update_shared);
// Route the gradient to the label the forward min-selection picked here.
uint front_d = uint(msg_min_index[msg_offset]);
float value = dmsg_update_shared[current_d];
float value_rho = rho * value;
uint offset = unary_base + front_node_h * width * n_disp + front_node_w * n_disp + front_d;
uint context_offset = 0;
if (enable_seg)
context_offset = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
context_offset = std::abs(int(current_d) - int(front_d));
atomicAdd(&dunary_update[offset], value);
atomicAdd(&dmsg[offset], value_rho);
// Product rule: d(edge_weight * context) contributes to both factors.
atomicAdd(&dedge_weights[edge_weight_offset], context[context_offset] * value);
atomicAdd(&dcontext[context_offset], edge_weights[edge_weight_offset] * value);
__syncthreads();
}
}
}
// Backward pass over "wide" diagonal trees (|h_step| <= |w_step|) — also
// reused by BackwardCUDA for the purely vertical directions (dir 2 and 3).
// Start rows are fixed at the top/bottom edge; start columns are the shifted
// tree id. One thread per disparity label per tree; gradients are scattered
// with atomics as in HorizontalKernelBack.
__global__ void DiagonalKernelWideBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const uchar* msg_min_index,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
// Block-wide scratch for one node's dmsg column (one float per label).
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
float rho = param.rho;
int h_step = param.h_step, w_step = param.w_step;
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
uint current_d = threadIdx.x % n_thread_a_tree;
// n_disp == 21 selects the full pairwise context table (presumably a
// segmentation configuration — confirm).
bool enable_seg = (n_disp == 21);
// NOTE(review): early returns before the __syncthreads() below are only
// well-defined if whole blocks take the same path — worth verifying.
if (tid >= n_thread_required) return;
if (current_d >= n_disp) return;
uint unary_base = tid / (n_trees * n_thread_a_tree) * height * width * n_disp;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
// Shift tree ids so trees entering from the side map to valid start columns.
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
uint h_step_abs = std::abs(h_step), roll_step = (height - 1) / h_step_abs;
int h_start = (h_step > 0) ? 0 : (height - 1), w_start = tree_id_shift;
uint edge_base = tid / (n_thread_a_tree * n_trees) * height * width;
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width) {
uint msg_offset_base = unary_base + current_node_h * width * n_disp + current_node_w * n_disp;
uint msg_offset = msg_offset_base + current_d;
uint msg_index_offset = tid / (n_thread_a_tree * n_trees) * height * width + current_node_h * width + current_node_w;
uint edge_weight_offset = edge_base + current_node_h * width + current_node_w;
// Stage this node's dmsg column, then undo the forward normalization.
dmsg_update_shared[current_d] = dmsg[msg_offset];
__syncthreads();
MsgNormNaiveBack(param.n_disp, current_d, msg_norm_index[msg_index_offset], dmsg_update_shared);
// Route the gradient to the label the forward min-selection picked here.
uint front_d = uint(msg_min_index[msg_offset]);
float value = dmsg_update_shared[current_d];
float value_rho = rho * value;
uint offset = unary_base + front_node_h * width * n_disp + front_node_w * n_disp + front_d;
uint context_offset = 0;
if (enable_seg)
context_offset = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
context_offset = std::abs(int(current_d) - int(front_d));
atomicAdd(&dunary_update[offset], value);
atomicAdd(&dmsg[offset], value_rho);
// Product rule: d(edge_weight * context) contributes to both factors.
atomicAdd(&dedge_weights[edge_weight_offset], context[context_offset] * value);
atomicAdd(&dcontext[context_offset], edge_weights[edge_weight_offset] * value);
__syncthreads();
}
}
}
// Backward pass of TRWP message passing (CUDA build).
// Replays the forward schedule in reverse — iterations n_iter-1..0, and
// within each iteration directions n_dir-1..0. For each direction it runs
// the matching tree kernel (horizontal / vertical / diagonal) to
// back-propagate through that sweep, folds dunary_update back into
// dunary/dmsg via UpdateUnaryKernelBack, then clears the scratch buffers.
// All tensors are expected to be contiguous CUDA tensors;
// msg_min_index has shape (n_iter, n_dir, batch, n_cv, h, w, n_disp) and
// drives all size computations.
void BackwardCUDA(const float rho,
const at::Tensor context,
const at::Tensor edge_weights,
const at::Tensor dcost_final,
const at::Tensor msg_min_index,
const at::Tensor msg_norm_index,
at::Tensor dunary,
at::Tensor dcontext,
at::Tensor dedge_weights,
at::Tensor dmsg,
at::Tensor dunary_update) {
const uint n_iter = msg_min_index.size(0);
const uint n_dir = msg_min_index.size(1);
const uint batch = msg_min_index.size(2);
const uint n_cv = msg_min_index.size(3);
const uint height = msg_min_index.size(4);
const uint width = msg_min_index.size(5);
const uint n_disp = msg_min_index.size(6);
float* context_ptr = context.data<float>();
float* edge_weight_ptr = edge_weights.data<float>();
float* dcost_final_ptr = dcost_final.data<float>();
uchar* msg_min_index_ptr = msg_min_index.data<uchar>(); // (n_iter,n_dir,batch,n_cv,h,w,n_disp)
uchar* msg_norm_index_ptr = msg_norm_index.data<uchar>(); // (n_iter,n_dir,batch,n_cv,h,w)
float* dunary_ptr = dunary.data<float>(); // (batch,n_cv,h,w,n_disp)
float* dcontext_ptr = dcontext.data<float>();
float* dedge_weight_ptr = dedge_weights.data<float>();
float* dmsg_ptr = dmsg.data<float>();
float* dunary_update_ptr = dunary_update.data<float>();
uint n_thread_a_tree = GetNumThreadATree(n_disp, WARP_SIZE);
bool is_backward = true, is_training = true;
// Non-owning per-direction views into the packed device buffers.
std::vector<float*> dmsg_address(n_dir), edge_weight_address(n_dir), dedge_weight_address(n_dir);
std::vector<uchar*> msg_min_index_address(n_dir), msg_norm_index_address(n_dir);
std::vector<Param> param_list;
uint msg_min_size = batch * n_cv * height * width * n_disp; // elems per direction slice
uint msg_min_index_size = n_dir * msg_min_size;
uint msg_norm_size = msg_min_size / n_disp; // one entry per pixel per direction
uint msg_norm_index_size = n_dir * msg_norm_size;
uint n_thread_unary = min(MAX_THREADS_PER_BLOCK, msg_min_size);
uint n_block_unary = (msg_min_size + n_thread_unary - 1) / n_thread_unary; // ceil-div
// (removed unused local `n_thread_msg_norm` from the original)
for (int dir = 0; dir < n_dir; ++dir) {
edge_weight_address[dir] = edge_weight_ptr + dir * msg_norm_size;
dedge_weight_address[dir] = dedge_weight_ptr + dir * msg_norm_size;
dmsg_address[dir] = dmsg_ptr + dir * msg_min_size;
Param param(n_dir, batch, n_cv, height, width, n_disp, dir, rho, is_backward, is_training);
UpdateParam(&param); // BUG FIX: was the mojibake `UpdateParam(¶m);`
param_list.push_back(param);
}
// Seed the gradients: dcost_final fans out to dunary and every dmsg slice.
CostAggregateKernelBack<<<n_block_unary, n_thread_unary>>>(param_list[0],
msg_min_size,
dcost_final_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
for (int iter = n_iter - 1; iter >= 0; --iter) {
for (int dir = n_dir - 1; dir >= 0; --dir) {
msg_min_index_address[dir] = msg_min_index_ptr + iter * msg_min_index_size + dir * msg_min_size;
msg_norm_index_address[dir] = msg_norm_index_ptr + iter * msg_norm_index_size + dir * msg_norm_size;
uint n_threads = batch * n_cv * param_list[dir].n_trees * n_thread_a_tree;
uint n_blocks = GetNumBlock(n_threads, n_thread_a_tree);
// Diagonal directions: pick the narrow/wide variant by step aspect ratio.
if (4 <= dir) {
uint h_step_abs = std::abs(param_list[dir].h_step);
uint w_step_abs = std::abs(param_list[dir].w_step);
if (h_step_abs > w_step_abs) {
DiagonalKernelNarrowBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
} else {
DiagonalKernelWideBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
}
// Vertical directions reuse the wide-diagonal kernel.
if ((2 <= dir) && (dir < 4)) {
DiagonalKernelWideBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
// Horizontal directions.
if (dir < 2) {
HorizontalKernelBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_min_index_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
// Fold dunary_update back into dunary and all dmsg slices.
UpdateUnaryKernelBack<<<n_block_unary, n_thread_unary>>>(param_list[dir],
msg_min_size,
dunary_update_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
// Clear scratch for the next (earlier) direction of the reverse sweep.
cudaMemset(dunary_update_ptr, 0, msg_min_size * sizeof(float));
cudaMemset(dmsg_address[dir], 0, msg_min_size * sizeof(float));
}
}
// The address vectors are non-owning views; nulling them is purely cosmetic
// (kept from the original for parity — nothing is freed here).
for (uint dir = 0; dir < n_dir; ++dir) {
if (dmsg_address[dir] != nullptr) dmsg_address[dir] = nullptr;
if (msg_min_index_address[dir] != nullptr) msg_min_index_address[dir] = nullptr;
if (msg_norm_index_address[dir] != nullptr) msg_norm_index_address[dir] = nullptr;
if (edge_weight_address[dir] != nullptr) edge_weight_address[dir] = nullptr;
if (dedge_weight_address[dir] != nullptr) dedge_weight_address[dir] = nullptr;
}
}
#ifdef __cplusplus
}
#endif
|
2a28e964e7f326fbeb2a58c01f07ec301dfa7c9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cassert>
#include <cmath>
#include <chrono>
#include <iostream>
#include "utils/utils.h"
#define Z 2
#define Y 5
#define X 5
#define xBound X / 2
#define yBound Y / 2
#define SCALE 8
// Two 5x5 integer gradient kernels (extended Sobel-style operators):
// index 0 responds to vertical intensity change, index 1 to horizontal.
// Stored in constant memory so warp-uniform reads are broadcast.
__constant__ const char filter[Z][Y][X] = { { { -1, -4, -6, -4, -1 },
{ -2, -8, -12, -8, -2 },
{ 0, 0, 0, 0, 0 },
{ 2, 8, 12, 8, 2 },
{ 1, 4, 6, 4, 1 } },
{ { -1, -2, 0, 2, 1 },
{ -4, -8, 0, 8, 4 },
{ -6, -12, 0, 12, 6 },
{ -4, -8, 0, 8, 4 },
{ -1, -2, 0, 2, 1 } } };
// Half-open range test: returns 1 iff lower <= val < upper, else 0.
__device__ inline int bound_check(int val, int lower, int upper) {
    return (lower <= val && val < upper) ? 1 : 0;
}
// One thread per image row: applies the two 5x5 gradient filters to every
// pixel of row `y` and writes the scaled, clamped gradient magnitude to t.
// s/t are interleaved images with `channels` bytes per pixel (B,G,R order
// in the first three channels, as indexed below).
__global__ void sobel(unsigned char *s, unsigned char *t, unsigned height, unsigned width,
                      unsigned channels) {
    double val[Z][3];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: the original guard was `tid > height`, which let the thread
    // with tid == height process a nonexistent row and write one full row
    // past the end of both buffers.
    if (tid >= height) {
        return;
    }
    int y = tid;
    for (int x = 0; x < width; ++x) {
        /* Z axis of filter */
        for (int i = 0; i < Z; ++i) {
            val[i][2] = 0.;
            val[i][1] = 0.;
            val[i][0] = 0.;
            /* Y and X axis of filter */
            for (int v = -yBound; v <= yBound; ++v) {
                for (int u = -xBound; u <= xBound; ++u) {
                    // Skip taps that fall outside the image (border handling).
                    if (bound_check(x + u, 0, width) &&
                        bound_check(y + v, 0, height)) {
                        const unsigned char R =
                            s[channels * (width * (y + v) + (x + u)) + 2];
                        const unsigned char G =
                            s[channels * (width * (y + v) + (x + u)) + 1];
                        const unsigned char B =
                            s[channels * (width * (y + v) + (x + u)) + 0];
                        // NOTE(review): the filter is indexed [u+xBound][v+yBound],
                        // i.e. transposed relative to its [Z][Y][X] declaration;
                        // kept as-is to preserve the program's original output.
                        val[i][2] += R * filter[i][u + xBound][v + yBound];
                        val[i][1] += G * filter[i][u + xBound][v + yBound];
                        val[i][0] += B * filter[i][u + xBound][v + yBound];
                    }
                }
            }
        }
        // Gradient magnitude per channel: sqrt of the sum of squared responses.
        double totalR = 0.;
        double totalG = 0.;
        double totalB = 0.;
        for (int i = 0; i < Z; ++i) {
            totalR += val[i][2] * val[i][2];
            totalG += val[i][1] * val[i][1];
            totalB += val[i][0] * val[i][0];
        }
        totalR = sqrt(totalR) / SCALE;
        totalG = sqrt(totalG) / SCALE;
        totalB = sqrt(totalB) / SCALE;
        // Clamp to the 8-bit range before narrowing.
        const unsigned char cR = (totalR > 255.) ? 255 : totalR;
        const unsigned char cG = (totalG > 255.) ? 255 : totalG;
        const unsigned char cB = (totalB > 255.) ? 255 : totalB;
        t[channels * (width * y + x) + 2] = cR;
        t[channels * (width * y + x) + 1] = cG;
        t[channels * (width * y + x) + 0] = cB;
    }
}
// Usage: ./sobel <input.png> <output.png>
// Reads a PNG, runs the sobel kernel (one thread per row) on the device,
// and writes the filtered image back out.
int main(int argc, char **argv) {
    assert(argc == 3);
    unsigned height, width, channels;
    unsigned char *src = NULL, *dst;
    unsigned char *d_src, *d_dst;
    /* read the image to src, and get height, width, channels */
    read_png(argv[1], &src, &height, &width, &channels);
    const size_t img_bytes =
        (size_t)height * width * channels * sizeof(unsigned char);
    dst = (unsigned char *)malloc(img_bytes);
    /* The kernel takes height/width/channels by value, so the original's
       d_height/d_width/d_channels device allocations were dead code (and
       leaked); they have been removed. */
    hipMalloc(&d_src, img_bytes);
    hipMalloc(&d_dst, img_bytes);
    hipMemcpy(d_src, src, img_bytes, hipMemcpyHostToDevice);
    /* one thread per image row; ceil-divide so every row is covered
       (the original hardcoded 256 and always launched one extra block) */
    const int num_threads = 256;
    const int num_blocks = (height + num_threads - 1) / num_threads;
    hipLaunchKernelGGL(( sobel), dim3(num_blocks), dim3(num_threads), 0, 0, d_src, d_dst, height, width, channels);
    /* blocking copy also synchronizes with the kernel */
    hipMemcpy(dst, d_dst, img_bytes, hipMemcpyDeviceToHost);
    write_png(argv[2], dst, height, width, channels);
    /* release device and host memory (the original leaked everything);
       freeing src assumes read_png allocates with malloc — confirm in utils */
    hipFree(d_src);
    hipFree(d_dst);
    free(src);
    free(dst);
    return 0;
}
| 2a28e964e7f326fbeb2a58c01f07ec301dfa7c9d.cu | #include <cstdlib>
#include <cassert>
#include <cmath>
#include <chrono>
#include <iostream>
#include "utils/utils.h"
#define Z 2
#define Y 5
#define X 5
#define xBound X / 2
#define yBound Y / 2
#define SCALE 8
__constant__ const char filter[Z][Y][X] = { { { -1, -4, -6, -4, -1 },
{ -2, -8, -12, -8, -2 },
{ 0, 0, 0, 0, 0 },
{ 2, 8, 12, 8, 2 },
{ 1, 4, 6, 4, 1 } },
{ { -1, -2, 0, 2, 1 },
{ -4, -8, 0, 8, 4 },
{ -6, -12, 0, 12, 6 },
{ -4, -8, 0, 8, 4 },
{ -1, -2, 0, 2, 1 } } };
__device__ inline int bound_check(int val, int lower, int upper) {
if (val >= lower && val < upper)
return 1;
else
return 0;
}
__global__ void sobel(unsigned char *s, unsigned char *t, unsigned height, unsigned width,
unsigned channels) {
double val[Z][3];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid > height) {
return;
}
int y = tid;
//double val[Z][3];
for (int x = 0; x < width; ++x) {
/* Z axis of filter */
for (int i = 0; i < Z; ++i) {
val[i][2] = 0.;
val[i][1] = 0.;
val[i][0] = 0.;
/* Y and X axis of filter */
for (int v = -yBound; v <= yBound; ++v) {
for (int u = -xBound; u <= xBound; ++u) {
if (bound_check(x + u, 0, width) &&
bound_check(y + v, 0, height)) {
const unsigned char R =
s[channels * (width * (y + v) + (x + u)) + 2];
const unsigned char G =
s[channels * (width * (y + v) + (x + u)) + 1];
const unsigned char B =
s[channels * (width * (y + v) + (x + u)) + 0];
val[i][2] += R * filter[i][u + xBound][v + yBound];
val[i][1] += G * filter[i][u + xBound][v + yBound];
val[i][0] += B * filter[i][u + xBound][v + yBound];
}
}
}
}
double totalR = 0.;
double totalG = 0.;
double totalB = 0.;
for (int i = 0; i < Z; ++i) {
totalR += val[i][2] * val[i][2];
totalG += val[i][1] * val[i][1];
totalB += val[i][0] * val[i][0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.) ? 255 : totalR;
const unsigned char cG = (totalG > 255.) ? 255 : totalG;
const unsigned char cB = (totalB > 255.) ? 255 : totalB;
t[channels * (width * y + x) + 2] = cR;
t[channels * (width * y + x) + 1] = cG;
t[channels * (width * y + x) + 0] = cB;
}
}
int main(int argc, char **argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char *src = NULL, *dst;
unsigned *d_height, *d_width, *d_channels;
unsigned char *d_src, *d_dst;
//cudaMemcpy(&height, &d_height, sizeof(unsigned), cudaMemcpyDeviceToHost);
/* read the image to src, and get height, width, channels */
read_png(argv[1], &src, &height, &width, &channels);
dst = (unsigned char *)malloc(height * width * channels *
sizeof(unsigned char));
//printf("%d\n", height);
cudaMalloc((void **)&d_height, sizeof(unsigned));
cudaMalloc((void **)&d_width, sizeof(unsigned));
cudaMalloc((void **)&d_channels, sizeof(unsigned));
cudaMemcpy(d_height, &height, sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemcpy(d_width, &width, sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemcpy(d_channels, &channels, sizeof(unsigned), cudaMemcpyHostToDevice);
//unsigned tmp;
//cudaMemcpy(&tmp, d_channels, sizeof(unsigned), cudaMemcpyDeviceToHost);
//printf("%d\n%d\n", tmp, height);
cudaMalloc(&d_src, height * width * channels * sizeof(unsigned char));
cudaMalloc(&d_dst, height * width * channels * sizeof(unsigned char));
cudaMemcpy(d_src, src, height * width * channels * sizeof(unsigned char), cudaMemcpyHostToDevice);
printf("%d\n", height);
//printf("%d\n", width);
/* computation */
// sobel(src, dst, height, width, channels);
const int num_threads = 256;
const int num_blocks = height / 256 +1;
sobel<<<num_blocks, num_threads>>>(d_src, d_dst, height, width, channels);
cudaMemcpy(dst, d_dst, height * width * channels * sizeof(unsigned char), cudaMemcpyDeviceToHost);
write_png(argv[2], dst, height, width, channels);
return 0;
}
|
8c41f1677966a4061957166d4ae17f8a1617274a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gradientLayersKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
hipMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
hipMalloc(&d_Src, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int imageD = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gradientLayersKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gradientLayersKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gradientLayersKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8c41f1677966a4061957166d4ae17f8a1617274a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gradientLayersKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
cudaMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
cudaMalloc(&d_Src, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int imageD = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gradientLayersKernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gradientLayersKernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gradientLayersKernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b4c91f63976ecd0611622776f5c23ac96133b944.hip | // !!! This is a file automatically generated by hipify!!!
#include "../../../gemtc.cu"
#include<stdio.h>
#include<stdlib.h>
#define BIN_COUNT 256
//#define NUM_RUNS 5
//#define AVG_RUNS 10.0
#define BYTE_COUNT 25600
#include <helper_functions.h>
#include <hip/hip_runtime.h>
int main(int argc, char **argv){
int NUM_TASKS, LOOP_SIZE;
uint byteCount = BYTE_COUNT;
int Overfill = 0;
if (argc != 3){
printf("invalid parameters, use: <NUM_INPUTS> <NUM_TEST>\n");
return -1;
}
int NUM_RUNS = atoi(argv[1]);
int AVG_RUNS = atoi(argv[2]);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
StopWatchInterface *hTimer = NULL;
int iter,warps;
int blocks = devProp.multiProcessorCount;
sdkCreateTimer(&hTimer);
//Starting Iterating
for(iter=0; iter < NUM_RUNS; iter++) {
if(Overfill==1){
warps = devProp.maxThreadsPerBlock/32;
}
if(Overfill==0){
int coresPerSM = _ConvertSMVer2Cores(devProp.major, devProp.minor);
warps = coresPerSM/16; //A warp runs on 16 cores
}
if(Overfill==2){
warps =1;
blocks = 1;
}
NUM_TASKS = warps * blocks;
LOOP_SIZE = 1;
byteCount = byteCount / NUM_TASKS;
//gemtcSetup(25600, Overfill);
int d_size = sizeof(unsigned int) * byteCount;
int h_size = sizeof(int) * BIN_COUNT;
int size = 1 + d_size + h_size;
int j;
int k;
uint *h_params = (uint *) malloc(size);
double dAvgSecs;
srand(2009);
h_params[0] = byteCount;
//printf("ByteCount :%d , NUM_TASKS : %d \n", byteCount,NUM_TASKS);
for (uint i = 1; i <= byteCount; i++)
{
h_params[i] = rand() % 256;
}
gemtcSetup(25600, Overfill);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(k=0; k < AVG_RUNS ; k++) {
for(j=0; j <NUM_TASKS; j++){
//for(i=0; i < LOOP_SIZE; i++){
uint *d_params = (uint *) gemtcGPUMalloc(size);
gemtcMemcpyHostToDevice(d_params, h_params, size);
gemtcPush(34, 32, j*LOOP_SIZE, d_params);
//}
}
void *ret=NULL;
int id;
while(ret==NULL){
gemtcPoll(&id, &ret);
}
gemtcMemcpyDeviceToHost(h_params, ret, size);
gemtcGPUFree(ret);
}
free(h_params);
sdkStopTimer(&hTimer);
dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer)/(double) AVG_RUNS;
unsigned int problem_size = (byteCount * 4) * NUM_TASKS;
//dAvgSecs = dAvgSecs/(NUM_TASKS/LOOP_SIZE);
printf("%u \t%.4f\t%.5f\n",
problem_size,(1.0e-6 * (double) problem_size / dAvgSecs), dAvgSecs);
byteCount = byteCount * NUM_TASKS * 10;
gemtcCleanup();
}
sdkDeleteTimer(&hTimer);
return 0;
}
| b4c91f63976ecd0611622776f5c23ac96133b944.cu | #include "../../../gemtc.cu"
#include<stdio.h>
#include<stdlib.h>
#define BIN_COUNT 256
//#define NUM_RUNS 5
//#define AVG_RUNS 10.0
#define BYTE_COUNT 25600
#include <helper_functions.h>
#include <cuda_runtime.h>
int main(int argc, char **argv){
int NUM_TASKS, LOOP_SIZE;
uint byteCount = BYTE_COUNT;
int Overfill = 0;
if (argc != 3){
printf("invalid parameters, use: <NUM_INPUTS> <NUM_TEST>\n");
return -1;
}
int NUM_RUNS = atoi(argv[1]);
int AVG_RUNS = atoi(argv[2]);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
StopWatchInterface *hTimer = NULL;
int iter,warps;
int blocks = devProp.multiProcessorCount;
sdkCreateTimer(&hTimer);
//Starting Iterating
for(iter=0; iter < NUM_RUNS; iter++) {
if(Overfill==1){
warps = devProp.maxThreadsPerBlock/32;
}
if(Overfill==0){
int coresPerSM = _ConvertSMVer2Cores(devProp.major, devProp.minor);
warps = coresPerSM/16; //A warp runs on 16 cores
}
if(Overfill==2){
warps =1;
blocks = 1;
}
NUM_TASKS = warps * blocks;
LOOP_SIZE = 1;
byteCount = byteCount / NUM_TASKS;
//gemtcSetup(25600, Overfill);
int d_size = sizeof(unsigned int) * byteCount;
int h_size = sizeof(int) * BIN_COUNT;
int size = 1 + d_size + h_size;
int j;
int k;
uint *h_params = (uint *) malloc(size);
double dAvgSecs;
srand(2009);
h_params[0] = byteCount;
//printf("ByteCount :%d , NUM_TASKS : %d \n", byteCount,NUM_TASKS);
for (uint i = 1; i <= byteCount; i++)
{
h_params[i] = rand() % 256;
}
gemtcSetup(25600, Overfill);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(k=0; k < AVG_RUNS ; k++) {
for(j=0; j <NUM_TASKS; j++){
//for(i=0; i < LOOP_SIZE; i++){
uint *d_params = (uint *) gemtcGPUMalloc(size);
gemtcMemcpyHostToDevice(d_params, h_params, size);
gemtcPush(34, 32, j*LOOP_SIZE, d_params);
//}
}
void *ret=NULL;
int id;
while(ret==NULL){
gemtcPoll(&id, &ret);
}
gemtcMemcpyDeviceToHost(h_params, ret, size);
gemtcGPUFree(ret);
}
free(h_params);
sdkStopTimer(&hTimer);
dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer)/(double) AVG_RUNS;
unsigned int problem_size = (byteCount * 4) * NUM_TASKS;
//dAvgSecs = dAvgSecs/(NUM_TASKS/LOOP_SIZE);
printf("%u \t%.4f\t%.5f\n",
problem_size,(1.0e-6 * (double) problem_size / dAvgSecs), dAvgSecs);
byteCount = byteCount * NUM_TASKS * 10;
gemtcCleanup();
}
sdkDeleteTimer(&hTimer);
return 0;
}
|
e1a6f41f24771631cb8cbc75e1b206a690468689.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include <stdlib.h>
#include<malloc.h>
#include <time.h>
#include<cuda.h>
typedef char* string;
__global__
void multCU(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if((row < rowsA) && (col < colsB)) {
int sum = 0;
for(int k = 0; k < rowsB; k++) {
sum += A[row * colsA + k] * B[k * colsB + col];
}
C[row * colsB + col] = sum;
}
}
__host__
void mult(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
int i, j, k;
for(i = 0; i < rowsA; i++){
for(j = 0; j< colsB; j++){
int sum = 0;
for(k = 0; k < rowsB; k++){
sum += A[i * colsA + k] * B[ k * colsB + j];
}
C[i * colsB + j] = sum;
}
}
}
__host__
bool compare(float *A, float *B, int rows, int cols){
int i, j;
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
if (A[ i * cols + j] != B[i * cols + j]) return false;
}
}
return true;
}
__host__
void print(float* M, int rows, int cols){
printf("---------------print matrix--------------\n");
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
printf("%f ", M[i * cols + j]);
}
printf("\n");
}
}
__host__
void receive(float *M, FILE *stream, int rows, int cols) {
int i, j;
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
fscanf(stream, "%f,", &M[i * cols + j]);
}
}
fclose(stream);
}
__host__
void write(float *M, int rows, int cols, string file_name) {
FILE *stream;
int i, j;
stream = fopen(file_name, "w");
fprintf(stream, "%d\n", rows);
fprintf(stream, "%d\n", cols);
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
if (j + 1 == cols) fprintf(stream, "%.2f", M[i * cols + j]);
else fprintf(stream, "%.2f,", M[i * cols + j]);
}
fprintf(stream, "%s\n","");
}
fclose(stream);
}
int main(int argc, char** argv){
if (argc != 3) {
printf("Must be called with the names of the files\n");
return 1;
}
//-------------------------------CPU--------------------------------------
time_t start, end;
float *A, *B, *C;
int rowsA, colsA, rowsB, colsB;
double timeCPU, timeGPU;
FILE *f1, *f2;
f1 = fopen(argv[1], "r");
f2 = fopen(argv[2], "r");
fscanf(f1, "%d", &rowsA);
fscanf(f1, "%d", &colsA);
fscanf(f2, "%d", &rowsB);
fscanf(f2, "%d", &colsB);
A = (float*)malloc(rowsA * colsA * sizeof(float));
B = (float*)malloc(rowsB * colsB * sizeof(float));
C = (float*)malloc(rowsA * colsB * sizeof(float));
receive(A, f1, rowsA, colsA);
// printf("rowsA: %d\n", rowsA);
// printf("colsA: %d\n", colsA);
// print(A, rowsA, colsA);
receive(B, f2, rowsB, colsB);
// printf("rowsA: %d\n", rowsB);
// printf("colsA: %d\n", colsB);
// print(B, rowsB, colsB);
if (colsA != rowsB) return 1; // must be equal
start = clock();
mult(A, rowsA, colsA, B, rowsB, colsB, C);
end = clock();
// print(C, rowsA, colsB);
timeCPU = difftime(end, start);
printf ("Elasped time in CPU: %.2lf seconds.\n", timeCPU);
// write(C, rowsA, colsB, "CPU.out");
//-------------------------------GPU--------------------------------------
hipError_t error = hipSuccess;
float *d_A, *d_B, *d_C, *h_C;
h_C = (float*)malloc(rowsA * colsB * sizeof(float));
error = hipMalloc((void**)&d_A, rowsA * colsA * sizeof(float));
if (error != hipSuccess) {
printf("Error allocating memory to d_A");
return 1;
}
error = hipMalloc((void**)&d_B, rowsB * colsB * sizeof(float));
if (error != hipSuccess) {
printf("Error allocating memory to d_B");
return 1;
}
error = hipMalloc((void**)&d_C, rowsA * colsB * sizeof(float));
if (error != hipSuccess) {
printf("Error allocating memory to d_C");
return 1;
}
hipMemcpy(d_A, A, rowsA * colsA * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, rowsB * colsB * sizeof(float), hipMemcpyHostToDevice);
int blockSize = 32;
dim3 dimblock(blockSize, blockSize, 1);
dim3 dimGrid(ceil((colsB) / float(blockSize)), ceil((rowsA) / float(blockSize)), 1);
start = clock();
hipLaunchKernelGGL(( multCU), dim3(dimGrid),dim3(dimblock), 0, 0, d_A, rowsA, colsA, d_B, rowsB, colsB, d_C);
hipDeviceSynchronize();
end = clock();
timeGPU = difftime(end, start);
printf ("Elasped time in GPU: %.2lf seconds.\n", timeGPU);
hipMemcpy(h_C, d_C, rowsA * colsB * sizeof(float), hipMemcpyDeviceToHost);
// print(h_C, rowsA, colsB);
if (!compare(h_C, C, rowsA, colsB)) {
printf("Error multiplying\n");
} else {
printf("Acceleration time: %lf\n", timeCPU / timeGPU);
// write(h_C, rowsA, colsB, "GPU.out");
}
free(A); free(B); free(C); free(h_C);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
return 0;
}
| e1a6f41f24771631cb8cbc75e1b206a690468689.cu | #include<stdio.h>
#include <stdlib.h>
#include<malloc.h>
#include <time.h>
#include<cuda.h>
typedef char* string;
__global__
void multCU(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if((row < rowsA) && (col < colsB)) {
int sum = 0;
for(int k = 0; k < rowsB; k++) {
sum += A[row * colsA + k] * B[k * colsB + col];
}
C[row * colsB + col] = sum;
}
}
__host__
void mult(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
int i, j, k;
for(i = 0; i < rowsA; i++){
for(j = 0; j< colsB; j++){
int sum = 0;
for(k = 0; k < rowsB; k++){
sum += A[i * colsA + k] * B[ k * colsB + j];
}
C[i * colsB + j] = sum;
}
}
}
__host__
bool compare(float *A, float *B, int rows, int cols){
int i, j;
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
if (A[ i * cols + j] != B[i * cols + j]) return false;
}
}
return true;
}
__host__
void print(float* M, int rows, int cols){
printf("---------------print matrix--------------\n");
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
printf("%f ", M[i * cols + j]);
}
printf("\n");
}
}
__host__
void receive(float *M, FILE *stream, int rows, int cols) {
int i, j;
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
fscanf(stream, "%f,", &M[i * cols + j]);
}
}
fclose(stream);
}
__host__
void write(float *M, int rows, int cols, string file_name) {
FILE *stream;
int i, j;
stream = fopen(file_name, "w");
fprintf(stream, "%d\n", rows);
fprintf(stream, "%d\n", cols);
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
if (j + 1 == cols) fprintf(stream, "%.2f", M[i * cols + j]);
else fprintf(stream, "%.2f,", M[i * cols + j]);
}
fprintf(stream, "%s\n","");
}
fclose(stream);
}
int main(int argc, char** argv){
if (argc != 3) {
printf("Must be called with the names of the files\n");
return 1;
}
//-------------------------------CPU--------------------------------------
time_t start, end;
float *A, *B, *C;
int rowsA, colsA, rowsB, colsB;
double timeCPU, timeGPU;
FILE *f1, *f2;
f1 = fopen(argv[1], "r");
f2 = fopen(argv[2], "r");
fscanf(f1, "%d", &rowsA);
fscanf(f1, "%d", &colsA);
fscanf(f2, "%d", &rowsB);
fscanf(f2, "%d", &colsB);
A = (float*)malloc(rowsA * colsA * sizeof(float));
B = (float*)malloc(rowsB * colsB * sizeof(float));
C = (float*)malloc(rowsA * colsB * sizeof(float));
receive(A, f1, rowsA, colsA);
// printf("rowsA: %d\n", rowsA);
// printf("colsA: %d\n", colsA);
// print(A, rowsA, colsA);
receive(B, f2, rowsB, colsB);
// printf("rowsA: %d\n", rowsB);
// printf("colsA: %d\n", colsB);
// print(B, rowsB, colsB);
if (colsA != rowsB) return 1; // must be equal
start = clock();
mult(A, rowsA, colsA, B, rowsB, colsB, C);
end = clock();
// print(C, rowsA, colsB);
timeCPU = difftime(end, start);
printf ("Elasped time in CPU: %.2lf seconds.\n", timeCPU);
// write(C, rowsA, colsB, "CPU.out");
//-------------------------------GPU--------------------------------------
cudaError_t error = cudaSuccess;
float *d_A, *d_B, *d_C, *h_C;
h_C = (float*)malloc(rowsA * colsB * sizeof(float));
error = cudaMalloc((void**)&d_A, rowsA * colsA * sizeof(float));
if (error != cudaSuccess) {
printf("Error allocating memory to d_A");
return 1;
}
error = cudaMalloc((void**)&d_B, rowsB * colsB * sizeof(float));
if (error != cudaSuccess) {
printf("Error allocating memory to d_B");
return 1;
}
error = cudaMalloc((void**)&d_C, rowsA * colsB * sizeof(float));
if (error != cudaSuccess) {
printf("Error allocating memory to d_C");
return 1;
}
cudaMemcpy(d_A, A, rowsA * colsA * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, rowsB * colsB * sizeof(float), cudaMemcpyHostToDevice);
int blockSize = 32;
dim3 dimblock(blockSize, blockSize, 1);
dim3 dimGrid(ceil((colsB) / float(blockSize)), ceil((rowsA) / float(blockSize)), 1);
start = clock();
multCU<<<dimGrid,dimblock>>>(d_A, rowsA, colsA, d_B, rowsB, colsB, d_C);
cudaDeviceSynchronize();
end = clock();
timeGPU = difftime(end, start);
printf ("Elasped time in GPU: %.2lf seconds.\n", timeGPU);
cudaMemcpy(h_C, d_C, rowsA * colsB * sizeof(float), cudaMemcpyDeviceToHost);
// print(h_C, rowsA, colsB);
if (!compare(h_C, C, rowsA, colsB)) {
printf("Error multiplying\n");
} else {
printf("Acceleration time: %lf\n", timeCPU / timeGPU);
// write(h_C, rowsA, colsB, "GPU.out");
}
free(A); free(B); free(C); free(h_C);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
return 0;
}
|
809feb0331db22cc2f8ab0f517efa4a7327dfb2f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#define ALLOC_SIZE 1024
int main() {
int *hostAllocMem;
hipMalloc((void**)&hostAllocMem, ALLOC_SIZE*sizeof(int));
hipMemset(hostAllocMem, 0, (ALLOC_SIZE+1)*sizeof(int));
hipFree(hostAllocMem);
hipDeviceReset();
return 0;
}
| 809feb0331db22cc2f8ab0f517efa4a7327dfb2f.cu | #include <stdio.h>
#define ALLOC_SIZE 1024
int main() {
int *hostAllocMem;
cudaMalloc((void**)&hostAllocMem, ALLOC_SIZE*sizeof(int));
cudaMemset(hostAllocMem, 0, (ALLOC_SIZE+1)*sizeof(int));
cudaFree(hostAllocMem);
cudaDeviceReset();
return 0;
}
|
2ef2dcf64c2414011111b0aab15f1e1d786074ba.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "array.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "kernel_dispatcher.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/fill_copy.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
#include <vector>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, std::size_t Rank>
__global__ void permute(
array<index_type, Rank> axis_order,
Span<T> output, array<size_type, Rank> outStrides,
View<T> input, array<size_type, Rank> inStrides)
{
for (auto i : grid_stride_range(input.size())) {
index_type oldPosition = 0;
index_type newPosition = i;
for (int j = 0; j < Rank; j++)
{
auto order = axis_order[j];
oldPosition += (newPosition / outStrides[j]) * inStrides[order];
newPosition %= outStrides[j];
}
output[i] = input[oldPosition];
}
}
template <class T, int TILE_SIZE, std::size_t N>
__global__ void transpose(Span<T> output, View<T> input, size_type in_width, size_type out_width)
{
using vector_type = get_vector_type_t<T, N>;
__shared__ T tile[TILE_SIZE][TILE_SIZE + 1];
/* blockDim.y = TILE_SIZE, blockDim.x = TILE_SIZE/N */
const index_type in_x = blockIdx.x * TILE_SIZE + threadIdx.x * N;
const index_type in_y = blockIdx.y * TILE_SIZE + threadIdx.y;
/* Every valid input location has a corresponding output location and vice versa.
* Hence, if we do not load values into the shared memory for a given location, we
* also won't read them for storing in the output.
*/
if (in_x < in_width && in_y < out_width)
{
vector_type vec;
auto input_vPtr = vector_type::get_pointer(input.data());
v_load(vec, input_vPtr[(in_y * in_width + in_x) / N]);
for (int i = 0; i < vector_type::size(); i++)
tile[threadIdx.y][threadIdx.x * N + i] = vec.data[i];
}
__syncthreads();
/* Note that `blockDim.x * N` is equal to `blockDim.y`. Since there are an equal
* number of them, we can interchange `threadIdx.x` and `threadIdx.y` without changing
* result. The advantage of interchanging is that consecutive output indices map to
* consecutive threads. This would allow writes across threds in a warp to be coalesced.
*/
const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x * N;
const index_type out_y = blockIdx.x * TILE_SIZE + threadIdx.y;
if (out_x < out_width && out_y < in_width)
{
vector_type vec;
for (int i = 0; i < vector_type::size(); i++)
vec.data[i] = tile[threadIdx.x * N + i][threadIdx.y];
auto output_vPtr = vector_type::get_pointer(output.data());
v_store(output_vPtr[(out_y * out_width + out_x) / N], vec);
}
}
}
template <class T, std::size_t N> static
void launch_transpose_kernel(const Stream& stream, Span<T> output, View<T> input, size_type in_width, size_type out_width)
{
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(in_width % N == 0);
CV_Assert(out_width % N == 0);
constexpr int TILE_SIZE = 32;
constexpr int TILE_SIZE_X = TILE_SIZE/N, TILE_SIZE_Y = TILE_SIZE;
auto kernel = raw::transpose<T, TILE_SIZE, N>;
dim3 grid_size((in_width/N + TILE_SIZE_X - 1)/TILE_SIZE_X, (out_width + TILE_SIZE_Y - 1)/TILE_SIZE_Y);
dim3 block_size(TILE_SIZE_X, TILE_SIZE_Y);
auto policy = execution_policy(grid_size, block_size, stream);
launch_kernel(kernel, policy, output, input, in_width, out_width);
}
template <class T>
void transpose(const Stream& stream, Span<T> output, View<T> input, std::size_t in_width, std::size_t out_width)
{
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && in_width % 4 == 0 && out_width % 4 == 0) {
launch_transpose_kernel<T, 4>(stream, output, input, in_width, out_width);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && in_width % 2 == 0 && out_width % 2 == 0) {
launch_transpose_kernel<T, 2>(stream, output, input, in_width, out_width);
} else {
launch_transpose_kernel<T, 1>(stream, output, input, in_width, out_width);
}
}
template void transpose(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t);
template void transpose(const Stream&, Span<float>, View<float>, std::size_t, std::size_t);
template <class T, std::size_t Rank> static
void launch_permute_kernel(
const Stream& stream,
const std::vector<std::size_t>& order,
Span<T> output, const std::vector<std::size_t>& outStride,
View<T> input, const std::vector<std::size_t>& inStride)
{
CV_Assert(order.size() == Rank);
CV_Assert(outStride.size() == Rank);
CV_Assert(inStride.size() == Rank);
array<index_type, Rank> order_k;
order_k.assign(std::begin(order), std::end(order));
array<size_type, Rank> outStride_k, inStride_k;
outStride_k.assign(std::begin(outStride), std::end(outStride));
inStride_k.assign(std::begin(inStride), std::end(inStride));
auto kernel = raw::permute<T, Rank>;
auto policy = make_policy(kernel, input.size(), 0, stream);
launch_kernel(kernel, policy, order_k, output, outStride_k, input, inStride_k);
}
GENERATE_KERNEL_DISPATCHER(permute_dispatcher, launch_permute_kernel);
template <class T>
void permute(
const Stream& stream,
TensorSpan<T> output, TensorView<T> input,
std::vector<std::size_t> order)
{
CV_Assert(output.rank() == input.rank());
CV_Assert(input.rank() == order.size());
CV_Assert(input.size() == output.size());
auto rank = output.rank();
auto inShape = input.shape_as_vector();
auto outShape = output.shape_as_vector();
/* singleton axes do not contribute towards address calculation
*
* Reasoning:
* ----------
* Suppose an item's indices in the input tensor is [i1, i2, ...]. The indices in the
* output tensor will be some permutation of the input tensor indices. Let the output
* tensor indices be [o1, o2, ...]. The permutation operation essentially copies items
* from the input tensor to new locations in the output tensor as dictated by the indices.
*
* If the size of the nth axis (say i2) of the input is one the input and output indicies for
* all the elements will be of the form be [i1, 0, ...] and [..., 0, ...] respectively.
* The index does not contribute to the element's address calculation and hence would give
* identical result if it weren't there.
*/
for (int i = 0; i < rank; i++)
{
/* index `i` corresponds to the axis index in the output; order[i] has the corresponding axis index in the input */
while (i < rank && outShape[i] == 1)
{
int in_i = order[i];
CV_Assert(inShape[in_i] == 1);
/* delete axis `i` */
inShape.erase(std::begin(inShape) + in_i);
outShape.erase(std::begin(outShape) + i);
/* deletion of an axis reduces an axis in the input tensor which would cause the indices
* of the axes that come after the deleted axis to reduce by one
*/
order.erase(order.begin() + i);
for (auto& axis : order)
if (axis > in_i)
axis--;
rank--;
/* optimizations should not break the invariants */
CV_Assert(rank == order.size());
CV_Assert(inShape.size() == order.size());
CV_Assert(outShape.size() == order.size());
CV_Assert(input.size() == output.size());
}
}
/* contiguous axes whose relative ordering stays same before and after permutation can be merged into one axis
* example: in permute order 0 2 3 1, axes 2 and 3 can be grouped into a single axis
*
* Reasoning:
* ----------
* Suppose an item's indices in the input tensor is [i0, i1, i2, i3, ...]. Let the permutation order be [0, 3, 1, 2, ...].
* Note that i1 and i2 are adjacent axes in the same order in input as well as output. The indices in the output tensor
* will be [i0, i3, i1, i2, ...].
*
* Each axis in the contiguous axes sequence will add an offset of iN * strideN. In the above example,
* the two axes add a total offset of `i1 * (size2 * stride2) + i2 * stride2` which is `(i1 * size2 + i2) * stride2`,
* in both input and output. Note stride2 can be different in the input and output. We can merge the two axes into one axis
* with a size of `size1 * size2`. The new offset added will be `i12 * stride12` as the kernel iterates through `i12`. Note
* that `i12` is actually `(i1 * size2 + i2)` and `stride12` is `stride2`.
*/
for (int i = 0; i < rank; i++) {
/* the indices used in the loops such as `i` and `j` are axis indices in the output tensor */
/* the corresponding input axis indices are `order[i]` and `order[j]`*/
/* loop invariant: `i` is the first axis in the contiguous unpermuted axis sequence */
int j = i + 1; /* `j` is the axis which we will attempt to merge */
while (j < rank && (order[i] + 1) == order[j]) {
/* axis `i` and axis `j` do not change relative order */
auto in_i = order[i], in_j = order[j];
auto new_size = inShape[in_i] * inShape[in_j];
inShape[in_i] = new_size;
outShape[i] = new_size;
/* delete axis `j` */
inShape.erase(std::begin(inShape) + in_j);
outShape.erase(std::begin(outShape) + j);
/* deletion of an axis reduces an axis in the input tensor which would cause the indices
* of the axes that come after the deleted axis to reduce by one
*/
order.erase(order.begin() + j);
for (auto& axis : order)
if (axis > order[i])
axis--;
rank--;
/* optimizations should not break the invariants */
CV_Assert(rank == order.size());
CV_Assert(inShape.size() == order.size());
CV_Assert(outShape.size() == order.size());
CV_Assert(input.size() == output.size());
}
}
std::vector<std::size_t> inStride(rank), outStride(rank);
inStride.back() = 1;
outStride.back() = 1;
/* garbage, ..., garbage, 1 */
std::copy(std::begin(inShape) + 1, std::end(inShape), std::begin(inStride));
std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride));
/* dim[0], dim[1], ..., dim[-1], 1 */
std::partial_sum(inStride.rbegin(), inStride.rend(), inStride.rbegin(), std::multiplies<std::size_t>());
std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>());
/* stride[0], stride[1], ..., stride[-2], 1 */
const bool is_in_order = [&order] {
for (int i = 0; i < order.size(); i++)
if (order[i] != i)
return false;
return true;
}();
if (is_in_order)
{
kernels::copy<T>(stream, output, input);
}
else if(rank == 2)
{
/* use the more efficient transpose kernel */
transpose<T>(stream, output, input, inShape[1], outShape[1]);
}
else
{
CV_Assert(3 <= rank && rank <= CSL_MAX_TENSOR_RANK);
permute_dispatcher<T, 3, CSL_MAX_TENSOR_RANK>(rank, stream, order, output, outStride, input, inStride);
}
}
template void permute(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>);
template void permute(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
| 2ef2dcf64c2414011111b0aab15f1e1d786074ba.cu | // This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "array.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "kernel_dispatcher.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/fill_copy.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
#include <vector>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, std::size_t Rank>
__global__ void permute(
array<index_type, Rank> axis_order,
Span<T> output, array<size_type, Rank> outStrides,
View<T> input, array<size_type, Rank> inStrides)
{
for (auto i : grid_stride_range(input.size())) {
index_type oldPosition = 0;
index_type newPosition = i;
for (int j = 0; j < Rank; j++)
{
auto order = axis_order[j];
oldPosition += (newPosition / outStrides[j]) * inStrides[order];
newPosition %= outStrides[j];
}
output[i] = input[oldPosition];
}
}
template <class T, int TILE_SIZE, std::size_t N>
__global__ void transpose(Span<T> output, View<T> input, size_type in_width, size_type out_width)
{
using vector_type = get_vector_type_t<T, N>;
__shared__ T tile[TILE_SIZE][TILE_SIZE + 1];
/* blockDim.y = TILE_SIZE, blockDim.x = TILE_SIZE/N */
const index_type in_x = blockIdx.x * TILE_SIZE + threadIdx.x * N;
const index_type in_y = blockIdx.y * TILE_SIZE + threadIdx.y;
/* Every valid input location has a corresponding output location and vice versa.
* Hence, if we do not load values into the shared memory for a given location, we
* also won't read them for storing in the output.
*/
if (in_x < in_width && in_y < out_width)
{
vector_type vec;
auto input_vPtr = vector_type::get_pointer(input.data());
v_load(vec, input_vPtr[(in_y * in_width + in_x) / N]);
for (int i = 0; i < vector_type::size(); i++)
tile[threadIdx.y][threadIdx.x * N + i] = vec.data[i];
}
__syncthreads();
/* Note that `blockDim.x * N` is equal to `blockDim.y`. Since there are an equal
* number of them, we can interchange `threadIdx.x` and `threadIdx.y` without changing
* result. The advantage of interchanging is that consecutive output indices map to
* consecutive threads. This would allow writes across threds in a warp to be coalesced.
*/
const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x * N;
const index_type out_y = blockIdx.x * TILE_SIZE + threadIdx.y;
if (out_x < out_width && out_y < in_width)
{
vector_type vec;
for (int i = 0; i < vector_type::size(); i++)
vec.data[i] = tile[threadIdx.x * N + i][threadIdx.y];
auto output_vPtr = vector_type::get_pointer(output.data());
v_store(output_vPtr[(out_y * out_width + out_x) / N], vec);
}
}
}
template <class T, std::size_t N> static
void launch_transpose_kernel(const Stream& stream, Span<T> output, View<T> input, size_type in_width, size_type out_width)
{
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(in_width % N == 0);
CV_Assert(out_width % N == 0);
constexpr int TILE_SIZE = 32;
constexpr int TILE_SIZE_X = TILE_SIZE/N, TILE_SIZE_Y = TILE_SIZE;
auto kernel = raw::transpose<T, TILE_SIZE, N>;
dim3 grid_size((in_width/N + TILE_SIZE_X - 1)/TILE_SIZE_X, (out_width + TILE_SIZE_Y - 1)/TILE_SIZE_Y);
dim3 block_size(TILE_SIZE_X, TILE_SIZE_Y);
auto policy = execution_policy(grid_size, block_size, stream);
launch_kernel(kernel, policy, output, input, in_width, out_width);
}
template <class T>
void transpose(const Stream& stream, Span<T> output, View<T> input, std::size_t in_width, std::size_t out_width)
{
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && in_width % 4 == 0 && out_width % 4 == 0) {
launch_transpose_kernel<T, 4>(stream, output, input, in_width, out_width);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && in_width % 2 == 0 && out_width % 2 == 0) {
launch_transpose_kernel<T, 2>(stream, output, input, in_width, out_width);
} else {
launch_transpose_kernel<T, 1>(stream, output, input, in_width, out_width);
}
}
template void transpose(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t);
template void transpose(const Stream&, Span<float>, View<float>, std::size_t, std::size_t);
template <class T, std::size_t Rank> static
void launch_permute_kernel(
const Stream& stream,
const std::vector<std::size_t>& order,
Span<T> output, const std::vector<std::size_t>& outStride,
View<T> input, const std::vector<std::size_t>& inStride)
{
CV_Assert(order.size() == Rank);
CV_Assert(outStride.size() == Rank);
CV_Assert(inStride.size() == Rank);
array<index_type, Rank> order_k;
order_k.assign(std::begin(order), std::end(order));
array<size_type, Rank> outStride_k, inStride_k;
outStride_k.assign(std::begin(outStride), std::end(outStride));
inStride_k.assign(std::begin(inStride), std::end(inStride));
auto kernel = raw::permute<T, Rank>;
auto policy = make_policy(kernel, input.size(), 0, stream);
launch_kernel(kernel, policy, order_k, output, outStride_k, input, inStride_k);
}
GENERATE_KERNEL_DISPATCHER(permute_dispatcher, launch_permute_kernel);
template <class T>
void permute(
const Stream& stream,
TensorSpan<T> output, TensorView<T> input,
std::vector<std::size_t> order)
{
CV_Assert(output.rank() == input.rank());
CV_Assert(input.rank() == order.size());
CV_Assert(input.size() == output.size());
auto rank = output.rank();
auto inShape = input.shape_as_vector();
auto outShape = output.shape_as_vector();
/* singleton axes do not contribute towards address calculation
*
* Reasoning:
* ----------
* Suppose an item's indices in the input tensor is [i1, i2, ...]. The indices in the
* output tensor will be some permutation of the input tensor indices. Let the output
* tensor indices be [o1, o2, ...]. The permutation operation essentially copies items
* from the input tensor to new locations in the output tensor as dictated by the indices.
*
* If the size of the nth axis (say i2) of the input is one the input and output indicies for
* all the elements will be of the form be [i1, 0, ...] and [..., 0, ...] respectively.
* The index does not contribute to the element's address calculation and hence would give
* identical result if it weren't there.
*/
for (int i = 0; i < rank; i++)
{
/* index `i` corresponds to the axis index in the output; order[i] has the corresponding axis index in the input */
while (i < rank && outShape[i] == 1)
{
int in_i = order[i];
CV_Assert(inShape[in_i] == 1);
/* delete axis `i` */
inShape.erase(std::begin(inShape) + in_i);
outShape.erase(std::begin(outShape) + i);
/* deletion of an axis reduces an axis in the input tensor which would cause the indices
* of the axes that come after the deleted axis to reduce by one
*/
order.erase(order.begin() + i);
for (auto& axis : order)
if (axis > in_i)
axis--;
rank--;
/* optimizations should not break the invariants */
CV_Assert(rank == order.size());
CV_Assert(inShape.size() == order.size());
CV_Assert(outShape.size() == order.size());
CV_Assert(input.size() == output.size());
}
}
/* contiguous axes whose relative ordering stays same before and after permutation can be merged into one axis
* example: in permute order 0 2 3 1, axes 2 and 3 can be grouped into a single axis
*
* Reasoning:
* ----------
* Suppose an item's indices in the input tensor is [i0, i1, i2, i3, ...]. Let the permutation order be [0, 3, 1, 2, ...].
* Note that i1 and i2 are adjacent axes in the same order in input as well as output. The indices in the output tensor
* will be [i0, i3, i1, i2, ...].
*
* Each axis in the contiguous axes sequence will add an offset of iN * strideN. In the above example,
* the two axes add a total offset of `i1 * (size2 * stride2) + i2 * stride2` which is `(i1 * size2 + i2) * stride2`,
* in both input and output. Note stride2 can be different in the input and output. We can merge the two axes into one axis
* with a size of `size1 * size2`. The new offset added will be `i12 * stride12` as the kernel iterates through `i12`. Note
* that `i12` is actually `(i1 * size2 + i2)` and `stride12` is `stride2`.
*/
for (int i = 0; i < rank; i++) {
/* the indices used in the loops such as `i` and `j` are axis indices in the output tensor */
/* the corresponding input axis indices are `order[i]` and `order[j]`*/
/* loop invariant: `i` is the first axis in the contiguous unpermuted axis sequence */
int j = i + 1; /* `j` is the axis which we will attempt to merge */
while (j < rank && (order[i] + 1) == order[j]) {
/* axis `i` and axis `j` do not change relative order */
auto in_i = order[i], in_j = order[j];
auto new_size = inShape[in_i] * inShape[in_j];
inShape[in_i] = new_size;
outShape[i] = new_size;
/* delete axis `j` */
inShape.erase(std::begin(inShape) + in_j);
outShape.erase(std::begin(outShape) + j);
/* deletion of an axis reduces an axis in the input tensor which would cause the indices
* of the axes that come after the deleted axis to reduce by one
*/
order.erase(order.begin() + j);
for (auto& axis : order)
if (axis > order[i])
axis--;
rank--;
/* optimizations should not break the invariants */
CV_Assert(rank == order.size());
CV_Assert(inShape.size() == order.size());
CV_Assert(outShape.size() == order.size());
CV_Assert(input.size() == output.size());
}
}
std::vector<std::size_t> inStride(rank), outStride(rank);
inStride.back() = 1;
outStride.back() = 1;
/* garbage, ..., garbage, 1 */
std::copy(std::begin(inShape) + 1, std::end(inShape), std::begin(inStride));
std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride));
/* dim[0], dim[1], ..., dim[-1], 1 */
std::partial_sum(inStride.rbegin(), inStride.rend(), inStride.rbegin(), std::multiplies<std::size_t>());
std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>());
/* stride[0], stride[1], ..., stride[-2], 1 */
const bool is_in_order = [&order] {
for (int i = 0; i < order.size(); i++)
if (order[i] != i)
return false;
return true;
}();
if (is_in_order)
{
kernels::copy<T>(stream, output, input);
}
else if(rank == 2)
{
/* use the more efficient transpose kernel */
transpose<T>(stream, output, input, inShape[1], outShape[1]);
}
else
{
CV_Assert(3 <= rank && rank <= CSL_MAX_TENSOR_RANK);
permute_dispatcher<T, 3, CSL_MAX_TENSOR_RANK>(rank, stream, order, output, outStride, input, inStride);
}
}
template void permute(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>);
template void permute(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
8590ee7cb63a631299442e335fe9828b2e098c2c.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/NumericUtils.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void smooth_l1_kernel_cuda(TensorIteratorBase& iter, double beta) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "smooth_l1_cuda", [&iter, beta]() {
scalar_t beta_val(beta);
gpu_kernel(iter, [beta_val] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
auto z = ::abs(a - b);
return z < beta_val ? scalar_t(0.5) * z * z / beta_val : z - scalar_t(0.5) * beta_val;
});
});
}
void huber_kernel_cuda(TensorIterator& iter, double delta) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "huber_cuda", [&iter, delta] {
scalar_t delta_val(delta);
gpu_kernel(iter, [delta_val] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
auto z = ::abs(a - b);
return z < delta_val ? scalar_t(0.5) * z * z : delta_val * (z - scalar_t(0.5) * delta_val);
});
});
}
void mse_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto diff = a - b;
return diff * diff;
});
});
}
void xlogy_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "xlogy_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t y) -> scalar_t {
if (at::_isnan(y)){
return NAN;
}
if (x == 0){
return 0;
}
return x * ::log(y);
});
});
}
void xlog1py_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "xlog1py_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t y) -> scalar_t {
if (at::_isnan(y)){
return NAN;
}
if (x == 0){
return 0;
}
return x * std::log1p(y);
});
});
}
REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda);
REGISTER_DISPATCH(huber_stub, &huber_kernel_cuda);
REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda);
REGISTER_DISPATCH(xlogy_stub, &xlogy_kernel_cuda);
REGISTER_DISPATCH(xlog1py_stub, &xlog1py_kernel_cuda);
// DO NOT ADD ANY NEW KERNELS HERE
// CUDA compilation times grow quickly. It's perfectly acceptable to have a file per kernel.
} // namespace at::native
| 8590ee7cb63a631299442e335fe9828b2e098c2c.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/NumericUtils.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void smooth_l1_kernel_cuda(TensorIteratorBase& iter, double beta) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "smooth_l1_cuda", [&iter, beta]() {
scalar_t beta_val(beta);
gpu_kernel(iter, [beta_val] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
auto z = ::abs(a - b);
return z < beta_val ? scalar_t(0.5) * z * z / beta_val : z - scalar_t(0.5) * beta_val;
});
});
}
void huber_kernel_cuda(TensorIterator& iter, double delta) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "huber_cuda", [&iter, delta] {
scalar_t delta_val(delta);
gpu_kernel(iter, [delta_val] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
auto z = ::abs(a - b);
return z < delta_val ? scalar_t(0.5) * z * z : delta_val * (z - scalar_t(0.5) * delta_val);
});
});
}
void mse_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto diff = a - b;
return diff * diff;
});
});
}
void xlogy_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "xlogy_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t y) -> scalar_t {
if (at::_isnan(y)){
return NAN;
}
if (x == 0){
return 0;
}
return x * std::log(y);
});
});
}
void xlog1py_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "xlog1py_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t y) -> scalar_t {
if (at::_isnan(y)){
return NAN;
}
if (x == 0){
return 0;
}
return x * std::log1p(y);
});
});
}
REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda);
REGISTER_DISPATCH(huber_stub, &huber_kernel_cuda);
REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda);
REGISTER_DISPATCH(xlogy_stub, &xlogy_kernel_cuda);
REGISTER_DISPATCH(xlog1py_stub, &xlog1py_kernel_cuda);
// DO NOT ADD ANY NEW KERNELS HERE
// CUDA compilation times grow quickly. It's perfectly acceptable to have a file per kernel.
} // namespace at::native
|
0001d63569b4f390623bacd2dcec81af6b51e111.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
#include "distance.h"
#include "atom.cuh"
using namespace std;
inline hipError_t checkCuda(hipError_t result, int line )
{
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s at line %d\n", hipGetErrorString(result),line);
}
return result;
}
/**
* Compute the total internal distance of each rotation. Each block take care of one angle of the rotation and
* store the result in an array. The position in the array corresponds to the angle of the rotation in degrees.
*
* @param res Array that store the final results.
* @param distances Array with the point distances of each atom of the molecule.
* @param number_of_atoms number of the atoms in the molecule.
**/
__global__ void compute_total_distance_kernel_non_matrix(double* res, double* distances, int number_of_atoms){
extern __shared__ double tmp[];
uint tid = threadIdx.x;
uint i = threadIdx.x + blockIdx.x*number_of_atoms;
if(i < (blockIdx.x+1)*number_of_atoms){
tmp[tid] = distances[i];
}
else
tmp[tid] = 0;
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0 ; s >>= 1){
if(tid < s){
tmp[tid] += tmp[tid+s];
}
__syncthreads();
}
__syncthreads();
if(tid == 0){
res[blockIdx.x] = tmp[0];
}
return;
}
/**
* Compute the Eucledian distance of each atom with all the others. Each block compute the result of a Rotation.
*
* @param res store the result.
* @param atoms All the atoms of all the rotations to take into account.
* @param num_of_atoms number of atoms of the molecule.
**/
__global__ void compute_point_distance_non_matrix(double* res, atom_st* atoms, int num_of_atoms ){
int tid = threadIdx.x + blockIdx.x*num_of_atoms;
res[tid] = 0;
if(tid < num_of_atoms*(blockIdx.x+1)){
double dx,dy,dz,distSqr;
for(int j = num_of_atoms*blockIdx.x; j < num_of_atoms*(blockIdx.x+1); j++){
dx = atoms[tid].position.x - atoms[j].position.x;
dy = atoms[tid].position.y - atoms[j].position.y;
dz = atoms[tid].position.z - atoms[j].position.z;
distSqr = dx*dx + dy*dy + dz*dz;
res[tid] += sqrt(distSqr);
}
}
}
/**
* Compute the internal distance of the molecule. It calls two kernels, one for the point distance of each atom
* with the others, and the second to sum all the distance of each atom. It is possible to compute all the distance
* of all the rotation all together calling the kernels with 360 blocks. Each result will be stored in the angle corresponding position.
*
* @param atoms All the atoms of all the rotation computed.
* @param number_of_atoms number of atoms in the molecule
* @param num_of_block numbre of block used in the rotation.
**/
double* distance_v3(vector<atom_st> atoms, int number_of_atoms, int num_of_block){
hipError_t err;
int size_of_atoms = number_of_atoms*sizeof(atom_st);
int deviceId;
double* d_distance;
atom_st* atoms_tmp = (atom_st*)malloc(num_of_block*size_of_atoms);
double* res;
atom_st * d_atoms;
checkCuda( hipMalloc(&d_distance, 2*num_of_block* number_of_atoms * number_of_atoms*sizeof(double)), __LINE__);
checkCuda( hipGetDevice(&deviceId), __LINE__);
checkCuda( hipMallocManaged(&atoms_tmp, num_of_block * size_of_atoms), __LINE__);
checkCuda( hipMallocManaged(&res, num_of_block * sizeof(double)), __LINE__);
for(int i = 0; i < number_of_atoms * num_of_block; i++){
atoms_tmp[i] = atoms[i];
}
checkCuda( hipMalloc(&d_atoms,size_of_atoms*num_of_block), __LINE__);
checkCuda( hipMemcpy(d_atoms, atoms_tmp, size_of_atoms*num_of_block, hipMemcpyHostToDevice), __LINE__);
checkCuda( hipMemPrefetchAsync(res, num_of_block*sizeof(double), deviceId), __LINE__);
hipLaunchKernelGGL(( compute_point_distance_non_matrix), dim3(num_of_block), dim3(512), 0, 0, d_distance, d_atoms, number_of_atoms);
err = hipGetLastError();
if(err != hipSuccess){
printf("Error %s at %d\n", hipGetErrorString(err),__LINE__);
}
hipDeviceSynchronize();
checkCuda( hipDeviceSetSharedMemConfig( hipSharedMemBankSizeEightByte ), __LINE__);
hipLaunchKernelGGL(( compute_total_distance_kernel_non_matrix), dim3(num_of_block), dim3(512), 2*512*sizeof(double), 0, res, d_distance, number_of_atoms);
err = hipGetLastError();
if(err != hipSuccess){
printf("Error %s %d\n", hipGetErrorString(err), __LINE__);
}
hipDeviceSynchronize();
checkCuda ( hipFree(d_distance),__LINE__);
checkCuda ( hipFree(d_atoms), __LINE__);
return res;
} | 0001d63569b4f390623bacd2dcec81af6b51e111.cu | #include <iostream>
#include <vector>
#include "cuda.h"
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"
#include "distance.h"
#include "atom.cuh"
using namespace std;
inline cudaError_t checkCuda(cudaError_t result, int line )
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s at line %d\n", cudaGetErrorString(result),line);
}
return result;
}
/**
* Compute the total internal distance of each rotation. Each block take care of one angle of the rotation and
* store the result in an array. The position in the array corresponds to the angle of the rotation in degrees.
*
* @param res Array that store the final results.
* @param distances Array with the point distances of each atom of the molecule.
* @param number_of_atoms number of the atoms in the molecule.
**/
__global__ void compute_total_distance_kernel_non_matrix(double* res, double* distances, int number_of_atoms){
extern __shared__ double tmp[];
uint tid = threadIdx.x;
uint i = threadIdx.x + blockIdx.x*number_of_atoms;
if(i < (blockIdx.x+1)*number_of_atoms){
tmp[tid] = distances[i];
}
else
tmp[tid] = 0;
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0 ; s >>= 1){
if(tid < s){
tmp[tid] += tmp[tid+s];
}
__syncthreads();
}
__syncthreads();
if(tid == 0){
res[blockIdx.x] = tmp[0];
}
return;
}
/**
* Compute the Eucledian distance of each atom with all the others. Each block compute the result of a Rotation.
*
* @param res store the result.
* @param atoms All the atoms of all the rotations to take into account.
* @param num_of_atoms number of atoms of the molecule.
**/
__global__ void compute_point_distance_non_matrix(double* res, atom_st* atoms, int num_of_atoms ){
int tid = threadIdx.x + blockIdx.x*num_of_atoms;
res[tid] = 0;
if(tid < num_of_atoms*(blockIdx.x+1)){
double dx,dy,dz,distSqr;
for(int j = num_of_atoms*blockIdx.x; j < num_of_atoms*(blockIdx.x+1); j++){
dx = atoms[tid].position.x - atoms[j].position.x;
dy = atoms[tid].position.y - atoms[j].position.y;
dz = atoms[tid].position.z - atoms[j].position.z;
distSqr = dx*dx + dy*dy + dz*dz;
res[tid] += sqrt(distSqr);
}
}
}
/**
* Compute the internal distance of the molecule. It calls two kernels, one for the point distance of each atom
* with the others, and the second to sum all the distance of each atom. It is possible to compute all the distance
* of all the rotation all together calling the kernels with 360 blocks. Each result will be stored in the angle corresponding position.
*
* @param atoms All the atoms of all the rotation computed.
* @param number_of_atoms number of atoms in the molecule
* @param num_of_block numbre of block used in the rotation.
**/
double* distance_v3(vector<atom_st> atoms, int number_of_atoms, int num_of_block){
cudaError_t err;
int size_of_atoms = number_of_atoms*sizeof(atom_st);
int deviceId;
double* d_distance;
atom_st* atoms_tmp = (atom_st*)malloc(num_of_block*size_of_atoms);
double* res;
atom_st * d_atoms;
checkCuda( cudaMalloc(&d_distance, 2*num_of_block* number_of_atoms * number_of_atoms*sizeof(double)), __LINE__);
checkCuda( cudaGetDevice(&deviceId), __LINE__);
checkCuda( cudaMallocManaged(&atoms_tmp, num_of_block * size_of_atoms), __LINE__);
checkCuda( cudaMallocManaged(&res, num_of_block * sizeof(double)), __LINE__);
for(int i = 0; i < number_of_atoms * num_of_block; i++){
atoms_tmp[i] = atoms[i];
}
checkCuda( cudaMalloc(&d_atoms,size_of_atoms*num_of_block), __LINE__);
checkCuda( cudaMemcpy(d_atoms, atoms_tmp, size_of_atoms*num_of_block, cudaMemcpyHostToDevice), __LINE__);
checkCuda( cudaMemPrefetchAsync(res, num_of_block*sizeof(double), deviceId), __LINE__);
compute_point_distance_non_matrix<<<num_of_block, 512>>>(d_distance, d_atoms, number_of_atoms);
err = cudaGetLastError();
if(err != cudaSuccess){
printf("Error %s at %d\n", cudaGetErrorString(err),__LINE__);
}
cudaDeviceSynchronize();
checkCuda( cudaDeviceSetSharedMemConfig( cudaSharedMemBankSizeEightByte ), __LINE__);
compute_total_distance_kernel_non_matrix<<<num_of_block, 512, 2*512*sizeof(double)>>>(res, d_distance, number_of_atoms);
err = cudaGetLastError();
if(err != cudaSuccess){
printf("Error %s %d\n", cudaGetErrorString(err), __LINE__);
}
cudaDeviceSynchronize();
checkCuda ( cudaFree(d_distance),__LINE__);
checkCuda ( cudaFree(d_atoms), __LINE__);
return res;
} |
fa801b838c5888b4931b10cdb150c1223dc0f8d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include "vector.h"
struct Vector *vector_create(int size, int capacity, int init_zero) {
int index;
struct Vector *vector = (struct Vector *)malloc(sizeof(struct Vector));
vector->values = (double *)malloc(capacity * sizeof(double));
vector->size = size;
vector->capacity = capacity;
if (init_zero == 1)
for (index = 0; index < size; index++)
vector->values[index] = 0;
vector->device_values = NULL;
return vector;
}
void vector_destroy(struct Vector *vector) {
free(vector->values);
free(vector);
}
struct Vector *vector_cross(struct Vector *left, struct Vector *right) {
return 0;
}
struct Vector *vector_dot(struct Vector *left, struct Vector *right) {
return 0;
}
void vector_print(struct Vector *vector) {
int index;
for (index = 0; index < vector->size; index++)
printf("%.3f, ", vector->values[index]);
printf("\n");
}
| fa801b838c5888b4931b10cdb150c1223dc0f8d7.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include "vector.h"
struct Vector *vector_create(int size, int capacity, int init_zero) {
int index;
struct Vector *vector = (struct Vector *)malloc(sizeof(struct Vector));
vector->values = (double *)malloc(capacity * sizeof(double));
vector->size = size;
vector->capacity = capacity;
if (init_zero == 1)
for (index = 0; index < size; index++)
vector->values[index] = 0;
vector->device_values = NULL;
return vector;
}
void vector_destroy(struct Vector *vector) {
free(vector->values);
free(vector);
}
struct Vector *vector_cross(struct Vector *left, struct Vector *right) {
return 0;
}
struct Vector *vector_dot(struct Vector *left, struct Vector *right) {
return 0;
}
void vector_print(struct Vector *vector) {
int index;
for (index = 0; index < vector->size; index++)
printf("%.3f, ", vector->values[index]);
printf("\n");
}
|
4c1198fffa1d6b6b00863b6260be860546e25de7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "dropout_layer.hpp"
#include "math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
| 4c1198fffa1d6b6b00863b6260be860546e25de7.cu | #include <vector>
#include "dropout_layer.hpp"
#include "math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
|
e54a0105e8cdd0738bc058890549f4b16c14bbdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Matrix multiplication by parts
// Elements stored in row-major order
using namespace std;
#include <stdio.h>
#include <iostream>
#include <fstream>
#include "helper_functions.h"
#define BLOCK_SIZE 16
typedef struct
{ int width;
int height;
float *elements;
} Matrix;
// Forward declaration of matrix mult
__global__ void MatMulKernel (const Matrix, const Matrix, Matrix);
// Host code
// Host wrapper: computes C = A * B on the device for row-major matrices.
// A is (A.height x A.width), B is (B.height x B.width); C.elements must be
// pre-allocated by the caller with C.height * C.width floats.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load matrices A and B to device memory.
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    hipMalloc((void**) &d_A.elements, size);
    hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    hipMalloc((void**) &d_B.elements, size);
    hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
    // Allocate C on the device.
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = d_C.width * d_C.height * sizeof(float);
    hipMalloc((void**) &d_C.elements, size);
    // Launch a 2D grid: the kernel derives (row, col) from the y/x thread
    // coordinates, so the previous 1D config (dim3(256) blocks and threads)
    // left blockIdx.y == threadIdx.y == 0 and computed only row 0 of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);                   // threads per block
    dim3 dimGrid((B.width + BLOCK_SIZE - 1) / BLOCK_SIZE,    // ceil-div so any
                 (A.height + BLOCK_SIZE - 1) / BLOCK_SIZE);  // size is covered
    hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
    // Copy C back to the host.
    hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
    // Free device memory.
    hipFree(d_A.elements);
    hipFree(d_B.elements);
    hipFree(d_C.elements);
}
//matrix multiplication kernel
// Matrix-multiply kernel: one thread computes one element of C = A * B.
// Expects a 2D launch: row comes from the y coordinates, col from the x
// coordinates. Out-of-range threads return early (grid overhang guard).
// NOTE(review): the host wrapper in this file launches with a 1D config
// (dim3(256)/dim3(256)), which leaves row == 0 -- confirm that is intended.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// each thread computes one element of C and acumulates results to
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if ((row>=A.height) || (col>=B.width)){
return;
}
// Dot product of row `row` of A with column `col` of B (row-major layout).
for (int e=0; e<A.width; e++)
Cvalue += A.elements[row*A.width+ e] *
B.elements[e*B.width + col];
C.elements[row*C.width + col] = Cvalue;
}
// To by bylo calkiem dobre :P
// CPU reference implementation of C = A * B for row-major matrices.
// Fix: the original body referenced undeclared names (Cvalue, row, col),
// so it did not compile, and it iterated columns over A.width instead of
// B.width (equivalent only for square matrices).
void MatMulHost(Matrix A,Matrix B,Matrix C)
{
    for (int row_iter = 0; row_iter < A.height ; row_iter++ )
    {
        for (int column_iter = 0; column_iter < B.width; column_iter++ )
        {
            float Cvalue = 0.0f;  // accumulator for C[row_iter][column_iter]
            for (int e = 0; e < A.width; e++)
                Cvalue += A.elements[row_iter*A.width + e] *
                          B.elements[e*B.width + column_iter];
            C.elements[row_iter*C.width + column_iter] = Cvalue;
        }
    }
}
// Reads two Width x Width matrices from A.txt / B.txt, multiplies them on
// the GPU via MatMul, times the operation, and writes the result to C.txt.
int main(int argc, char * const argv[])
{
    int Width = 16;
    Matrix A;
    Matrix B;
    Matrix C;
    A.width = Width;
    B.width = Width;
    C.width = Width;
    A.height = Width;
    B.height = Width;
    C.height = Width;
    A.elements = new float[Width*Width];
    B.elements = new float[Width*Width];
    C.elements = new float[Width*Width];
    // Fill matrices from the input files. The loop is bounds-checked: the
    // original read until EOF with no limit and could overflow the
    // Width*Width buffers on longer input files.
    std::ifstream A_input;
    std::ifstream B_input;
    A_input.open("A.txt");
    B_input.open("B.txt");
    float a, b;
    int i = 0;
    while (i < Width*Width && (A_input >> a) && (B_input >> b))
    {
        A.elements[i] = a;
        B.elements[i] = b;
        i += 1;
    }
    A_input.close();
    B_input.close();
    // TIMER BEGIN
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    sdkStartTimer(&timer);
    MatMul(A, B, C);
    sdkStopTimer(&timer);
    float time = sdkGetTimerValue(&timer);
    sdkDeleteTimer(&timer);
    std::cout << "Time of the multiplication :" << time << " ms" << std::endl;
    // TIMER END
    std::ofstream C_output;
    C_output.open("C.txt");
    for (int r=0; r<Width; r++)
    {   for (int j=0; j<Width; j++)
            C_output<<C.elements[r*Width+j]<<"\t";
        C_output<<endl;
    }
    C_output.close();
    // Release host buffers (previously leaked).
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
| e54a0105e8cdd0738bc058890549f4b16c14bbdc.cu | // Matrix multiplication by parts
// Elements stored in row-major order
using namespace std;
#include <stdio.h>
#include <iostream>
#include <fstream>
#include "helper_functions.h"
#define BLOCK_SIZE 16
typedef struct
{ int width;
int height;
float *elements;
} Matrix;
// Forward declaration of matrix mult
__global__ void MatMulKernel (const Matrix, const Matrix, Matrix);
// Host code
// Host wrapper: computes C = A * B on the device for row-major matrices.
// A is (A.height x A.width), B is (B.height x B.width); C.elements must be
// pre-allocated by the caller with C.height * C.width floats.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load matrices A and B to device memory.
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc((void**) &d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc((void**) &d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C on the device.
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = d_C.width * d_C.height * sizeof(float);
    cudaMalloc((void**) &d_C.elements, size);
    // Launch a 2D grid: the kernel derives (row, col) from the y/x thread
    // coordinates, so the previous 1D config (dim3(256) blocks and threads)
    // left blockIdx.y == threadIdx.y == 0 and computed only row 0 of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);                   // threads per block
    dim3 dimGrid((B.width + BLOCK_SIZE - 1) / BLOCK_SIZE,    // ceil-div so any
                 (A.height + BLOCK_SIZE - 1) / BLOCK_SIZE);  // size is covered
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Copy C back to the host.
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
//matrix multiplication kernel
// Matrix-multiply kernel: one thread computes one element of C = A * B.
// Expects a 2D launch: row comes from the y coordinates, col from the x
// coordinates. Out-of-range threads return early (grid overhang guard).
// NOTE(review): the host wrapper in this file launches with a 1D config
// (dim3(256)/dim3(256)), which leaves row == 0 -- confirm that is intended.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// each thread computes one element of C and acumulates results to
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if ((row>=A.height) || (col>=B.width)){
return;
}
// Dot product of row `row` of A with column `col` of B (row-major layout).
for (int e=0; e<A.width; e++)
Cvalue += A.elements[row*A.width+ e] *
B.elements[e*B.width + col];
C.elements[row*C.width + col] = Cvalue;
}
// To by bylo calkiem dobre :P
// CPU reference implementation of C = A * B for row-major matrices.
// Fix: the original body referenced undeclared names (Cvalue, row, col),
// so it did not compile, and it iterated columns over A.width instead of
// B.width (equivalent only for square matrices).
void MatMulHost(Matrix A,Matrix B,Matrix C)
{
    for (int row_iter = 0; row_iter < A.height ; row_iter++ )
    {
        for (int column_iter = 0; column_iter < B.width; column_iter++ )
        {
            float Cvalue = 0.0f;  // accumulator for C[row_iter][column_iter]
            for (int e = 0; e < A.width; e++)
                Cvalue += A.elements[row_iter*A.width + e] *
                          B.elements[e*B.width + column_iter];
            C.elements[row_iter*C.width + column_iter] = Cvalue;
        }
    }
}
// Reads two Width x Width matrices from A.txt / B.txt, multiplies them on
// the GPU via MatMul, times the operation, and writes the result to C.txt.
int main(int argc, char * const argv[])
{
    int Width = 16;
    Matrix A;
    Matrix B;
    Matrix C;
    A.width = Width;
    B.width = Width;
    C.width = Width;
    A.height = Width;
    B.height = Width;
    C.height = Width;
    A.elements = new float[Width*Width];
    B.elements = new float[Width*Width];
    C.elements = new float[Width*Width];
    // Fill matrices from the input files. The loop is bounds-checked: the
    // original read until EOF with no limit and could overflow the
    // Width*Width buffers on longer input files.
    std::ifstream A_input;
    std::ifstream B_input;
    A_input.open("A.txt");
    B_input.open("B.txt");
    float a, b;
    int i = 0;
    while (i < Width*Width && (A_input >> a) && (B_input >> b))
    {
        A.elements[i] = a;
        B.elements[i] = b;
        i += 1;
    }
    A_input.close();
    B_input.close();
    // TIMER BEGIN
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    sdkStartTimer(&timer);
    MatMul(A, B, C);
    sdkStopTimer(&timer);
    float time = sdkGetTimerValue(&timer);
    sdkDeleteTimer(&timer);
    std::cout << "Time of the multiplication :" << time << " ms" << std::endl;
    // TIMER END
    std::ofstream C_output;
    C_output.open("C.txt");
    for (int r=0; r<Width; r++)
    {   for (int j=0; j<Width; j++)
            C_output<<C.elements[r*Width+j]<<"\t";
        C_output<<endl;
    }
    C_output.close();
    // Release host buffers (previously leaked).
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
|
e42cd6ad03639ab640774a50f951c8022fe5f209.hip | // !!! This is a file automatically generated by hipify!!!
//in place 1d dst
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include "hip/device_functions.h"
#include "transformfunc.h"
# define M_PI 3.14159265358979323846
// #define TIME_TEST
hipfftHandle plan_dft_r2c_nocubic;
hipfftHandle plan_dft_c2r_nocubic;
#ifdef TIME_TEST
static long long time_preOp, time_postOp,time_trans_xzy,time_trans_zyx,time_cufft;
//static int conp_cnt=0;
static void timeBegin(struct timeval *tBegin){
gettimeofday(tBegin, NULL);
}
static long long timeEnd(struct timeval tBegin){
struct timeval tEnd;
gettimeofday(&tEnd, NULL);
long long usec=(tEnd.tv_sec-tBegin.tv_sec)*1000*1000+tEnd.tv_usec-tBegin.tv_usec;
return usec;
}
#endif
// In-place batched 1D real-to-complex FFT over rows of length DATA_SIZE.
// Rows are stored padded to DATA_SIZE+2 doubles so each transform's
// (DATA_SIZE/2+1) complex outputs fit in place; (batch+2)*(nLayer+2)
// rows are transformed.
// NOTE(review): this (re)creates the global plan on every call without
// destroying a previous one; the run_* wrapper destroys it afterwards via
// freeMemory_r2c() -- confirm single-use is intended.
void dofft_r2c_inplace(double *d_data , int DATA_SIZE,int batch,int nLayer){
int n[1]={DATA_SIZE};
int inembeb[1]={DATA_SIZE+2};
int onembeb[1]={(DATA_SIZE+2)/2};
hipfftPlanMany(&plan_dft_r2c_nocubic,1,n,
inembeb,1,DATA_SIZE+2,
onembeb,1,(DATA_SIZE+2)/2,
HIPFFT_D2Z, (batch+2)*(nLayer+2));
hipfftExecD2Z(plan_dft_r2c_nocubic, reinterpret_cast<double *>(d_data),
reinterpret_cast<hipfftDoubleComplex *>(d_data));
}
// In-place batched 1D complex-to-real inverse FFT, mirror of
// dofft_r2c_inplace: (DATA_SIZE+2)/2 complex inputs per row expand back
// into DATA_SIZE real samples within the same DATA_SIZE+2-double row;
// (batch+2)*(nLayer+2) rows are transformed.
// NOTE(review): plan is recreated on every call; destroyed later via
// freeMemory_c2r().
void dofft_c2r_inplace(double *d_data , int DATA_SIZE,int batch,int nLayer){
int n[1]={DATA_SIZE};
int inembeb[1]={(DATA_SIZE+2)/2};
int onembeb[1]={(DATA_SIZE+2)};
hipfftPlanMany(&plan_dft_c2r_nocubic,1,n,
inembeb,1,(DATA_SIZE+2)/2,
onembeb,1,(DATA_SIZE+2),
HIPFFT_Z2D, (batch+2)*(nLayer+2));
hipfftExecZ2D(plan_dft_c2r_nocubic, reinterpret_cast<hipfftDoubleComplex *>(d_data),
reinterpret_cast<double *>(d_data));
}
void run_dft_r2c_inplace(double *d_data , int DATA_SIZE,hipfftHandle &plan_dft_r2c_cubic){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
hipfftExecD2Z(plan_dft_r2c_cubic, reinterpret_cast<double *>(d_data),
reinterpret_cast<hipfftDoubleComplex *>(d_data));
#ifdef TIME_TEST
hipDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
hipDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
}
void run_dft_c2r_inplace(double *d_data , int DATA_SIZE,hipfftHandle &plan_dft_c2r_cubic){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
hipfftExecZ2D(plan_dft_c2r_cubic, reinterpret_cast<hipfftDoubleComplex *>(d_data),
reinterpret_cast<double *>(d_data));
#ifdef TIME_TEST
hipDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
hipDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
}
//DATA_SIZE+2
//DATA_SIZEfft+2batchnlayer+2
void run_dft_r2c_inplace_nocubic(double *d_data , int DATA_SIZE,int batch,int nLayer){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
dofft_r2c_inplace(d_data,DATA_SIZE,batch,nLayer);
#ifdef TIME_TEST
hipDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
hipDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
// hipMemcpy(in, d_data, arraySize,hipMemcpyDeviceToHost);
freeMemory_r2c();
}
void run_dft_c2r_inplace_nocubic(double *d_data , int DATA_SIZE,int batch,int nLayer){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
dofft_c2r_inplace(d_data,DATA_SIZE,batch,nLayer);
#ifdef TIME_TEST
hipDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
hipDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
// hipMemcpy(in, d_data, arraySize,hipMemcpyDeviceToHost);
freeMemory_c2r();
}
void freeMemory_r2c(){
hipfftDestroy(plan_dft_r2c_nocubic);
}
void freeMemory_c2r(){
hipfftDestroy(plan_dft_c2r_nocubic);
} | e42cd6ad03639ab640774a50f951c8022fe5f209.cu | //in place 1d dst
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include "device_functions.h"
#include "transformfunc.h"
# define M_PI 3.14159265358979323846
// #define TIME_TEST
cufftHandle plan_dft_r2c_nocubic;
cufftHandle plan_dft_c2r_nocubic;
#ifdef TIME_TEST
static long long time_preOp, time_postOp,time_trans_xzy,time_trans_zyx,time_cufft;
//static int conp_cnt=0;
static void timeBegin(struct timeval *tBegin){
gettimeofday(tBegin, NULL);
}
static long long timeEnd(struct timeval tBegin){
struct timeval tEnd;
gettimeofday(&tEnd, NULL);
long long usec=(tEnd.tv_sec-tBegin.tv_sec)*1000*1000+tEnd.tv_usec-tBegin.tv_usec;
return usec;
}
#endif
void dofft_r2c_inplace(double *d_data , int DATA_SIZE,int batch,int nLayer){
int n[1]={DATA_SIZE};
int inembeb[1]={DATA_SIZE+2};
int onembeb[1]={(DATA_SIZE+2)/2};
cufftPlanMany(&plan_dft_r2c_nocubic,1,n,
inembeb,1,DATA_SIZE+2,
onembeb,1,(DATA_SIZE+2)/2,
CUFFT_D2Z, (batch+2)*(nLayer+2));
cufftExecD2Z(plan_dft_r2c_nocubic, reinterpret_cast<double *>(d_data),
reinterpret_cast<cufftDoubleComplex *>(d_data));
}
void dofft_c2r_inplace(double *d_data , int DATA_SIZE,int batch,int nLayer){
int n[1]={DATA_SIZE};
int inembeb[1]={(DATA_SIZE+2)/2};
int onembeb[1]={(DATA_SIZE+2)};
cufftPlanMany(&plan_dft_c2r_nocubic,1,n,
inembeb,1,(DATA_SIZE+2)/2,
onembeb,1,(DATA_SIZE+2),
CUFFT_Z2D, (batch+2)*(nLayer+2));
cufftExecZ2D(plan_dft_c2r_nocubic, reinterpret_cast<cufftDoubleComplex *>(d_data),
reinterpret_cast<double *>(d_data));
}
void run_dft_r2c_inplace(double *d_data , int DATA_SIZE,cufftHandle &plan_dft_r2c_cubic){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
cufftExecD2Z(plan_dft_r2c_cubic, reinterpret_cast<double *>(d_data),
reinterpret_cast<cufftDoubleComplex *>(d_data));
#ifdef TIME_TEST
cudaDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
cudaDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
}
void run_dft_c2r_inplace(double *d_data , int DATA_SIZE,cufftHandle &plan_dft_c2r_cubic){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
cufftExecZ2D(plan_dft_c2r_cubic, reinterpret_cast<cufftDoubleComplex *>(d_data),
reinterpret_cast<double *>(d_data));
#ifdef TIME_TEST
cudaDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
cudaDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
}
//这里先默认传入的矩阵每行是DATA_SIZE+2的,后续在考虑是否要进行填充
//DATA_SIZE是需要做fft的数组的长度,传入的长度一般是+2的,batch是这一个平面上有多少个向量,nlayer有多少层,一般也都是要+2的
void run_dft_r2c_inplace_nocubic(double *d_data , int DATA_SIZE,int batch,int nLayer){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
dofft_r2c_inplace(d_data,DATA_SIZE,batch,nLayer);
#ifdef TIME_TEST
cudaDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
cudaDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
// cudaMemcpy(in, d_data, arraySize,cudaMemcpyDeviceToHost);
freeMemory_r2c();
}
void run_dft_c2r_inplace_nocubic(double *d_data , int DATA_SIZE,int batch,int nLayer){
#ifdef TIME_TEST
time_postOp=0;
time_preOp=0;
time_trans_xzy=0;
time_trans_zyx=0;
time_cufft=0;
#endif
#ifdef TIME_TEST
struct timeval tBegin2;
timeBegin(&tBegin2);
#endif
dofft_c2r_inplace(d_data,DATA_SIZE,batch,nLayer);
#ifdef TIME_TEST
cudaDeviceSynchronize();
time_cufft += timeEnd(tBegin2);
#endif
#ifdef TIME_TEST
cudaDeviceSynchronize();
printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
// cudaMemcpy(in, d_data, arraySize,cudaMemcpyDeviceToHost);
freeMemory_c2r();
}
void freeMemory_r2c(){
cufftDestroy(plan_dft_r2c_nocubic);
}
void freeMemory_c2r(){
cufftDestroy(plan_dft_c2r_nocubic);
} |
8acaf15393f68d3fa1142449f6d37abfcef02a39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
// Abort the process if the most recent runtime call recorded an error,
// printing the caller-supplied context string plus the error text.
void checkCUDAError(const char *msg)
{
    hipError_t status = hipGetLastError();
    if (status == hipSuccess)
        return;  // nothing pending, fast path
    fprintf(stderr, "ERROR[CUDA]:%s{%s}.\n", msg, hipGetErrorString(status));
    exit(EXIT_FAILURE);
}
//Copy RGB data from shared memory region..
void copy_shmrgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated throuth cuMemAlloc()
int rgbleft,int rgbtop,
int rgbwidth,int rgbheight,
int width,int height)
{
int offset=(rgbtop*width)<<2;
int offset_left=rgbleft<<2;
int line_siz=width<<2;
int h=0;
for(h=rgbtop;h<rgbheight+rgbtop;h++){
hipMemcpy(devmem+offset+offset_left,rgbs+offset+offset_left,rgbwidth<<2,hipMemcpyHostToDevice);
offset+=line_siz;
}
}
//for TEST ONLY,
void copy_caprgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated throuth cuMemAlloc()
int patch_left,int patch_top,
int patch_width,int patch_height,
int width,int height)
{
int rgb_offset=0;
int offset=(patch_top*width)<<2;
int offset_left=patch_left<<2;
int line_siz=width<<2;
int h;
for(h=0;h<patch_height;h++){
hipMemcpy(devmem+offset+offset_left,rgbs+rgb_offset,patch_width<<2,hipMemcpyHostToDevice);
offset+=line_siz;
rgb_offset+=(patch_width<<2);
}
}
// BGRA -> NV12 conversion, one thread per image line (launched <<<1,height>>>).
// devrgb holds 4-byte BGRA pixels with a row pitch of rgbstride pixels;
// oyuv is NV12 with luma stride `ostride` and `ovstride` luma lines before
// the interleaved chroma plane. Only the (left, top, width) patch of the
// current line is converted. Coefficients match full-range BT.601.
__global__ void
convert_line_rgb_to_nv12(unsigned char*devrgb,int rgbstride,/*device mem*/
unsigned char*oyuv,int ostride,int ovstride,/*device mem*/
int width,int left,int top)
{
int curline=threadIdx.x;
unsigned char*rgb_p=devrgb+(curline+top)*rgbstride*4;
unsigned char*luma_p=oyuv+(curline+top)*ostride;
// Two luma lines share one chroma line, hence the >>1 on the row index.
unsigned char*chroma_p=oyuv+(ovstride*ostride)+((curline+top)>>1)*ostride;
int r,g,b;
int y,u,v;
int j;
if(curline%2==0){
// Even line: luma for every pixel, Cb (U) at even columns.
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
}
}else{
// Odd line: luma for every pixel, Cr (V) at odd chroma bytes.
// NOTE(review): U is sampled from the even line and V from the odd line
// of each 2x2 block -- verify this asymmetric chroma siting is intended.
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
//FIXME
// YV12 -> NV12 conversion, one thread per luma line (launched <<<1,height>>>).
// istride/ostride are luma strides in bytes; YV12 stores V then U as
// separate half-stride planes, NV12 interleaves U,V in one plane.
// Fix (was marked FIXME): the original wrote every chroma sample of a line
// to the same two output bytes (no +j on the destination) and addressed
// the half-resolution chroma planes with full-line offsets.
__global__ void
convert_line_yv12_to_nv12(unsigned char*pdev,int istride,
        unsigned char*oyuv,int ostride,
        int width,int height)
{
    int curline = threadIdx.x;
    int yv12_luma_siz = istride*height;
    int yv12_chroma_siz = yv12_luma_siz>>2;
    // Copy the luma line straight across.
    const unsigned char* src_y = pdev + curline*istride;
    unsigned char* dst_y = oyuv + curline*ostride;
    int j;
    for (j = 0; j < width; j++) {
        dst_y[j] = src_y[j];
    }
    // One chroma line per two luma lines: even-numbered threads handle it.
    if ((curline & 1) == 0) {
        int cline = curline >> 1;
        const unsigned char* src_v = pdev + yv12_luma_siz + cline*(istride>>1);
        const unsigned char* src_u = pdev + yv12_luma_siz + yv12_chroma_siz + cline*(istride>>1);
        unsigned char* dst_c = oyuv + height*ostride + cline*ostride;
        for (j = 0; j < (width>>1); j++) {
            dst_c[(j<<1)]   = src_u[j];  // Cb
            dst_c[(j<<1)+1] = src_v[j];  // Cr
        }
    }
}
extern "C" void load_rgb_bgrx_cuda(
unsigned char* oyuv,/*device*/
unsigned char* devrgb,/*device */
unsigned char*rgb, /*input data host*/
int left,int top,int width,int height,//rgb patch rect
int rgbwidth,int rgbheight,//rgb data size
int ostride //yuv data height<pixel>
)
{
//Copy date from shared Memory to Device;
#if 0
// Read rects from shm region.
copy_shmrgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated throuth cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#else
//for TEST :read rects from capture file.
copy_caprgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated throuth cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#endif
int ovstride=rgbheight;
// fprintf(stderr,"rgbwidth:%d ostride:%d ovstride:%d, width:%d, left:%d, top:%d\n",rgbwidth,ostride,ovstride,width,left,top);
hipLaunchKernelGGL(( convert_line_rgb_to_nv12), dim3(1),dim3(height), 0, 0, devrgb,rgbwidth,
oyuv,ostride,ovstride,
width,left,top);
hipDeviceSynchronize();
checkCUDAError("Convert BGRA to NV12\n");
}
// Uploads a YV12 host frame (iyuv) into the device buffer devyv12, restriding
// from istride to ostride, then converts it to NV12 into oyuv (device).
// Fix: devyv12 is documented as device memory, so the original plain host
// memcpy into it was invalid -- each row is now copied with hipMemcpy.
extern "C" void load_yuv_yv12_cuda(
                            unsigned char* oyuv,/*device*/
                            unsigned char* devyv12,/*device */
                            unsigned char*iyuv, /*input data host*/
                            int width,int height,/*real size*/
                            int istride,int ostride
                            )
{
    // Plane offsets for YV12: luma, then V, then U (chroma planes are 1/4 size).
    int in_luma_siz=istride*height;
    int out_luma_siz=ostride*height;
    int in_chroma_siz=in_luma_siz>>2;
    int out_chroma_siz=out_luma_siz>>2;
    unsigned char*in_luma_p=iyuv;
    unsigned char*out_luma_p=devyv12;
    unsigned char*in_v_p=iyuv+in_luma_siz;
    unsigned char*out_v_p=devyv12+out_luma_siz;
    unsigned char*in_u_p=iyuv+in_luma_siz+in_chroma_siz;
    unsigned char*out_u_p=devyv12+out_luma_siz+out_chroma_siz;
    int j;
    for(j=0;j<height;j++){
        //y
        hipMemcpy(out_luma_p+j*ostride,in_luma_p+j*istride,width,hipMemcpyHostToDevice);
    }
    for(j=0;j<(height>>1);j++){
        //v
        hipMemcpy(out_v_p+((j*ostride)>>1),in_v_p+((j*istride)>>1),width>>1,hipMemcpyHostToDevice);
        //u
        hipMemcpy(out_u_p+((j*ostride)>>1),in_u_p+((j*istride)>>1),width>>1,hipMemcpyHostToDevice);
    }
    // One thread per luma line converts YV12 -> NV12 in device memory.
    hipLaunchKernelGGL(( convert_line_yv12_to_nv12), dim3(1),dim3(height), 0, 0, devyv12,istride,
            oyuv,ostride,
            width,height);
    hipDeviceSynchronize();
    checkCUDAError("Convert YV12 to NV12\n");
}
| 8acaf15393f68d3fa1142449f6d37abfcef02a39.cu | #include<stdio.h>
#include<stdlib.h>
// Abort the process if the most recent runtime call recorded an error,
// printing the caller-supplied context string plus the error text.
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;  // nothing pending, fast path
    fprintf(stderr, "ERROR[CUDA]:%s{%s}.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
//Copy RGB data from shared memory region..
void copy_shmrgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated throuth cuMemAlloc()
int rgbleft,int rgbtop,
int rgbwidth,int rgbheight,
int width,int height)
{
int offset=(rgbtop*width)<<2;
int offset_left=rgbleft<<2;
int line_siz=width<<2;
int h=0;
for(h=rgbtop;h<rgbheight+rgbtop;h++){
cudaMemcpy(devmem+offset+offset_left,rgbs+offset+offset_left,rgbwidth<<2,cudaMemcpyHostToDevice);
offset+=line_siz;
}
}
//for TEST ONLY,
void copy_caprgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated throuth cuMemAlloc()
int patch_left,int patch_top,
int patch_width,int patch_height,
int width,int height)
{
int rgb_offset=0;
int offset=(patch_top*width)<<2;
int offset_left=patch_left<<2;
int line_siz=width<<2;
int h;
for(h=0;h<patch_height;h++){
cudaMemcpy(devmem+offset+offset_left,rgbs+rgb_offset,patch_width<<2,cudaMemcpyHostToDevice);
offset+=line_siz;
rgb_offset+=(patch_width<<2);
}
}
__global__ void
convert_line_rgb_to_nv12(unsigned char*devrgb,int rgbstride,/*device mem*/
unsigned char*oyuv,int ostride,int ovstride,/*device mem*/
int width,int left,int top)
{
int curline=threadIdx.x;
unsigned char*rgb_p=devrgb+(curline+top)*rgbstride*4;
unsigned char*luma_p=oyuv+(curline+top)*ostride;
unsigned char*chroma_p=oyuv+(ovstride*ostride)+((curline+top)>>1)*ostride;
int r,g,b;
int y,u,v;
int j;
if(curline%2==0){
//even line
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
}
}else{
//odd line
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
//FIXME
// YV12 -> NV12 conversion, one thread per luma line (launched <<<1,height>>>).
// istride/ostride are luma strides in bytes; YV12 stores V then U as
// separate half-stride planes, NV12 interleaves U,V in one plane.
// Fix (was marked FIXME): the original wrote every chroma sample of a line
// to the same two output bytes (no +j on the destination) and addressed
// the half-resolution chroma planes with full-line offsets.
__global__ void
convert_line_yv12_to_nv12(unsigned char*pdev,int istride,
        unsigned char*oyuv,int ostride,
        int width,int height)
{
    int curline = threadIdx.x;
    int yv12_luma_siz = istride*height;
    int yv12_chroma_siz = yv12_luma_siz>>2;
    // Copy the luma line straight across.
    const unsigned char* src_y = pdev + curline*istride;
    unsigned char* dst_y = oyuv + curline*ostride;
    int j;
    for (j = 0; j < width; j++) {
        dst_y[j] = src_y[j];
    }
    // One chroma line per two luma lines: even-numbered threads handle it.
    if ((curline & 1) == 0) {
        int cline = curline >> 1;
        const unsigned char* src_v = pdev + yv12_luma_siz + cline*(istride>>1);
        const unsigned char* src_u = pdev + yv12_luma_siz + yv12_chroma_siz + cline*(istride>>1);
        unsigned char* dst_c = oyuv + height*ostride + cline*ostride;
        for (j = 0; j < (width>>1); j++) {
            dst_c[(j<<1)]   = src_u[j];  // Cb
            dst_c[(j<<1)+1] = src_v[j];  // Cr
        }
    }
}
// Uploads a BGRX patch (left, top, width, height) of an rgbwidth x rgbheight
// host frame into devrgb (device) and converts it in place to NV12 in oyuv
// (device). ostride is the NV12 luma stride.
// Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
extern "C" void load_rgb_bgrx_cuda(
                            unsigned char* oyuv,/*device*/
                            unsigned char* devrgb,/*device */
                            unsigned char*rgb, /*input data host*/
                            int left,int top,int width,int height,//rgb patch rect
                            int rgbwidth,int rgbheight,//rgb data size
                            int ostride //yuv data height<pixel>
                            )
{
    //Copy date from shared Memory to Device;
#if 0
    // Read rects from shm region.
    copy_shmrgb_to_device((unsigned char*)rgb,
            (unsigned char*)devrgb,//already allocated throuth cuMemAlloc()
            left,top,
            width,height,
            rgbwidth,rgbheight);
#else
    //for TEST :read rects from capture file.
    copy_caprgb_to_device((unsigned char*)rgb,
            (unsigned char*)devrgb,//already allocated throuth cuMemAlloc()
            left,top,
            width,height,
            rgbwidth,rgbheight);
#endif
    int ovstride=rgbheight;
//    fprintf(stderr,"rgbwidth:%d ostride:%d ovstride:%d, width:%d, left:%d, top:%d\n",rgbwidth,ostride,ovstride,width,left,top);
    // One thread per patch line performs the BGRA -> NV12 conversion.
    convert_line_rgb_to_nv12<<<1,height>>>(devrgb,rgbwidth,
            oyuv,ostride,ovstride,
            width,left,top);
    cudaDeviceSynchronize();
    checkCUDAError("Convert BGRA to NV12\n");
}
// Uploads a YV12 host frame (iyuv) into the device buffer devyv12, restriding
// from istride to ostride, then converts it to NV12 into oyuv (device).
// Fixes: devyv12 is documented as device memory, so the original plain host
// memcpy into it was invalid -- each row is now copied with cudaMemcpy; and
// deprecated cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
extern "C" void load_yuv_yv12_cuda(
                            unsigned char* oyuv,/*device*/
                            unsigned char* devyv12,/*device */
                            unsigned char*iyuv, /*input data host*/
                            int width,int height,/*real size*/
                            int istride,int ostride
                            )
{
    // Plane offsets for YV12: luma, then V, then U (chroma planes are 1/4 size).
    int in_luma_siz=istride*height;
    int out_luma_siz=ostride*height;
    int in_chroma_siz=in_luma_siz>>2;
    int out_chroma_siz=out_luma_siz>>2;
    unsigned char*in_luma_p=iyuv;
    unsigned char*out_luma_p=devyv12;
    unsigned char*in_v_p=iyuv+in_luma_siz;
    unsigned char*out_v_p=devyv12+out_luma_siz;
    unsigned char*in_u_p=iyuv+in_luma_siz+in_chroma_siz;
    unsigned char*out_u_p=devyv12+out_luma_siz+out_chroma_siz;
    int j;
    for(j=0;j<height;j++){
        //y
        cudaMemcpy(out_luma_p+j*ostride,in_luma_p+j*istride,width,cudaMemcpyHostToDevice);
    }
    for(j=0;j<(height>>1);j++){
        //v
        cudaMemcpy(out_v_p+((j*ostride)>>1),in_v_p+((j*istride)>>1),width>>1,cudaMemcpyHostToDevice);
        //u
        cudaMemcpy(out_u_p+((j*ostride)>>1),in_u_p+((j*istride)>>1),width>>1,cudaMemcpyHostToDevice);
    }
    // One thread per luma line converts YV12 -> NV12 in device memory.
    convert_line_yv12_to_nv12<<<1,height>>>(devyv12,istride,
            oyuv,ostride,
            width,height);
    cudaDeviceSynchronize();
    checkCUDAError("Convert YV12 to NV12\n");
}
|
e6ea9d0d2ac76dfd4e1f3bc54ace134770d8e596.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype pad_ratio, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
// padding
Dtype pad_w, pad_h;
pad_w = (bottom_rois[3]-bottom_rois[1]+1)*pad_ratio;
pad_h = (bottom_rois[4]-bottom_rois[2]+1)*pad_ratio;
int roi_start_w = round((bottom_rois[1]-pad_w) * spatial_scale);
int roi_start_h = round((bottom_rois[2]-pad_h) * spatial_scale);
int roi_end_w = round((bottom_rois[3]+pad_w) * spatial_scale);
int roi_end_h = round((bottom_rois[4]+pad_h) * spatial_scale);
// clipping
/*roi_start_w = max(roi_start_w,0); roi_start_h = max(roi_start_h,0);
int img_width = round(width / spatial_scale);
int img_height = round(height / spatial_scale);
roi_end_w = min(img_width-1,roi_end_w);
roi_end_h = min(img_height-1,roi_end_h);*/
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
// GPU forward pass for ROIPoolingLayer: launches ROIPoolForward with one
// thread per pooled output element. bottom[0] is the feature map,
// bottom[1] holds the ROIs; max_idx_ caches argmax indices for backward.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, pad_ratio_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass of ROI max pooling: one thread per element of the *bottom*
// (input feature map) gradient. Each thread scans all ROIs that could have
// pooled its element and accumulates the top gradients whose saved argmax
// index matches this element.
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
    const int* argmax_data, const int num_rois, const Dtype spatial_scale,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const Dtype pad_ratio,
    Dtype* bottom_diff, const Dtype* bottom_rois) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    // Accumulate gradient over all ROIs that pooled this element
    for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
      // Each ROI is 5 values: (batch_index, x1, y1, x2, y2).
      const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
      int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
      if (n != roi_batch_ind) {
        continue;
      }
      // padding: grow the ROI on each side by pad_ratio times its extent
      Dtype pad_w, pad_h;
      pad_w = (offset_bottom_rois[3]-offset_bottom_rois[1]+1)*pad_ratio;
      pad_h = (offset_bottom_rois[4]-offset_bottom_rois[2]+1)*pad_ratio;
      // Map padded ROI corners into feature-map coordinates.
      int roi_start_w = round((offset_bottom_rois[1]-pad_w) * spatial_scale);
      int roi_start_h = round((offset_bottom_rois[2]-pad_h) * spatial_scale);
      int roi_end_w = round((offset_bottom_rois[3]+pad_w) * spatial_scale);
      int roi_end_h = round((offset_bottom_rois[4]+pad_h) * spatial_scale);
      // clipping
      // NOTE(review): the forward kernel has this clipping commented out, so
      // for ROIs extending past the image border the roi_start/end values can
      // differ between forward and backward -- confirm this asymmetry is
      // intentional.
      roi_start_w = max(roi_start_w,0); roi_start_h = max(roi_start_h,0);
      int img_width = round(width / spatial_scale);
      int img_height = round(height / spatial_scale);
      roi_end_w = min(img_width-1,roi_end_w);
      roi_end_h = min(img_height-1,roi_end_h);
      // Skip if ROI doesn't include (h, w)
      const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                           h >= roi_start_h && h <= roi_end_h);
      if (!in_roi) {
        continue;
      }
      int offset = (roi_n * channels + c) * pooled_height * pooled_width;
      const Dtype* offset_top_diff = top_diff + offset;
      const int* offset_argmax_data = argmax_data + offset;
      // Compute feasible set of pooled units that could have pooled
      // this bottom unit
      // Force malformed ROIs to be 1x1
      int roi_width = max(roi_end_w - roi_start_w + 1, 1);
      int roi_height = max(roi_end_h - roi_start_h + 1, 1);
      Dtype bin_size_h = static_cast<Dtype>(roi_height)
                         / static_cast<Dtype>(pooled_height);
      Dtype bin_size_w = static_cast<Dtype>(roi_width)
                         / static_cast<Dtype>(pooled_width);
      int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
      int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
      int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
      int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
      // Clamp the candidate pooled-bin window to valid bin indices.
      phstart = min(max(phstart, 0), pooled_height);
      phend = min(max(phend, 0), pooled_height);
      pwstart = min(max(pwstart, 0), pooled_width);
      pwend = min(max(pwend, 0), pooled_width);
      // Only bins whose recorded argmax is exactly this element contribute.
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
            gradient += offset_top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backward pass of ROI max pooling on the GPU (HIP build).
// Zeroes the bottom gradient buffer, then launches one thread per bottom
// element; each thread sums the top gradients whose argmax points at it.
// Only bottom[0] (the feature map) receives gradients; ROI coordinates in
// bottom[1] are treated as constants.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  // Gradients are accumulated per bottom element, so start from zero.
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  const int* argmax_data = max_idx_.gpu_data();  // written by Forward_gpu
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
      height_, width_, pooled_height_, pooled_width_, pad_ratio_, bottom_diff, bottom_rois);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
| e6ea9d0d2ac76dfd4e1f3bc54ace134770d8e596.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
// Forward pass of ROI max pooling: one thread per pooled output element
// (n, c, ph, pw). Each thread computes its bin's extent inside the (padded,
// scaled) ROI, max-pools the feature map over that bin, and records both the
// max value and the flat argmax index (for the backward pass).
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
    const Dtype spatial_scale, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const Dtype pad_ratio, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Each ROI is 5 values: (batch_index, x1, y1, x2, y2).
    bottom_rois += n * 5;
    int roi_batch_ind = bottom_rois[0];
    // padding: grow the ROI on each side by pad_ratio times its extent
    Dtype pad_w, pad_h;
    pad_w = (bottom_rois[3]-bottom_rois[1]+1)*pad_ratio;
    pad_h = (bottom_rois[4]-bottom_rois[2]+1)*pad_ratio;
    // Map padded ROI corners into feature-map coordinates.
    int roi_start_w = round((bottom_rois[1]-pad_w) * spatial_scale);
    int roi_start_h = round((bottom_rois[2]-pad_h) * spatial_scale);
    int roi_end_w = round((bottom_rois[3]+pad_w) * spatial_scale);
    int roi_end_h = round((bottom_rois[4]+pad_h) * spatial_scale);
    // clipping
    // NOTE(review): clipping is disabled here but enabled in ROIPoolBackward,
    // so the two passes can disagree on ROI extents near the image border --
    // confirm this asymmetry is intentional.
    /*roi_start_w = max(roi_start_w,0); roi_start_h = max(roi_start_h,0);
    int img_width = round(width / spatial_scale);
    int img_height = round(height / spatial_scale);
    roi_end_w = min(img_width-1,roi_end_w);
    roi_end_h = min(img_height-1,roi_end_h);*/
    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    Dtype bin_size_h = static_cast<Dtype>(roi_height)
                       / static_cast<Dtype>(pooled_height);
    Dtype bin_size_w = static_cast<Dtype>(roi_width)
                       / static_cast<Dtype>(pooled_width);
    // Bin (ph, pw) covers [floor(ph*bin_h), ceil((ph+1)*bin_h)) x
    //                     [floor(pw*bin_w), ceil((pw+1)*bin_w)) within the ROI.
    int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
                                        * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
                                        * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
                                     * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
                                     * bin_size_w));
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Define an empty pooling region to be zero
    Dtype maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    // Advance to the (roi_batch_ind, c) plane of the input feature map.
    bottom_data += (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h * width + w;
        if (bottom_data[bottom_index] > maxval) {
          maxval = bottom_data[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    top_data[index] = maxval;
    argmax_data[index] = maxidx;
  }
}
// Forward pass of ROI max pooling on the GPU.
// One thread is launched per pooled output element; the kernel also stores
// each bin's argmax index into max_idx_ so the backward pass can route
// gradients. bottom[0] is the feature map, bottom[1] holds the ROIs.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int top_count = top[0]->count();
  const Dtype* feature_map = bottom[0]->gpu_data();
  const Dtype* rois = bottom[1]->gpu_data();
  Dtype* pooled = top[0]->mutable_gpu_data();
  int* argmax = max_idx_.mutable_gpu_data();  // consumed by Backward_gpu
  // NOLINT_NEXT_LINE(whitespace/operators)
  ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS>>>(
      top_count, feature_map, spatial_scale_, channels_, height_, width_,
      pooled_height_, pooled_width_, pad_ratio_, rois, pooled, argmax);
  CUDA_POST_KERNEL_CHECK;
}
// Backward pass of ROI max pooling: one thread per element of the *bottom*
// (input feature map) gradient. Each thread scans all ROIs that could have
// pooled its element and accumulates the top gradients whose saved argmax
// index matches this element.
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
    const int* argmax_data, const int num_rois, const Dtype spatial_scale,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const Dtype pad_ratio,
    Dtype* bottom_diff, const Dtype* bottom_rois) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    // Accumulate gradient over all ROIs that pooled this element
    for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
      // Each ROI is 5 values: (batch_index, x1, y1, x2, y2).
      const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
      int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
      if (n != roi_batch_ind) {
        continue;
      }
      // padding: grow the ROI on each side by pad_ratio times its extent
      Dtype pad_w, pad_h;
      pad_w = (offset_bottom_rois[3]-offset_bottom_rois[1]+1)*pad_ratio;
      pad_h = (offset_bottom_rois[4]-offset_bottom_rois[2]+1)*pad_ratio;
      // Map padded ROI corners into feature-map coordinates.
      int roi_start_w = round((offset_bottom_rois[1]-pad_w) * spatial_scale);
      int roi_start_h = round((offset_bottom_rois[2]-pad_h) * spatial_scale);
      int roi_end_w = round((offset_bottom_rois[3]+pad_w) * spatial_scale);
      int roi_end_h = round((offset_bottom_rois[4]+pad_h) * spatial_scale);
      // clipping
      // NOTE(review): the forward kernel has this clipping commented out, so
      // for ROIs extending past the image border the roi_start/end values can
      // differ between forward and backward -- confirm this asymmetry is
      // intentional.
      roi_start_w = max(roi_start_w,0); roi_start_h = max(roi_start_h,0);
      int img_width = round(width / spatial_scale);
      int img_height = round(height / spatial_scale);
      roi_end_w = min(img_width-1,roi_end_w);
      roi_end_h = min(img_height-1,roi_end_h);
      // Skip if ROI doesn't include (h, w)
      const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                           h >= roi_start_h && h <= roi_end_h);
      if (!in_roi) {
        continue;
      }
      int offset = (roi_n * channels + c) * pooled_height * pooled_width;
      const Dtype* offset_top_diff = top_diff + offset;
      const int* offset_argmax_data = argmax_data + offset;
      // Compute feasible set of pooled units that could have pooled
      // this bottom unit
      // Force malformed ROIs to be 1x1
      int roi_width = max(roi_end_w - roi_start_w + 1, 1);
      int roi_height = max(roi_end_h - roi_start_h + 1, 1);
      Dtype bin_size_h = static_cast<Dtype>(roi_height)
                         / static_cast<Dtype>(pooled_height);
      Dtype bin_size_w = static_cast<Dtype>(roi_width)
                         / static_cast<Dtype>(pooled_width);
      int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
      int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
      int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
      int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
      // Clamp the candidate pooled-bin window to valid bin indices.
      phstart = min(max(phstart, 0), pooled_height);
      phend = min(max(phend, 0), pooled_height);
      pwstart = min(max(pwstart, 0), pooled_width);
      pwend = min(max(pwend, 0), pooled_width);
      // Only bins whose recorded argmax is exactly this element contribute.
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
            gradient += offset_top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backward pass of ROI max pooling on the GPU.
// Clears the bottom gradient buffer, then launches one thread per bottom
// element; each thread sums the top gradients whose saved argmax points at
// it. Only bottom[0] receives gradients; the ROI blob is read-only.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Nothing to do unless the feature-map input requires gradients.
  if (!propagate_down[0]) return;
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  // Gradients are accumulated per element, so start from zero.
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  const int* argmax_data = max_idx_.gpu_data();  // written by Forward_gpu
  // NOLINT_NEXT_LINE(whitespace/operators)
  ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
      height_, width_, pooled_height_, pooled_width_, pad_ratio_, bottom_diff, bottom_rois);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
fd5ab7764ec6fec60e36e024888e2125ae77bd27.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else // Fermi
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace improvedstaggered {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
#undef GPU_NDEG_TWISTED_MASS_DIRAC
#undef GPU_CLOVER_DIRAC
#undef GPU_DOMAIN_WALL_DIRAC
#define DD_IMPROVED 1
#include <staggered_dslash_def.h> // staggered Dslash kernels
#undef DD_IMPROVED
#include <dslash_quda.cuh>
} // end namespace improvedstaggered
// declare the dslash events
#include <dslash_events.cuh>
using namespace improvedstaggered;
#ifdef GPU_STAGGERED_DIRAC
// GPU dslash operator for the improved (fat-link + long-link) staggered
// action, HIP build. Binds the fat and long gauge fields to textures,
// configures dslashParam, and launches the IMPROVED_STAGGERED_DSLASH kernel
// via the autotuner. Template parameters select the storage types of the
// spinor (sFloat), fat gauge (fatGFloat), long gauge (longGFloat) and the
// gauge phase (phaseFloat). When x != nullptr an axpy term with coefficient
// a is fused into the kernel.
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {

private:
  const GaugeField &fatGauge;   // one-hop (fat) links
  const GaugeField &longGauge;  // three-hop (long) links
  const unsigned int nSrc;      // number of right-hand sides (5th spinor dim)

protected:
  bool tuneAuxDim() const { return true; } // Do tune the aux dimensions.

  // Shared-memory requirement per thread for the kernel launch; only
  // non-zero when PARALLEL_DIR is enabled (6 words per thread, sized by the
  // spinor precision).
  unsigned int sharedBytesPerThread() const
  {
#ifdef PARALLEL_DIR
    int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
    return 6 * reg_size;
#else
    return 0;
#endif
  }

public:
  // Binds gauge textures, validates precisions, and fills the kernel
  // parameter struct (axpy coefficient, fat-link max, long-link scale).
  StaggeredDslashCuda(cudaColorSpinorField *out, const GaugeField &fatGauge, const GaugeField &longGauge,
                      const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double a,
                      const int parity, const int dagger, const int *commOverride)
    : DslashCuda(out, in, x, longGauge, parity, dagger, commOverride),
      fatGauge(fatGauge), longGauge(longGauge), nSrc(in->X(4))
  {
#ifdef MULTI_GPU
    // The 3-hop stencil needs at least 6 sites in any partitioned direction.
    for(int i=0;i < 4; i++){
      if(comm_dim_partitioned(i) && (fatGauge.X()[i] < 6)){
        errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in improved staggered dslash\n");
      }
    }
#endif
    bindFatGaugeTex(static_cast<const cudaGaugeField&>(fatGauge), parity, dslashParam);
    bindLongGaugeTex(static_cast<const cudaGaugeField&>(longGauge), parity, dslashParam);

    // Spinor and both gauge fields must share a precision.
    if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
      errorQuda("Mixing gauge and spinor precision not supported"
                "(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d",
                in->Precision(), fatGauge.Precision(), longGauge.Precision());
    }

    dslashParam.a = a;                              // axpy coefficient (double)
    dslashParam.a_f = a;                            // single-precision copy
    dslashParam.fat_link_max = fatGauge.LinkMax();
    dslashParam.coeff = 1.0/longGauge.Scale();      // long-link normalization
    dslashParam.coeff_f = (float)dslashParam.coeff;
  }

  // Unbind every texture bound by the constructor / apply().
  virtual ~StaggeredDslashCuda() {
    unbindSpinorTex<sFloat>(in, out, x);
    unbindFatGaugeTex(static_cast<const cudaGaugeField&>(fatGauge));
    unbindLongGaugeTex(static_cast<const cudaGaugeField&>(longGauge));
  }

  // Tune (or look up) the launch configuration and run the dslash kernel on
  // the given stream.
  void apply(const hipStream_t &stream)
  {
#ifndef USE_TEXTURE_OBJECTS
    if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x);
#endif // USE_TEXTURE_OBJECTS
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    setParam();
    dslashParam.gauge_stride = fatGauge.Stride();
    dslashParam.long_gauge_stride = longGauge.Stride();
    dslashParam.swizzle = tp.aux.x;  // block-swizzle factor chosen by tuning
    IMPROVED_STAGGERED_DSLASH(tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
  }

  // Tuning step for the thread-block shape: grow block.y (right-hand sides
  // per block) first; once exhausted, fall back to the base-class block.x
  // advance with block.y reset to 1.
  bool advanceBlockDim(TuneParam &param) const
  {
    const unsigned int max_shared = deviceProp.sharedMemPerBlock;
    // first try to advance block.y (number of right-hand sides per block)
    if (param.block.y < nSrc && param.block.y < (unsigned int)deviceProp.maxThreadsDim[1] &&
        sharedBytesPerThread()*param.block.x*param.block.y < max_shared &&
        (param.block.x*(param.block.y+1u)) <= (unsigned int)deviceProp.maxThreadsPerBlock) {
      param.block.y++;
      param.grid.y = (nSrc + param.block.y - 1) / param.block.y;
      return true;
    } else {
      bool rtn = DslashCuda::advanceBlockDim(param);
      param.block.y = 1;
      param.grid.y = nSrc;
      return rtn;
    }
  }

  // Tuning step for the auxiliary dimension (the swizzle factor), swept from
  // 1 up to twice the SM count when SWIZZLE is enabled.
  bool advanceAux(TuneParam &param) const
  {
#ifdef SWIZZLE
    if (param.aux.x < 2*deviceProp.multiProcessorCount) {
      param.aux.x++;
      return true;
    } else {
      param.aux.x = 1;
      return false;
    }
#else
    return false;
#endif
  }

  // Initial tuning point: one right-hand side per block, no swizzle.
  void initTuneParam(TuneParam &param) const
  {
    DslashCuda::initTuneParam(param);
    param.block.y = 1;
    param.grid.y = nSrc;
    param.aux.x = 1;
  }

  void defaultTuneParam(TuneParam &param) const { initTuneParam(param); }

  // Ghost-zone depth; 6 here rather than the naive 3 hops -- presumably
  // 2 faces per 3-hop direction, see the face handling in dslash_policy.
  int Nface() const { return 6; }

  /*
    per direction / dimension flops
    SU(3) matrix-vector flops = (8 Nc - 2) * Nc
    xpay = 2 * 2 * Nc * Ns

    So for the full dslash we have
    flops = (2 * 2 * Nd * (8*Nc-2) * Nc) + ((2 * 2 * Nd - 1) * 2 * Nc * Ns)
    flops_xpay = flops + 2 * 2 * Nc * Ns

    For Asqtad this should give 1146 for Nc=3,Ns=2 and 1158 for the axpy equivalent
  */
  // Flop count for the currently selected kernel type, used by the tuner's
  // performance reporting.
  virtual long long flops() const {
    int mv_flops = (8 * in->Ncolor() - 2) * in->Ncolor(); // SU(3) matrix-vector flops
    int ghost_flops = (3 + 1) * (mv_flops + 2*in->Ncolor()*in->Nspin());
    int xpay_flops = 2 * 2 * in->Ncolor() * in->Nspin(); // multiply and add per real component
    int num_dir = 2 * 4; // dir * dim

    long long flops = 0;
    switch(dslashParam.kernel_type) {
    case EXTERIOR_KERNEL_X:
    case EXTERIOR_KERNEL_Y:
    case EXTERIOR_KERNEL_Z:
    case EXTERIOR_KERNEL_T:
      flops = ghost_flops * 2 * in->GhostFace()[dslashParam.kernel_type];
      break;
    case EXTERIOR_KERNEL_ALL:
      {
        long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
        flops = ghost_flops * ghost_sites;
        break;
      }
    case INTERIOR_KERNEL:
    case KERNEL_POLICY:
      {
        long long sites = in->VolumeCB();
        flops = (2*num_dir*mv_flops + // SU(3) matrix-vector multiplies
                 (2*num_dir-1)*2*in->Ncolor()*in->Nspin()) * sites; // accumulation
        if (x) flops += xpay_flops * sites; // axpy is always on interior

        if (dslashParam.kernel_type == KERNEL_POLICY) break;
        // now correct for flops done by exterior kernel
        long long ghost_sites = 0;
        for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2 * in->GhostFace()[d];
        flops -= ghost_flops * ghost_sites;

        break;
      }
    }
    return flops;
  }

  // Byte-traffic estimate for the currently selected kernel type, used by
  // the tuner's bandwidth reporting.
  virtual long long bytes() const {
    int gauge_bytes_fat = QUDA_RECONSTRUCT_NO * in->Precision();
    int gauge_bytes_long = reconstruct * in->Precision();
    // Half precision carries an extra float norm per spinor site.
    bool isHalf = in->Precision() == sizeof(short) ? true : false;
    int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0);
    int ghost_bytes = 3 * (spinor_bytes + gauge_bytes_long) + (spinor_bytes + gauge_bytes_fat) + spinor_bytes;
    int num_dir = 2 * 4; // set to 4 dimensions since we take care of 5-d fermions in derived classes where necessary

    long long bytes = 0;
    switch(dslashParam.kernel_type) {
    case EXTERIOR_KERNEL_X:
    case EXTERIOR_KERNEL_Y:
    case EXTERIOR_KERNEL_Z:
    case EXTERIOR_KERNEL_T:
      bytes = ghost_bytes * 2 * in->GhostFace()[dslashParam.kernel_type];
      break;
    case EXTERIOR_KERNEL_ALL:
      {
        long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
        bytes = ghost_bytes * ghost_sites;
        break;
      }
    case INTERIOR_KERNEL:
    case KERNEL_POLICY:
      {
        long long sites = in->VolumeCB();
        bytes = (num_dir*(gauge_bytes_fat + gauge_bytes_long) + // gauge reads
                 num_dir*2*spinor_bytes + // spinor reads
                 spinor_bytes)*sites; // spinor write
        if (x) bytes += spinor_bytes;

        if (dslashParam.kernel_type == KERNEL_POLICY) break;
        // now correct for bytes done by exterior kernel
        long long ghost_sites = 0;
        for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2*in->GhostFace()[d];
        bytes -= ghost_bytes * ghost_sites;

        break;
      }
    }
    return bytes;
  }
};
#endif // GPU_STAGGERED_DIRAC
#include <dslash_policy.cuh>
// Host entry point for the improved staggered dslash (HIP build).
// Dispatches on spinor precision to instantiate the matching
// StaggeredDslashCuda operator, then runs it through the autotuned
// communication/compute policy. When x is non-null the result is fused
// with an axpy term scaled by k.
void improvedStaggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
                                 const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
                                 const int parity, const int dagger, const cudaColorSpinorField *x,
                                 const double &k, const int *commOverride, TimeProfile &profile)
{
#ifdef GPU_STAGGERED_DIRAC
  // Depth-3 halo exchange buffers are required by the 3-hop long links.
  const_cast<cudaColorSpinorField*>(in)->createComms(3);

  DslashCuda *dslash = nullptr;
  if (in->Precision() == QUDA_DOUBLE_PRECISION) {
    dslash = new StaggeredDslashCuda<double2, double2, double2, double>
      (out, fatGauge, longGauge, in, x, k, parity, dagger, commOverride);
  } else if (in->Precision() == QUDA_SINGLE_PRECISION) {
    dslash = new StaggeredDslashCuda<float2, float2, float4, float>
      (out, fatGauge, longGauge, in, x, k, parity, dagger, commOverride);
  } else if (in->Precision() == QUDA_HALF_PRECISION) {
    dslash = new StaggeredDslashCuda<short2, short2, short4, short>
      (out, fatGauge, longGauge, in, x, k, parity, dagger, commOverride);
  }

  // the parameters passed to dslashCuda must be 4-d volume and 3-d
  // faces because Ls is added as the y-dimension in thread space
  int ghostFace[QUDA_MAX_DIM];
  for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);

  DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume()/in->X(4), ghostFace, profile);
  dslash_policy.apply(0);

  delete dslash;
#else
  errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
}
| fd5ab7764ec6fec60e36e024888e2125ae77bd27.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else // Fermi
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace improvedstaggered {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
#undef GPU_NDEG_TWISTED_MASS_DIRAC
#undef GPU_CLOVER_DIRAC
#undef GPU_DOMAIN_WALL_DIRAC
#define DD_IMPROVED 1
#include <staggered_dslash_def.h> // staggered Dslash kernels
#undef DD_IMPROVED
#include <dslash_quda.cuh>
} // end namespace improvedstaggered
// declare the dslash events
#include <dslash_events.cuh>
using namespace improvedstaggered;
#ifdef GPU_STAGGERED_DIRAC
// GPU dslash operator for the improved (fat-link + long-link) staggered
// action. Binds the fat and long gauge fields to textures, configures
// dslashParam, and launches the IMPROVED_STAGGERED_DSLASH kernel via the
// autotuner. Template parameters select the storage types of the spinor
// (sFloat), fat gauge (fatGFloat), long gauge (longGFloat) and the gauge
// phase (phaseFloat). When x != nullptr an axpy term with coefficient a is
// fused into the kernel.
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {

private:
  const GaugeField &fatGauge;   // one-hop (fat) links
  const GaugeField &longGauge;  // three-hop (long) links
  const unsigned int nSrc;      // number of right-hand sides (5th spinor dim)

protected:
  bool tuneAuxDim() const { return true; } // Do tune the aux dimensions.

  // Shared-memory requirement per thread for the kernel launch; only
  // non-zero when PARALLEL_DIR is enabled (6 words per thread, sized by the
  // spinor precision).
  unsigned int sharedBytesPerThread() const
  {
#ifdef PARALLEL_DIR
    int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
    return 6 * reg_size;
#else
    return 0;
#endif
  }

public:
  // Binds gauge textures, validates precisions, and fills the kernel
  // parameter struct (axpy coefficient, fat-link max, long-link scale).
  StaggeredDslashCuda(cudaColorSpinorField *out, const GaugeField &fatGauge, const GaugeField &longGauge,
                      const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double a,
                      const int parity, const int dagger, const int *commOverride)
    : DslashCuda(out, in, x, longGauge, parity, dagger, commOverride),
      fatGauge(fatGauge), longGauge(longGauge), nSrc(in->X(4))
  {
#ifdef MULTI_GPU
    // The 3-hop stencil needs at least 6 sites in any partitioned direction.
    for(int i=0;i < 4; i++){
      if(comm_dim_partitioned(i) && (fatGauge.X()[i] < 6)){
        errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in improved staggered dslash\n");
      }
    }
#endif
    bindFatGaugeTex(static_cast<const cudaGaugeField&>(fatGauge), parity, dslashParam);
    bindLongGaugeTex(static_cast<const cudaGaugeField&>(longGauge), parity, dslashParam);

    // Spinor and both gauge fields must share a precision.
    if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
      errorQuda("Mixing gauge and spinor precision not supported"
                "(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d",
                in->Precision(), fatGauge.Precision(), longGauge.Precision());
    }

    dslashParam.a = a;                              // axpy coefficient (double)
    dslashParam.a_f = a;                            // single-precision copy
    dslashParam.fat_link_max = fatGauge.LinkMax();
    dslashParam.coeff = 1.0/longGauge.Scale();      // long-link normalization
    dslashParam.coeff_f = (float)dslashParam.coeff;
  }

  // Unbind every texture bound by the constructor / apply().
  virtual ~StaggeredDslashCuda() {
    unbindSpinorTex<sFloat>(in, out, x);
    unbindFatGaugeTex(static_cast<const cudaGaugeField&>(fatGauge));
    unbindLongGaugeTex(static_cast<const cudaGaugeField&>(longGauge));
  }

  // Tune (or look up) the launch configuration and run the dslash kernel on
  // the given stream.
  void apply(const cudaStream_t &stream)
  {
#ifndef USE_TEXTURE_OBJECTS
    if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x);
#endif // USE_TEXTURE_OBJECTS
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    setParam();
    dslashParam.gauge_stride = fatGauge.Stride();
    dslashParam.long_gauge_stride = longGauge.Stride();
    dslashParam.swizzle = tp.aux.x;  // block-swizzle factor chosen by tuning
    IMPROVED_STAGGERED_DSLASH(tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
  }

  // Tuning step for the thread-block shape: grow block.y (right-hand sides
  // per block) first; once exhausted, fall back to the base-class block.x
  // advance with block.y reset to 1.
  bool advanceBlockDim(TuneParam &param) const
  {
    const unsigned int max_shared = deviceProp.sharedMemPerBlock;
    // first try to advance block.y (number of right-hand sides per block)
    if (param.block.y < nSrc && param.block.y < (unsigned int)deviceProp.maxThreadsDim[1] &&
        sharedBytesPerThread()*param.block.x*param.block.y < max_shared &&
        (param.block.x*(param.block.y+1u)) <= (unsigned int)deviceProp.maxThreadsPerBlock) {
      param.block.y++;
      param.grid.y = (nSrc + param.block.y - 1) / param.block.y;
      return true;
    } else {
      bool rtn = DslashCuda::advanceBlockDim(param);
      param.block.y = 1;
      param.grid.y = nSrc;
      return rtn;
    }
  }

  // Tuning step for the auxiliary dimension (the swizzle factor), swept from
  // 1 up to twice the SM count when SWIZZLE is enabled.
  bool advanceAux(TuneParam &param) const
  {
#ifdef SWIZZLE
    if (param.aux.x < 2*deviceProp.multiProcessorCount) {
      param.aux.x++;
      return true;
    } else {
      param.aux.x = 1;
      return false;
    }
#else
    return false;
#endif
  }

  // Initial tuning point: one right-hand side per block, no swizzle.
  void initTuneParam(TuneParam &param) const
  {
    DslashCuda::initTuneParam(param);
    param.block.y = 1;
    param.grid.y = nSrc;
    param.aux.x = 1;
  }

  void defaultTuneParam(TuneParam &param) const { initTuneParam(param); }

  // Ghost-zone depth; 6 here rather than the naive 3 hops -- presumably
  // 2 faces per 3-hop direction, see the face handling in dslash_policy.
  int Nface() const { return 6; }

  /*
    per direction / dimension flops
    SU(3) matrix-vector flops = (8 Nc - 2) * Nc
    xpay = 2 * 2 * Nc * Ns

    So for the full dslash we have
    flops = (2 * 2 * Nd * (8*Nc-2) * Nc) + ((2 * 2 * Nd - 1) * 2 * Nc * Ns)
    flops_xpay = flops + 2 * 2 * Nc * Ns

    For Asqtad this should give 1146 for Nc=3,Ns=2 and 1158 for the axpy equivalent
  */
  // Flop count for the currently selected kernel type, used by the tuner's
  // performance reporting.
  virtual long long flops() const {
    int mv_flops = (8 * in->Ncolor() - 2) * in->Ncolor(); // SU(3) matrix-vector flops
    int ghost_flops = (3 + 1) * (mv_flops + 2*in->Ncolor()*in->Nspin());
    int xpay_flops = 2 * 2 * in->Ncolor() * in->Nspin(); // multiply and add per real component
    int num_dir = 2 * 4; // dir * dim

    long long flops = 0;
    switch(dslashParam.kernel_type) {
    case EXTERIOR_KERNEL_X:
    case EXTERIOR_KERNEL_Y:
    case EXTERIOR_KERNEL_Z:
    case EXTERIOR_KERNEL_T:
      flops = ghost_flops * 2 * in->GhostFace()[dslashParam.kernel_type];
      break;
    case EXTERIOR_KERNEL_ALL:
      {
        long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
        flops = ghost_flops * ghost_sites;
        break;
      }
    case INTERIOR_KERNEL:
    case KERNEL_POLICY:
      {
        long long sites = in->VolumeCB();
        flops = (2*num_dir*mv_flops + // SU(3) matrix-vector multiplies
                 (2*num_dir-1)*2*in->Ncolor()*in->Nspin()) * sites; // accumulation
        if (x) flops += xpay_flops * sites; // axpy is always on interior

        if (dslashParam.kernel_type == KERNEL_POLICY) break;
        // now correct for flops done by exterior kernel
        long long ghost_sites = 0;
        for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2 * in->GhostFace()[d];
        flops -= ghost_flops * ghost_sites;

        break;
      }
    }
    return flops;
  }

  // Byte-traffic estimate for the currently selected kernel type, used by
  // the tuner's bandwidth reporting.
  virtual long long bytes() const {
    int gauge_bytes_fat = QUDA_RECONSTRUCT_NO * in->Precision();
    int gauge_bytes_long = reconstruct * in->Precision();
    // Half precision carries an extra float norm per spinor site.
    bool isHalf = in->Precision() == sizeof(short) ? true : false;
    int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0);
    int ghost_bytes = 3 * (spinor_bytes + gauge_bytes_long) + (spinor_bytes + gauge_bytes_fat) + spinor_bytes;
    int num_dir = 2 * 4; // set to 4 dimensions since we take care of 5-d fermions in derived classes where necessary

    long long bytes = 0;
    switch(dslashParam.kernel_type) {
    case EXTERIOR_KERNEL_X:
    case EXTERIOR_KERNEL_Y:
    case EXTERIOR_KERNEL_Z:
    case EXTERIOR_KERNEL_T:
      bytes = ghost_bytes * 2 * in->GhostFace()[dslashParam.kernel_type];
      break;
    case EXTERIOR_KERNEL_ALL:
      {
        long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
        bytes = ghost_bytes * ghost_sites;
        break;
      }
    case INTERIOR_KERNEL:
    case KERNEL_POLICY:
      {
        long long sites = in->VolumeCB();
        bytes = (num_dir*(gauge_bytes_fat + gauge_bytes_long) + // gauge reads
                 num_dir*2*spinor_bytes + // spinor reads
                 spinor_bytes)*sites; // spinor write
        if (x) bytes += spinor_bytes;

        if (dslashParam.kernel_type == KERNEL_POLICY) break;
        // now correct for bytes done by exterior kernel
        long long ghost_sites = 0;
        for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2*in->GhostFace()[d];
        bytes -= ghost_bytes * ghost_sites;

        break;
      }
    }
    return bytes;
  }
};
#endif // GPU_STAGGERED_DIRAC
#include <dslash_policy.cuh>
// Host entry point for the improved staggered dslash.
// Dispatches on spinor precision to instantiate the matching
// StaggeredDslashCuda operator, then runs it through the autotuned
// communication/compute policy. When x is non-null the result is fused
// with an axpy term scaled by k.
void improvedStaggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
                                 const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
                                 const int parity, const int dagger, const cudaColorSpinorField *x,
                                 const double &k, const int *commOverride, TimeProfile &profile)
{
#ifdef GPU_STAGGERED_DIRAC
  // Depth-3 halo exchange buffers are required by the 3-hop long links.
  const_cast<cudaColorSpinorField*>(in)->createComms(3);

  DslashCuda *dslash = nullptr;
  if (in->Precision() == QUDA_DOUBLE_PRECISION) {
    dslash = new StaggeredDslashCuda<double2, double2, double2, double>
      (out, fatGauge, longGauge, in, x, k, parity, dagger, commOverride);
  } else if (in->Precision() == QUDA_SINGLE_PRECISION) {
    dslash = new StaggeredDslashCuda<float2, float2, float4, float>
      (out, fatGauge, longGauge, in, x, k, parity, dagger, commOverride);
  } else if (in->Precision() == QUDA_HALF_PRECISION) {
    dslash = new StaggeredDslashCuda<short2, short2, short4, short>
      (out, fatGauge, longGauge, in, x, k, parity, dagger, commOverride);
  }

  // the parameters passed to dslashCuda must be 4-d volume and 3-d
  // faces because Ls is added as the y-dimension in thread space
  int ghostFace[QUDA_MAX_DIM];
  for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);

  DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume()/in->X(4), ghostFace, profile);
  dslash_policy.apply(0);

  delete dslash;
#else
  errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
|
e728bd27b7b8e5ba4cb9794769263b6a6275e9da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Array Search with CUDA (Fall 2016):
*
* Members:
* Emanuelle Crespi, Tolga Keskinoglu
*
* This test implements an array search discussed in the methodology section
* of Optimizing CPU-GPU Interactions.
*
* The following code makes use of the kernel call search(int n, char *data, char *out, char c)
* to perform a parallel search for char c amongst a large vector of characters.
*
* The result is a one-one mapping of the data array with 1s and 0s in the out array at
* corresponding indices where the character has been found. The output is written to
* the file 'parallel_array_search_result.txt' for validation with 'array_search_result.txt'
* when running the executable for array_search.c
*
 * While the overhead of executing 'cudaMemcpy(...)' slows down execution time,
 * the performance of the parallel search itself is significantly faster than its
 * serial counterpart.
*
* The output of the performance is displayed in seconds for verification.
*
* References:
* NVIDIA CUDA C Programming Guide Version 3.2
*/
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
// Jetson TK1 has device capability 1.x allowing 1024 threads/block
// We also indicate a threshold of 67108864 for vectored data
#define THREADS_PER_BLOCK 1024
#define THRESHOLD 67108864
//Identify failures
#define FILE_OPEN_FAIL -1
#define MALLOC_FAIL -2
/*
* Indicates the task search to be performed on the GPU
* for char c in array data both of size n
*
* The output 1 or 0 is written in out to indicate 'found' and 'not-found' respectively
* Results are written to device memory and must be fetched back from out for verification.
* search( n, ['a','b','d','c','d','e',...], result, 'd') ==> result = ['0','0','1','0','1','0',...]
*/
/*
 * GPU search kernel: for every index i < n, marks whether data[i]
 * equals the target character c. out[i] receives '1' on a match and
 * '0' otherwise (a one-to-one map of the input, same as the serial
 * version). data and out are device pointers; launch with enough
 * threads to cover n — threads past the end are guarded out.
 */
__global__ void search(int n, char *data, char *out, char c){
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i < n) {
		out[i] = (data[i] == c) ? '1' : '0';
	}
}
/*
 * Driver for the parallel character search.
 *
 * Counts the bytes in ../../file.txt, uploads them to the GPU in
 * THRESHOLD-byte chunks (one HIP stream per chunk), searches each chunk
 * for 'D', copies the '1'/'0' hit map back into the host buffer, prints
 * the timed GPU phase in seconds and writes the map (33 characters per
 * line) to parallel_array_search_result.txt.
 *
 * Returns 0 on success or FILE_OPEN_FAIL when a file cannot be opened.
 *
 * Fixes relative to the original:
 *  - the search kernels now read the DEVICE buffer d_data; previously the
 *    pinned host pointer `data` was passed even though the bytes had just
 *    been copied to d_data, making the uploads pointless and relying on
 *    zero-copy access that is not guaranteed;
 *  - the remainder chunk runs only when rem > 0; with an evenly divisible
 *    input the old code still executed it and indexed stream[sections],
 *    one past the end of the stream array;
 *  - empty input returns cleanly, and the timing events are destroyed.
 */
int main(){
	FILE *fp_data, *fp_out;
	char *data, c;
	char *d_data, *d_out;          /* device copies of input / result map */
	int s_data = 0, j = 0, i = 0;
	int flag = 0;
	hipError_t error;

	/* Measure the input size by scanning the file once. */
	if (!(fp_data = fopen("../../file.txt", "r"))){
		perror("failed to open file.txt\n");
		return FILE_OPEN_FAIL;
	}
	while( fscanf(fp_data,"%c",&c) != EOF ){
		s_data++;
	}
	if (s_data == 0){
		/* Nothing to search. */
		fclose(fp_data);
		return 0;
	}
	int rem = s_data % THRESHOLD;                   /* bytes in trailing partial chunk */
	int sections = (THRESHOLD+s_data-1)/THRESHOLD;  /* ceil(s_data / THRESHOLD)        */

	/* Pinned host buffer (required for genuinely asynchronous copies)
	 * plus device input/output buffers. */
	hipHostMalloc(&data, sizeof(char)*s_data);
	hipMalloc( (void **) &d_data, sizeof(char)*s_data);
	hipMalloc( (void **) &d_out, sizeof(char)*s_data);

	/* Rewind and read the whole file into the pinned buffer. */
	fseek(fp_data, 0, 0);
	for( j= 0; fscanf(fp_data,"%c",&data[j]) != EOF; j++ ){ }

	/* One stream per chunk so copy-in, kernel and copy-out of different
	 * chunks can overlap. */
	hipStream_t stream[sections];
	for (int j = 0; j < sections; j++){
		hipStreamCreate(&stream[j]);
	}
	/* flag == 1 => last chunk is partial and handled after the loop. */
	if( rem == 0 ){
		flag = 0;
	}else{
		flag = 1;
	}

	hipDeviceSynchronize();
	/* Events bracketing the timed upload/search/download phase. */
	hipEvent_t start;
	error = hipEventCreate(&start);
	if (error != hipSuccess)
	{
		fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	hipEvent_t stop;
	error = hipEventCreate(&stop);
	if (error != hipSuccess)
	{
		fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	error = hipEventRecord(start, NULL);
	if (error != hipSuccess)
	{
		fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	printf("Running...\n");
	/* Full-sized chunks: upload, search for 'D', download the hit map.
	 * The kernel reads the device buffer d_data (bug fix). */
	for(j = 0; j < sections-flag; j++){
		hipMemcpyAsync(d_data + j * THRESHOLD, data + j * THRESHOLD,
			THRESHOLD, hipMemcpyHostToDevice, stream[j]);
		hipLaunchKernelGGL(( search), dim3((THRESHOLD+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK),0,stream[j], THRESHOLD,
			d_data+(j*THRESHOLD), d_out+(j*THRESHOLD), 'D');
		hipStreamSynchronize(stream[j]);
		hipMemcpyAsync( data + j * THRESHOLD, d_out + j * THRESHOLD,
			THRESHOLD, hipMemcpyDeviceToHost, stream[j]);
	}
	/* Trailing partial chunk; skipped when the input divides evenly
	 * (running it with rem == 0 read stream[sections] out of bounds). */
	if (rem > 0){
		hipMemcpyAsync(d_data + j * THRESHOLD, data + j * THRESHOLD,
			rem, hipMemcpyHostToDevice, stream[j]);
		hipLaunchKernelGGL(( search), dim3((rem+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK),0,stream[j], rem,
			d_data + j * THRESHOLD, d_out + j * THRESHOLD, 'D');
		hipStreamSynchronize(stream[j]);
		hipMemcpyAsync( data + j * THRESHOLD, d_out + j * THRESHOLD,
			rem, hipMemcpyDeviceToHost, stream[j]);
	}
	error = hipEventRecord(stop, NULL);
	if (error != hipSuccess)
	{
		fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	/* Wait for the stop event (and the preceding stream work) to finish. */
	error = hipEventSynchronize(stop);
	if (error != hipSuccess)
	{
		fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	float msecTotal = 0.0f;
	error = hipEventElapsedTime(&msecTotal, start, stop);
	if (error != hipSuccess)
	{
		fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	/* Report the elapsed phase in seconds. */
	float msecPerisPrime = msecTotal / 1;
	printf( "Performance= %.06f sec\n", msecPerisPrime/1000.0 );
	hipEventDestroy(start);
	hipEventDestroy(stop);
	/* Destroy streams */
	for (int j = 0; j < sections; j++){
		hipStreamDestroy(stream[j]);
	}
	/* Write the hit map, 33 characters per line, for validation against
	 * the serial implementation's output file. */
	if ( !(fp_out = fopen("parallel_array_search_result.txt", "w")) ){
		perror("failed to open results file\n");
		return FILE_OPEN_FAIL;
	}
	for (j = 0; j < s_data; j++){
		if( i == 32 ){
			fprintf(fp_out, "%c\n", data[j]);
			i = 0;
		}else{
			fprintf(fp_out, "%c", data[j]);
			i++;
		}
	}
	/* Cleanup */
	hipHostFree(data);
	hipFree(d_data); hipFree(d_out);
	fclose(fp_data); fclose(fp_out);
	return 0;
}
| e728bd27b7b8e5ba4cb9794769263b6a6275e9da.cu | /**
* Array Search with CUDA (Fall 2016):
*
* Members:
* Emanuelle Crespi, Tolga Keskinoglu
*
* This test implements an array search discussed in the methodology section
* of Optimizing CPU-GPU Interactions.
*
* The following code makes use of the kernel call search(int n, char *data, char *out, char c)
* to perform a parallel search for char c amongst a large vector of characters.
*
* The result is a one-one mapping of the data array with 1s and 0s in the out array at
* corresponding indices where the character has been found. The output is written to
* the file 'parallel_array_search_result.txt' for validation with 'array_search_result.txt'
* when running the executable for array_search.c
*
* While the overhead of executing 'cudaMemCpy(...)' slows down execution time,
 * the performance of the parallel search itself is significantly faster than its
* serial counterpart.
*
* The output of the performance is displayed in seconds for verification.
*
* References:
* NVIDIA CUDA C Programming Guide Version 3.2
*/
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
// Jetson TK1 has device capability 1.x allowing 1024 threads/block
// We also indicate a threshold of 67108864 for vectored data
#define THREADS_PER_BLOCK 1024
#define THRESHOLD 67108864
//Identify failures
#define FILE_OPEN_FAIL -1
#define MALLOC_FAIL -2
/*
* Indicates the task search to be performed on the GPU
* for char c in array data both of size n
*
* The output 1 or 0 is written in out to indicate 'found' and 'not-found' respectively
* Results are written to device memory and must be fetched back from out for verification.
* search( n, ['a','b','d','c','d','e',...], result, 'd') ==> result = ['0','0','1','0','1','0',...]
*/
/*
 * Parallel character search: thread gid tests data[gid] against c and
 * records the outcome in out[gid] as '1' (found) or '0' (not found).
 * Threads whose global index falls outside [0, n) return immediately,
 * so any launch that covers n is valid.
 */
__global__ void search(int n, char *data, char *out, char c){
	int gid = blockDim.x * blockIdx.x + threadIdx.x;
	if (gid >= n) {
		return;
	}
	char hit = (data[gid] == c) ? '1' : '0';
	out[gid] = hit;
}
/*
 * Driver for the parallel character search (CUDA variant).
 *
 * Counts the bytes in ../../file.txt, uploads them to the GPU in
 * THRESHOLD-byte chunks (one CUDA stream per chunk), searches each chunk
 * for 'D', copies the '1'/'0' hit map back into the host buffer, prints
 * the timed GPU phase in seconds and writes the map (33 characters per
 * line) to parallel_array_search_result.txt.
 *
 * Returns 0 on success or FILE_OPEN_FAIL when a file cannot be opened.
 *
 * Fixes relative to the original:
 *  - the search kernels now read the DEVICE buffer d_data; previously the
 *    pinned host pointer `data` was passed even though the bytes had just
 *    been copied to d_data, making the uploads pointless and relying on
 *    zero-copy access that is not guaranteed;
 *  - the remainder chunk runs only when rem > 0; with an evenly divisible
 *    input the old code still executed it and indexed stream[sections],
 *    one past the end of the stream array;
 *  - empty input returns cleanly, and the timing events are destroyed.
 */
int main(){
	FILE *fp_data, *fp_out;
	char *data, c;
	char *d_data, *d_out;          /* device copies of input / result map */
	int s_data = 0, j = 0, i = 0;
	int flag = 0;
	cudaError_t error;

	/* Measure the input size by scanning the file once. */
	if (!(fp_data = fopen("../../file.txt", "r"))){
		perror("failed to open file.txt\n");
		return FILE_OPEN_FAIL;
	}
	while( fscanf(fp_data,"%c",&c) != EOF ){
		s_data++;
	}
	if (s_data == 0){
		/* Nothing to search. */
		fclose(fp_data);
		return 0;
	}
	int rem = s_data % THRESHOLD;                   /* bytes in trailing partial chunk */
	int sections = (THRESHOLD+s_data-1)/THRESHOLD;  /* ceil(s_data / THRESHOLD)        */

	/* Pinned host buffer (required for genuinely asynchronous copies)
	 * plus device input/output buffers. */
	cudaMallocHost(&data, sizeof(char)*s_data);
	cudaMalloc( (void **) &d_data, sizeof(char)*s_data);
	cudaMalloc( (void **) &d_out, sizeof(char)*s_data);

	/* Rewind and read the whole file into the pinned buffer. */
	fseek(fp_data, 0, 0);
	for( j= 0; fscanf(fp_data,"%c",&data[j]) != EOF; j++ ){ }

	/* One stream per chunk so copy-in, kernel and copy-out of different
	 * chunks can overlap. */
	cudaStream_t stream[sections];
	for (int j = 0; j < sections; j++){
		cudaStreamCreate(&stream[j]);
	}
	/* flag == 1 => last chunk is partial and handled after the loop. */
	if( rem == 0 ){
		flag = 0;
	}else{
		flag = 1;
	}

	cudaDeviceSynchronize();
	/* Events bracketing the timed upload/search/download phase. */
	cudaEvent_t start;
	error = cudaEventCreate(&start);
	if (error != cudaSuccess)
	{
		fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	cudaEvent_t stop;
	error = cudaEventCreate(&stop);
	if (error != cudaSuccess)
	{
		fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	error = cudaEventRecord(start, NULL);
	if (error != cudaSuccess)
	{
		fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	printf("Running...\n");
	/* Full-sized chunks: upload, search for 'D', download the hit map.
	 * The kernel reads the device buffer d_data (bug fix). */
	for(j = 0; j < sections-flag; j++){
		cudaMemcpyAsync(d_data + j * THRESHOLD, data + j * THRESHOLD,
			THRESHOLD, cudaMemcpyHostToDevice, stream[j]);
		search<<<(THRESHOLD+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK,0,stream[j]>>>(THRESHOLD,
			d_data+(j*THRESHOLD), d_out+(j*THRESHOLD), 'D');
		cudaStreamSynchronize(stream[j]);
		cudaMemcpyAsync( data + j * THRESHOLD, d_out + j * THRESHOLD,
			THRESHOLD, cudaMemcpyDeviceToHost, stream[j]);
	}
	/* Trailing partial chunk; skipped when the input divides evenly
	 * (running it with rem == 0 read stream[sections] out of bounds). */
	if (rem > 0){
		cudaMemcpyAsync(d_data + j * THRESHOLD, data + j * THRESHOLD,
			rem, cudaMemcpyHostToDevice, stream[j]);
		search<<<(rem+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK,0,stream[j]>>>(rem,
			d_data + j * THRESHOLD, d_out + j * THRESHOLD, 'D');
		cudaStreamSynchronize(stream[j]);
		cudaMemcpyAsync( data + j * THRESHOLD, d_out + j * THRESHOLD,
			rem, cudaMemcpyDeviceToHost, stream[j]);
	}
	error = cudaEventRecord(stop, NULL);
	if (error != cudaSuccess)
	{
		fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	/* Wait for the stop event (and the preceding stream work) to finish. */
	error = cudaEventSynchronize(stop);
	if (error != cudaSuccess)
	{
		fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	float msecTotal = 0.0f;
	error = cudaEventElapsedTime(&msecTotal, start, stop);
	if (error != cudaSuccess)
	{
		fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
		exit(EXIT_FAILURE);
	}
	/* Report the elapsed phase in seconds. */
	float msecPerisPrime = msecTotal / 1;
	printf( "Performance= %.06f sec\n", msecPerisPrime/1000.0 );
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	/* Destroy streams */
	for (int j = 0; j < sections; j++){
		cudaStreamDestroy(stream[j]);
	}
	/* Write the hit map, 33 characters per line, for validation against
	 * the serial implementation's output file. */
	if ( !(fp_out = fopen("parallel_array_search_result.txt", "w")) ){
		perror("failed to open results file\n");
		return FILE_OPEN_FAIL;
	}
	for (j = 0; j < s_data; j++){
		if( i == 32 ){
			fprintf(fp_out, "%c\n", data[j]);
			i = 0;
		}else{
			fprintf(fp_out, "%c", data[j]);
			i++;
		}
	}
	/* Cleanup */
	cudaFreeHost(data);
	cudaFree(d_data); cudaFree(d_out);
	fclose(fp_data); fclose(fp_out);
	return 0;
}
|
a569bd1e31e61c9259f3d521d01d31029042ae04.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernel.h"
#include "kernels.h"
#include "hip/hip_runtime.h"
#include "corecrt_math.h"
#include "utils.h"
// Ensure __HIPCC__ is defined so that __syncthreads() passes syntax checking
#ifndef __HIPCC__
#define __HIPCC__
#endif // !__HIPCC__
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <cstdio>
namespace cudaKernel {
// Applies a per-group x/y drift to a packed xyz point array, in place.
// Launch: one block per point group (blockIdx.x), one thread per point
// (threadIdx.x); blockDim.x must be >= the largest group's point count.
//   points        : interleaved [x,y,z] floats; group g occupies point
//                   indices [groupOffsets[g], groupOffsets[g+1]), so
//                   groupOffsets must have nGroups+1 entries
//   xShiftMatrix / yShiftMatrix : 2-D tables indexed [row * shiftsize + col]
//                   — presumably the displacement accumulated between two
//                   interpolated frames; TODO confirm against the producer
//   frame         : unused inside this kernel (kept for interface symmetry)
__global__ void calcFinalPosition(
	float* points, size_t nInterpolation, size_t frame,
	const size_t* groupOffsets, const size_t* groupStarts,
	const size_t* startFrames, const float* xShiftMatrix,
	const float* yShiftMatrix, size_t shiftsize) {
	size_t bid = blockIdx.x;   // one block per point group
	size_t tid = threadIdx.x;  // one thread per point within the group
	// first point of this group in the interleaved xyz array
	float* basePtr = points + groupOffsets[bid] * 3;
	size_t numPointsThisGroup = groupOffsets[bid + 1] - groupOffsets[bid];
	if (tid < numPointsThisGroup) {
		// row = group's start frame, col = start+duration frame, both scaled
		// by (nInterpolation + 1) sub-frames; col is offset by the point index
		size_t start = startFrames[bid] * (nInterpolation + 1);
		size_t end = (startFrames[bid] + groupStarts[bid]) * (nInterpolation + 1) + tid; // (groupStarts[bid] - startFrames[bid]) * (nInterpolation + 1) + tid;
		// shift x and y components in place; z is left untouched
		basePtr[3 * tid] += xShiftMatrix[start * shiftsize + end];
		basePtr[3 * tid + 1] += yShiftMatrix[start * shiftsize + end];
		if (tid == 0) {
			// debug trace emitted for the first point of each group only
			deviceDebugPrint("%llu, %llu: %llu, %llu, %llu\n",
				bid, tid, startFrames[bid], groupStarts[bid], end);
			deviceDebugPrint("FinalPos: (%llu, %llu, %llu) : %llu, %llu, %f, %f\n",
				bid, tid, numPointsThisGroup, start, end,
				xShiftMatrix[start * shiftsize + end],
				yShiftMatrix[start * shiftsize + end]);
		}
	}
}
// Host launcher: one block per group, maxSize threads per block (maxSize
// must cover the largest group's point count). All pointer arguments are
// device pointers. `frame` is forwarded but currently unused by the kernel.
// Launch-configuration errors surface via CUDACHECK(hipGetLastError()).
void calcFinalPosition(float* dPoints, size_t nGroups, size_t maxSize,
	size_t nInterpolation, size_t frame, const size_t* dGroupOffsets,
	const size_t* dGroupStarts, const size_t* dStartFrames,
	const float* dXShiftMatrix, const float* dYShiftMatrix, size_t shiftsize) {
	calcFinalPosition << <nGroups, maxSize >> > (
		dPoints, nInterpolation, frame, dGroupOffsets, dGroupStarts,
		dStartFrames, dXShiftMatrix, dYShiftMatrix, shiftsize);
	CUDACHECK(hipGetLastError());
}
} | a569bd1e31e61c9259f3d521d01d31029042ae04.cu | #include "kernel.h"
#include "kernels.h"
#include "cuda_runtime.h"
#include "corecrt_math.h"
#include "utils.h"
// Define __CUDACC__ if absent so that __syncthreads() passes syntax checking
#ifndef __CUDACC__
#define __CUDACC__
#endif // !__CUDACC__
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <cstdio>
namespace cudaKernel {
// Applies a per-group x/y drift to a packed xyz point array, in place.
// Launch: one block per point group (blockIdx.x), one thread per point
// (threadIdx.x); blockDim.x must be >= the largest group's point count.
//   points        : interleaved [x,y,z] floats; group g occupies point
//                   indices [groupOffsets[g], groupOffsets[g+1]), so
//                   groupOffsets must have nGroups+1 entries
//   xShiftMatrix / yShiftMatrix : 2-D tables indexed [row * shiftsize + col]
//                   — presumably the displacement accumulated between two
//                   interpolated frames; TODO confirm against the producer
//   frame         : unused inside this kernel (kept for interface symmetry)
__global__ void calcFinalPosition(
	float* points, size_t nInterpolation, size_t frame,
	const size_t* groupOffsets, const size_t* groupStarts,
	const size_t* startFrames, const float* xShiftMatrix,
	const float* yShiftMatrix, size_t shiftsize) {
	size_t bid = blockIdx.x;   // one block per point group
	size_t tid = threadIdx.x;  // one thread per point within the group
	// first point of this group in the interleaved xyz array
	float* basePtr = points + groupOffsets[bid] * 3;
	size_t numPointsThisGroup = groupOffsets[bid + 1] - groupOffsets[bid];
	if (tid < numPointsThisGroup) {
		// row = group's start frame, col = start+duration frame, both scaled
		// by (nInterpolation + 1) sub-frames; col is offset by the point index
		size_t start = startFrames[bid] * (nInterpolation + 1);
		size_t end = (startFrames[bid] + groupStarts[bid]) * (nInterpolation + 1) + tid; // (groupStarts[bid] - startFrames[bid]) * (nInterpolation + 1) + tid;
		// shift x and y components in place; z is left untouched
		basePtr[3 * tid] += xShiftMatrix[start * shiftsize + end];
		basePtr[3 * tid + 1] += yShiftMatrix[start * shiftsize + end];
		if (tid == 0) {
			// debug trace emitted for the first point of each group only
			deviceDebugPrint("%llu, %llu: %llu, %llu, %llu\n",
				bid, tid, startFrames[bid], groupStarts[bid], end);
			deviceDebugPrint("FinalPos: (%llu, %llu, %llu) : %llu, %llu, %f, %f\n",
				bid, tid, numPointsThisGroup, start, end,
				xShiftMatrix[start * shiftsize + end],
				yShiftMatrix[start * shiftsize + end]);
		}
	}
}
// Host launcher: one block per group, maxSize threads per block (maxSize
// must cover the largest group's point count). All pointer arguments are
// device pointers. `frame` is forwarded but currently unused by the kernel.
// Launch-configuration errors surface via CUDACHECK(cudaGetLastError()).
void calcFinalPosition(float* dPoints, size_t nGroups, size_t maxSize,
	size_t nInterpolation, size_t frame, const size_t* dGroupOffsets,
	const size_t* dGroupStarts, const size_t* dStartFrames,
	const float* dXShiftMatrix, const float* dYShiftMatrix, size_t shiftsize) {
	calcFinalPosition << <nGroups, maxSize >> > (
		dPoints, nInterpolation, frame, dGroupOffsets, dGroupStarts,
		dStartFrames, dXShiftMatrix, dYShiftMatrix, shiftsize);
	CUDACHECK(cudaGetLastError());
}
} |
89e626c57333f7c9162041264519bfcc3d992bfc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vadd.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Thread-block shapes swept by the benchmark: {blockDim.x, blockDim.y} pairs.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem sizes swept by the benchmark: {XSIZE, YSIZE} pairs.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated launch-configuration benchmark for the `vadd` kernel:
// sweeps the first argv[1] problem sizes against all 20 block shapes,
// warms up with 10 launches, times 1000 launches and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// NOTE(review): argv[1] is read without checking argc — crashes when no
// argument is supplied; confirm callers always pass the sweep count.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
// NOTE(review): buffers are allocated as XSIZE*YSIZE BYTES yet hold
// doubles — presumably only safe because vadd is launched with n = 1;
// confirm against vadd.hip before reusing with larger n.
const double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
// Round the grid extents up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// First launch primes the context/caches before timing.
hipFree(0);hipLaunchKernelGGL((
vadd), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,c);
hipDeviceSynchronize();
// Warm-up: 10 untimed launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vadd), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,c);
}
// Timed region: 1000 launches measured with a host steady clock.
// NOTE(review): launches are asynchronous and no sync precedes `end`,
// so this measures mostly enqueue cost — confirm that is intended.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vadd), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 89e626c57333f7c9162041264519bfcc3d992bfc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vadd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate thread-block shapes for the sweep: {blockDim.x, blockDim.y}.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes for the sweep: {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vadd<<<gridBlock,threadBlock>>>(n,a,b,c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vadd<<<gridBlock,threadBlock>>>(n,a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vadd<<<gridBlock,threadBlock>>>(n,a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7e24e8156e8248a5b7aca5d9a13b0bc7ae3881c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdint.h>
#include <cstdint>
#include <numeric>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
//********************** CUDA_ERROR
// Error-handling macro helper: wrap it around HIP API calls whenever
// possible (see CUDA_ERROR below). On failure it prints the error string
// with file/line and terminates; on Windows it first pauses so a console
// window spawned by double-click stays visible.
// Fix: the _WIN32 branch previously only paused and then CONTINUED
// executing after the error; it now exits like the non-Windows path.
inline void HandleError(hipError_t err, const char *file, int line) {
	if (err != hipSuccess) {
		printf("\n%s in %s at line %d\n", hipGetErrorString(err), file, line);
#ifdef _WIN32
		system("pause");
#endif
		exit(EXIT_FAILURE);
	}
}
// Convenience wrapper: checks an API return code at the call site, e.g.
// CUDA_ERROR(hipMalloc(...)); reports file and line on failure.
#define CUDA_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//******************************************************************************
//********************** testing cg kernel
/*
 * Reverses d_arr (num_elements entries) in place across the whole grid:
 * every thread loads its element, a grid-wide barrier waits until all
 * loads are done, then every thread stores its element at the mirrored
 * index. Must be launched via hipLaunchCooperativeKernel, and the grid
 * must contain at least num_elements threads.
 *
 * Fix: cg::grid_group::sync() must be reached by EVERY thread in the
 * grid; the original constructed and synced the barrier inside
 * `if (tid < num_elements)`, which is undefined behaviour (possible
 * hang) whenever the grid is larger than the array. The barrier is now
 * unconditional.
 */
__global__ void testing_cg_grid_sync(const uint32_t num_elements,
	uint32_t *d_arr){
	uint32_t tid = threadIdx.x + blockDim.x*blockIdx.x;
	// grid-wide barrier (a cg::this_thread_block() barrier would only
	// synchronise within one block)
	cg::grid_group barrier = cg::this_grid();
	const bool in_range = tid < num_elements;
	uint32_t my_element = 0;
	if (in_range){
		my_element = d_arr[tid];
	}
	// wait for all reads — every thread must reach this point
	barrier.sync();
	if (in_range){
		uint32_t tar_id = num_elements - tid - 1;
		d_arr[tar_id] = my_element;
	}
}
//******************************************************************************
//********************** execute
/*
 * Allocates a 1M-element sequential array, reverses it on the GPU with a
 * cooperative (grid-synchronising) kernel whose grid is sized from the
 * occupancy calculator, then validates the reversal on the host.
 * Exits the process on any failure.
 *
 * Fixes: the cooperative-launch return code is now checked, and both the
 * host and device buffers are freed (the original leaked them).
 */
void execute_test(const int sm_count){
	//host array
	const uint32_t arr_size = 1 << 20; //1M
	uint32_t* h_arr = (uint32_t*)malloc(arr_size * sizeof(uint32_t));
	//fill with sequential numbers
	std::iota(h_arr, h_arr + arr_size, 0);
	//device array
	uint32_t* d_arr;
	CUDA_ERROR(hipMalloc((void**)&d_arr, arr_size*sizeof(uint32_t)));
	CUDA_ERROR(hipMemcpy(d_arr, h_arr, arr_size*sizeof(uint32_t),
		hipMemcpyHostToDevice));
	//launch config
	const int threads = 512;
	//size the grid to the number of co-resident blocks, following the
	//conjugateGradientMultiBlockCG CUDA sample (cooperative launches may
	//not oversubscribe the device):
	//https://github.com/NVIDIA/cuda-samples/blob/master/Samples/conjugateGradientMultiBlockCG/conjugateGradientMultiBlockCG.cu#L436
	int num_blocks_per_sm = 0;
	CUDA_ERROR(hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm,
		(void*)testing_cg_grid_sync, threads, 0));
	dim3 grid_dim(sm_count * num_blocks_per_sm, 1, 1), block_dim(threads, 1, 1);
	//NOTE(review): if grid_dim.x * threads < arr_size the kernel never
	//touches the array tail and validation below fails — confirm the
	//target device is large enough for a 1M-thread cooperative grid.
	printf("\n Launching %d blcoks, each containing %d threads", grid_dim.x,
		block_dim.x);
	//argument passed to the kernel
	void *kernel_args[] = {
		(void *)&arr_size,
		(void *)&d_arr,};
	//finally launch the kernel (cooperative launch enables grid-wide sync)
	CUDA_ERROR(hipLaunchCooperativeKernel((void*)testing_cg_grid_sync,
		grid_dim, block_dim, kernel_args));
	//make sure everything went okay
	CUDA_ERROR(hipGetLastError());
	CUDA_ERROR(hipDeviceSynchronize());
	//get results on the host
	CUDA_ERROR(hipMemcpy(h_arr, d_arr, arr_size*sizeof(uint32_t),
		hipMemcpyDeviceToHost));
	//validate
	for (uint32_t i = 0; i < arr_size; i++){
		if (h_arr[i] != arr_size - i - 1){
			printf("\n Result mismatch in h_arr[%u] = %u\n", i, h_arr[i]);
			exit(EXIT_FAILURE);
		}
	}
	//release resources (the original leaked both buffers)
	CUDA_ERROR(hipFree(d_arr));
	free(h_arr);
}
//******************************************************************************
/*
 * Entry point: selects device 0, queries its multiprocessor count and
 * runs the cooperative grid-sync reversal test.
 * Fix: hipSetDevice is now error-checked, consistent with every other
 * API call in this file.
 */
int main(int argc, char**argv) {
	//set to Titan V
	uint32_t device_id = 0;
	CUDA_ERROR(hipSetDevice(device_id));
	//get sm count
	hipDeviceProp_t devProp;
	CUDA_ERROR(hipGetDeviceProperties(&devProp, device_id));
	int sm_count = devProp.multiProcessorCount;
	//execute
	execute_test(sm_count);
	printf("\n Mission accomplished \n");
	return 0;
}
| 7e24e8156e8248a5b7aca5d9a13b0bc7ae3881c2.cu | #include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdint.h>
#include <cstdint>
#include <numeric>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
//********************** CUDA_ERROR
// Error-handling macro helper: wrap it around CUDA API calls whenever
// possible (see CUDA_ERROR below). On failure it prints the error string
// with file/line and terminates; on Windows it first pauses so a console
// window spawned by double-click stays visible.
// Fix: the _WIN32 branch previously only paused and then CONTINUED
// executing after the error; it now exits like the non-Windows path.
inline void HandleError(cudaError_t err, const char *file, int line) {
	if (err != cudaSuccess) {
		printf("\n%s in %s at line %d\n", cudaGetErrorString(err), file, line);
#ifdef _WIN32
		system("pause");
#endif
		exit(EXIT_FAILURE);
	}
}
// Convenience wrapper: checks an API return code at the call site, e.g.
// CUDA_ERROR(cudaMalloc(...)); reports file and line on failure.
#define CUDA_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//******************************************************************************
//********************** testing cg kernel
/*
 * Reverses d_arr (num_elements entries) in place across the whole grid:
 * every thread loads its element, a grid-wide barrier waits until all
 * loads are done, then every thread stores its element at the mirrored
 * index. Must be launched via cudaLaunchCooperativeKernel, and the grid
 * must contain at least num_elements threads.
 *
 * Fix: cg::grid_group::sync() must be reached by EVERY thread in the
 * grid; the original constructed and synced the barrier inside
 * `if (tid < num_elements)`, which is undefined behaviour (possible
 * hang) whenever the grid is larger than the array. The barrier is now
 * unconditional.
 */
__global__ void testing_cg_grid_sync(const uint32_t num_elements,
	uint32_t *d_arr){
	uint32_t tid = threadIdx.x + blockDim.x*blockIdx.x;
	// grid-wide barrier (a cg::this_thread_block() barrier would only
	// synchronise within one block)
	cg::grid_group barrier = cg::this_grid();
	const bool in_range = tid < num_elements;
	uint32_t my_element = 0;
	if (in_range){
		my_element = d_arr[tid];
	}
	// wait for all reads — every thread must reach this point
	barrier.sync();
	if (in_range){
		uint32_t tar_id = num_elements - tid - 1;
		d_arr[tar_id] = my_element;
	}
}
//******************************************************************************
//********************** execute
/*
 * Allocates a 1M-element sequential array, reverses it on the GPU with a
 * cooperative (grid-synchronising) kernel whose grid is sized from the
 * occupancy calculator, then validates the reversal on the host.
 * Exits the process on any failure.
 *
 * Fixes: the cooperative-launch return code is now checked, and both the
 * host and device buffers are freed (the original leaked them).
 */
void execute_test(const int sm_count){
	//host array
	const uint32_t arr_size = 1 << 20; //1M
	uint32_t* h_arr = (uint32_t*)malloc(arr_size * sizeof(uint32_t));
	//fill with sequential numbers
	std::iota(h_arr, h_arr + arr_size, 0);
	//device array
	uint32_t* d_arr;
	CUDA_ERROR(cudaMalloc((void**)&d_arr, arr_size*sizeof(uint32_t)));
	CUDA_ERROR(cudaMemcpy(d_arr, h_arr, arr_size*sizeof(uint32_t),
		cudaMemcpyHostToDevice));
	//launch config
	const int threads = 512;
	//size the grid to the number of co-resident blocks, following the
	//conjugateGradientMultiBlockCG CUDA sample (cooperative launches may
	//not oversubscribe the device):
	//https://github.com/NVIDIA/cuda-samples/blob/master/Samples/conjugateGradientMultiBlockCG/conjugateGradientMultiBlockCG.cu#L436
	int num_blocks_per_sm = 0;
	CUDA_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm,
		(void*)testing_cg_grid_sync, threads, 0));
	dim3 grid_dim(sm_count * num_blocks_per_sm, 1, 1), block_dim(threads, 1, 1);
	//NOTE(review): if grid_dim.x * threads < arr_size the kernel never
	//touches the array tail and validation below fails — confirm the
	//target device is large enough for a 1M-thread cooperative grid.
	printf("\n Launching %d blcoks, each containing %d threads", grid_dim.x,
		block_dim.x);
	//argument passed to the kernel
	void *kernel_args[] = {
		(void *)&arr_size,
		(void *)&d_arr,};
	//finally launch the kernel (cooperative launch enables grid-wide sync)
	CUDA_ERROR(cudaLaunchCooperativeKernel((void*)testing_cg_grid_sync,
		grid_dim, block_dim, kernel_args));
	//make sure everything went okay
	CUDA_ERROR(cudaGetLastError());
	CUDA_ERROR(cudaDeviceSynchronize());
	//get results on the host
	CUDA_ERROR(cudaMemcpy(h_arr, d_arr, arr_size*sizeof(uint32_t),
		cudaMemcpyDeviceToHost));
	//validate
	for (uint32_t i = 0; i < arr_size; i++){
		if (h_arr[i] != arr_size - i - 1){
			printf("\n Result mismatch in h_arr[%u] = %u\n", i, h_arr[i]);
			exit(EXIT_FAILURE);
		}
	}
	//release resources (the original leaked both buffers)
	CUDA_ERROR(cudaFree(d_arr));
	free(h_arr);
}
//******************************************************************************
/*
 * Entry point: selects device 0, queries its multiprocessor count and
 * runs the cooperative grid-sync reversal test.
 * Fix: cudaSetDevice is now error-checked, consistent with every other
 * API call in this file.
 */
int main(int argc, char**argv) {
	//set to Titan V
	uint32_t device_id = 0;
	CUDA_ERROR(cudaSetDevice(device_id));
	//get sm count
	cudaDeviceProp devProp;
	CUDA_ERROR(cudaGetDeviceProperties(&devProp, device_id));
	int sm_count = devProp.multiProcessorCount;
	//execute
	execute_test(sm_count);
	printf("\n Mission accomplished \n");
	return 0;
}
|
cb5eeaee51c889dffc713145873f1d58f8fc1cb7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <algorithm>
#include <omp.h>
#include <fstream>
#define KEPLER 0
#include "ErrorCheck.h"
#include "include/encode.cuh"
#include "include/decode.cuh"
#include "include/cuZFP.cuh"
#include "zfparray3.h"
// Neighbour-direction tags (left/right/up/down/near/far).
enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs;
using namespace thrust;
using namespace std;
// Linearises a coordinate inside a 4x4x4 zfp block, x fastest.
#define index(x, y, z) ((x) + 4 * ((y) + 4 * (z)))
// Test-volume dimensions.
const size_t nx = 512;
const size_t ny = 512;
const size_t nz = 512;
const int nt = 0;
const double pi = 3.14159265358979323846;
//BSIZE is the length of the array in class Bit
//It's tied to MAXBITS such that
//MAXBITS = sizeof(Word) * BSIZE
//which is really
//MAXBITS = wsize * BSIZE
//e.g. if we match bits one-to-one, double -> unsigned long long
// then BSIZE = 64 and MAXPBITS = 4096
#define BSIZE 16
// zfp bit-stream parameters (uploaded to constant memory by setupConst).
uint minbits = BSIZE*64;
uint MAXBITS = BSIZE*64;
uint MAXPREC = 64;
int MINEXP = -1074;
const double rate = BSIZE;
size_t blksize = 0;
unsigned long long group_count = 0x46acca631ull;
uint size = 64;
int EBITS = 11; /* number of exponent bits */
const int EBIAS = 1023; /* exponent bias (1023 for IEEE double) */
const int intprec = 64;
// zfp's 3-D coefficient ordering: maps sequence position -> flattened
// 4x4x4 cell, sorted by total degree i+j+k (the number after the ':' in
// each entry's comment). Copied to constant memory (c_perm) by setupConst.
static const unsigned char
perm[64] = {
	index(0, 0, 0), //  0 : 0
	index(1, 0, 0), //  1 : 1
	index(0, 1, 0), //  2 : 1
	index(0, 0, 1), //  3 : 1
	index(0, 1, 1), //  4 : 2
	index(1, 0, 1), //  5 : 2
	index(1, 1, 0), //  6 : 2
	index(2, 0, 0), //  7 : 2
	index(0, 2, 0), //  8 : 2
	index(0, 0, 2), //  9 : 2
	index(1, 1, 1), // 10 : 3
	index(2, 1, 0), // 11 : 3
	index(2, 0, 1), // 12 : 3
	index(0, 2, 1), // 13 : 3
	index(1, 2, 0), // 14 : 3
	index(1, 0, 2), // 15 : 3
	index(0, 1, 2), // 16 : 3
	index(3, 0, 0), // 17 : 3
	index(0, 3, 0), // 18 : 3
	index(0, 0, 3), // 19 : 3
	index(2, 1, 1), // 20 : 4
	index(1, 2, 1), // 21 : 4
	index(1, 1, 2), // 22 : 4
	index(0, 2, 2), // 23 : 4
	index(2, 0, 2), // 24 : 4
	index(2, 2, 0), // 25 : 4
	index(3, 1, 0), // 26 : 4
	index(3, 0, 1), // 27 : 4
	index(0, 3, 1), // 28 : 4
	index(1, 3, 0), // 29 : 4
	index(1, 0, 3), // 30 : 4
	index(0, 1, 3), // 31 : 4
	index(1, 2, 2), // 32 : 5
	index(2, 1, 2), // 33 : 5
	index(2, 2, 1), // 34 : 5
	index(3, 1, 1), // 35 : 5
	index(1, 3, 1), // 36 : 5
	index(1, 1, 3), // 37 : 5
	index(3, 2, 0), // 38 : 5
	index(3, 0, 2), // 39 : 5
	index(0, 3, 2), // 40 : 5
	index(2, 3, 0), // 41 : 5
	index(2, 0, 3), // 42 : 5
	index(0, 2, 3), // 43 : 5
	index(2, 2, 2), // 44 : 6
	index(3, 2, 1), // 45 : 6
	index(3, 1, 2), // 46 : 6
	index(1, 3, 2), // 47 : 6
	index(2, 3, 1), // 48 : 6
	index(2, 1, 3), // 49 : 6
	index(1, 2, 3), // 50 : 6
	index(0, 3, 3), // 51 : 6
	index(3, 0, 3), // 52 : 6
	index(3, 3, 0), // 53 : 6
	index(3, 2, 2), // 54 : 7
	index(2, 3, 2), // 55 : 7
	index(2, 2, 3), // 56 : 7
	index(1, 3, 3), // 57 : 7
	index(3, 1, 3), // 58 : 7
	index(3, 3, 1), // 59 : 7
	index(2, 3, 3), // 60 : 8
	index(3, 2, 3), // 61 : 8
	index(3, 3, 2), // 62 : 8
	index(3, 3, 3), // 63 : 9
};
/* Bytes needed to store one 64-value compressed block at `rate` bits per
   value: round 64*rate to the nearest whole bit count, then take the
   ceiling in bytes. */
static size_t block_size(double rate)
{
	long bits = lrint(64 * rate);              /* nearest whole number of bits */
	return (bits + CHAR_BIT - 1) / CHAR_BIT;   /* ceiling divide bits -> bytes */
}
/*
 * Uploads the zfp codec constants into GPU constant memory.
 *   perm          : 64-entry 4x4x4 coefficient-ordering table
 *   maxbits_      : bit budget per compressed block
 *   maxprec_      : maximum bit-plane precision
 *   minexp_       : minimum exponent (accuracy floor)
 *   ebits_/ebias_ : exponent field width / bias of Scalar
 *
 * Fix: the original ignored the maxbits_ parameter and uploaded the
 * file-scope global MAXBITS instead; c_maxbits now honours the argument
 * (existing callers that pass MAXBITS see identical behaviour).
 */
template<class Scalar>
void setupConst(const unsigned char *perm,
	uint maxbits_,
	uint maxprec_,
	int minexp_,
	int ebits_,
	int ebias_
	)
{
	ErrorCheck ec;
	ec.chk("setupConst start");
	// one symbol per upload so a failure can be attributed precisely
	hipMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm");
	hipMemcpyToSymbol(c_maxbits, &maxbits_, sizeof(uint)); ec.chk("setupConst: c_maxbits");
	const uint sizeof_scalar = sizeof(Scalar);
	hipMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar");
	hipMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec");
	hipMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp");
	hipMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits");
	hipMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias");
	ec.chk("setupConst finished");
}
// Functor used to generate a random array on the GPU with Thrust (e.g. via
// thrust::transform over a counting sequence). Each call constructs a fresh
// default engine and discards `idx` states so consecutive indices receive
// decorrelated draws; returns a uniform float in [0.0, 0.0001).
struct RandGen
{
	RandGen() {}
	// idx: global element index, used as the per-element stream offset
	__device__ float operator () (const uint idx)
	{
		thrust::default_random_engine randEng;
		thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001);
		randEng.discard(idx);
		return uniDist(randEng);
	}
};
// Flattens a global 3-D thread coordinate into a linear array index,
// x fastest, using the LAUNCH extents (blockDim * gridDim) as the array
// pitch — assumes the grid exactly spans the data volume; TODO confirm
// callers never launch with a grid larger than the array.
__device__
static inline
int idx(int x, int y, int z)
{
	return x + y * (blockDim.x * gridDim.x) + z * (blockDim.x * gridDim.x * blockDim.y * gridDim.y);
}
// One explicit Euler step of heat diffusion, u_t = k * (u_xx + u_yy + u_zz).
// Each thread handles one grid point; the 3-D launch must span the volume.
// Second derivatives use central differences; out-of-range neighbours are
// clamped to the domain edge via max/min (boundary value replicated).
// Writes only the increment dt*k*laplacian into du — u itself is not
// modified here (cudaSum applies the update). `tfinal` is unused in this
// kernel.
template<typename Scalar>
__global__
void cudaDiffusion
(
const Scalar *u,      // input field (read-only)
const Scalar dx,      // grid spacing in x
const Scalar dy,      // grid spacing in y
const Scalar dz,      // grid spacing in z
const Scalar dt,      // time step
const Scalar k,       // diffusion coefficient
const Scalar tfinal,  // unused here
Scalar *du            // output: per-point increment
)
{
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	int z = threadIdx.z + blockIdx.z * blockDim.z;
	// clamped central differences (max/min replicate the boundary value)
	Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx);
	Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy);
	Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz);
	du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz);
}
template<typename Scalar>
__global__
void cudaSum
(
Scalar *u,
const Scalar *du
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
u[idx(x, y, z)] += du[idx(x, y, z)];
}
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
const Scalar *u,
Word *du,
uint size,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k
)
{
uint x = threadIdx.x;
uint y = threadIdx.y;
uint z = threadIdx.z;
uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
uint bdim = blockDim.x*blockDim.y*blockDim.z;
uint tbidx = bidx*bdim;
extern __shared__ unsigned char smem[];
__shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
s_u = (Scalar*)&smem[0];
s_du = (Scalar*)&s_u[64];
s_u_ext = (Scalar*)&s_du[64];
s_nghs = (Scalar*)&s_u_ext[216];
unsigned char *new_smem = (unsigned char*)&s_nghs[64];
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u);
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du);
//__syncthreads();
int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z);
Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx);
Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y - 1, utid.y + 1), utid.z)]) / (dy * dy);
Scalar uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz);
s_du[tid] = dt*k * (uxx + uyy + uzz);
__syncthreads();
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
s_du,
size,
new_smem,
bidx * bsize,
du
);
}
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
const Word *u,
Word *du,
uint size,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k
)
{
uint x = threadIdx.x;
uint y = threadIdx.y;
uint z = threadIdx.z;
uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
uint bdim = blockDim.x*blockDim.y*blockDim.z;
uint bidx = idx*bdim;
extern __shared__ unsigned char smem[];
__shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
s_u = (Scalar*)&smem[0];
s_du = (Scalar*)&s_u[64];
s_u_ext = (Scalar*)&s_du[64];
s_nghs = (Scalar*)&s_u_ext[216];
unsigned char *new_smem = (unsigned char*)&s_nghs[64];
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u);
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du);
for (int i = 0; i < 3; i++){
s_u_ext[i * 64 + tid] = 0;
}
if (tid < 24)
s_u_ext[192 + tid] = 0;
__syncthreads();
//left
s_nghs[tid] = 0;
if (blockIdx.x > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//right
s_nghs[tid] = 0;
if (blockIdx.x+1 < gridDim.x){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//down
s_nghs[tid] = 0;
if (blockIdx.y > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//up
s_nghs[tid] = 0;
if (blockIdx.y + 1 < gridDim.y){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + 5 * 6 + (y + 1) * 36] = s_nghs[x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//near
s_nghs[tid] = 0;
if (blockIdx.z > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y];
}
__syncthreads();
//far
s_nghs[tid] = 0;
if (blockIdx.z + 1 < gridDim.z){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x];
}
__syncthreads();
s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid];
__syncthreads();
Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx);
Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy);
Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz);
s_du[tid] = dt*k * (uxx + uyy + uzz);
__syncthreads();
//if (uxx < 0 || uyy < 0 || uzz < 0){
// printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y);
//}
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
s_du,
size,
new_smem,
idx * bsize,
du
);
//out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid];
}
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
void gpuZFPDiffusion
(
int nx, int ny, int nz,
device_vector<Word > &u,
device_vector<Word > &du,
device_vector<Scalar> &df_u,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k,
const Scalar tfinal
)
{
dim3 block_size = dim3(4, 4, 4);
dim3 grid_size = dim3(nx, ny, nz);
grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z;
cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
(
thrust::raw_pointer_cast(u.data()),
thrust::raw_pointer_cast(du.data()),
size,
dx,dy,dz,dt,k
);
// cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(
// nx, ny, nz,
// u, df_u,
// group_count
// );
//cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
// (
// thrust::raw_pointer_cast(df_u.data()),
// thrust::raw_pointer_cast(du.data()),
// size,
// dx,dy,dz,dt,k
// );
cuZFP::transform <Int, UInt, Scalar, bsize, intprec>
(
nx,ny,nz,
size,
u,
du,
thrust::plus<Scalar>()
);
//Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u);
//Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du);
//cout << "post-transform du: " << sum_du << " u: " << sum_u << endl;
}
template<class Int, class UInt, class Scalar, uint bsize>
void gpuEncode
(
host_vector<Scalar> &h_u
)
{
device_vector<Scalar> d_u;
d_u = h_u;
ErrorCheck ec;
hipEvent_t start, stop;
float millisecs;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dim3 emax_size(nx / 4, ny / 4, nz / 4);
device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize);
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size);
hipStreamSynchronize(0);
ec.chk("cudaEncode");
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&millisecs, start, stop);
ec.chk("cudaencode");
cout << "encode GPU in time: " << millisecs/1000.0 << endl;
cout << "sum: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl;
double tot_sum = 0, max_diff = 0, min_diff = 1e16;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&millisecs, start, stop);
ec.chk("cudadecoe");
cout << "decode GPU in time: " << millisecs / 1000.0 << endl;
host_vector<Scalar> h_out = d_u;
//array3d out(nx, ny, nz, rate);
//for (int i = 0; i < h_out.size(); i++){
// out[i] = h_out[i];
//}
}
int main()
{
host_vector<double> h_vec_in(nx*ny*nz, 0);
ifstream ifs("../../llnl__0270_512_double.raw", ios::binary);
if (ifs) {
double read;
for (int i = 0; i < nx*ny*nz; i++){
ifs.read(reinterpret_cast<char*>(&read), sizeof read);
h_vec_in[i] = read;
}
}
ifs.close();
cout << "cpu encode start" << endl;
double start_time = omp_get_wtime();
zfp::array3d u(nx, ny, nz, rate);
for (int i = 0; i < nx*ny*nz; i++){
u[i] = h_vec_in[i];
}
double time = omp_get_wtime() - start_time;
cout << "decode cpu time: " << time << endl;
host_vector<double> h_vec_out(nx*ny*nz, 0);
cout << "cpu decode start" << endl;
start_time = omp_get_wtime();
for (int z = 0; z < nz; z++){
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
h_vec_out[z*nx*ny + y*nx + x] = u(x, y, z);
}
}
}
time = omp_get_wtime() - start_time;
cout << "decode cpu time: " << time << endl;
cout << "sum: " << thrust::reduce(h_vec_out.begin(), h_vec_out.end()) << endl;
cout << "GPU ZFP encode start" << endl;
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS);
cout << "Begin gpuDiffusion" << endl;
gpuEncode<long long, unsigned long long, double, BSIZE>(h_vec_in);
cout << "Finish gpuDiffusion" << endl;
}
| cb5eeaee51c889dffc713145873f1d58f8fc1cb7.cu | #include <iostream>
#include <iomanip>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <algorithm>
#include <omp.h>
#include <fstream>
#define KEPLER 0
#include "ErrorCheck.h"
#include "include/encode.cuh"
#include "include/decode.cuh"
#include "include/cuZFP.cuh"
#include "zfparray3.h"
enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs;
using namespace thrust;
using namespace std;
#define index(x, y, z) ((x) + 4 * ((y) + 4 * (z)))
const size_t nx = 512;
const size_t ny = 512;
const size_t nz = 512;
const int nt = 0;
const double pi = 3.14159265358979323846;
//BSIZE is the length of the array in class Bit
//It's tied to MAXBITS such that
//MAXBITS = sizeof(Word) * BSIZE
//which is really
//MAXBITS = wsize * BSIZE
//e.g. if we match bits one-to-one, double -> unsigned long long
// then BSIZE = 64 and MAXPBITS = 4096
#define BSIZE 16
uint minbits = BSIZE*64;
uint MAXBITS = BSIZE*64;
uint MAXPREC = 64;
int MINEXP = -1074;
const double rate = BSIZE;
size_t blksize = 0;
unsigned long long group_count = 0x46acca631ull;
uint size = 64;
int EBITS = 11; /* number of exponent bits */
const int EBIAS = 1023;
const int intprec = 64;
static const unsigned char
perm[64] = {
index(0, 0, 0), // 0 : 0
index(1, 0, 0), // 1 : 1
index(0, 1, 0), // 2 : 1
index(0, 0, 1), // 3 : 1
index(0, 1, 1), // 4 : 2
index(1, 0, 1), // 5 : 2
index(1, 1, 0), // 6 : 2
index(2, 0, 0), // 7 : 2
index(0, 2, 0), // 8 : 2
index(0, 0, 2), // 9 : 2
index(1, 1, 1), // 10 : 3
index(2, 1, 0), // 11 : 3
index(2, 0, 1), // 12 : 3
index(0, 2, 1), // 13 : 3
index(1, 2, 0), // 14 : 3
index(1, 0, 2), // 15 : 3
index(0, 1, 2), // 16 : 3
index(3, 0, 0), // 17 : 3
index(0, 3, 0), // 18 : 3
index(0, 0, 3), // 19 : 3
index(2, 1, 1), // 20 : 4
index(1, 2, 1), // 21 : 4
index(1, 1, 2), // 22 : 4
index(0, 2, 2), // 23 : 4
index(2, 0, 2), // 24 : 4
index(2, 2, 0), // 25 : 4
index(3, 1, 0), // 26 : 4
index(3, 0, 1), // 27 : 4
index(0, 3, 1), // 28 : 4
index(1, 3, 0), // 29 : 4
index(1, 0, 3), // 30 : 4
index(0, 1, 3), // 31 : 4
index(1, 2, 2), // 32 : 5
index(2, 1, 2), // 33 : 5
index(2, 2, 1), // 34 : 5
index(3, 1, 1), // 35 : 5
index(1, 3, 1), // 36 : 5
index(1, 1, 3), // 37 : 5
index(3, 2, 0), // 38 : 5
index(3, 0, 2), // 39 : 5
index(0, 3, 2), // 40 : 5
index(2, 3, 0), // 41 : 5
index(2, 0, 3), // 42 : 5
index(0, 2, 3), // 43 : 5
index(2, 2, 2), // 44 : 6
index(3, 2, 1), // 45 : 6
index(3, 1, 2), // 46 : 6
index(1, 3, 2), // 47 : 6
index(2, 3, 1), // 48 : 6
index(2, 1, 3), // 49 : 6
index(1, 2, 3), // 50 : 6
index(0, 3, 3), // 51 : 6
index(3, 0, 3), // 52 : 6
index(3, 3, 0), // 53 : 6
index(3, 2, 2), // 54 : 7
index(2, 3, 2), // 55 : 7
index(2, 2, 3), // 56 : 7
index(1, 3, 3), // 57 : 7
index(3, 1, 3), // 58 : 7
index(3, 3, 1), // 59 : 7
index(2, 3, 3), // 60 : 8
index(3, 2, 3), // 61 : 8
index(3, 3, 2), // 62 : 8
index(3, 3, 3), // 63 : 9
};
static size_t block_size(double rate) { return (lrint(64 * rate) + CHAR_BIT - 1) / CHAR_BIT; }
template<class Scalar>
void setupConst(const unsigned char *perm,
uint maxbits_,
uint maxprec_,
int minexp_,
int ebits_,
int ebias_
)
{
ErrorCheck ec;
ec.chk("setupConst start");
cudaMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm");
cudaMemcpyToSymbol(c_maxbits, &MAXBITS, sizeof(uint)); ec.chk("setupConst: c_maxbits");
const uint sizeof_scalar = sizeof(Scalar);
cudaMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar");
cudaMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec");
cudaMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp");
cudaMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits");
cudaMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias");
ec.chk("setupConst finished");
}
//Used to generate rand array in CUDA with Thrust
struct RandGen
{
RandGen() {}
__device__ float operator () (const uint idx)
{
thrust::default_random_engine randEng;
thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001);
randEng.discard(idx);
return uniDist(randEng);
}
};
__device__
static inline
int idx(int x, int y, int z)
{
return x + y * (blockDim.x * gridDim.x) + z * (blockDim.x * gridDim.x * blockDim.y * gridDim.y);
}
template<typename Scalar>
__global__
void cudaDiffusion
(
const Scalar *u,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k,
const Scalar tfinal,
Scalar *du
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx);
Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy);
Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz);
du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz);
}
template<typename Scalar>
__global__
void cudaSum
(
Scalar *u,
const Scalar *du
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
u[idx(x, y, z)] += du[idx(x, y, z)];
}
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
const Scalar *u,
Word *du,
uint size,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k
)
{
uint x = threadIdx.x;
uint y = threadIdx.y;
uint z = threadIdx.z;
uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
uint bdim = blockDim.x*blockDim.y*blockDim.z;
uint tbidx = bidx*bdim;
extern __shared__ unsigned char smem[];
__shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
s_u = (Scalar*)&smem[0];
s_du = (Scalar*)&s_u[64];
s_u_ext = (Scalar*)&s_du[64];
s_nghs = (Scalar*)&s_u_ext[216];
unsigned char *new_smem = (unsigned char*)&s_nghs[64];
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u);
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du);
//__syncthreads();
int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z);
Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx);
Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y - 1, utid.y + 1), utid.z)]) / (dy * dy);
Scalar uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz);
s_du[tid] = dt*k * (uxx + uyy + uzz);
__syncthreads();
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
s_du,
size,
new_smem,
bidx * bsize,
du
);
}
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
const Word *u,
Word *du,
uint size,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k
)
{
uint x = threadIdx.x;
uint y = threadIdx.y;
uint z = threadIdx.z;
uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
uint bdim = blockDim.x*blockDim.y*blockDim.z;
uint bidx = idx*bdim;
extern __shared__ unsigned char smem[];
__shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
s_u = (Scalar*)&smem[0];
s_du = (Scalar*)&s_u[64];
s_u_ext = (Scalar*)&s_du[64];
s_nghs = (Scalar*)&s_u_ext[216];
unsigned char *new_smem = (unsigned char*)&s_nghs[64];
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u);
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du);
for (int i = 0; i < 3; i++){
s_u_ext[i * 64 + tid] = 0;
}
if (tid < 24)
s_u_ext[192 + tid] = 0;
__syncthreads();
//left
s_nghs[tid] = 0;
if (blockIdx.x > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//right
s_nghs[tid] = 0;
if (blockIdx.x+1 < gridDim.x){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//down
s_nghs[tid] = 0;
if (blockIdx.y > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//up
s_nghs[tid] = 0;
if (blockIdx.y + 1 < gridDim.y){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + 5 * 6 + (y + 1) * 36] = s_nghs[x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//near
s_nghs[tid] = 0;
if (blockIdx.z > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y];
}
__syncthreads();
//far
s_nghs[tid] = 0;
if (blockIdx.z + 1 < gridDim.z){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ];
// }
// }
//}
if (z == 0){
s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x];
}
__syncthreads();
s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid];
__syncthreads();
Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx);
Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy);
Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz);
s_du[tid] = dt*k * (uxx + uyy + uzz);
__syncthreads();
//if (uxx < 0 || uyy < 0 || uzz < 0){
// printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y);
//}
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
s_du,
size,
new_smem,
idx * bsize,
du
);
//out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid];
}
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
void gpuZFPDiffusion
(
int nx, int ny, int nz,
device_vector<Word > &u,
device_vector<Word > &du,
device_vector<Scalar> &df_u,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k,
const Scalar tfinal
)
{
dim3 block_size = dim3(4, 4, 4);
dim3 grid_size = dim3(nx, ny, nz);
grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z;
cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
(
thrust::raw_pointer_cast(u.data()),
thrust::raw_pointer_cast(du.data()),
size,
dx,dy,dz,dt,k
);
// cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(
// nx, ny, nz,
// u, df_u,
// group_count
// );
//cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
// (
// thrust::raw_pointer_cast(df_u.data()),
// thrust::raw_pointer_cast(du.data()),
// size,
// dx,dy,dz,dt,k
// );
cuZFP::transform <Int, UInt, Scalar, bsize, intprec>
(
nx,ny,nz,
size,
u,
du,
thrust::plus<Scalar>()
);
//Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u);
//Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du);
//cout << "post-transform du: " << sum_du << " u: " << sum_u << endl;
}
template<class Int, class UInt, class Scalar, uint bsize>
void gpuEncode
(
host_vector<Scalar> &h_u
)
{
device_vector<Scalar> d_u;
d_u = h_u;
ErrorCheck ec;
cudaEvent_t start, stop;
float millisecs;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dim3 emax_size(nx / 4, ny / 4, nz / 4);
device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize);
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size);
cudaStreamSynchronize(0);
ec.chk("cudaEncode");
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millisecs, start, stop);
ec.chk("cudaencode");
cout << "encode GPU in time: " << millisecs/1000.0 << endl;
cout << "sum: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl;
double tot_sum = 0, max_diff = 0, min_diff = 1e16;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millisecs, start, stop);
ec.chk("cudadecoe");
cout << "decode GPU in time: " << millisecs / 1000.0 << endl;
host_vector<Scalar> h_out = d_u;
//array3d out(nx, ny, nz, rate);
//for (int i = 0; i < h_out.size(); i++){
// out[i] = h_out[i];
//}
}
int main()
{
host_vector<double> h_vec_in(nx*ny*nz, 0);
ifstream ifs("../../llnl__0270_512_double.raw", ios::binary);
if (ifs) {
double read;
for (int i = 0; i < nx*ny*nz; i++){
ifs.read(reinterpret_cast<char*>(&read), sizeof read);
h_vec_in[i] = read;
}
}
ifs.close();
cout << "cpu encode start" << endl;
double start_time = omp_get_wtime();
zfp::array3d u(nx, ny, nz, rate);
for (int i = 0; i < nx*ny*nz; i++){
u[i] = h_vec_in[i];
}
double time = omp_get_wtime() - start_time;
cout << "decode cpu time: " << time << endl;
host_vector<double> h_vec_out(nx*ny*nz, 0);
cout << "cpu decode start" << endl;
start_time = omp_get_wtime();
for (int z = 0; z < nz; z++){
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
h_vec_out[z*nx*ny + y*nx + x] = u(x, y, z);
}
}
}
time = omp_get_wtime() - start_time;
cout << "decode cpu time: " << time << endl;
cout << "sum: " << thrust::reduce(h_vec_out.begin(), h_vec_out.end()) << endl;
cout << "GPU ZFP encode start" << endl;
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS);
cout << "Begin gpuDiffusion" << endl;
gpuEncode<long long, unsigned long long, double, BSIZE>(h_vec_in);
cout << "Finish gpuDiffusion" << endl;
}
|
c3e95bfcc1832577766ada3f94686beee4995ce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "concat_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_ROCM)
template <typename T>
__global__ void KernelConcat(const T *in_data, int count, int num_concats,
int concat_size, int top_concat_axis,
int bottom_concat_axis, int offset_concat_axis,
T *out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int total_concat_size = concat_size * bottom_concat_axis;
int concat_num = globalid / total_concat_size;
int concat_index = globalid % total_concat_size;
int top_index =
concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
out_data[top_index] = in_data[globalid];
}
}
template <typename T>
void Concat(const T *in_data, int count, int num_concats, int concat_size,
int top_concat_axis, int bottom_concat_axis, int offset_concat_axis,
T *out_data) {
hipLaunchKernelGGL(( KernelConcat<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0,
in_data, count, num_concats, concat_size, top_concat_axis,
bottom_concat_axis, offset_concat_axis, out_data);
CUDA_CHECK(hipPeekAtLastError());
}
template void Concat(const float *in_data, int count, int num_concats,
int concat_size, int top_concat_axis,
int bottom_concat_axis, int offset_concat_axis,
float *out_data);
#endif
} // namespace Vision
} // namespace Shadow
| c3e95bfcc1832577766ada3f94686beee4995ce0.cu | #include "concat_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_CUDA)
template <typename T>
__global__ void KernelConcat(const T *in_data, int count, int num_concats,
int concat_size, int top_concat_axis,
int bottom_concat_axis, int offset_concat_axis,
T *out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int total_concat_size = concat_size * bottom_concat_axis;
int concat_num = globalid / total_concat_size;
int concat_index = globalid % total_concat_size;
int top_index =
concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
out_data[top_index] = in_data[globalid];
}
}
template <typename T>
void Concat(const T *in_data, int count, int num_concats, int concat_size,
int top_concat_axis, int bottom_concat_axis, int offset_concat_axis,
T *out_data) {
KernelConcat<T><<<GetBlocks(count), NumThreads>>>(
in_data, count, num_concats, concat_size, top_concat_axis,
bottom_concat_axis, offset_concat_axis, out_data);
CUDA_CHECK(cudaPeekAtLastError());
}
template void Concat(const float *in_data, int count, int num_concats,
int concat_size, int top_concat_axis,
int bottom_concat_axis, int offset_concat_axis,
float *out_data);
#endif
} // namespace Vision
} // namespace Shadow
|
81ed3117c9e6f82ce278dd319e3d190549f7ec83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
// Forward (leaky) ReLU kernel: out = in if in > 0, otherwise in *
// negative_slope. With negative_slope == 0 this is a standard ReLU.
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
  }
}
// GPU forward pass of the ReLU layer: applies the (leaky) ReLU kernel
// element-wise from bottom[0] into top[0].
// `preforward_flag` is part of this fork's layer interface and is not read
// in this body.
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top,
    const bool preforward_flag) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // Slope applied to negative inputs (0 gives a plain ReLU).
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, top_data, negative_slope);
  CUDA_POST_KERNEL_CHECK;
  // Leftover debug trace (kept for reference):
  // << " count: " << count << " bottom_data: "
  // << (unsigned long)bottom_data
  // << " top_data: " << (unsigned long)top_data
  // << " blocks: " << CAFFE_GET_BLOCKS(count)
  // << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
// Backward (leaky) ReLU kernel: scales the incoming gradient by 1 for
// positive inputs and by negative_slope for non-positive inputs.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * ((in_data[index] > 0)
        + (in_data[index] <= 0) * negative_slope);
  }
}
// GPU backward pass of the ReLU layer: propagates top[0]'s gradient into
// bottom[0] when propagate_down[0] is set; otherwise does nothing.
// `prebackward_flag` is part of this fork's layer interface and is not read
// in this body.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom,
    const bool prebackward_flag) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, bottom_data, bottom_diff, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
| 81ed3117c9e6f82ce278dd319e3d190549f7ec83.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
// Leaky ReLU forward: positives pass through unchanged, non-positives are
// scaled by negative_slope (0 gives a standard ReLU).
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype x = in[index];
    if (x > 0) {
      out[index] = x;
    } else {
      out[index] = x * negative_slope;
    }
  }
}
// GPU forward pass of the ReLU layer: applies the (leaky) ReLU kernel
// element-wise from bottom[0] into top[0].
// `preforward_flag` is part of this fork's layer interface and is not read
// in this body.
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top,
    const bool preforward_flag) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // Slope applied to negative inputs (0 gives a plain ReLU).
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data, negative_slope);
  CUDA_POST_KERNEL_CHECK;
  // Leftover debug trace (kept for reference):
  // << " count: " << count << " bottom_data: "
  // << (unsigned long)bottom_data
  // << " top_data: " << (unsigned long)top_data
  // << " blocks: " << CAFFE_GET_BLOCKS(count)
  // << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
// Leaky ReLU backward: the local gradient is 1 for positive inputs and
// negative_slope for non-positive inputs; scale the top gradient by it.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype x = in_data[index];
    const Dtype local_grad = (x > 0) + (x <= 0) * negative_slope;
    out_diff[index] = in_diff[index] * local_grad;
  }
}
// GPU backward pass of the ReLU layer: propagates top[0]'s gradient into
// bottom[0] when propagate_down[0] is set; otherwise does nothing.
// `prebackward_flag` is part of this fork's layer interface and is not read
// in this body.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom,
    const bool prebackward_flag) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    // NOLINT_NEXT_LINE(whitespace/operators)
    ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, bottom_data, bottom_diff, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
b7298a5579706f808eebfef5ba6567f953632eb2.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) NVIDIA Corporation and Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is cuda kernels for longformer attention softmax that does not use compact memory.
// It uses two temporary matrix of BxNxSxS, and consumes more memory when sequence length is large.
// Its logic is simpler with less constraints (like number of global tokens could be larger than attention windows).
#include <hipcub/hipcub.hpp>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <math_constants.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "longformer_attention_softmax.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
using namespace cub;
#define CHECK(expr) \
if (!CUBLAS_CALL(expr)) { \
return false; \
}
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Softmax over one row of the Longformer attention-score matrix.
// Launch shape: one thread block per row of the (B, N, S, S) scores, i.e.
// gridDim.x == B * N * S and blockDim.x == blockSize. `dim0` is passed as
// num_heads * sequence_length by the launcher below, so blockIdx.x / dim0
// recovers the batch index and blockIdx.x % sequence_length the row index.
// Local rows normalize over their window [row - w, row + w] plus all global
// tokens; global rows (and fully masked rows) cover the full sequence.
template <typename T, int blockSize>
__launch_bounds__(blockSize)
    __global__ void LongformerSoftmaxSimpleKernel(const int* global_attention,
                                                  const int* global_index,
                                                  const int* batch_global_num,
                                                  const T* input,
                                                  const T* attention_mask,
                                                  T* output,
                                                  float scaler,
                                                  int dim0,
                                                  int sequence_length,
                                                  int attention_window) {
  typedef hipcub::BlockReduce<float, blockSize> BlockReduce;
  __shared__ typename BlockReduce::TempStorage block_reduce_temp;
  __shared__ float max_shared;
  __shared__ float sum_shared;
  const T* input_block = input + sequence_length * blockIdx.x;
  T* output_block = output + sequence_length * blockIdx.x;
  const int batch_index = blockIdx.x / dim0;
  const int row_index = blockIdx.x % sequence_length;
  const int global_num = batch_global_num[batch_index];
  // To be consistent with Huggingface Longformer, rows of masked words are set to zero.
  if ((float)attention_mask[batch_index * sequence_length + row_index] < 0.0f) {
    for (int i = threadIdx.x; i < sequence_length; i += blockSize) {
      output_block[i] = (T)(0);
    }
    return;
  }
  // local attention token: restrict [col_start, col_end) to the sliding window
  int col_start = 0;
  int col_end = sequence_length;
  bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == (int)0);
  if (is_local_row) {
    col_start = row_index - attention_window;
    if (col_start < 0) {
      col_start = 0;
    }
    col_end = row_index + attention_window + 1;
    if (col_end > sequence_length) {
      col_end = sequence_length;
    }
  }
  const T* mask_block = attention_mask + sequence_length * batch_index;
  int tid = threadIdx.x;
  // Pass 1: block-wide max of the scaled+masked scores, for numerical stability.
  float max_input = -CUDART_INF_F;
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = x * scaler + (float)mask_block[i];
    if (max_input < x) {
      max_input = x;
    }
  }
  if (is_local_row) {
    // Local rows also attend to global tokens outside the window.
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = x * scaler + (float)mask_block[i];
        if (max_input < x) {
          max_input = x;
        }
      }
    }
  }
  float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, hipcub::Max());
  if (tid == 0) {
    max_shared = max_block;
  }
  __syncthreads();
  // Pass 2: block-wide sum of exp(x - max).
  float sum_input = 0.f;
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    sum_input += x;
  }
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        sum_input += x;
      }
    }
  }
  float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, hipcub::Sum());
  if (tid == 0) {
    sum_shared = sum_block;
  }
  __syncthreads();
  float recip_sum = 1.f / sum_shared;
  if (is_local_row) {
    // We only need to fill in zeros for blocks that will be used in the matrix multiplication
    // following the Softmax.
    //
    // For now zero-out only [row_index - 2*attention_window, row_index + 2*attention_window],
    // we can even be more aggressive and reduce the zeroing out window size since
    // each row has entries in 3 blocks (3*attention_window size instead of 4*attention_window)
    int zero_start = row_index - 2 * attention_window;
    if (zero_start < 0) {
      zero_start = 0;
    }
    int zero_end = row_index + 2 * attention_window;
    if (zero_end > sequence_length) {
      zero_end = sequence_length;
    }
    for (int i = tid + zero_start; i < zero_end; i += blockSize) {
      if (i < col_start || i >= col_end) {
        output_block[i] = (T)(0.);
      }
    }
  }
  __syncthreads();
  // Pass 3: write normalized probabilities (global columns first, then window).
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        output_block[i] = (T)(recip_sum * x);
      }
    }
  }
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    output_block[i] = (T)(recip_sum * x);
  }
}
// Launch the softmax kernel for non compact memory.
// Computes output = softmax(scaler * Q*K^T + mask) * V with Longformer's
// banded local-window plus global-token attention pattern. Two temporary
// (B, N, S, S) matrices inside `workspace` hold the raw scores (scratch1)
// and the softmax probabilities (scratch2). GEMMs go through `cublas`
// (column-major, so operands are swapped to emulate row-major); the softmax
// kernel itself runs on `stream`. Returns false as soon as any cuBLAS call
// fails (see the CHECK macro above).
// NOTE(review): the block arithmetic below assumes sequence_length is a
// multiple of attention_window and sequence_length >= 2 * attention_window;
// confirm that callers validate this.
bool LaunchLongformerSoftmaxSimpleKernel(
    hipStream_t stream,
    hipblasHandle_t cublas,
    void* workspace,              // softmax space
    const void* q,                // transposed Q with shape (B, N, S, H)
    const void* k,                // transposed K with shape (B, N, S, H)
    const void* v,                // transposed V with shape (B, N, S, H)
    const void* attention_mask,   // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 masked.
    const void* global_q,         // Q for global tokens with shape (B, N, S, H)
    const void* global_k,         // K for global tokens with shape (B, N, S, H)
    const void* global_v,         // V for global tokens with shape (B, N, S, H)
    const int* global_attention,  // global attention flags with shape (B, S), with value 0 for local and 1 for global.
    const int* global_index,      // Global index with shape (B, S)
    const int* batch_global_num,  // Number of global tokens per batch with shape (B, 1)
    void* pinned_buffer,          // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1)
    void* output,                 // output with shape (B, N, S, H)
    float scaler,                 // scalar
    int batch_size,               // batch size
    int sequence_length,          // sequence length
    int num_heads,                // number of heads
    int head_size,                // hidden size per head
    int attention_window,         // one sided windows size
    size_t element_size) {        // size of element: 2 for half, and 4 for float
  bool is_fp16 = (element_size == 2);
  // scratch1 holds Q*K^T scores, scratch2 the softmax output; both (B, N, S, S).
  void* scratch1 = reinterpret_cast<char*>(workspace);
  size_t scratch1_size = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
  void* scratch2 = reinterpret_cast<char*>(scratch1) + scratch1_size;
  // setup shared parameters for two strided batched matrix multiplies
  hipDataType Atype;
  hipDataType Btype;
  hipDataType Ctype;
  hipDataType resultType;
  hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
  __half one_fp16, zero_fp16;
  float one_fp32, zero_fp32;
  void *alpha, *beta_0, *beta_1;
  if (is_fp16) {
    one_fp16 = __float2half(1.f);
    zero_fp16 = __float2half(0.f);
    alpha = static_cast<void*>(&one_fp16);
    beta_0 = static_cast<void*>(&zero_fp16);
    beta_1 = static_cast<void*>(&one_fp16);
    Atype = HIP_R_16F;
    Btype = HIP_R_16F;
    Ctype = HIP_R_16F;
    resultType = HIP_R_16F;
    algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
  } else {
    one_fp32 = 1.f;
    zero_fp32 = 0.f;
    alpha = static_cast<void*>(&one_fp32);
    beta_0 = static_cast<void*>(&zero_fp32);
    beta_1 = static_cast<void*>(&one_fp32);
    Atype = HIP_R_32F;
    Btype = HIP_R_32F;
    Ctype = HIP_R_32F;
    resultType = HIP_R_32F;
  }
  // Strided batch matrix multiply
  //    qk = q * k^T
  // Shapes: q and k = B x N x S x H, qk = B x N x S x S
  // Convert col-major to row-major by swapping q and k in Gemm
  // Local attention part
  // S x S is calculated using sliding block WxW (W is one sided window size) like the following:
  //   [W][W]
  //   [W][W][W]
  //      [W][W][W]
  //         [W][W]
  // The first and last rows have 2 blocks, and the remaining has 3 blocks per row.
  // The calculation is split into 3 parts: fill the middle rows, then the first row and finally the last row.
  // The results are stored in scratch1.
  int w = attention_window;
  int x_offset = num_heads * sequence_length * head_size;          // per-batch stride in Q/K/V
  int y_offset = num_heads * sequence_length * sequence_length;    // per-batch stride in the score matrix
  int last_block = (sequence_length / w) - 1;
  int strideA = sequence_length * head_size;
  int strideB = sequence_length * head_size;
  int strideC = sequence_length * sequence_length;
  // When S == 2W, there is no middle rows of blocks:
  //   [W][W]
  //   [W][W]
  // We can use normal matrix multiplication in this case.
  if (sequence_length == 2 * w) {
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_T,
                                      HIPBLAS_OP_N,
                                      sequence_length,
                                      sequence_length,
                                      head_size,
                                      alpha,
                                      k,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      q,
                                      Btype,
                                      head_size,
                                      sequence_length * head_size,
                                      beta_0,
                                      scratch1,
                                      Ctype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  } else {  // sequence_length > 2 * w
    // Middle rows: for each (batch, head), one strided-batched GEMM computes
    // all 3W-wide bands at once (strideC walks one row of blocks at a time).
    for (int i = 0; i < batch_size; ++i) {
      for (int j = 0; j < num_heads; ++j) {
        const void* q_head = reinterpret_cast<const char*>(q) + \
            (i * x_offset + j * sequence_length * head_size + w * head_size) * element_size;
        const void* k_head = reinterpret_cast<const char*>(k) + (i * x_offset + j * sequence_length * head_size) * element_size;
        void* qk_head = reinterpret_cast<char*>(scratch1) + \
            (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
        int count = (sequence_length - 2 * w) / w;
        CHECK(hipblasGemmStridedBatchedEx(cublas,
                                          HIPBLAS_OP_T,
                                          HIPBLAS_OP_N,
                                          3 * w,                    // m
                                          w,                        // n
                                          head_size,                // k
                                          alpha,                    // alpha
                                          k_head,                   // A
                                          Atype,                    // A type
                                          head_size,                // lda
                                          w * head_size,            // strideA
                                          q_head,                   // B
                                          Btype,                    // B type
                                          head_size,                // ldb
                                          w * head_size,            // strideB
                                          beta_0,                   // beta
                                          qk_head,                  // C
                                          Ctype,                    // C type
                                          sequence_length,          // ldc
                                          sequence_length * w + w,  // strideC
                                          count,                    // batch count
                                          resultType,
                                          algo));
      }
    }
    // First row of blocks (2W wide), all batches/heads in one call.
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_T,
                                      HIPBLAS_OP_N,
                                      2 * w,                   // m
                                      w,                       // n
                                      head_size,               // k
                                      alpha,                   // alpha
                                      k,                       // A
                                      Atype,                   // A type
                                      head_size,               // lda
                                      strideA,                 // strideA
                                      q,                       // B
                                      Btype,                   // B type
                                      head_size,               // ldb
                                      strideB,                 // strideB
                                      beta_0,                  // beta
                                      scratch1,                // C
                                      Ctype,                   // C type
                                      sequence_length,         // ldc
                                      strideC,                 // strideC
                                      batch_size * num_heads,  // batch count
                                      resultType,
                                      algo));
    // Last row of blocks (2W wide), all batches/heads in one call.
    const void* q_head = reinterpret_cast<const char*>(q) + (last_block * w * head_size) * element_size;
    const void* k_head = reinterpret_cast<const char*>(k) + ((last_block - 1) * w * head_size) * element_size;
    void* qk_head = reinterpret_cast<char*>(scratch1) + \
        (last_block * w * sequence_length + (last_block - 1) * w) * element_size;
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_T,
                                      HIPBLAS_OP_N,
                                      2 * w,
                                      w,
                                      head_size,
                                      alpha,
                                      k_head,
                                      Atype,
                                      head_size,
                                      strideA,
                                      q_head,
                                      Btype,
                                      head_size,
                                      strideB,
                                      beta_0,
                                      qk_head,
                                      Ctype,
                                      sequence_length,
                                      strideC,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  }
  // Per-batch global token counts, copied to pinned host memory by the caller
  // (see the pinned_buffer parameter above).
  const int* batch_global_count = reinterpret_cast<const int*>(pinned_buffer);
  // Global attention part
  for (int i = 0; i < batch_size; ++i) {
    if (batch_global_count[i] > 0) {
      const void* q_batch = reinterpret_cast<const char*>(q) + (i * x_offset) * element_size;
      const void* k_batch = reinterpret_cast<const char*>(k) + (i * x_offset) * element_size;
      void* qk_batch = reinterpret_cast<char*>(scratch1) + (i * y_offset) * element_size;
      // Local tokens attending global tokens
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_T,
                                        HIPBLAS_OP_N,
                                        batch_global_count[i],
                                        sequence_length,
                                        head_size,
                                        alpha,
                                        k_batch,
                                        Atype,
                                        head_size,
                                        strideA,
                                        q_batch,
                                        Btype,
                                        head_size,
                                        strideB,
                                        beta_0,
                                        qk_batch,
                                        Ctype,
                                        sequence_length,
                                        strideC,
                                        num_heads,
                                        resultType,
                                        algo));
      const void* global_q_batch = reinterpret_cast<const char*>(global_q) + \
          (i * num_heads * sequence_length * head_size) * element_size;
      const void* global_k_batch = reinterpret_cast<const char*>(global_k) + (i * x_offset) * element_size;
      int strideB_global = sequence_length * head_size;
      // Global tokens attending everything
      // This GEMMs need to be last to make sure all global token entries are re-written.
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_T,
                                        HIPBLAS_OP_N,
                                        sequence_length,
                                        batch_global_count[i],
                                        head_size,
                                        alpha,
                                        global_k_batch,
                                        Atype,
                                        head_size,
                                        strideA,
                                        global_q_batch,
                                        Btype,
                                        head_size,
                                        strideB_global,
                                        beta_0,
                                        qk_batch,
                                        Ctype,
                                        sequence_length,
                                        strideC,
                                        num_heads,
                                        resultType,
                                        algo));
    }
  }
  // Softmax: one thread block per row of the B x N x S x S score matrix.
  int dim0 = sequence_length * num_heads;
  int dim1 = sequence_length;
  void* softmax_out = scratch2;
  const int blockSize = 64;
  const int gridSize = batch_size * num_heads * sequence_length;
  if (is_fp16) {
    hipLaunchKernelGGL(( LongformerSoftmaxSimpleKernel<__half, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream,
        global_attention,
        global_index,
        batch_global_num,
        static_cast<const __half*>(scratch1),
        static_cast<const __half*>(attention_mask),
        static_cast<__half*>(softmax_out), scaler, dim0, dim1, attention_window);
  } else {
    hipLaunchKernelGGL(( LongformerSoftmaxSimpleKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream,
        global_attention,
        global_index,
        batch_global_num,
        static_cast<const float*>(scratch1),
        static_cast<const float*>(attention_mask),
        static_cast<float*>(softmax_out), scaler, dim0, dim1, attention_window);
  }
  // Run the matrix multiply: output = softmax_out * v
  //   softmax_out: B x N x S x S
  //   v: B x N x S x H
  //   attn_out: B x N x S x H
  // Calculation uses full Gemm (S == 2W) or sliding blocks (S > 2W) in a way similar to local attention part.
  if (sequence_length == 2 * w) {
    // convert col-major to row-major by swapping softmax_out and v
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_N,
                                      HIPBLAS_OP_N,
                                      head_size,
                                      sequence_length,
                                      sequence_length,
                                      alpha,
                                      v,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      softmax_out,
                                      Btype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      beta_0,
                                      output,
                                      Ctype,
                                      head_size,
                                      sequence_length * head_size,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  } else {  // sequence_length > 2 * w
    // Middle rows of blocks, per (batch, head).
    for (int i = 0; i < batch_size; ++i) {
      for (int j = 0; j < num_heads; ++j) {
        const void* v_head = reinterpret_cast<const char*>(v) + \
            (i * x_offset + j * head_size * sequence_length) * element_size;
        const void* prob_head = reinterpret_cast<const char*>(softmax_out) + \
            (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
        void* out_head = reinterpret_cast<char*>(output) + \
            (i * x_offset + j * head_size * sequence_length + w * head_size) * element_size;
        int count = (sequence_length - 2 * w) / w;
        CHECK(hipblasGemmStridedBatchedEx(cublas,
                                          HIPBLAS_OP_N,
                                          HIPBLAS_OP_N,
                                          head_size,
                                          w,
                                          3 * w,
                                          alpha,
                                          v_head,
                                          Atype,
                                          head_size,
                                          w * head_size,
                                          prob_head,
                                          Btype,
                                          sequence_length,
                                          sequence_length * w + w,
                                          beta_0,
                                          out_head,
                                          Ctype,
                                          head_size,
                                          w * head_size,
                                          count,
                                          resultType,
                                          algo));
      }
    }
    // First row of blocks.
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_N,
                                      HIPBLAS_OP_N,
                                      head_size,
                                      w,
                                      2 * w,
                                      alpha,
                                      v,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      softmax_out,
                                      Btype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      beta_0,
                                      output,
                                      Ctype,
                                      head_size,
                                      sequence_length * head_size,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
    // Last row of blocks.
    const void* v_head = reinterpret_cast<const char*>(v) + (last_block - 1) * w * head_size * element_size;
    const void* prob_head = reinterpret_cast<const char*>(softmax_out) + \
        (sequence_length * last_block * w + (last_block - 1) * w) * element_size;
    void* out_head = reinterpret_cast<char*>(output) + last_block * w * head_size * element_size;
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_N,
                                      HIPBLAS_OP_N,
                                      head_size,
                                      w,
                                      2 * w,
                                      alpha,
                                      v_head,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      prob_head,
                                      Btype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      beta_0,
                                      out_head,
                                      Ctype,
                                      head_size,
                                      sequence_length * head_size,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  }
  // Global-token contributions to the output.
  for (int i = 0; i < batch_size; ++i) {
    if (batch_global_count[i] > 0) {
      int glob_longdim_mm = (last_block - 1) * w;
      // Accumulate (beta_1) the global-column probabilities into the local rows.
      const void* v_head = reinterpret_cast<const char*>(v) + (i * x_offset) * element_size;
      const void* prob_head = reinterpret_cast<const char*>(softmax_out) + \
          (i * y_offset + 2 * w * sequence_length) * element_size;
      void* out_head = reinterpret_cast<char*>(output) + (i * x_offset + 2 * w * head_size) * element_size;
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_N,
                                        HIPBLAS_OP_N,
                                        head_size,
                                        glob_longdim_mm,
                                        batch_global_count[i],
                                        alpha,
                                        v_head,
                                        Atype,
                                        head_size,
                                        sequence_length * head_size,
                                        prob_head,
                                        Btype,
                                        sequence_length,
                                        sequence_length * sequence_length,
                                        beta_1,
                                        out_head,
                                        Ctype,
                                        head_size,
                                        sequence_length * head_size,
                                        num_heads,
                                        resultType,
                                        algo));
      // Global tokens
      v_head = reinterpret_cast<const char*>(global_v) + (i * x_offset) * element_size;
      prob_head = reinterpret_cast<const char*>(softmax_out) + (i * y_offset) * element_size;
      out_head = reinterpret_cast<char*>(output) + (i * x_offset) * element_size;
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_N,
                                        HIPBLAS_OP_N,
                                        head_size,
                                        batch_global_count[i],
                                        sequence_length,  // Re-write entries completely
                                        alpha,
                                        v_head,
                                        Atype,
                                        head_size,
                                        sequence_length * head_size,
                                        prob_head,
                                        Btype,
                                        sequence_length,
                                        sequence_length * sequence_length,
                                        beta_0,    // Use beta=0 to overwrite
                                        out_head,  // Here assumes global tokens are at the beginning of sequence.
                                        Ctype,
                                        head_size,
                                        sequence_length * head_size,
                                        num_heads,
                                        resultType,
                                        algo));
    }
  }
  return true;
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| b7298a5579706f808eebfef5ba6567f953632eb2.cu | /*
Copyright (c) NVIDIA Corporation and Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is cuda kernels for longformer attention softmax that does not use compact memory.
// It uses two temporary matrix of BxNxSxS, and consumes more memory when sequence length is large.
// Its logic is simpler with less constraints (like number of global tokens could be larger than attention windows).
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <math_constants.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "longformer_attention_softmax.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
using namespace cub;
#define CHECK(expr) \
if (!CUBLAS_CALL(expr)) { \
return false; \
}
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Softmax over one row of the Longformer attention-score matrix.
// Launch shape: one thread block per row of the (B, N, S, S) scores, i.e.
// gridDim.x == B * N * S and blockDim.x == blockSize. `dim0` is expected to
// be num_heads * sequence_length (see the launcher following this kernel),
// so blockIdx.x / dim0 recovers the batch index and blockIdx.x %
// sequence_length the row index.
// Local rows normalize over their window [row - w, row + w] plus all global
// tokens; global rows (and fully masked rows) cover the full sequence.
template <typename T, int blockSize>
__launch_bounds__(blockSize)
    __global__ void LongformerSoftmaxSimpleKernel(const int* global_attention,
                                                  const int* global_index,
                                                  const int* batch_global_num,
                                                  const T* input,
                                                  const T* attention_mask,
                                                  T* output,
                                                  float scaler,
                                                  int dim0,
                                                  int sequence_length,
                                                  int attention_window) {
  typedef cub::BlockReduce<float, blockSize> BlockReduce;
  __shared__ typename BlockReduce::TempStorage block_reduce_temp;
  __shared__ float max_shared;
  __shared__ float sum_shared;
  const T* input_block = input + sequence_length * blockIdx.x;
  T* output_block = output + sequence_length * blockIdx.x;
  const int batch_index = blockIdx.x / dim0;
  const int row_index = blockIdx.x % sequence_length;
  const int global_num = batch_global_num[batch_index];
  // To be consistent with Huggingface Longformer, rows of masked words are set to zero.
  if ((float)attention_mask[batch_index * sequence_length + row_index] < 0.0f) {
    for (int i = threadIdx.x; i < sequence_length; i += blockSize) {
      output_block[i] = (T)(0);
    }
    return;
  }
  // local attention token: restrict [col_start, col_end) to the sliding window
  int col_start = 0;
  int col_end = sequence_length;
  bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == (int)0);
  if (is_local_row) {
    col_start = row_index - attention_window;
    if (col_start < 0) {
      col_start = 0;
    }
    col_end = row_index + attention_window + 1;
    if (col_end > sequence_length) {
      col_end = sequence_length;
    }
  }
  const T* mask_block = attention_mask + sequence_length * batch_index;
  int tid = threadIdx.x;
  // Pass 1: block-wide max of the scaled+masked scores, for numerical stability.
  float max_input = -CUDART_INF_F;
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = x * scaler + (float)mask_block[i];
    if (max_input < x) {
      max_input = x;
    }
  }
  if (is_local_row) {
    // Local rows also attend to global tokens outside the window.
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = x * scaler + (float)mask_block[i];
        if (max_input < x) {
          max_input = x;
        }
      }
    }
  }
  float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, cub::Max());
  if (tid == 0) {
    max_shared = max_block;
  }
  __syncthreads();
  // Pass 2: block-wide sum of exp(x - max).
  float sum_input = 0.f;
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    sum_input += x;
  }
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        sum_input += x;
      }
    }
  }
  float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, cub::Sum());
  if (tid == 0) {
    sum_shared = sum_block;
  }
  __syncthreads();
  float recip_sum = 1.f / sum_shared;
  if (is_local_row) {
    // We only need to fill in zeros for blocks that will be used in the matrix multiplication
    // following the Softmax.
    //
    // For now zero-out only [row_index - 2*attention_window, row_index + 2*attention_window],
    // we can even be more aggressive and reduce the zeroing out window size since
    // each row has entries in 3 blocks (3*attention_window size instead of 4*attention_window)
    int zero_start = row_index - 2 * attention_window;
    if (zero_start < 0) {
      zero_start = 0;
    }
    int zero_end = row_index + 2 * attention_window;
    if (zero_end > sequence_length) {
      zero_end = sequence_length;
    }
    for (int i = tid + zero_start; i < zero_end; i += blockSize) {
      if (i < col_start || i >= col_end) {
        output_block[i] = (T)(0.);
      }
    }
  }
  __syncthreads();
  // Pass 3: write normalized probabilities (global columns first, then window).
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        output_block[i] = (T)(recip_sum * x);
      }
    }
  }
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    output_block[i] = (T)(recip_sum * x);
  }
}
// Launch the softmax kernel for non compact memory.
bool LaunchLongformerSoftmaxSimpleKernel(
cudaStream_t stream,
cublasHandle_t cublas,
void* workspace, // softmax space
const void* q, // transposed Q with shape (B, N, S, H)
const void* k, // transposed K with shape (B, N, S, H)
const void* v, // transposed V with shape (B, N, S, H)
const void* attention_mask, // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 masked.
const void* global_q, // Q for global tokens with shape (B, N, S, H)
const void* global_k, // K for global tokens with shape (B, N, S, H)
const void* global_v, // V for global tokens with shape (B, N, S, H)
const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.
const int* global_index, // Global index with shape (B, S)
const int* batch_global_num, // Number of global tokens per batch with shape (B, 1)
void* pinned_buffer, // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1)
void* output, // output with shape (B, N, S, H)
float scaler, // scalar
int batch_size, // batch size
int sequence_length, // sequence length
int num_heads, // number of heads
int head_size, // hidden size per head
int attention_window, // one sided windows size
size_t element_size) { // size of element: 2 for half, and 4 for float
bool is_fp16 = (element_size == 2);
void* scratch1 = reinterpret_cast<char*>(workspace);
size_t scratch1_size = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
void* scratch2 = reinterpret_cast<char*>(scratch1) + scratch1_size;
// setup shared parameters for two strided batched matrix multiplies
cudaDataType_t Atype;
cudaDataType_t Btype;
cudaDataType_t Ctype;
cudaDataType_t resultType;
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
__half one_fp16, zero_fp16;
float one_fp32, zero_fp32;
void *alpha, *beta_0, *beta_1;
if (is_fp16) {
one_fp16 = __float2half(1.f);
zero_fp16 = __float2half(0.f);
alpha = static_cast<void*>(&one_fp16);
beta_0 = static_cast<void*>(&zero_fp16);
beta_1 = static_cast<void*>(&one_fp16);
Atype = CUDA_R_16F;
Btype = CUDA_R_16F;
Ctype = CUDA_R_16F;
resultType = CUDA_R_16F;
algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
} else {
one_fp32 = 1.f;
zero_fp32 = 0.f;
alpha = static_cast<void*>(&one_fp32);
beta_0 = static_cast<void*>(&zero_fp32);
beta_1 = static_cast<void*>(&one_fp32);
Atype = CUDA_R_32F;
Btype = CUDA_R_32F;
Ctype = CUDA_R_32F;
resultType = CUDA_R_32F;
}
// Strided batch matrix multiply
// qk = q * k^T
// Shapes: q and k = B x N x S x H, qk = B x N x S x S
// Convert col-major to row-major by swapping q and k in Gemm
// Local attention part
// S x S is calculated using sliding block WxW (W is one sided window size) like the following:
// [W][W]
// [W][W][W]
// [W][W][W]
// [W][W]
// The first and last rows have 2 blocks, and the remaining has 3 blocks per row.
// The calculation are splited into 3 parts: Fill the middle rows, then the first row and finally the last row.
// The results are stored in scratch1.
int w = attention_window;
int x_offset = num_heads * sequence_length * head_size;
int y_offset = num_heads * sequence_length * sequence_length;
int last_block = (sequence_length / w) - 1;
int strideA = sequence_length * head_size;
int strideB = sequence_length * head_size;
int strideC = sequence_length * sequence_length;
// When S == 2W, there is no middle rows of blocks:
// [W][W]
// [W][W]
// We can use normal matrix multiplication in this case.
if (sequence_length == 2 * w) {
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
sequence_length,
sequence_length,
head_size,
alpha,
k,
Atype,
head_size,
sequence_length * head_size,
q,
Btype,
head_size,
sequence_length * head_size,
beta_0,
scratch1,
Ctype,
sequence_length,
sequence_length * sequence_length,
batch_size * num_heads,
resultType,
algo));
} else { // sequence_length > 2 * w
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
const void* q_head = reinterpret_cast<const char*>(q) + \
(i * x_offset + j * sequence_length * head_size + w * head_size) * element_size;
const void* k_head = reinterpret_cast<const char*>(k) + (i * x_offset + j * sequence_length * head_size) * element_size;
void* qk_head = reinterpret_cast<char*>(scratch1) + \
(i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
int count = (sequence_length - 2 * w) / w;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
3 * w, // m
w, // n
head_size, // k
alpha, // alpha
k_head, // A
Atype, // A type
head_size, // lda
w * head_size, // strideA
q_head, // B
Btype, // B type
head_size, // ldb
w * head_size, // strideB
beta_0, // beta
qk_head, // C
Ctype, // C type
sequence_length, // ldc
sequence_length * w + w, // strideC
count, // batch count
resultType,
algo));
}
}
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
2 * w, // m
w, // n
head_size, // k
alpha, // alpha
k, // A
Atype, // A type
head_size, // lda
strideA, // strideA
q, // B
Btype, // B type
head_size, // ldb
strideB, // strideB
beta_0, // beta
scratch1, // C
Ctype, // C type
sequence_length, // ldc
strideC, // strideC
batch_size * num_heads, // batch count
resultType,
algo));
const void* q_head = reinterpret_cast<const char*>(q) + (last_block * w * head_size) * element_size;
const void* k_head = reinterpret_cast<const char*>(k) + ((last_block - 1) * w * head_size) * element_size;
void* qk_head = reinterpret_cast<char*>(scratch1) + \
(last_block * w * sequence_length + (last_block - 1) * w) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
2 * w,
w,
head_size,
alpha,
k_head,
Atype,
head_size,
strideA,
q_head,
Btype,
head_size,
strideB,
beta_0,
qk_head,
Ctype,
sequence_length,
strideC,
batch_size * num_heads,
resultType,
algo));
}
const int* batch_global_count = reinterpret_cast<const int*>(pinned_buffer);
// Global attention part
for (int i = 0; i < batch_size; ++i) {
if (batch_global_count[i] > 0) {
const void* q_batch = reinterpret_cast<const char*>(q) + (i * x_offset) * element_size;
const void* k_batch = reinterpret_cast<const char*>(k) + (i * x_offset) * element_size;
void* qk_batch = reinterpret_cast<char*>(scratch1) + (i * y_offset) * element_size;
// Local tokens attending global tokens
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
batch_global_count[i],
sequence_length,
head_size,
alpha,
k_batch,
Atype,
head_size,
strideA,
q_batch,
Btype,
head_size,
strideB,
beta_0,
qk_batch,
Ctype,
sequence_length,
strideC,
num_heads,
resultType,
algo));
const void* global_q_batch = reinterpret_cast<const char*>(global_q) + \
(i * num_heads * sequence_length * head_size) * element_size;
const void* global_k_batch = reinterpret_cast<const char*>(global_k) + (i * x_offset) * element_size;
int strideB_global = sequence_length * head_size;
// Global tokens attending everything
// This GEMMs need to be last to make sure all global token entries are re-written.
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
sequence_length,
batch_global_count[i],
head_size,
alpha,
global_k_batch,
Atype,
head_size,
strideA,
global_q_batch,
Btype,
head_size,
strideB_global,
beta_0,
qk_batch,
Ctype,
sequence_length,
strideC,
num_heads,
resultType,
algo));
}
}
int dim0 = sequence_length * num_heads;
int dim1 = sequence_length;
void* softmax_out = scratch2;
const int blockSize = 64;
const int gridSize = batch_size * num_heads * sequence_length;
if (is_fp16) {
LongformerSoftmaxSimpleKernel<__half, blockSize><<<gridSize, blockSize, 0, stream>>>(
global_attention,
global_index,
batch_global_num,
static_cast<const __half*>(scratch1),
static_cast<const __half*>(attention_mask),
static_cast<__half*>(softmax_out), scaler, dim0, dim1, attention_window);
} else {
LongformerSoftmaxSimpleKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(
global_attention,
global_index,
batch_global_num,
static_cast<const float*>(scratch1),
static_cast<const float*>(attention_mask),
static_cast<float*>(softmax_out), scaler, dim0, dim1, attention_window);
}
// Run the matrix multiply: output = softmax_out * v
// softmax_out: B x N x S x S
// v: B x N x S x H
// attn_out: B x N x S x H
// Calculation uses full Gemm (S == 2W) or sliding blocks (S > 2W) in a way similar to local attention part.
if (sequence_length == 2 * w) {
// convert col-major to row-major by swapping softmax_out and v
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
sequence_length,
sequence_length,
alpha,
v,
Atype,
head_size,
sequence_length * head_size,
softmax_out,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0,
output,
Ctype,
head_size,
sequence_length * head_size,
batch_size * num_heads,
resultType,
algo));
} else { // sequence_length > 2 * w
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
const void* v_head = reinterpret_cast<const char*>(v) + \
(i * x_offset + j * head_size * sequence_length) * element_size;
const void* prob_head = reinterpret_cast<const char*>(softmax_out) + \
(i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
void* out_head = reinterpret_cast<char*>(output) + \
(i * x_offset + j * head_size * sequence_length + w * head_size) * element_size;
int count = (sequence_length - 2 * w) / w;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
w,
3 * w,
alpha,
v_head,
Atype,
head_size,
w * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * w + w,
beta_0,
out_head,
Ctype,
head_size,
w * head_size,
count,
resultType,
algo));
}
}
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
w,
2 * w,
alpha,
v,
Atype,
head_size,
sequence_length * head_size,
softmax_out,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0,
output,
Ctype,
head_size,
sequence_length * head_size,
batch_size * num_heads,
resultType,
algo));
const void* v_head = reinterpret_cast<const char*>(v) + (last_block - 1) * w * head_size * element_size;
const void* prob_head = reinterpret_cast<const char*>(softmax_out) + \
(sequence_length * last_block * w + (last_block - 1) * w) * element_size;
void* out_head = reinterpret_cast<char*>(output) + last_block * w * head_size * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
w,
2 * w,
alpha,
v_head,
Atype,
head_size,
sequence_length * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0,
out_head,
Ctype,
head_size,
sequence_length * head_size,
batch_size * num_heads,
resultType,
algo));
}
for (int i = 0; i < batch_size; ++i) {
if (batch_global_count[i] > 0) {
int glob_longdim_mm = (last_block - 1) * w;
const void* v_head = reinterpret_cast<const char*>(v) + (i * x_offset) * element_size;
const void* prob_head = reinterpret_cast<const char*>(softmax_out) + \
(i * y_offset + 2 * w * sequence_length) * element_size;
void* out_head = reinterpret_cast<char*>(output) + (i * x_offset + 2 * w * head_size) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
glob_longdim_mm,
batch_global_count[i],
alpha,
v_head,
Atype,
head_size,
sequence_length * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_1,
out_head,
Ctype,
head_size,
sequence_length * head_size,
num_heads,
resultType,
algo));
// Global tokens
v_head = reinterpret_cast<const char*>(global_v) + (i * x_offset) * element_size;
prob_head = reinterpret_cast<const char*>(softmax_out) + (i * y_offset) * element_size;
out_head = reinterpret_cast<char*>(output) + (i * x_offset) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
batch_global_count[i],
sequence_length, // Re-write entries completely
alpha,
v_head,
Atype,
head_size,
sequence_length * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0, // Use beta=0 to overwrite
out_head, // Here assumes global tokens are at the beginning of sequence.
Ctype,
head_size,
sequence_length * head_size,
num_heads,
resultType,
algo));
}
}
return true;
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
d28174376d7064d8b4562042d6dcefd4f1894654.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Empty device kernel stub — declared but never given a body; presumably a
// placeholder for a per-pixel image-difference kernel (TODO confirm against callers).
__global__ void differenceImg_gpu()
{
} | d28174376d7064d8b4562042d6dcefd4f1894654.cu | #include "includes.h"
// Empty device kernel stub — declared but never given a body; presumably a
// placeholder for a per-pixel image-difference kernel (TODO confirm against callers).
__global__ void differenceImg_gpu()
{
}
db7a4ba33426103fa76e534234327ead1b9147ba.hip | // !!! This is a file automatically generated by hipify!!!
#include <unistd.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h>
#include "elastic_kernel.h"
#include <hip/hip_runtime_api.h>
// Wall-clock execution interval of one kernel, in seconds, as sampled with
// clock_gettime(CLOCK_REALTIME).
typedef struct{
	double start_time;
	double end_time;
}t_ktime;

// An application is composed of one or several kernels executed in order.
typedef struct{
	int num_kernels;           // number of valid entries in kid[]/kstubs[]
	int index;                 // index of the kernel currently scheduled
	t_Kernel kid[8];           // Max: 8 kernels per application
	t_kernel_stub* kstubs[8];  // One kernel stub per kernel
}t_application;

// Tables to store results for solo executions, filled from
// "profiling_table.bin" by read_profling_tables().
t_smk_solo *smk_solo;  // per-kernel tpms for each SMK configuration
t_smt_solo *smt_solo;  // per-kernel tpms for each SMT configuration

// Tables to store co-execution results (one entry per kernel pair)
t_smk_coBlocks **smk_conc;
t_smt_coBlocks **smt_conc; // tpms of each kernel in coexecution

// Tables to store the best (highest-speedup) configuration in coexecution
// for each kernel pair
t_co_speedup **smk_best_sp;
t_co_speedup **smt_best_sp;
// Load the profiling tables from "profiling_table.bin" into the global tables
// smk_solo, smt_solo, smk_conc, smt_conc, smk_best_sp and smt_best_sp.
// The file layout mirrors the order of the reads below: the kernel count
// first, then each table with its per-entry configuration counts.
// Returns 0 on success, -1 if the file cannot be opened.
// NOTE(review): individual fread()s are not checked, matching the original
// behavior — a truncated file yields partially-filled tables.
int read_profling_tables()
{
	FILE *fp;

	// Open in *binary* mode: the file holds raw ints/doubles, and text mode
	// would corrupt the stream on platforms that translate line endings.
	if ((fp = fopen("profiling_table.bin", "rb")) == NULL) {
		printf("Cannot read file\n");
		return -1;
	}

	// Number of kernels (the initializer only survives if the read fails)
	int n = Number_of_Kernels-1;
	fread(&n, sizeof(int), 1, fp);

	// Solo SMK table: per-kernel tpms values, one per block configuration
	smk_solo = (t_smk_solo *)calloc(n, sizeof(t_smk_solo));
	for (int i=0; i<n; i++){
		fread(&smk_solo[i].num_configs, sizeof(int), 1, fp);
		smk_solo[i].tpms = (double *)calloc(smk_solo[i].num_configs, sizeof(double));
		fread(smk_solo[i].tpms, sizeof(double), smk_solo[i].num_configs, fp);
	}

	// Solo SMT table: same layout as the SMK one
	smt_solo = (t_smt_solo *)calloc(n, sizeof(t_smt_solo));
	for (int i=0; i<n; i++){
		fread(&smt_solo[i].num_configs, sizeof(int), 1, fp);
		smt_solo[i].tpms = (double *)calloc(smt_solo[i].num_configs, sizeof(double));
		fread(smt_solo[i].tpms, sizeof(double), smt_solo[i].num_configs, fp);
	}

	// Pairwise SMK co-execution table: for every kernel pair, the profiled
	// block-count pairs and the tpms each kernel achieves with each pair.
	smk_conc = (t_smk_coBlocks **)calloc(n, sizeof(t_smk_coBlocks *));
	for (int i=0; i<n; i++)
		smk_conc[i] = (t_smk_coBlocks *)calloc(n, sizeof(t_smk_coBlocks));
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smk_conc[i][j].kid, sizeof(t_Kernel), 2, fp);
			fread(&smk_conc[i][j].num_configs, sizeof(int), 1, fp);
			smk_conc[i][j].pairs = (int **)calloc(smk_conc[i][j].num_configs, sizeof(int *));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				smk_conc[i][j].pairs[k] = (int *)calloc(2, sizeof(int));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				fread(smk_conc[i][j].pairs[k], sizeof(int), 2, fp);
			smk_conc[i][j].tpms = (double **)calloc(smk_conc[i][j].num_configs, sizeof(double *));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				smk_conc[i][j].tpms[k] = (double *)calloc(2, sizeof(double));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				fread(smk_conc[i][j].tpms[k], sizeof(double), 2, fp);
		}

	// Pairwise SMT co-execution table (same layout as the SMK one)
	smt_conc = (t_smt_coBlocks **)calloc(n, sizeof(t_smt_coBlocks *));
	for (int i=0; i<n; i++)
		smt_conc[i] = (t_smt_coBlocks *)calloc(n, sizeof(t_smt_coBlocks));
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smt_conc[i][j].kid, sizeof(t_Kernel), 2, fp);
			fread(&smt_conc[i][j].num_configs, sizeof(int), 1, fp);
			smt_conc[i][j].pairs = (int **)calloc(smt_conc[i][j].num_configs, sizeof(int *));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				smt_conc[i][j].pairs[k] = (int *)calloc(2, sizeof(int));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				fread(smt_conc[i][j].pairs[k], sizeof(int), 2, fp);
			smt_conc[i][j].tpms = (double **)calloc(smt_conc[i][j].num_configs, sizeof(double *));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				smt_conc[i][j].tpms[k] = (double *)calloc(2, sizeof(double));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				fread(smt_conc[i][j].tpms[k], sizeof(double), 2, fp);
		}

	// Best SMK co-execution configuration (block pair + speedup) per kernel pair
	smk_best_sp = (t_co_speedup **)calloc(n, sizeof(t_co_speedup *));
	for (int i=0; i<n; i++)
		smk_best_sp[i] = (t_co_speedup *)calloc(n, sizeof(t_co_speedup));
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smk_best_sp[i][j].pairs, sizeof(int), 2, fp);
			fread(&smk_best_sp[i][j].speedup, sizeof(double), 1, fp);
		}

	// Best SMT co-execution configuration per kernel pair
	smt_best_sp = (t_co_speedup **)calloc(n, sizeof(t_co_speedup *));
	for (int i=0; i<n; i++)
		smt_best_sp[i] = (t_co_speedup *)calloc(n, sizeof(t_co_speedup));
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smt_best_sp[i][j].pairs, sizeof(int), 2, fp);
			fread(&smt_best_sp[i][j].speedup, sizeof(double), 1, fp);
		}

	fclose(fp);
	return 0;
}
// Allocate device memory and start the host-to-device transfers for every
// kernel of every application, via each stub's startMallocs/startTransfers
// callbacks. Always returns 0.
int alloc_HtD_tranfers(t_application *applications, int num_applications)
{
	for (int app = 0; app < num_applications; app++) {
		t_application *a = &applications[app];
		for (int k = 0; k < a->num_kernels; k++) {
			t_kernel_stub *stub = a->kstubs[k];
			(stub->startMallocs)((void *)stub);
			(stub->startTransfers)((void *)stub);
		}
	}
	return 0;
}
// Execute all kernels of all applications one at a time (no co-execution),
// recording a wall-clock start/end timestamp per kernel id in ktime[].
// Each kernel is given the whole SM range and is synchronized on before the
// next one is launched, so the measured interval is a solo execution.
// After all runs, the per-kernel device task counters are reset.
// Always returns 0.
// Fix: the original malloc'd an exec_stream that was never used (its only
// use was commented out) and never freed — a leak per call; removed.
int nocke_all_applications(t_application *applications, int num_applications, t_ktime *ktime)
{
	struct timespec now;

	// Both ends of the SM id range: kernels may run on all SMs
	int idSMs[2];
	idSMs[0]=0;

	// Launch kernels sequentially, timing each in isolation
	for (int i=0; i<num_applications; i++)
		for (int j=0; j < applications[i].num_kernels; j++) {
			idSMs[1] = applications[i].kstubs[j]->kconf.numSMs-1;
			applications[i].kstubs[j]->idSMs = idSMs;
			(applications[i].kstubs[j]->launchCKEkernel)(applications[i].kstubs[j]);
			clock_gettime(CLOCK_REALTIME, &now);
			ktime[applications[i].kstubs[j]->id].start_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
			// Block until this kernel finishes so timings do not overlap
			hipDeviceSynchronize();
			clock_gettime(CLOCK_REALTIME, &now);
			ktime[applications[i].kstubs[j]->id].end_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
		}

	// Reset task counters so a later (co-executed) run starts from scratch
	for (int i=0; i<num_applications; i++)
		for (int j=0; j < applications[i].num_kernels; j++)
			hipMemset(applications[i].kstubs[j]->d_executed_tasks, 0, sizeof(int));

	return 0;
}
// Assign a kstub to a kstreams already create;
// Attach a kernel stub to an already-created kstreams descriptor, resetting
// its stream bookkeeping. Always returns 0.
int assing_kstreams(t_kernel_stub *kstub, t_kstreams *kstr)
{
	kstr->num_streams = 0;     // no streams launched yet for this kernel
	kstr->save_cont_tasks = 0; // no tasks accounted yet
	kstr->kstub = kstub;
	return 0;
}
// Given a kernel seach for the best partner (highest speeup in coexec) from the kernel ready list
// Given the currently running kernel (curr_kid), scan the ready list for the
// partner kernel with the highest profiled co-execution speedup (table
// smk_best_sp, loaded by read_profling_tables).
//
// Outputs:
//   *select_kid   - chosen partner kernel id, or EMPTY if no ready kernel
//                   reaches MIN_SPEEDUP (no co-execution).
//   *select_index - position of the partner in kid[]/k_done[] (only written
//                   when a partner was selected).
//   *b0, *b1      - block counts for curr_kid and the partner; when no
//                   partner is chosen only *b0 is written, set to the solo
//                   maximum so the running kernel uses all blocks.
// Always returns 0.
int new_get_best_partner(t_Kernel curr_kid, t_Kernel *kid, State *k_done, float **bad_partner, int num_applications, t_Kernel *select_kid, int *select_index, int *b0, int *b1)
{
	(void)bad_partner; // kept for interface compatibility; currently unused

	double best_perf = -1.0;
	int best_index = -1;        // defensively initialized (was uninitialized)
	t_Kernel best_kid = EMPTY;

	for (int i = 0; i < num_applications; i++) { // For the remaining kernels
		if (k_done[i] == READY) {                // If kernel has not been executed
			t_co_speedup *info = &smk_best_sp[curr_kid][kid[i]];
			// Keep the ready kernel with the highest co-execution speedup
			if (info->speedup > best_perf) {
				best_perf = info->speedup;
				best_index = i;
				best_kid = kid[i];
			}
		}
	}

	if (best_perf >= MIN_SPEEDUP) {
		*select_kid = best_kid;
		*select_index = best_index;
		*b0 = smk_best_sp[curr_kid][best_kid].pairs[0];
		*b1 = smk_best_sp[curr_kid][best_kid].pairs[1];
	} else {
		*select_kid = EMPTY; // Indicate no coexecution
		// If performance is low the running kernel executes with all the blocks
		*b0 = smk_solo[curr_kid].num_configs;
	}
	return 0;
}
// Scan k_done[] for the first kernel in READY state and report its position
// through *index; *index is set to -1 when no ready kernel exists.
// Always returns 0.
int new_find_first_kernel( State *k_done, int *index, int num_kernels)
{
	*index = -1; // default: nothing ready
	for (int pos = 0; pos < num_kernels; pos++) {
		if (k_done[pos] == READY) {
			*index = pos;
			break;
		}
	}
	return 0;
}
// greedy_scheduler is oriented to reduce the makespan of a set of applications
// Assumning a list of ready kernels, this scheduler selects a pair of kernels to be coexecuted
// First pair selection is based on the kernels that achived the highest speedup. W
// Then when one of the two kernels finishe, the next ready one having tje kighest speepup when coexecutued with the alreadu running is selected.
// Partial evition and adding of streams is performed to establshed the adequated resource assignement to each running kernel
// Greedy co-execution scheduler, oriented to reduce the makespan of a set of
// applications. It keeps a ready list with the first pending kernel of each
// application and repeatedly co-schedules pairs of kernels: the first pair is
// the one with the highest profiled co-execution speedup, and when one kernel
// finishes the next ready kernel with the highest speedup against the
// still-running one is selected. Partial eviction and addition of streams
// adjusts the resources assigned to each running kernel.
// A sequential (solo) run is executed first as the baseline; then the greedy
// schedule is run once per rotation of the application order and per-kernel
// NTT/ANTT statistics plus the overall speedup are printed.
int greedy_coexecution(int deviceId)
{
	hipError_t err;
	struct timespec now;

	// Select device
	hipSetDevice(deviceId);
	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, deviceId);
	printf("Device=%s\n", deviceProp.name);

	// Create sched structure
	t_sched sched;
	create_sched(&sched);

	// Load profiling tables
	read_profling_tables();

	// Important for reducing the number of streams launched by RCONV and CCONV:
	// profiling tables are halved for RCONV to reduce its concurrent streams
	int n = Number_of_Kernels-1;
	for (int i=0; i<n; i++) {
		smk_best_sp[RCONV][i].pairs[0] = smk_best_sp[RCONV][i].pairs[0]/2;
		smk_best_sp[i][RCONV].pairs[1] = smk_best_sp[i][RCONV].pairs[1]/2;
	}
	smk_solo[RCONV].num_configs = smk_solo[RCONV].num_configs /2;

	// Applications: thirteen single-kernel applications
	int num_applications=13;
	t_application *applications = (t_application *)calloc(num_applications, sizeof(t_application));
	applications[0].num_kernels = 1; applications[0].kid[0] = VA;
	applications[1].num_kernels = 1; applications[1].kid[0] = MM;
	applications[2].num_kernels = 1; applications[2].kid[0] = RCONV;
	applications[3].num_kernels = 1; applications[3].kid[0] = CCONV;
	applications[4].num_kernels = 1; applications[4].kid[0] = HST256;
	applications[5].num_kernels = 1; applications[5].kid[0] = Reduction;
	applications[6].num_kernels = 1; applications[6].kid[0] = PF;
	applications[7].num_kernels = 1; applications[7].kid[0] = BS;
	applications[8].num_kernels = 1; applications[8].kid[0] = SPMV_CSRscalar;
	applications[9].num_kernels = 1; applications[9].kid[0] = GCEDD;
	applications[10].num_kernels = 1; applications[10].kid[0] = SCEDD;
	applications[11].num_kernels = 1; applications[11].kid[0] = NCEDD;
	applications[12].num_kernels = 1; applications[12].kid[0] = HCEDD;

	// Alternative multi-kernel workload, kept for reference:
	/*
	applications[0].num_kernels = 4; applications[0].kid[0] = GCEDD;
	applications[0].kid[1] = SCEDD;
	applications[0].kid[2] = NCEDD;
	applications[0].kid[3] = HCEDD;
	applications[1].num_kernels = 2; applications[1].kid[0] = RCONV;
	applications[1].kid[1] = CCONV;
	applications[2].num_kernels = 1; applications[2].kid[0] = HST256;
	applications[3].num_kernels = 1; applications[3].kid[0] = Reduction;
	applications[4].num_kernels = 1; applications[4].kid[0] = PF;
	applications[5].num_kernels = 1; applications[5].kid[0] = VA;
	applications[6].num_kernels = 1; applications[6].kid[0] = BS;
	applications[7].num_kernels = 1; applications[7].kid[0] = SPMV_CSRscalar;
	applications[8].num_kernels = 1; applications[8].kid[0] = MM;
	*/

	// First kernel of each application is sent to ready
	t_Kernel *kid = (t_Kernel *) calloc(num_applications, sizeof(t_Kernel)); // List of ready kernels
	for (int i=0; i<num_applications; i++)
		kid[i] = applications[i].kid[0];

	// k_done annotates per-application kernel state (READY/RUNNING/DONE)
	State *k_done = (State *)calloc(num_applications, sizeof(int));

	/** Create common streams for all kernels: two for asynchronous transfers, one for preemption commands */
	hipStream_t *transfers_s;
	transfers_s = (hipStream_t *)calloc(2, sizeof(hipStream_t));
	for (int i=0;i<2;i++){
		err = hipStreamCreate(&transfers_s[i]);
		checkCudaErrors(err);
	}
	hipStream_t preemp_s;
	checkCudaErrors(hipStreamCreateWithFlags(&preemp_s, hipStreamNonBlocking));

	/** Create stubs ***/
	for (int i=0; i<num_applications; i++)
		for (int j=0; j < applications[i].num_kernels; j++)
			if (j == 0) // If first application kernel
				create_stubinfo(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s);
			else // Later kernels reuse the first kernel's params
				create_stubinfo_with_params(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s, applications[i].kstubs[0]->params);

	// Make allocation and HtD transfer for applications
	alloc_HtD_tranfers(applications, num_applications);
	hipDeviceSynchronize();

	// Bad partners: each kernel annotates partners with bad speedup in
	// coexecution (currently unused — the writes below are commented out)
	float **bad_partner = (float **)calloc(Number_of_Kernels, sizeof(float *));
	for (int i=0;i<Number_of_Kernels; i++)
		bad_partner[i] = (float *)calloc(Number_of_Kernels, sizeof(float));

	// Annotate start and end kernel execution time (concurrent run)
	t_ktime *ktime_conc = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));

	// Annotate start and end kernel execution time (serialized baseline)
	t_ktime *ktime2_seq = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));

	// Save applications so the order can be permuted in following iterations
	t_application *save_applications = (t_application *)calloc(num_applications, sizeof(t_application));
	for (int i=0; i<num_applications; i++){
		save_applications[i].num_kernels = applications[i].num_kernels;
		for (int j=0; j<applications[i].num_kernels; j++) {
			save_applications[i].kid[j] = applications[i].kid[j];
			save_applications[i].kstubs[j] = applications[i].kstubs[j];
		}
	}

	// Sequential execution: solo baseline used for NTT/ANTT below
	t_ktime *ktime_seq = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));
	clock_gettime(CLOCK_REALTIME, &now);
	double time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	double start_seq_time = time1;
	nocke_all_applications(applications, num_applications, ktime_seq);
	clock_gettime(CLOCK_REALTIME, &now);
	double time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	double seq_exec_time = time2-time1;

	// One scheduling run per rotation of the application order
	for (int perm=0; perm<13; perm++) {

		// Application order permutation (rotate by perm positions)
		for (int i=0; i<num_applications; i++){
			applications[(i+perm) % num_applications].num_kernels = save_applications[i].num_kernels;
			applications[(i+perm) % num_applications].index=0;
			for (int j=0; j<save_applications[i].num_kernels; j++) {
				applications[(i+perm) % num_applications].kid[j] = save_applications[i].kid[j];
				applications[(i+perm) % num_applications].kstubs[j] = save_applications[i].kstubs[j];
			}
		}

		for (int i=0; i<num_applications; i++)
			kid[i] = applications[i].kid[0];

		// Create streams kernel info for coexecution
		t_kstreams *kstr = (t_kstreams *)calloc(num_applications, sizeof(t_kstreams));
		for (int i=0; i<num_applications; i++)
			create_kstreams(applications[i].kstubs[0], &kstr[i]);

		// Initially all kernels are ready
		for (int i=0; i< num_applications; i++)
			k_done[i] = READY;

		// Coexecution info (at most two concurrent kernels)
		t_kcoexec coexec;
		create_coexec(&coexec, 2);

		// Launch proxy
		launch_generic_proxy((void *)&sched); // Launch proxy

		// Select initial kernel
		int task_index = 0; // Index of the kernel in the array with ready kernels
		//k_done[task_index] = 1; // Kernel removed from pending kernels*/

		// Reset timers
		memset(ktime_conc, 0, sizeof(t_ktime)*(Number_of_Kernels-1));

		int kernel_idx; // Position of kernel in coexec struct
		double speedup;
		clock_gettime(CLOCK_REALTIME, &now);
		time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
		double time_sample;

		do {
			// No kernel running: pick the first ready one (arbitrary choice)
			if (coexec.num_kernels == 0) {
				if (perm==1)
					printf("Aqui\n"); // leftover debug trace — TODO(review): remove?
				new_find_first_kernel(k_done, &task_index, num_applications); // Index in k_done
				if (task_index == -1)
					break; // Exit: no remaining kernels
				k_done[task_index] = RUNNING;
			}

			// Given kid[task_index] kernel, choose the partner with highest performance
			int task_index2; // index in kernel ready list of the selected kernel
			int b0, b1;
			t_Kernel select_kid;
			new_get_best_partner(kid[task_index], kid, k_done, bad_partner, num_applications, &select_kid, &task_index2, &b0, &b1);

			char k0_name[30]; char k1_name[30];
			kid_from_index(kid[task_index], k0_name);
			kid_from_index(select_kid, k1_name);
			//printf("---> Selecting %s(%d) %s(%d)\n",k0_name, b0, k1_name, b1 );

			// Check if kernel is already in coexec (because it is executing)
			int pos, dif;
			kernel_in_coexec(&coexec, &kstr[task_index], &pos);

			// pos = kernel position in coexec struct (0 or 1), or -1 if absent
			if (pos == -1) // kernel is not in coexec
				add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index], b0, task_index); // Add b0 streams
			else {
				if ((dif = ( b0 - kstr[task_index].num_streams)) > 0) // New streams must be added
					add_streams_to_kernel(&coexec, &sched, coexec.kstr[pos], dif);
				else {
					evict_streams(coexec.kstr[pos], -dif); // Some running streams must be evicted
					coexec.num_streams[pos] +=dif;
				}
			}

			if (select_kid != EMPTY){ // if coexecution is theoretically beneficial
				k_done[task_index2] = RUNNING; // Remove kernel from ready list
				add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], b1, task_index2); // Add b1 streams
			}

			// Execute kernels (launching streams) in coexec structure
			launch_coexec(&coexec);

			// Annotate kernel start time (only the first time it is launched)
			clock_gettime(CLOCK_REALTIME, &now);
			time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
			if (coexec.kstr[0] != NULL)
				if (ktime_conc[coexec.kstr[0]->kstub->id].start_time == 0)
					ktime_conc[coexec.kstr[0]->kstub->id].start_time = time_sample;
			if (coexec.kstr[1] != NULL)
				if (ktime_conc[coexec.kstr[1]->kstub->id].start_time == 0)
					ktime_conc[coexec.kstr[1]->kstub->id].start_time = time_sample;

			// Wait for termination condition
			wait_for_kernel_termination_with_proxy(&sched, &coexec, &kernel_idx, &speedup);
			//if (coexec.num_kernels == 1)
			//	break; // The last kernel has finished. Exit

			if (speedup < MIN_SPEEDUP && coexec.num_kernels == 2){ // If speedup is not good, stop second kernel

				evict_streams(coexec.kstr[1], coexec.kstr[1]->num_streams); // Stop all the streams of the second kernel (why not the first one? -> criterion based on remaining execution time?)
				k_done[coexec.queue_index[1]]= READY; // Put the second kernel as ready again
				//bad_partner[coexec.kstr[0]->kstub->id][coexec.kstr[1]->kstub->id] = -1;//speedup; // Annotate bad partner
				//bad_partner[coexec.kstr[1]->kstub->id][coexec.kstr[0]->kstub->id] = -1; //;

				clock_gettime(CLOCK_REALTIME, &now);
				time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
				ktime_conc[coexec.kstr[1]->kstub->id].end_time = time_sample;

				rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[1]); // Remove second kernel from coexec struct

				// Add new executing streams to first kernel (in coexec struct) so that it will run the maximum number of streams
				add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], smk_solo[coexec.kstr[0]->kstub->id].num_configs - coexec.kstr[0]->num_streams);

				launch_coexec(&coexec); // Launch new streams of first kernel
				wait_for_kernel_termination_with_proxy(&sched, &coexec, &kernel_idx, &speedup); // Wait first kernel to finish, kernel_idx -> index in coexec

				int kind = coexec.queue_index[0]; // Save index in ready list of the finished kernel

				// Update coexec: remove first kernel
				clock_gettime(CLOCK_REALTIME, &now);
				time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
				ktime_conc[coexec.kstr[0]->kstub->id].end_time = time_sample;
				rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[0]);

				// If application has more kernels activate the next one
				if (applications[kind].index + 1 < applications[kind].num_kernels) {
					applications[kind].index++;
					kid[kind] = applications[kind].kid[applications[kind].index]; // get ID of new kernel
					k_done[kind] = READY; // Set ready
					assing_kstreams(applications[kind].kstubs[applications[kind].index], &kstr[kind]);
				}
				else
					k_done[kind] = DONE; // Otherwise, application has finished
			}
			else
			{
				int kind = coexec.queue_index[kernel_idx]; // Save index in ready list of the finished kernel

				// Remove finished kernel
				clock_gettime(CLOCK_REALTIME, &now);
				time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
				ktime_conc[coexec.kstr[kernel_idx]->kstub->id].end_time = time_sample;
				rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[kernel_idx]);

				// If application has more kernels activate the next one
				if (applications[kind].index + 1 < applications[kind].num_kernels) {
					applications[kind].index++;
					kid[kind] = applications[kind].kid[applications[kind].index]; // get ID of new kernel and write it in kid list
					k_done[kind] = READY; // Set the new kernel ready
					assing_kstreams(applications[kind].kstubs[applications[kind].index], &kstr[kind]); // Assign new kernel to application kstreams
				}
				else
					k_done[kind] = DONE; // Otherwise, application has finished
			}

			if (coexec.num_kernels != 0){
				// find task index of the still-running kernel
				int i;
				for (i=0;i<MAX_NUM_COEXEC_KERNELS; i++)
					if (coexec.kstr[i] != NULL)
						task_index = coexec.queue_index[i];
			}

		} while (1);

		// Evict proxy
		sched.kernel_evict_zc[0] = PROXY_EVICT;
		hipDeviceSynchronize();

		clock_gettime(CLOCK_REALTIME, &now);
		time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
		double conc_exec_time = time2-time1;
		//printf("Concurrent excution time=%f sec.\n", time2-time1);

		// Free per-permutation structures
		remove_coexec(&coexec);
		for (int i=0; i<num_applications; i++)
			remove_kstreams(&kstr[i]);
		free(kstr);

		// Reset task counter
		for (int i=0; i<num_applications; i++)
			for (int j=0; j < applications[i].num_kernels; j++)
				hipMemset(applications[i].kstubs[j]->d_executed_tasks, 0, sizeof(int));

		// Set streams status to PREP
		for (int i=0; i<num_applications; i++)
			for (int j=0; j < applications[i].num_kernels; j++){
				for (int k=0; k<MAX_STREAMS_PER_KERNEL; k++)
					applications[i].kstubs[j]->h_state[k] = PREP;
				hipMemcpy(applications[i].kstubs[j]->gm_state, applications[i].kstubs[j]->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, hipMemcpyHostToDevice);
			}

		// Reset stream index
		for (int i=0; i<num_applications; i++)
			for (int j=0; j < applications[i].num_kernels; j++)
				applications[i].kstubs[j]->stream_index = 0;

		// Calculate stats: serialize the measured solo intervals back-to-back
		// to obtain per-kernel end times of a purely sequential schedule
		double acc_time=start_seq_time;
		for (int i=0;i<num_applications;i++) {
			for (int j=0; j < applications[i].num_kernels; j++){
				int id = applications[i].kstubs[j]->id;
				ktime2_seq[id].start_time =acc_time;
				acc_time += (ktime_seq[id].end_time - ktime_seq[id].start_time);
				ktime2_seq[id].end_time =acc_time;
			}
		}

		// Per-kernel normalized turnaround times and their averages (ANTT)
		double antt_seq=0, antt_conc=0;
		int cont =0;
		printf("Kid \t endtime_seq \t endtime_conc \t NTT_Seq \t NTT_conc\n");
		for (int i=0; i<Number_of_Kernels-1; i++)
			if (ktime_seq[i].start_time != 0){
				printf("%d \t %f \t %f \t %f \t %f\n", i, (ktime2_seq[i].end_time - start_seq_time), (ktime_conc[i].end_time - time1), (ktime2_seq[i].end_time - start_seq_time) /(ktime2_seq[i].end_time - ktime2_seq[i].start_time), (ktime_conc[i].end_time - time1)/(ktime2_seq[i].end_time - ktime2_seq[i].start_time) );
				antt_seq += (ktime2_seq[i].end_time - start_seq_time) /(ktime_seq[i].end_time - ktime_seq[i].start_time);
				antt_conc += (ktime_conc[i].end_time - time1)/(ktime_seq[i].end_time - ktime_seq[i].start_time);
				cont++;
			}

		printf("ANTT_seq \t ANTT_conc\n");
		printf("%f \t %f\n", antt_seq/(double)cont, antt_conc/(double)cont);
		printf("Speepup = %f\n", seq_exec_time/conc_exec_time);
	}

	return 0;
}
/**
 * Real-time scheduler (HIP build).
 *
 * Co-executes one latency-sensitive (LS) application -- the CEDD pipeline
 * GCEDD -> SCEDD -> NCEDD -> HCEDD -- against several copies of a background
 * kernel (bg_kid). Streams are dynamically moved between the LS kernel and
 * the background kernel so that the LS kernel's throughput stays above
 * max_slowdown * (its best solo throughput).
 *
 * @param deviceId      GPU to run on.
 * @param max_slowdown  Allowed LS slowdown factor relative to solo execution.
 * @param bg_kid        Kernel id used for all background applications.
 * @return 0 always.
 */
int rt_scheduler(int deviceId, double max_slowdown, t_Kernel bg_kid)
{
	struct timespec now;
	hipError_t err;
	// Select device
	hipSetDevice(deviceId);
	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, deviceId);
	printf("Device=%s\n", deviceProp.name);
	// Load profiling tables (solo and co-execution throughput data)
	read_profling_tables();
	// Create sched structure
	t_sched sched;
	create_sched(&sched);
	// Applications: app 0 is the LS CEDD pipeline, apps 1..5 are background copies
	int num_applications=6;
	t_application *applications = (t_application *)calloc(num_applications, sizeof(t_application));
	applications[0].num_kernels = 4; applications[0].kid[0] = GCEDD;
	applications[0].kid[1] = SCEDD;
	applications[0].kid[2] = NCEDD;
	applications[0].kid[3] = HCEDD;
	applications[1].num_kernels = 1; applications[1].kid[0] = bg_kid;
	applications[2].num_kernels = 1; applications[2].kid[0] = bg_kid;
	applications[3].num_kernels = 1; applications[3].kid[0] = bg_kid;
	applications[4].num_kernels = 1; applications[4].kid[0] = bg_kid;
	applications[5].num_kernels = 1; applications[5].kid[0] = bg_kid;
	// First kernel of each application is sent to ready
	t_Kernel *kid = (t_Kernel *) calloc(num_applications, sizeof(t_Kernel)); // List of ready kernels
	for (int i=0; i<num_applications; i++)
		kid[i] = applications[i].kid[0];
	// k_done annotates kernel state
	State *k_done = (State *)calloc(num_applications, sizeof(int));
	for (int i=0; i< num_applications-1; i++) // NOTE(review): last app intentionally left out of READY set
		k_done[i] = READY;
	/** Create common streams for all kernels: two for asynchronous transfers, one for preemption commands */
	hipStream_t *transfers_s;
	transfers_s = (hipStream_t *)calloc(2, sizeof(hipStream_t));
	for (int i=0;i<2;i++){
		err = hipStreamCreate(&transfers_s[i]);
		checkCudaErrors(err);
	}
	hipStream_t preemp_s;
	checkCudaErrors(hipStreamCreateWithFlags(&preemp_s, hipStreamNonBlocking));
	/** Create stubs: kernels after the first one of an application share its params */
	for (int i=0; i<num_applications; i++)
		for (int j=0; j < applications[i].num_kernels; j++)
			if (j == 0) // If first application kernel
				create_stubinfo(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s);
			else
				create_stubinfo_with_params(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s, applications[i].kstubs[0]->params);
	// Make allocation and HtD transfer for applications
	alloc_HtD_tranfers(applications, num_applications);
	hipDeviceSynchronize();
	// Calculate sequential execution time (overlapping is still possible)
	clock_gettime(CLOCK_REALTIME, &now);
	double time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	t_ktime *ktime = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));
	nocke_all_applications(applications, num_applications, ktime);
	clock_gettime(CLOCK_REALTIME, &now);
	double time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	printf("Sequential execution time =%f sec\n", time2-time1);
	// Baseline: summed solo time of the four LS (CEDD) kernels
	double seq_time = ktime[GCEDD].end_time - ktime[GCEDD].start_time
		+ ktime[SCEDD].end_time - ktime[SCEDD].start_time
		+ ktime[NCEDD].end_time - ktime[NCEDD].start_time
		+ ktime[HCEDD].end_time - ktime[HCEDD].start_time ;
	// Create streams kernel info for coexecution
	t_kstreams *kstr = (t_kstreams *)calloc(num_applications, sizeof(t_kstreams));
	for (int i=0; i<num_applications; i++)
		create_kstreams(applications[i].kstubs[0], &kstr[i]);
	// Coexecution info: at most two kernels coexecute (LS in slot 0, non-LS in slot 1)
	t_kcoexec coexec;
	create_coexec(&coexec, 2);
	// Launch proxy
	launch_generic_proxy((void *)&sched); // Launch proxy
	int kernel_idx; // Position of kernel in coexec struct
	double speedup;
	int task_index = 0; // Index of the LS kernel
	int task_index2 = 1; // Index of the current background (non-LS) kernel
	double time1_rt, time2_rt;
	clock_gettime(CLOCK_REALTIME, &now);
	time1_rt = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	double acc_numstreams_time=0; // accumulates (num_streams * time) for the weighted-streams metric
	do {
		// Coexecution configuration: the LS kernel must get the larger stream count
		t_Kernel idk0 = kstr[task_index].kstub->id;
		t_Kernel idk2 = kstr[task_index2].kstub->id;
		int num_cf = smk_conc[idk0][idk2].num_configs;
		int b0, b1; // Max num of streams must be assigned to ls kernel
		if (smk_conc[idk0][idk2].pairs[0][0] > smk_conc[idk0][idk2].pairs[0][1]) {
			b0 = smk_conc[idk0][idk2].pairs[0][0];
			b1 = smk_conc[idk0][idk2].pairs[0][1];
		}
		else {
			int num_cf = smk_conc[idk0][idk2].num_configs; // NOTE(review): shadows outer num_cf
			b0 = smk_conc[idk0][idk2].pairs[num_cf-1][0];
			b1 = smk_conc[idk0][idk2].pairs[num_cf-1][1];
		}
		printf("kids=(%d,%d) streams=(%d,%d)\n", idk0, idk2, b0, b1);
		/* printf("Max tmps=%f\n", smk_conc[idk0][idk2].tpms[0][0]);
		for (int i=0;i<=7;i++)
			printf("Solo num_str=%d tpms=%f\n", i+1, smk_solo[idk0].tpms[i]);
		for (int i=0;i<7;i++)
			printf("Coexec num_str=%d tpms=%f\n", i+1, smk_conc[idk0][idk2].tpms[i][0]);
		*/
		// Bring both kernels into the coexec structure with (b0, b1) streams
		int pos0, pos1;
		kernel_in_coexec(&coexec, &kstr[task_index], &pos0);
		kernel_in_coexec(&coexec, &kstr[task_index2], &pos1);
		if (pos0 == -1 && pos1 == -1) { // No kernels are executing
			add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index], b0, task_index); // Add b0 streams
			add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], b1, task_index2); // Add b1 streams
		}
		else {
			if (pos0 == -1){
				if (coexec.kstr[1]->num_streams > b1){ // If non-ls kernel is running and has too many streams
					int rem_streams = coexec.kstr[1]->num_streams - b1;
					evict_streams(coexec.kstr[1], rem_streams); // Now, non-ls kernel has b1 streams
					coexec.num_streams[1] -= rem_streams;
					printf("Borrando %d(%d)streams de kernel %d\n", rem_streams, coexec.num_streams[1], coexec.kstr[1]->kstub->id);
				}
				add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index], b0, task_index); // Add b0 streams
			}
			if (pos1 == -1){
				if (coexec.kstr[0]->num_streams < b0) // If LS kernel is running and has too few streams
					add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], b0 - coexec.kstr[0]->num_streams); // Add more streams to LS kernel
				add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], b1, task_index2); // Add b1 streams
			}
		}
		// Execute kernels (launching streams) in coexec structure
		launch_coexec(&coexec);
		double start_time;
		if (pos0 == -1) {// When a new ls kernel is launched, get time
			// Get current time
			clock_gettime(CLOCK_REALTIME, &now);
			start_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
		}
		// Calculate minimum tpms (tasks per ms) that ls kernel may fall to
		double max_tpms = smk_solo[idk0].tpms[smk_solo[idk0].num_configs-1]; // Fastest solo tpms
		double min_tpms = max_slowdown * max_tpms;
		// Wait for termination condition, rebalancing streams on the way:
		// rc==1 -> LS too slow, rc==2 -> LS fast enough to donate, rc==0 -> a kernel finished
		int rc;
		double sampling_interval = 0.0004;
		int flag_rc2 = 0, stop_pingpong=0; // stop_pingpong avoids oscillating give/steal cycles
		do {
			ls_coexec_wait_for_kernel_termination_with_proxy(&sched, &coexec, min_tpms, start_time, sampling_interval, &acc_numstreams_time, &kernel_idx, &rc);
			printf("rc=%d\n", rc);
			if (rc == 1 && coexec.kstr[1] != NULL) { // LS kernel performance is low: steal a stream from the non-LS kernel if it is still running
				evict_streams(coexec.kstr[1], 1);
				coexec.num_streams[1]--;
				if (coexec.kstr[1]->num_streams == 0) {
					rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[1]);
					printf("Eliminando stream de no-ls\n");
				}
				add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], 1);
				launch_coexec(&coexec);
				if (flag_rc2 == 1) stop_pingpong =1; // a steal right after a give: freeze further gives
			}
			if (rc == 2 && coexec.kstr[0]->num_streams >= 2 && stop_pingpong == 0) { // LS performance is high: give a stream to the non-LS kernel (LS keeps at least one)
				evict_streams(coexec.kstr[0], 1);
				coexec.num_streams[0]--;
				kernel_in_coexec(&coexec, &kstr[task_index2], &pos1);
				if (pos1 != -1) // If non ls is running
					add_streams_to_kernel(&coexec, &sched, coexec.kstr[1], 1); // add a new stream to non ls
				else
					add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], 1, task_index2); // else add non-ls to coexec with one stream
				launch_coexec(&coexec);
				flag_rc2=1;
			}
			sampling_interval = 0.0002;
		}
		while (rc == 1 || rc==2); // If rc==0, a kernel has finished
		int kind = coexec.queue_index[kernel_idx]; // Save index in ready list of the finished kernel
		// Remove finished kernel
		rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[kernel_idx]);
		printf("Eliminando kernel con idx = %d\n", kernel_idx);
		if (kid[kind] == HCEDD) // Last LS kernel finished: whole experiment done
			break;
		// If application has more kernels activate the next one
		if (applications[kind].index + 1 < applications[kind].num_kernels) {
			applications[kind].index++;
			kid[kind] = applications[kind].kid[applications[kind].index]; // get ID of new kernel
			k_done[kind] = READY; // Set ready
			assing_kstreams(applications[kind].kstubs[applications[kind].index], &kstr[kind]);
		}
		else {
			k_done[kind] = DONE; // Application finished
			task_index2++; // move to next non-ls kernel
			if (task_index2 >= num_applications){
				printf("Error: no hay suficiente sapplicaciones de non ls kernel. Termino de forma anticipada\n");
				break;
			}
		}
	} while (1);
	clock_gettime(CLOCK_REALTIME, &now);
	time2_rt = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	double rt_exec_time = time2_rt - time1_rt;
	printf("kid, Slowdownm Fullfil, weighted_streams_per_time, \n");
	printf("%d, %f, %f, %f\n", kid[1], max_slowdown, (max_slowdown*seq_time-rt_exec_time)/(max_slowdown*seq_time), acc_numstreams_time/rt_exec_time);
	//printf(",st=%f rt=%f Per=%f, num_streams=%f\n", seq_time, rt_exec_time, (max_slowdown*seq_time-rt_exec_time)/max_slowdown*seq_time, acc_numstreams_time/rt_exec_time);
	// Evict proxy
	sched.kernel_evict_zc[0] = PROXY_EVICT;
	hipDeviceSynchronize();
	//printf("Concurrent excution time=%f sec.\n", time2_rt-time1_rt);
	return 0;
}
| db7a4ba33426103fa76e534234327ead1b9147ba.cu | #include <unistd.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h>
#include "elastic_kernel.h"
#include <cuda_profiler_api.h>
// Wall-clock interval (seconds, from clock_gettime(CLOCK_REALTIME)) spanning
// one kernel execution.
typedef struct{
	double start_time;
	double end_time;
}t_ktime;
// An application is composed of one or several kernels executed in order.
typedef struct{
	int num_kernels;          // number of kernels in this application
	int index;                // position of the currently active kernel in kid[]/kstubs[]
	t_Kernel kid[8]; // Max: 8 kernels per application
	t_kernel_stub* kstubs[8]; // One kernel stub per kernel
}t_application;
// Module-level profiling tables, filled by read_profling_tables() and indexed
// by kernel id (t_Kernel). "tpms" = tasks per millisecond (throughput).
// Solo (isolated) execution results, SMK and SMT partitioning schemes.
t_smk_solo *smk_solo;
t_smt_solo *smt_solo;
// Co-execution results: throughput of each kernel pair per stream/block config.
t_smk_coBlocks **smk_conc;
t_smt_coBlocks **smt_conc; // tpms of each kernel in coexecution
// Best speedup found for each kernel pair in coexecution.
t_co_speedup **smk_best_sp;
t_co_speedup **smt_best_sp;
/**
 * Load the offline profiling tables from "profiling_table.bin" into the
 * module-level tables smk_solo, smt_solo, smk_conc, smt_conc, smk_best_sp and
 * smt_best_sp. The read order below must match exactly the order in which the
 * profiler wrote the file: each fread's size depends on counts read earlier.
 *
 * @return 0 on success, -1 if the file cannot be opened.
 *
 * NOTE(review): fread and calloc return values are not checked, so a
 * truncated or corrupt file leaves the tables silently half-initialized.
 * NOTE(review): the fread calls pass (count, sizeof(elem)) in the
 * (size, nmemb) positions; the total byte count is the same, so reads are
 * still correct.
 */
int read_profling_tables()
{
	FILE *fp;
	if ((fp = fopen("profiling_table.bin", "r")) == NULL) {
		printf("Cannot read file\n");
		return -1;
	}
	// Number of kernels (overwritten by the value stored in the file)
	int n = Number_of_Kernels-1;
	fread (&n, 1, sizeof(int), fp);
	// Create t_smk_solo smk_solo[]
	smk_solo = (t_smk_solo *)calloc(n, sizeof(t_smk_solo));
	// Load t_smk_solo smk_solo[]: per kernel, a config count then that many tpms values
	for (int i=0; i<n; i++){
		fread(&smk_solo[i].num_configs, 1, sizeof(int), fp);
		smk_solo[i].tpms = (double *)calloc(smk_solo[i].num_configs, sizeof(double));
		fread(smk_solo[i].tpms, smk_solo[i].num_configs, sizeof(double), fp);
	}
	// Create t_smt_solo smt_solo[]
	smt_solo = (t_smt_solo *)calloc(n, sizeof(t_smt_solo));
	// Load t_smt_solo: same layout as the SMK solo table
	for (int i=0; i<n; i++){
		fread(&smt_solo[i].num_configs, 1, sizeof(int), fp);
		smt_solo[i].tpms = (double *)calloc(smt_solo[i].num_configs, sizeof(double));
		fread(smt_solo[i].tpms, smt_solo[i].num_configs, sizeof(double), fp);
	}
	// Create n x n table t_smk_coBlocks smk_conc
	smk_conc = (t_smk_coBlocks **)calloc(n, sizeof(t_smk_coBlocks *));
	for (int i=0; i<n; i++)
		smk_conc[i] = (t_smk_coBlocks *)calloc(n, sizeof(t_smk_coBlocks));
	// Load smk_conc: per pair, kernel ids, config count, then (stream-pair, tpms-pair) arrays
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smk_conc[i][j].kid, 2, sizeof(t_Kernel), fp);
			fread(&smk_conc[i][j].num_configs, 1, sizeof(int), fp);
			smk_conc[i][j].pairs = (int **)calloc(smk_conc[i][j].num_configs, sizeof(int *));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				smk_conc[i][j].pairs[k] = (int *)calloc(2, sizeof(int));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				fread(smk_conc[i][j].pairs[k], 2, sizeof(int), fp);
			smk_conc[i][j].tpms = (double **)calloc(smk_conc[i][j].num_configs, sizeof(double *));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				smk_conc[i][j].tpms[k] = (double *)calloc(2, sizeof(double));
			for (int k=0; k<smk_conc[i][j].num_configs; k++)
				fread(smk_conc[i][j].tpms[k], 2, sizeof(double), fp);
		}
	// Create n x n table t_smt_coBlocks smt_conc
	smt_conc = (t_smt_coBlocks **)calloc(n, sizeof(t_smt_coBlocks *));
	for (int i=0; i<n; i++)
		smt_conc[i] = (t_smt_coBlocks *)calloc(n, sizeof(t_smt_coBlocks));
	// Load smt_conc: identical layout to smk_conc
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smt_conc[i][j].kid, 2, sizeof(t_Kernel), fp);
			fread(&smt_conc[i][j].num_configs, 1, sizeof(int), fp);
			smt_conc[i][j].pairs = (int **)calloc(smt_conc[i][j].num_configs, sizeof(int *));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				smt_conc[i][j].pairs[k] = (int *)calloc(2, sizeof(int));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				fread(smt_conc[i][j].pairs[k], 2, sizeof(int), fp);
			smt_conc[i][j].tpms = (double **)calloc(smt_conc[i][j].num_configs, sizeof(double *));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				smt_conc[i][j].tpms[k] = (double *)calloc(2, sizeof(double));
			for (int k=0; k<smt_conc[i][j].num_configs; k++)
				fread(smt_conc[i][j].tpms[k], 2, sizeof(double), fp);
		}
	// Create t_co_speedup smk_best_sp
	smk_best_sp = (t_co_speedup **)calloc(n, sizeof(t_co_speedup *));
	for (int i=0; i<n; i++)
		smk_best_sp[i] = (t_co_speedup *)calloc(n, sizeof(t_co_speedup));
	// Load smk_best_sp: per pair, the best (stream,stream) config and its speedup
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smk_best_sp[i][j].pairs, 2, sizeof(int), fp);
			fread(&smk_best_sp[i][j].speedup, 1, sizeof(double), fp);
		}
	// Create t_co_speedup smt_best_sp
	smt_best_sp = (t_co_speedup **)calloc(n, sizeof(t_co_speedup *));
	for (int i=0; i<n; i++)
		smt_best_sp[i] = (t_co_speedup *)calloc(n, sizeof(t_co_speedup));
	// Load smt_best_sp: identical layout to smk_best_sp
	for (int i=0; i<n; i++)
		for (int j=0; j<n; j++) {
			fread(smt_best_sp[i][j].pairs, 2, sizeof(int), fp);
			fread(&smt_best_sp[i][j].speedup, 1, sizeof(double), fp);
		}
	fclose(fp);
	return 0;
}
/* For every kernel of every application, run its device-allocation callback
   followed by its host-to-device transfer callback. Always returns 0. */
int alloc_HtD_tranfers(t_application *applications, int num_applications)
{
	for (int app = 0; app < num_applications; app++) {
		t_application *a = &applications[app];
		for (int k = 0; k < a->num_kernels; k++) {
			t_kernel_stub *stub = a->kstubs[k];
			(stub->startMallocs)((void *)stub);
			(stub->startTransfers)((void *)stub);
		}
	}
	return 0;
}
/**
 * Execute every kernel of every application back-to-back (no co-execution),
 * recording each kernel's wall-clock start/end time in ktime[] (indexed by
 * kernel id). Afterwards the per-kernel executed-task counters on the device
 * are reset so the kernels can be launched again.
 *
 * Fix: removed the malloc'ed `exec_stream` that was never used (it was only
 * referenced from commented-out debug code) and never freed -- a memory leak.
 *
 * @param applications       array of applications whose kernels are run
 * @param num_applications   number of entries in applications[]
 * @param ktime              output timing table, indexed by kstub->id
 * @return 0 always.
 */
int nocke_all_applications(t_application *applications, int num_applications, t_ktime *ktime)
{
	struct timespec now;
	// Every kernel runs over the full SM range [0, numSMs-1].
	// NOTE(review): idSMs is stack storage whose address is stored in each
	// kstub; kstub->idSMs must not be dereferenced after this function returns.
	int idSMs[2];
	idSMs[0]=0;
	// Launch each kernel, then synchronize to time it in isolation
	for (int i=0; i<num_applications; i++)
		for (int j=0; j < applications[i].num_kernels; j++) {
			idSMs[1] = applications[i].kstubs[j]->kconf.numSMs-1;
			applications[i].kstubs[j]->idSMs = idSMs;
			(applications[i].kstubs[j]->launchCKEkernel)(applications[i].kstubs[j]);
			clock_gettime(CLOCK_REALTIME, &now);
			ktime[applications[i].kstubs[j]->id].start_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
			cudaDeviceSynchronize(); // wait so end_time reflects this kernel only
			clock_gettime(CLOCK_REALTIME, &now);
			ktime[applications[i].kstubs[j]->id].end_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
		}
	// Reset task counters so kernels can be re-executed later
	for (int i=0; i<num_applications; i++)
		for (int j=0; j < applications[i].num_kernels; j++)
			cudaMemset(applications[i].kstubs[j]->d_executed_tasks, 0, sizeof(int));
	return 0;
}
/* Attach a kernel stub to an already-created kstreams descriptor, clearing
   its stream count and saved task counter. Always returns 0. */
int assing_kstreams(t_kernel_stub *kstub, t_kstreams *kstr)
{
	kstr->num_streams = 0;
	kstr->save_cont_tasks = 0;
	kstr->kstub = kstub;
	return 0;
}
/**
 * Given the currently running kernel (curr_kid), search the ready list for
 * the partner kernel that yields the highest co-execution speedup according
 * to the smk_best_sp profiling table.
 *
 * If the best achievable speedup reaches MIN_SPEEDUP, outputs the partner id
 * (*select_kid), its index in the ready list (*select_index) and the stream
 * counts (*b0 for curr_kid, *b1 for the partner). Otherwise *select_kid is
 * set to EMPTY and *b0 to curr_kid's full solo stream count (*select_index
 * and *b1 are left untouched, matching the original contract).
 *
 * Fix: best_index/best_kid are now defensively initialized (they were read
 * uninitialized-looking to compilers; correctness relied on best_perf >=
 * MIN_SPEEDUP implying a prior assignment). bad_partner is kept for
 * interface compatibility but is currently unused.
 *
 * @return 0 always.
 */
int new_get_best_partner(t_Kernel curr_kid, t_Kernel *kid, State *k_done, float **bad_partner, int num_applications, t_Kernel *select_kid, int *select_index, int *b0, int *b1)
{
	(void)bad_partner; // unused: bad-partner tracking is disabled in the caller
	double best_perf = -1.0;
	int best_index = -1;       // defensive init: only meaningful when best_perf >= MIN_SPEEDUP
	t_Kernel best_kid = EMPTY; // defensive init
	for (int i=0; i<num_applications; i++){ // For the remaining kernels
		if (k_done[i] == READY){ // If kernel has not been executed
			t_co_speedup *info = &smk_best_sp[curr_kid][kid[i]];
			if (info->speedup > best_perf) { // Track the ready partner with the highest co-exec speedup
				best_perf = info->speedup;
				best_index = i;
				best_kid = kid[i];
			}
		}
	}
	if (best_perf >= MIN_SPEEDUP) {
		*select_kid = best_kid;
		*select_index = best_index;
		*b0 = smk_best_sp[curr_kid][best_kid].pairs[0];
		*b1 = smk_best_sp[curr_kid][best_kid].pairs[1];
	}else{
		*select_kid = EMPTY; // Indicate no coexecution
		*b0 = smk_solo[curr_kid].num_configs; // Low benefit: run curr_kid alone with all its blocks
	}
	return 0;
}
/* Scan k_done[] from the front and store in *index the position of the first
   kernel in READY state, or -1 when none is ready. Always returns 0. */
int new_find_first_kernel( State *k_done, int *index, int num_kernels)
{
	*index = -1; // default: no available kernel found
	for (int pos = 0; pos < num_kernels; pos++) {
		if (k_done[pos] == READY) {
			*index = pos;
			break;
		}
	}
	return 0;
}
// greedy_coexecution aims to reduce the makespan of a set of applications.
// Given a list of ready kernels, the scheduler selects a pair of kernels to
// coexecute; the first pair is the one with the highest profiled speedup, and
// whenever one kernel finishes, the ready kernel with the highest speedup when
// coexecuted with the still-running one is selected next. Partial eviction and
// addition of streams adjusts resource assignment to each running kernel.
// The whole experiment is repeated 13 times, rotating the application order.
int greedy_coexecution(int deviceId)
{
	cudaError_t err;
	struct timespec now;
	// Select device
	cudaSetDevice(deviceId);
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, deviceId);
	printf("Device=%s\n", deviceProp.name);
	// Create sched structure
	t_sched sched;
	create_sched(&sched);
	// Load profiling tables
	read_profling_tables();
	// Important for reducing the number of streams launched by RCONV and CCONV:
	// profiling tables are modified for RCONV to halve its concurrent streams
	int n = Number_of_Kernels-1;
	for (int i=0; i<n; i++) {
		smk_best_sp[RCONV][i].pairs[0] = smk_best_sp[RCONV][i].pairs[0]/2;
		smk_best_sp[i][RCONV].pairs[1] = smk_best_sp[i][RCONV].pairs[1]/2;
	}
	smk_solo[RCONV].num_configs = smk_solo[RCONV].num_configs /2;
	// Applications: 13 single-kernel applications
	int num_applications=13;
	t_application *applications = (t_application *)calloc(num_applications, sizeof(t_application));
	applications[0].num_kernels = 1; applications[0].kid[0] = VA;
	applications[1].num_kernels = 1; applications[1].kid[0] = MM;
	applications[2].num_kernels = 1; applications[2].kid[0] = RCONV;
	applications[3].num_kernels = 1; applications[3].kid[0] = CCONV;
	applications[4].num_kernels = 1; applications[4].kid[0] = HST256;
	applications[5].num_kernels = 1; applications[5].kid[0] = Reduction;
	applications[6].num_kernels = 1; applications[6].kid[0] = PF;
	applications[7].num_kernels = 1; applications[7].kid[0] = BS;
	applications[8].num_kernels = 1; applications[8].kid[0] = SPMV_CSRscalar;
	applications[9].num_kernels = 1; applications[9].kid[0] = GCEDD;
	applications[10].num_kernels = 1; applications[10].kid[0] = SCEDD;
	applications[11].num_kernels = 1; applications[11].kid[0] = NCEDD;
	applications[12].num_kernels = 1; applications[12].kid[0] = HCEDD;
	/*
	applications[0].num_kernels = 4; applications[0].kid[0] = GCEDD;
	applications[0].kid[1] = SCEDD;
	applications[0].kid[2] = NCEDD;
	applications[0].kid[3] = HCEDD;
	applications[1].num_kernels = 2; applications[1].kid[0] = RCONV;
	applications[1].kid[1] = CCONV;
	applications[2].num_kernels = 1; applications[2].kid[0] = HST256;
	applications[3].num_kernels = 1; applications[3].kid[0] = Reduction;
	applications[4].num_kernels = 1; applications[4].kid[0] = PF;
	applications[5].num_kernels = 1; applications[5].kid[0] = VA;
	applications[6].num_kernels = 1; applications[6].kid[0] = BS;
	applications[7].num_kernels = 1; applications[7].kid[0] = SPMV_CSRscalar;
	applications[8].num_kernels = 1; applications[8].kid[0] = MM;
	*/
	// First kernel of each application is sent to ready
	t_Kernel *kid = (t_Kernel *) calloc(num_applications, sizeof(t_Kernel)); // List of ready kernels
	for (int i=0; i<num_applications; i++)
		kid[i] = applications[i].kid[0];
	// k_done annotates kernel state
	State *k_done = (State *)calloc(num_applications, sizeof(int));
	/** Create common streams for all kernels: two for asynchronous transfers, one for preemption commands */
	cudaStream_t *transfers_s;
	transfers_s = (cudaStream_t *)calloc(2, sizeof(cudaStream_t));
	for (int i=0;i<2;i++){
		err = cudaStreamCreate(&transfers_s[i]);
		checkCudaErrors(err);
	}
	cudaStream_t preemp_s;
	checkCudaErrors(cudaStreamCreateWithFlags(&preemp_s, cudaStreamNonBlocking));
	/** Create stubs: kernels after the first one of an application share its params */
	for (int i=0; i<num_applications; i++)
		for (int j=0; j < applications[i].num_kernels; j++)
			if (j == 0) // If first application kernel
				create_stubinfo(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s);
			else
				create_stubinfo_with_params(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s, applications[i].kstubs[0]->params);
	// Make allocation and HtD transfer for applications
	alloc_HtD_tranfers(applications, num_applications);
	cudaDeviceSynchronize();
	// Bad partners: each kernel annotates ids of partners with bad coexec speedup
	// (NOTE(review): allocated and passed along but never written -- annotation code is commented out below)
	float **bad_partner = (float **)calloc(Number_of_Kernels, sizeof(float *));
	for (int i=0;i<Number_of_Kernels; i++)
		bad_partner[i] = (float *)calloc(Number_of_Kernels, sizeof(float));
	// Annotate start and end kernel execution time (concurrent run)
	t_ktime *ktime_conc = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));
	// Idealized sequential timeline derived from the solo measurements
	t_ktime *ktime2_seq = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));
	// Save applications to change the order in following iterations
	t_application *save_applications = (t_application *)calloc(num_applications, sizeof(t_application));
	for (int i=0; i<num_applications; i++){
		save_applications[i].num_kernels = applications[i].num_kernels;
		for (int j=0; j<applications[i].num_kernels; j++) {
			save_applications[i].kid[j] = applications[i].kid[j];
			save_applications[i].kstubs[j] = applications[i].kstubs[j];
		}
	}
	// Sequential execution (baseline)
	t_ktime *ktime_seq = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));
	clock_gettime(CLOCK_REALTIME, &now);
	double time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	double start_seq_time = time1;
	nocke_all_applications(applications, num_applications, ktime_seq);
	clock_gettime(CLOCK_REALTIME, &now);
	double time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
	double seq_exec_time = time2-time1;
	// Repeat the concurrent experiment once per rotation of the application order
	for (int perm=0; perm<13; perm++) {
		// Application order permutation (rotate by perm positions)
		for (int i=0; i<num_applications; i++){
			applications[(i+perm) % num_applications].num_kernels = save_applications[i].num_kernels;
			applications[(i+perm) % num_applications].index=0;
			for (int j=0; j<save_applications[i].num_kernels; j++) {
				applications[(i+perm) % num_applications].kid[j] = save_applications[i].kid[j];
				applications[(i+perm) % num_applications].kstubs[j] = save_applications[i].kstubs[j];
			}
		}
		for (int i=0; i<num_applications; i++)
			kid[i] = applications[i].kid[0];
		// Create streams kernel info for coexecution
		t_kstreams *kstr = (t_kstreams *)calloc(num_applications, sizeof(t_kstreams));
		for (int i=0; i<num_applications; i++)
			create_kstreams(applications[i].kstubs[0], &kstr[i]);
		// Initially all kernels are ready
		for (int i=0; i< num_applications; i++)
			k_done[i] = READY;
		// Coexecution info: at most two kernels coexecute
		t_kcoexec coexec;
		create_coexec(&coexec, 2);
		// Launch proxy
		launch_generic_proxy((void *)&sched); // Launch proxy
		// Select initial kernel
		int task_index = 0; // Index of the kernel in the array with ready kernels
		//k_done[task_index] = 1; // Kernel removed from pending kernels*/
		// Reset timers
		memset(ktime_conc, 0, sizeof(t_ktime)*(Number_of_Kernels-1));
		int kernel_idx; // Position of kernel in coexec struct
		double speedup;
		clock_gettime(CLOCK_REALTIME, &now);
		time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
		double time_sample;
		do {
			if (coexec.num_kernels == 0) {
				if (perm==1)
					printf("Aqui\n");
				new_find_first_kernel(k_done, &task_index, num_applications); // Index in k_done: arbitrarily choose the first ready one from the array head
				if (task_index == -1)
					break; // Exit: no remaining kernels*/
				k_done[task_index] = RUNNING;
			}
			// Given kid[task_index] kernel, choose the partner with highest performance
			int task_index2; // index in kernel ready list of the selected kernel
			int b0, b1;
			t_Kernel select_kid;
			new_get_best_partner(kid[task_index], kid, k_done, bad_partner, num_applications, &select_kid, &task_index2, &b0, &b1);
			char k0_name[30]; char k1_name[30];
			kid_from_index(kid[task_index], k0_name);
			kid_from_index(select_kid, k1_name);
			//printf("---> Selecting %s(%d) %s(%d)\n",k0_name, b0, k1_name, b1 );
			// Check if kernel is already in coexec (because it is executing)
			int pos, dif;
			kernel_in_coexec(&coexec, &kstr[task_index], &pos);
			// kernel position in coexec struct (0 or 1)
			if (pos == -1) // kernel is not in coexec
				add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index], b0, task_index); // Add b0 streams
			else {
				if ((dif = ( b0 - kstr[task_index].num_streams)) > 0) // New streams must be added
					add_streams_to_kernel(&coexec, &sched, coexec.kstr[pos], dif);
				else {
					evict_streams(coexec.kstr[pos], -dif); // Some running streams must be evicted
					coexec.num_streams[pos] +=dif;
				}
			}
			if (select_kid != EMPTY){ // if coexecution is theoretically beneficial
				k_done[task_index2] = RUNNING; // Remove kernel from ready list
				add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], b1, task_index2); // Add b1 streams
			}
			// Execute kernels (launching streams) in coexec structure
			launch_coexec(&coexec);
			// Annotate kernel start time (first time each kernel is seen running)
			clock_gettime(CLOCK_REALTIME, &now);
			time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
			if (coexec.kstr[0] != NULL)
				if (ktime_conc[coexec.kstr[0]->kstub->id].start_time == 0)
					ktime_conc[coexec.kstr[0]->kstub->id].start_time = time_sample;
			if (coexec.kstr[1] != NULL)
				if (ktime_conc[coexec.kstr[1]->kstub->id].start_time == 0)
					ktime_conc[coexec.kstr[1]->kstub->id].start_time = time_sample;
			// Wait for termination condition
			wait_for_kernel_termination_with_proxy(&sched, &coexec, &kernel_idx, &speedup);
			//if (coexec.num_kernels == 1)
			//	break; // The last kernels has finished. Exit
			if (speedup < MIN_SPEEDUP && coexec.num_kernels == 2){ // If speedup is not good, stop second kernel
				evict_streams(coexec.kstr[1], coexec.kstr[1]->num_streams); // Stop all the streams of the second kernel (why not the first one? -> criterion based on remaining execution time?)
				k_done[coexec.queue_index[1]]= READY; // Put the second kernel as ready again
				//bad_partner[coexec.kstr[0]->kstub->id][coexec.kstr[1]->kstub->id] = -1;//speedup; // Annotate bad partner
				//bad_partner[coexec.kstr[1]->kstub->id][coexec.kstr[0]->kstub->id] = -1; //;
				clock_gettime(CLOCK_REALTIME, &now);
				time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
				ktime_conc[coexec.kstr[1]->kstub->id].end_time = time_sample;
				rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[1]); // Remove second kernel from coexec struct
				// Add new executing streams to first kernel (in coexec struct) so that it runs the maximum number of streams
				add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], smk_solo[coexec.kstr[0]->kstub->id].num_configs - coexec.kstr[0]->num_streams);
				launch_coexec(&coexec); // Launch new streams of first kernel
				wait_for_kernel_termination_with_proxy(&sched, &coexec, &kernel_idx, &speedup); // Wait for first kernel to finish; kernel_idx -> index in coexec
				int kind = coexec.queue_index[0]; // Save index in ready list of the finished kernel
				// Update coexec: remove first kernel
				clock_gettime(CLOCK_REALTIME, &now);
				time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
				ktime_conc[coexec.kstr[0]->kstub->id].end_time = time_sample;
				rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[0]);
				// If application has more kernels activate the next one
				// Kernel index in ready list
				if (applications[kind].index + 1 < applications[kind].num_kernels) {
					applications[kind].index++;
					kid[kind] = applications[kind].kid[applications[kind].index]; // get ID of new kernel
					k_done[kind] = READY; // Set ready
					assing_kstreams(applications[kind].kstubs[applications[kind].index], &kstr[kind]);
				}
				else
					k_done[kind] = DONE; // Otherwise, application has finished
			}
			else
			{
				int kind = coexec.queue_index[kernel_idx]; // Save index in ready list of the finished kernel
				// Remove finished kernel
				clock_gettime(CLOCK_REALTIME, &now);
				time_sample = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
				ktime_conc[coexec.kstr[kernel_idx]->kstub->id].end_time = time_sample;
				rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[kernel_idx]);
				// If application has more kernels activate the next one
				if (applications[kind].index + 1 < applications[kind].num_kernels) {
					applications[kind].index++;
					kid[kind] = applications[kind].kid[applications[kind].index]; // get ID of new kernel and write it in kid list
					k_done[kind] = READY; // Set the new kernel ready
					assing_kstreams(applications[kind].kstubs[applications[kind].index], &kstr[kind]); // Assign new kernel to application kstreams
				}
				else
					k_done[kind] = DONE; // Otherwise, application has finished
			}
			if (coexec.num_kernels != 0){
				// find task index of the still-running kernel
				int i;
				for (i=0;i<MAX_NUM_COEXEC_KERNELS; i++)
					if (coexec.kstr[i] != NULL)
						task_index = coexec.queue_index[i];
			}
		} while (1);
		// Evict proxy
		sched.kernel_evict_zc[0] = PROXY_EVICT;
		cudaDeviceSynchronize();
		clock_gettime(CLOCK_REALTIME, &now);
		time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
		double conc_exec_time = time2-time1;
		//printf("Concurrent excution time=%f sec.\n", time2-time1);
		// Free per-permutation structures
		remove_coexec(&coexec);
		for (int i=0; i<num_applications; i++)
			remove_kstreams(&kstr[i]);
		free(kstr);
		// Reset task counters
		for (int i=0; i<num_applications; i++)
			for (int j=0; j < applications[i].num_kernels; j++)
				cudaMemset(applications[i].kstubs[j]->d_executed_tasks, 0, sizeof(int));
		// Set streams status to PREP
		for (int i=0; i<num_applications; i++)
			for (int j=0; j < applications[i].num_kernels; j++){
				for (int k=0; k<MAX_STREAMS_PER_KERNEL; k++)
					applications[i].kstubs[j]->h_state[k] = PREP;
				cudaMemcpy(applications[i].kstubs[j]->gm_state, applications[i].kstubs[j]->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, cudaMemcpyHostToDevice);
			}
		// Reset stream index
		for (int i=0; i<num_applications; i++)
			for (int j=0; j < applications[i].num_kernels; j++)
				applications[i].kstubs[j]->stream_index = 0;
		// Calculate stats: build the idealized sequential timeline from solo times
		double acc_time=start_seq_time;
		for (int i=0;i<num_applications;i++) {
			for (int j=0; j < applications[i].num_kernels; j++){
				int id = applications[i].kstubs[j]->id;
				ktime2_seq[id].start_time =acc_time;
				acc_time += (ktime_seq[id].end_time - ktime_seq[id].start_time);
				ktime2_seq[id].end_time =acc_time;
			}
		}
		// ANTT = average normalized turnaround time (seq vs concurrent)
		double antt_seq=0, antt_conc=0;
		int cont =0;
		printf("Kid \t endtime_seq \t endtime_conc \t NTT_Seq \t NTT_conc\n");
		for (int i=0; i<Number_of_Kernels-1; i++)
			if (ktime_seq[i].start_time != 0){
				printf("%d \t %f \t %f \t %f \t %f\n", i, (ktime2_seq[i].end_time - start_seq_time), (ktime_conc[i].end_time - time1), (ktime2_seq[i].end_time - start_seq_time) /(ktime2_seq[i].end_time - ktime2_seq[i].start_time), (ktime_conc[i].end_time - time1)/(ktime2_seq[i].end_time - ktime2_seq[i].start_time) );
				antt_seq += (ktime2_seq[i].end_time - start_seq_time) /(ktime_seq[i].end_time - ktime_seq[i].start_time);
				antt_conc += (ktime_conc[i].end_time - time1)/(ktime_seq[i].end_time - ktime_seq[i].start_time);
				cont++;
			}
		printf("ANTT_seq \t ANTT_conc\n");
		printf("%f \t %f\n", antt_seq/(double)cont, antt_conc/(double)cont);
		printf("Speepup = %f\n", seq_exec_time/conc_exec_time);
	}
	return 0;
}
// Real-time scheduler experiment driver.
//
// Runs one latency-sensitive (LS) application (the CEDD pipeline:
// GCEDD -> SCEDD -> NCEDD -> HCEDD, application 0) co-executed against five
// background applications that each run a single kernel `bg_kid`.
// Streams are dynamically moved between the LS kernel and the current
// background kernel so that the LS kernel's throughput (tpms) does not fall
// below `max_slowdown` times its best solo throughput.
//
// Parameters:
//   deviceId     - CUDA device to use.
//   max_slowdown - allowed slowdown factor for the LS application.
//   bg_kid       - kernel id executed by every background application.
// Returns 0 (also on the early-abort paths inside the loop).
int rt_scheduler(int deviceId, double max_slowdown, t_Kernel bg_kid)
{
struct timespec now;
cudaError_t err;
// Select device
cudaSetDevice(deviceId);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceId);
printf("Device=%s\n", deviceProp.name);
// Load offline profiling tables (solo and concurrent tpms per kernel)
read_profling_tables();
// Create sched structure
t_sched sched;
create_sched(&sched);
// Applications: app 0 is the 4-kernel CEDD pipeline (latency sensitive),
// apps 1..5 each run one background kernel `bg_kid`.
int num_applications=6;
t_application *applications = (t_application *)calloc(num_applications, sizeof(t_application));
applications[0].num_kernels = 4; applications[0].kid[0] = GCEDD;
applications[0].kid[1] = SCEDD;
applications[0].kid[2] = NCEDD;
applications[0].kid[3] = HCEDD;
applications[1].num_kernels = 1; applications[1].kid[0] = bg_kid;
applications[2].num_kernels = 1; applications[2].kid[0] = bg_kid;
applications[3].num_kernels = 1; applications[3].kid[0] = bg_kid;
applications[4].num_kernels = 1; applications[4].kid[0] = bg_kid;
applications[5].num_kernels = 1; applications[5].kid[0] = bg_kid;
// First kernel of each application is sent to ready
t_Kernel *kid = (t_Kernel *) calloc(num_applications, sizeof(t_Kernel)); // List of ready kernels
for (int i=0; i<num_applications; i++)
kid[i] = applications[i].kid[0];
// k_done annotates kernel state
// NOTE(review): the loop below marks only the first num_applications-1
// entries READY — confirm the last background app is meant to start unset.
State *k_done = (State *)calloc(num_applications, sizeof(int));
for (int i=0; i< num_applications-1; i++) //App SCEDD is latency sensitive
k_done[i] = READY;
/** Create common streams for all kernels: two for asynchronous transfers, one for preemption commands*/
cudaStream_t *transfers_s;
transfers_s = (cudaStream_t *)calloc(2, sizeof(cudaStream_t));
for (int i=0;i<2;i++){
err = cudaStreamCreate(&transfers_s[i]);
checkCudaErrors(err);
}
cudaStream_t preemp_s;
checkCudaErrors(cudaStreamCreateWithFlags(&preemp_s, cudaStreamNonBlocking));
/** Create stubs: the first kernel of an app creates its params, the rest reuse them ***/
for (int i=0; i<num_applications; i++)
for (int j=0; j < applications[i].num_kernels; j++)
if (j == 0) // If first application kernel
create_stubinfo(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s);
else
create_stubinfo_with_params(&applications[i].kstubs[j], deviceId, applications[i].kid[j], transfers_s, &preemp_s, applications[i].kstubs[0]->params);
// Make allocation and HtD transfer for applications
alloc_HtD_tranfers(applications, num_applications);
cudaDeviceSynchronize();
// Calculate sequential execution time (overlapping is still possible)
clock_gettime(CLOCK_REALTIME, &now);
double time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
t_ktime *ktime = (t_ktime *)calloc(Number_of_Kernels-1, sizeof(t_ktime));
nocke_all_applications(applications, num_applications, ktime);
clock_gettime(CLOCK_REALTIME, &now);
double time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
printf("Sequential execution time =%f sec\n", time2-time1);
// Sequential reference time of the LS pipeline only (sum of its 4 kernels)
double seq_time = ktime[GCEDD].end_time - ktime[GCEDD].start_time
+ ktime[SCEDD].end_time - ktime[SCEDD].start_time
+ ktime[NCEDD].end_time - ktime[NCEDD].start_time
+ ktime[HCEDD].end_time - ktime[HCEDD].start_time ;
// Create streams kernel info for coexecution
t_kstreams *kstr = (t_kstreams *)calloc(num_applications, sizeof(t_kstreams));
for (int i=0; i<num_applications; i++)
create_kstreams(applications[i].kstubs[0], &kstr[i]);
// Coexecution info: at most two kernels run concurrently (LS + one background)
t_kcoexec coexec;
create_coexec(&coexec, 2);
// Launch proxy
launch_generic_proxy((void *)&sched); // Launch proxy
int kernel_idx; // Position of kernel in coexec struc
double speedup;
int task_index = 0; // Index of the LS kernel in kstr
int task_index2 = 1; // Index of the current background kernel in kstr
double time1_rt, time2_rt;
clock_gettime(CLOCK_REALTIME, &now);
time1_rt = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
double acc_numstreams_time=0; // to calculate time · num_streams
do {
// Coexecution configuration with the maximum number of streams for kernel 0
t_Kernel idk0 = kstr[task_index].kstub->id;
t_Kernel idk2 = kstr[task_index2].kstub->id;
int num_cf = smk_conc[idk0][idk2].num_configs;
// Pick the profiled (b0,b1) stream pair that gives the LS kernel (b0) the
// larger share: pairs[] is ordered, so take whichever end favors kernel 0.
int b0, b1; // Max num of streams must be assigned to ls kernel
if (smk_conc[idk0][idk2].pairs[0][0] > smk_conc[idk0][idk2].pairs[0][1]) {
b0 = smk_conc[idk0][idk2].pairs[0][0];
b1 = smk_conc[idk0][idk2].pairs[0][1];
}
else {
int num_cf = smk_conc[idk0][idk2].num_configs;
b0 = smk_conc[idk0][idk2].pairs[num_cf-1][0];
b1 = smk_conc[idk0][idk2].pairs[num_cf-1][1];
}
printf("kids=(%d,%d) streams=(%d,%d)\n", idk0, idk2, b0, b1);
/* printf("Max tmps=%f\n", smk_conc[idk0][idk2].tpms[0][0]);
for (int i=0;i<=7;i++)
printf("Solo num_str=%d tpms=%f\n", i+1, smk_solo[idk0].tpms[i]);
for (int i=0;i<7;i++)
printf("Coexec num_str=%d tpms=%f\n", i+1, smk_conc[idk0][idk2].tpms[i][0]);
*/
// Find whether each of the two kernels is already in the coexec structure
int pos0, pos1;
kernel_in_coexec(&coexec, &kstr[task_index], &pos0);
kernel_in_coexec(&coexec, &kstr[task_index2], &pos1);
if (pos0 == -1 && pos1 == -1) { // No kernels are executing
add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index], b0, task_index); // Add b0 streams
add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], b1, task_index2); // Add b1 streams
}
else {
if (pos0 == -1){
if (coexec.kstr[1]->num_streams > b1){ // If non-ls kernel is running and have too much streams
int rem_streams = coexec.kstr[1]->num_streams - b1;
evict_streams(coexec.kstr[1], rem_streams); // Now, non-ls kernels has b1 streams
coexec.num_streams[1] -= rem_streams;
printf("Borrando %d(%d)streams de kernel %d\n", rem_streams, coexec.num_streams[1], coexec.kstr[1]->kstub->id);
}
add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index], b0, task_index); // Add b0 streams
}
if (pos1 == -1){
if (coexec.kstr[0]->num_streams < b0) // If LS kernel is running and has too few streams
add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], b0 - coexec.kstr[0]->num_streams); // Add more streams to LS kernel
add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], b1, task_index2); // Add b1 streams
}
}
// Execute kernels (launching streams) in coexec structure
launch_coexec(&coexec);
double start_time;
if (pos0 == -1) {// When a new ls kernel is launched, get time
// Get current time
clock_gettime(CLOCK_REALTIME, &now);
start_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
}
// Calculate minimum tpms that ls kernel can obtain
double max_tpms = smk_solo[idk0].tpms[smk_solo[idk0].num_configs-1]; // Faster tpms
double min_tpms = max_slowdown * max_tpms;
// Wait for termination condition.
// rc == 1 -> LS kernel too slow: steal one stream from the background kernel.
// rc == 2 -> LS kernel has slack: donate one stream to the background kernel.
// rc == 0 -> some kernel finished.
// stop_pingpong prevents oscillating give/steal of the same stream.
int rc;
double sampling_interval = 0.0004;
int flag_rc2 = 0, stop_pingpong=0;
do {
ls_coexec_wait_for_kernel_termination_with_proxy(&sched, &coexec, min_tpms, start_time, sampling_interval, &acc_numstreams_time, &kernel_idx, &rc);
printf("rc=%d\n", rc);
if (rc == 1 && coexec.kstr[1] != NULL) { // LS kernel performace is low: steal a stream of non LS kernel if this kernel is still running
evict_streams(coexec.kstr[1], 1);
coexec.num_streams[1]--;
if (coexec.kstr[1]->num_streams == 0) {
rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[1]);
printf("Eliminando stream de no-ls\n");
}
add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], 1);
launch_coexec(&coexec);
if (flag_rc2 == 1) stop_pingpong =1;
}
if (rc == 2 && coexec.kstr[0]->num_streams >= 2 && stop_pingpong == 0) { // LS kernel performace is high: give a stream to non LS kerne LS Kernel has, at least 2, streams
evict_streams(coexec.kstr[0], 1);
coexec.num_streams[0]--;
kernel_in_coexec(&coexec, &kstr[task_index2], &pos1);
if (pos1 != -1) // If non ls is running
add_streams_to_kernel(&coexec, &sched, coexec.kstr[1], 1); // add a new stream to non ls
else
add_kernel_for_coexecution(&coexec, &sched, &kstr[task_index2], 1, task_index2); // else add nonls to coexec with one stream
launch_coexec(&coexec);
flag_rc2=1;
}
sampling_interval = 0.0002;
}
while (rc == 1 || rc==2); // If rc==0, a kernel has finished
int kind = coexec.queue_index[kernel_idx]; // Save index in ready list of the finished kernel
// Remove finished kernel
rem_kernel_from_coexecution(&coexec, &sched, coexec.kstr[kernel_idx]);
printf("Eliminando kernel con idx = %d\n", kernel_idx);
// HCEDD is the last LS pipeline kernel: experiment ends when it finishes
if (kid[kind] == HCEDD)
break;
// If application has more kernels activate the next one
if (applications[kind].index + 1 < applications[kind].num_kernels) {
applications[kind].index++;
kid[kind] = applications[kind].kid[applications[kind].index]; // get ID of new kernel
k_done[kind] = READY; // Set ready
assing_kstreams(applications[kind].kstubs[applications[kind].index], &kstr[kind]);
}
else {
k_done[kind] = DONE; // Application finished
task_index2++; // move to next non-ls kernel
if (task_index2 >= num_applications){
printf("Error: no hay suficiente sapplicaciones de non ls kernel. Termino de forma anticipada\n");
break;
}
}
} while (1);
clock_gettime(CLOCK_REALTIME, &now);
time2_rt = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
double rt_exec_time = time2_rt - time1_rt;
// Report: slowdown target fulfilment and stream usage weighted by time
printf("kid, Slowdownm Fullfil, weighted_streams_per_time, \n");
printf("%d, %f, %f, %f\n", kid[1], max_slowdown, (max_slowdown*seq_time-rt_exec_time)/(max_slowdown*seq_time), acc_numstreams_time/rt_exec_time);
//printf(",st=%f rt=%f Per=%f, num_streams=%f\n", seq_time, rt_exec_time, (max_slowdown*seq_time-rt_exec_time)/max_slowdown*seq_time, acc_numstreams_time/rt_exec_time);
// Evict proxy
sched.kernel_evict_zc[0] = PROXY_EVICT;
cudaDeviceSynchronize();
//printf("Concurrent excution time=%f sec.\n", time2_rt-time1_rt);
return 0;
}
|
bfc621a63b1e5e33c5fd9a38dfb1f8151f1c1f43.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdio.h>
#include<vector>
#include<array>
#include<cassert>
#include<chrono>
#include<cmath>
#include "hip/hip_runtime_api.h"
#include "matrix.h"
#include "ReadWriteData.h"
#include "header.h"
#include "factorization.h"
#include "ILU_0.h"
namespace{
const int max_possible_grid_dim = 65536;
//---------------------------------------------------------------------------------------------------------------------------------------------------------
//APPROACH-1
// Expand row `curr_row_index` (from its diagonal entry to its end) into the
// dense scratch array `current_row_elements_arr`, zero-filling the dense
// slots for columns >= curr_row_index first.
// Cooperative: the whole thread block must call this (contains __syncthreads()).
__device__ void fill_partial_current_row_array(const int nrows, const int curr_row_index, double* const current_row_elements_arr, const int* const row_ptrs,
const int* const col_idxs, const double* const page_values, const int* const diag_ptrs)
{
    const int first_loc = diag_ptrs[curr_row_index];   // CSR location of the diagonal entry
    const int last_loc = row_ptrs[curr_row_index + 1]; // one past the row's final entry

    // Zero every dense slot this row may touch (columns curr_row_index..nrows-1).
    int col = curr_row_index + threadIdx.x;
    while(col < nrows)
    {
        current_row_elements_arr[col] = 0;
        col += blockDim.x;
    }
    __syncthreads(); // zeros must be visible to all threads before scattering

    // Scatter the stored entries on top of the zeros.
    int loc = first_loc + threadIdx.x;
    while(loc < last_loc)
    {
        current_row_elements_arr[col_idxs[loc]] = page_values[loc];
        loc += blockDim.x;
    }
}
// Update every row below `curr_row_index` with the current pivot row.
// One warp is assigned per row below the pivot. For each such row:
//   - the lane holding the entry in the pivot column divides it by the pivot
//     (storing the L factor in place and in shared row_ele_arr),
//   - the remaining lanes then subtract L(row,pivot) * U(pivot,col) using the
//     dense pivot-row copy in column_elements_array_for_current_row.
// Cooperative: the whole thread block must call this (contains __syncthreads()).
__device__ void modify_rows_below_curr_row(const int nrows, const int curr_row_index,const double* const column_elements_array_for_current_row, const int* const row_ptrs,
const int* const col_idxs, double* const page_values, const int* const diag_ptrs)
{
const int warp_id = threadIdx.x / WARP_SIZE;
const int id_within_warp = threadIdx.x % WARP_SIZE;
const int total_num_warps_in_block = blockDim.x / WARP_SIZE;
// Shared per-row L factors (row_ele_arr[r] = L(r, curr_row_index))
__shared__ double row_ele_arr[MAX_NUM_ROWS];
// Initialize it with zeroes so rows whose pivot-column entry is absent
// contribute nothing in the update below.
for(int i = threadIdx.x + curr_row_index + 1; i < nrows ; i += blockDim.x)
{
row_ele_arr[i] = 0;
}
__syncthreads();
// One warp per row below the pivot
for(int row_below_index = warp_id + curr_row_index + 1; row_below_index < nrows ; row_below_index += total_num_warps_in_block )
{
for(int i = id_within_warp + row_ptrs[row_below_index] ; i < row_ptrs[row_below_index + 1]; i += WARP_SIZE)
{
const int col_index = col_idxs[i];
if(col_index == curr_row_index)
{
// This lane owns the pivot-column entry: compute and store the L factor.
double diag_ele = page_values[diag_ptrs[curr_row_index]];
assert(diag_ele != 0);
double row_ele = page_values[i] / diag_ele;
row_ele_arr[row_below_index] = row_ele;
page_values[i] = row_ele;
}
// Order the row_ele_arr write above before the reads below within the warp.
// NOTE(review): __activemask() may not include every lane intended to
// participate under independent thread scheduling; an explicit mask of the
// lanes still inside this loop would be safer — confirm on target arch.
__syncwarp(__activemask()); //else a warning
if(col_index > curr_row_index)
{
// Entries right of the pivot column: A(r,c) -= L(r,p) * U(p,c)
double col_ele = column_elements_array_for_current_row[col_index];
page_values[i] -= row_ele_arr[row_below_index] * col_ele;
}
}
}
}
// ILU(0), approach 1: one thread block factorizes one page (small matrix)
// of the batch via classical row-by-row Gaussian elimination.
// All pages share the same sparsity pattern (row_ptrs/col_idxs/diag_ptrs);
// each page has its own nnz-sized slice of `values`.
// (Tried: staging row_ptrs/col_idxs/values in shared memory — it slowed the
// kernel down, so only the dense scratch row lives in shared memory.)
__global__ void compute_ilu_0_approach1_kernel(const int npages, const int nrows, const int nnz, const int* const row_ptrs, const int* const col_idxs,
double* const values, const int* const diag_ptrs)
{
    // Dense copy of the current pivot row, shared by the whole block.
    __shared__ double current_row_elements_arr[MAX_NUM_ROWS];

    for(int page_id = blockIdx.x; page_id < npages; page_id += gridDim.x)
    {
        double* const page_values = values + nnz * page_id; // this page's value slice

        for(int pivot_row = 0; pivot_row < nrows; pivot_row++)
        {
            // Expand the pivot row into dense form...
            fill_partial_current_row_array(nrows, pivot_row, current_row_elements_arr, row_ptrs, col_idxs, page_values, diag_ptrs);
            __syncthreads();
            // ...then eliminate it from all rows below.
            modify_rows_below_curr_row(nrows, pivot_row, current_row_elements_arr, row_ptrs, col_idxs, page_values, diag_ptrs);
            __syncthreads();
        }
    }
}
// Host launcher for ILU(0) approach 1: one thread block per matrix in the batch.
// `diag_ptrs` is a device array holding the CSR location of each diagonal entry.
void ComputeILU0Approach1(PagedCSRMatrices & Factored_Pages , const int* const diag_ptrs)
{
    const dim3 threads(THREADS_PER_BLOCK);
    const dim3 blocks(Factored_Pages.GetNumPages()); // one block per small matrix in batch
    hipLaunchKernelGGL(( compute_ilu_0_approach1_kernel), dim3(blocks), dim3(threads) , 0, 0, Factored_Pages.GetNumPages(), Factored_Pages.GetNumRows(), Factored_Pages.GetNumNz() , Factored_Pages.GetPtrToGpuRowPtrs(),
    Factored_Pages.GetPtrToGpuColInd(), Factored_Pages.GetPtrToGpuValues(), diag_ptrs);
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------//
//APPROACH-2
// ILU(0), approach 2 (single-matrix variant): one thread per nonzero.
// Each thread busy-waits until every dependency of its element is marked in
// `ready`, accumulates sum = Σ L(i,k)*U(k,j), applies
//   values[loc] = (values[loc] - sum) / diag  (diag = 1 for upper entries),
// publishes the result with a __threadfence(), and sets ready[loc].
// Requires the launch to cover all nnz elements with distinct resident
// threads, otherwise the spin-wait can livelock.
__global__ void single_matrix_case_legacy_kernel(const int nrows, const int nnz, const int* const row_ptrs, const int* const col_idxs, volatile double* const values,
const int* const diag_ptrs, volatile bool* const ready)
{
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
const int loc = gid;
if(loc >= nnz)
return;
//Note: I am not using one warp per row as we need a unique thread for each element.
const int col_index = col_idxs[loc];
int row_index;
//find the row_index of this element by scanning row_ptrs
for(int a = 0; a < nrows + 1; a++) //uncoalesced memory accesses
{
if(row_ptrs[a] > loc)
{
row_index = a - 1;
break;
}
}
// for(int a = 0; a < nrows + 1; a++)
// {
// if(row_ptrs[a] >= loc + 1)
// {
// row = a- 1;
// }
// } //eqvt. to the code above
//F_i_j is the element of interest here, L_i_k,U_k_j pairs for k = 0 to min(i,j)-1 and diagonal U_j_j if i > j
double sum = 0;
double diag_val = 1;
bool finished = false;
const int k_max = min(row_index , col_index) - 1;
int maybe_l_loc = row_ptrs[row_index]; // scan cursor over this row's potential L entries
bool diag_flag = row_index > col_index ? false : true; // upper/diag entries need no division
bool sum_flag = false; // true once every L*U product has been accumulated
int current_corr_u_loc;
bool current_corr_u_flag = false; // true while waiting on an already-located U partner
//all memory accesses are uncoalesced here
// int tmp_counter = 0;
while(!finished )
{
//tmp_counter++;
// printf("\n line: %d, thread: %d, counter: %d , diag_flag: %d , sum_flag: %d , maybe_l_loc : %d , row_ptrs[row_index + 1]: %d \n", __LINE__ , threadIdx.x, tmp_counter, diag_flag, sum_flag, maybe_l_loc, row_ptrs[row_index + 1]);
if(maybe_l_loc < row_ptrs[row_index + 1])
{
const int col = col_idxs[maybe_l_loc]; ////uncoalesced memory accesses when accessing col_idxs[]
if(col > k_max)
{
// Past the last admissible k: this entry contributes no L*U product.
maybe_l_loc++;
// printf("\n line: %d, thread: %d, counter: %d , diag_flag: %d , sum_flag: %d , maybe_l_loc : %d , row_ptrs[row_index + 1]: %d \n", __LINE__ , threadIdx.x, tmp_counter, diag_flag, sum_flag, maybe_l_loc, row_ptrs[row_index + 1]);
continue;
}
if(current_corr_u_flag == true)
{
// U partner already located: wait for both operands to be ready.
if(ready[maybe_l_loc] == true && ready[current_corr_u_loc] == true)
{
sum += values[maybe_l_loc] * values[current_corr_u_loc];
maybe_l_loc++;
current_corr_u_flag = false;
}
}
else
{
// Search row k for the U entry at column col_index.
int maybe_u_loc = row_ptrs[col];
for(; maybe_u_loc < row_ptrs[col + 1]; maybe_u_loc++) //uncoalesced memory accesses when accessing col_idxs[]
{
if(col_idxs[maybe_u_loc] == col_index)
{
current_corr_u_flag = true;
current_corr_u_loc = maybe_u_loc;
if(ready[maybe_l_loc] == true && ready[current_corr_u_loc] == true)
{
sum += values[maybe_l_loc] * values[current_corr_u_loc];
maybe_l_loc++;
current_corr_u_flag = false;
}
break;
}
}
if(maybe_u_loc == row_ptrs[col + 1]) //that means no corr. u entry is there
{
maybe_l_loc++;
}
}
}
else
{
sum_flag = true;
}
if(diag_flag == false)
{
// Strict-lower entry: also wait for the diagonal of its column.
const int diag_loc = diag_ptrs[col_index];
if(ready[diag_loc] == true)
{
diag_val = values[diag_loc];
diag_flag = true;
}
}
// printf("\n line: %d, thread: %d, counter: %d , diag_flag: %d , sum_flag: %d , maybe_l_loc : %d , row_ptrs[row_index + 1]: %d \n", __LINE__ , threadIdx.x, tmp_counter, diag_flag, sum_flag, maybe_l_loc, row_ptrs[row_index + 1]);
if(diag_flag == true && sum_flag == true )
{
// All dependencies satisfied: finalize, fence, then publish readiness.
values[loc] = (values[loc] - sum)/diag_val;
__threadfence();
ready[loc] = true;
finished = true;
// printf("\n line: %d, thread: %d, counter: %d , now loc: %d is ready!\n", __LINE__ , threadIdx.x, tmp_counter, loc);
}
}
}
// Host launcher for the single-matrix busy-wait kernel (approach 2 variant).
// Allocates and zeroes the per-nonzero `ready` flags, launches one thread per
// nonzero, waits for completion and releases the flags.
// `diag_ptrs` is a device array with the CSR location of each diagonal entry.
void ComputeILU0Approach2_SingleMatrix(PagedCSRMatrices & Factored_Pages , const int* const diag_ptrs)
{
    dim3 block(THREADS_PER_BLOCK);
    int grid_dim = ceil( (double)Factored_Pages.GetNumNz() /(double)THREADS_PER_BLOCK ) ; // one thread per nonzero
    dim3 grid( grid_dim );
    const int nnz = Factored_Pages.GetNumNz();
    bool* ready = nullptr;
    // Fix: the allocation was previously unchecked; on failure the kernel
    // would dereference a null `ready` array (illegal device address).
    const hipError_t alloc_err = hipMalloc((void**)&ready , nnz * sizeof(bool) );
    if(alloc_err != hipSuccess || ready == nullptr)
    {
        printf("\n hipMalloc of the ready-flag array failed, skipping factorization\n");
        return;
    }
    hipMemset( ready , false, nnz * sizeof(bool) ); // all elements start "not ready"
    hipLaunchKernelGGL(( single_matrix_case_legacy_kernel), dim3(grid) , dim3(block) , 0, 0, Factored_Pages.GetNumRows() , Factored_Pages.GetNumNz(), Factored_Pages.GetPtrToGpuRowPtrs(),
    Factored_Pages.GetPtrToGpuColInd(), Factored_Pages.GetPtrToGpuValues() , diag_ptrs, ready);
    hipDeviceSynchronize();
    hipFree(ready);
}
// Node of a per-nonzero dependency list used by ILU(0) approach 2.
// Lists are built on the device heap (device-side malloc) and chained per
// CSR location; non-diagonal dependencies are inserted in U,L pairs.
struct dependency{
int location; // CSR location of the value this element depends on
bool is_diagonal; // true if `location` is the diagonal divisor U(j,j)
struct dependency* next; // next node in the singly linked list (nullptr terminates)
};
typedef struct dependency dependency;
// Push `new_dependency` onto the front of the list anchored at *graph_element
// (classic singly-linked-list push-front).
__device__ void insert_dependency(dependency** graph_element , dependency* new_dependency)
{
    new_dependency->next = *graph_element;
    *graph_element = new_dependency;
}
// Build, on the device heap, one dependency list per nonzero (approach 2).
// One warp per row; each lane handles one nonzero of that row.
// For the element at (row_index, col_index):
//   - every (L(row,k), U(k,col)) pair with k <= min(row,col)-1 present in the
//     pattern is inserted as two nodes (L first, U second, so after the
//     push-front inserts the U node precedes its L node in the list — the
//     order the approach-2 consumer kernel expects);
//   - strict-lower entries get a final diagonal node for U(col,col), which
//     ends up at the head of the list.
__global__ void create_dependency_graph_for_ilu0_computation(const int nrows, const int* const row_ptrs, const int* const col_idxs,
const int* const diag_ptrs, dependency ** graph)
{
//we use one warp per row
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
const int warp_id = gid/WARP_SIZE;
const int id_within_warp = gid % WARP_SIZE;
const int total_num_warps = (gridDim.x * blockDim.x)/WARP_SIZE;
for(int row_index = warp_id ; row_index < nrows; row_index += total_num_warps)
{
const int row_start = row_ptrs[row_index];
const int row_end = row_ptrs[row_index + 1];
for(int loc = row_start + id_within_warp; loc < row_end ; loc += WARP_SIZE)
{
graph[loc] = nullptr; // start with an empty list for this nonzero
const int col_index = col_idxs[loc];
const int k_max = min(row_index, col_index) - 1;
//the thread concerned for the particular element at: row_index, col_index does all this.--> but this is inefficent as that thread first searches for possible L_val, for each L_val,
//again searches for U_val. (Lot of uncoalesced accesses)
for(int maybe_l_loc = row_start ; maybe_l_loc < loc; maybe_l_loc++) //use loc instead of row_end as the matrix is sorted
{
const int k = col_idxs[maybe_l_loc]; //this should definitely be less than col_index, but we want to make sure it is less than or equal to k_max
if(k > k_max)
{
continue;
}
//find corresponding U at position: k,col_index
for(int maybe_u_loc = row_ptrs[k]; maybe_u_loc < row_ptrs[k+1]; maybe_u_loc++)
{
if(col_idxs[maybe_u_loc] == col_index )
{
// Insert the L node, then the U node on top of it.
dependency* dep_node_l = (dependency*)malloc(sizeof(dependency));
assert(dep_node_l != nullptr);
dep_node_l->location = maybe_l_loc;
dep_node_l->is_diagonal = false;
dep_node_l->next = nullptr;
insert_dependency( &graph[loc] , dep_node_l );
dependency* dep_node_u = (dependency*)malloc(sizeof(dependency));
assert(dep_node_u != nullptr);
dep_node_u->location = maybe_u_loc;
dep_node_u->is_diagonal = false;
dep_node_u->next = nullptr;
insert_dependency( &graph[loc] , dep_node_u );
}
}
}
if(row_index > col_index)
{
// Strict-lower entry: divisor is the diagonal of its column.
const int diag_loc = diag_ptrs[col_index];
dependency* dep_node_diag = (dependency*)malloc(sizeof(dependency));
assert(dep_node_diag != nullptr);
dep_node_diag->location = diag_loc;
dep_node_diag->is_diagonal = true;
dep_node_diag->next = nullptr;
insert_dependency( &graph[loc] , dep_node_diag );
}
}
}
}
// Free every device-heap dependency node reachable from graph[0..nnz).
// Grid-stride: each thread walks and frees the lists of its locations.
__global__ void deallocate_graph_mem(dependency** graph, const int nnz)
{
    const int stride = gridDim.x * blockDim.x;
    for(int loc = threadIdx.x + blockIdx.x * blockDim.x; loc < nnz; loc += stride)
    {
        dependency* node = graph[loc];
        while(node != nullptr)
        {
            dependency* next_node = node->next; // save before freeing
            free(node);
            node = next_node;
        }
    }
}
// Debug dump of the dependency graph; only thread 0 prints (serialized output).
__global__ void print_dependency_graph(dependency** graph, const int nnz)
{
    if(threadIdx.x != 0)
        return;
    for(int loc = 0; loc < nnz; loc++)
    {
        printf("\n\n\n Dependencies for element at location: %d are as follows: ", loc);
        for(dependency* node = graph[loc]; node != nullptr; node = node->next)
        {
            printf("\n%d" , node->location);
        }
    }
}
// Host-side debug helper: print the whole device dependency graph and wait
// for the output to flush before returning.
void PrintGraph(dependency** graph, const PagedCSRMatrices & Factored_Pages)
{
    const int nnz = Factored_Pages.GetNumNz();
    hipLaunchKernelGGL(( print_dependency_graph), dim3(1), dim3(1), 0, 0, graph, nnz);
    hipDeviceSynchronize();
}
// ILU(0), approach 2: one thread per nonzero per page, busy-waiting on the
// linked dependency lists built by create_dependency_graph_for_ilu0_computation.
// The graph encodes the (shared) sparsity pattern once; page_id offsets index
// each page's own `values`/`ready` slices. List layout: optional diagonal
// node at the head, then (U, L) node pairs. Requires at least nnz resident
// threads so every element of a page is owned by a distinct thread
// (otherwise the spin-wait can livelock).
__global__ void compute_ilu_0_approach2_legacy_kernel(const int npages, const int nrows, const int nnz, volatile double* const values, dependency** graph, volatile bool* const ready)
{
const int gid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = gridDim.x * blockDim.x;
if(num_threads < nnz)
{
printf("\n\n Sorry, we need atleast %d number of threads for this kernel to work\n\n", nnz); //This guarantees for nnz elements belonging to one small matrix, we have different threads(different gids) doing the compuatation
return;
}
for(int id = gid; id < nnz * npages ; id += gridDim.x * blockDim.x) //uncoalesced accesses when a thread accesses dependency locations
{
const int page_id = id/nnz;
const int loc = id % nnz;
dependency* address = graph[loc]; // cursor into this element's dependency list
double diag_value = 1; // stays 1 for upper/diagonal entries (no division)
double u_val;
double l_val;
double sum = 0; // accumulates sum of L*U products
bool u_flag = false; // operand-captured flags for the current U,L pair
bool l_flag = false;
bool finished = false;
int diag_loc;
int u_loc;
int l_loc;
while(!finished)
{
if(address != nullptr && address->is_diagonal == true)
{
// Diagonal node (list head, present only for strict-lower entries).
diag_loc = address->location;
if(ready[diag_loc + page_id * nnz] == true)
{
diag_value = values[diag_loc + page_id * nnz];
assert(diag_value != 0);
address = address->next;
}
}
if(address != nullptr && address->is_diagonal == false )
{
// Non-diagonal nodes come in pairs: U node first, its L node next.
u_loc = address->location;
l_loc = (address->next)->location;
// if(ready[u_loc + page_id * nnz] == true && ready[l_loc + page_id * nnz] == true)
// {
// double u_val = values[u_loc + page_id * nnz];
// double l_val = values[l_loc + page_id * nnz];
// sum += u_val * l_val;
// address = (address->next)->next;
// }
// Capture each operand as soon as it becomes ready (independently).
if(u_flag == false && ready[u_loc + page_id * nnz] == true)
{
u_val = values[u_loc + page_id * nnz];
u_flag = true;
}
if(l_flag == false && ready[l_loc + page_id * nnz] == true)
{
l_val = values[l_loc + page_id * nnz];
l_flag = true;
}
if(u_flag == true && l_flag == true)
{
sum += u_val * l_val;
u_flag = false;
l_flag = false;
address = (address->next)->next;
}
}
if(address == nullptr)
{
// All dependencies consumed: finalize, fence, then publish readiness.
values[loc + page_id * nnz] = (values[loc + page_id * nnz] - sum)/diag_value;
__threadfence();
ready[loc + page_id * nnz] = true;
finished = true;
//printf("\n Now loc: %d is ready", loc);
}
}
}
}
// Host driver for ILU(0) approach 2:
//   1. build the per-nonzero dependency graph on the device heap (one warp/row),
//   2. run the busy-wait factorization kernel over all pages,
//   3. free the graph nodes and the helper arrays.
void ComputeILU0Approach2(PagedCSRMatrices & Factored_Pages , const int* const diag_ptrs)
{
    const int npages = Factored_Pages.GetNumPages();
    const int nrows = Factored_Pages.GetNumRows();
    const int nnz = Factored_Pages.GetNumNz();

    dim3 block(THREADS_PER_BLOCK);
    // Graph construction uses one warp per matrix row.
    const int warps_per_block = THREADS_PER_BLOCK/WARP_SIZE;
    const int graph_grid = ceil((double)nrows / (double)warps_per_block);
    dim3 grid( graph_grid );
    void* GRAPH;
    hipMalloc( (void**)& GRAPH, sizeof(dependency*) * nnz); //array of pointers of type dependency
    dependency** graph = (dependency**)GRAPH;
    // Dependency graph lives on the device heap as per-nonzero adjacency lists.
    hipLaunchKernelGGL(( create_dependency_graph_for_ilu0_computation), dim3(grid), dim3(block) , 0, 0, nrows, Factored_Pages.GetPtrToGpuRowPtrs(), Factored_Pages.GetPtrToGpuColInd(),
    diag_ptrs, graph);
    // PrintGraph(graph, Factored_Pages);

    // Factorization: one thread per nonzero per page, grid clamped to the
    // launch limit (the kernel grid-strides over the remainder).
    int grid_dim = ceil( (double)nnz * (double)npages /(double)THREADS_PER_BLOCK ) ;
    if(grid_dim > max_possible_grid_dim)
    {
        grid_dim = max_possible_grid_dim;
    }
    dim3 grid_1( grid_dim );
    bool* ready = nullptr;
    hipMalloc((void**)&ready , npages * nnz * sizeof(bool) );
    hipMemset( ready , false, npages * nnz * sizeof(bool) );
    hipLaunchKernelGGL(( compute_ilu_0_approach2_legacy_kernel) , dim3(grid_1) , dim3(block) , 0, 0, npages, nrows, nnz,
    Factored_Pages.GetPtrToGpuValues(), graph, ready);

    // Release the device-heap nodes, then the pointer/flag arrays.
    dim3 grid_2(ceil( (double)nnz/(double)THREADS_PER_BLOCK ));
    hipLaunchKernelGGL(( deallocate_graph_mem), dim3(grid_2), dim3(block) , 0, 0, graph, nnz);
    hipFree(GRAPH);
    hipFree(ready);
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------//
//APPROACH-3
// Build, on the host, the flattened dependency lists for ILU(0) approach 3.
// For the nonzero at CSR location `loc` (row r, column c):
//   - a (l_loc, u_loc) pair is appended for every L(r,k)*U(k,c) product with
//     k <= min(r,c)-1 that exists in the sparsity pattern;
//   - if r > c, the location of the diagonal U(c,c) is appended last.
// On return, nz_ptrs[loc]..nz_ptrs[loc+1] delimit loc's entries in
// `dependencies`, and diag_ptrs[r] holds the CSR location of diagonal r.
void create_dependency_graph(PagedCSRMatrices & Factored_Pages, std::vector<int> & dependencies , std::vector<int> & nz_ptrs, std::vector<int> & diag_ptrs)
{
    const int nrows = Factored_Pages.GetNumRows();
    const int* const row_ptrs = Factored_Pages.GetPtrToCpuRowPtrs();
    const int* const col_idxs = Factored_Pages.GetPtrToCpuColInd();

    nz_ptrs[0] = 0;
    for(int r = 0; r < nrows ; r++ )
    {
        const int row_begin = row_ptrs[r];
        const int row_finish = row_ptrs[r + 1];
        for(int loc = row_begin; loc < row_finish; loc++)
        {
            int added = 0; // dependencies recorded for this nonzero
            const int c = col_idxs[loc];
            if(r == c)
            {
                diag_ptrs[r] = loc; // remember where row r's diagonal lives
            }
            const int k_max = ::min(r , c) - 1;
            // Entries strictly left of loc are the L(r,k) candidates
            // (columns are sorted, so nothing right of loc qualifies).
            for(int l_loc = row_begin; l_loc < loc; l_loc++)
            {
                const int k = col_idxs[l_loc];
                if(k > k_max)
                {
                    continue;
                }
                // Look for the matching U entry at (k, c).
                for(int u_loc = row_ptrs[k]; u_loc < row_ptrs[k + 1]; u_loc++)
                {
                    if(col_idxs[u_loc] == c)
                    {
                        dependencies.push_back(l_loc);
                        dependencies.push_back(u_loc);
                        added += 2;
                    }
                }
            }
            if(r > c)
            {
                // Strict-lower entry: depends on the diagonal of column c,
                // already recorded while processing the earlier row c.
                dependencies.push_back(diag_ptrs[c]);
                added++;
            }
            nz_ptrs[loc + 1] = nz_ptrs[loc] + added;
        }
    }
}
// ILU(0), approach 3: like approach 2, but dependencies are flattened into
// the arrays `dependencies` (pairs of (l_loc, u_loc), optionally followed by
// one diagonal location) delimited per nonzero by `nz_ptrs`.
// An odd dependency count means the last entry is the diagonal divisor.
// One thread per nonzero per page, busy-waiting on the `ready` flags.
// Requires at least nnz resident threads (distinct owners per element of a
// page), otherwise the spin-wait can livelock.
__global__ void compute_ilu_0_approach3_legacy_kernel(const int npages, const int nrows, const int nnz, volatile double* const values, const int dep_length,
const int* const dependencies, const int* const nz_ptrs, volatile bool* const ready)
{
const int gid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = gridDim.x * blockDim.x;
if(num_threads < nnz)
{
printf("\n\n Sorry, we need atleast %d number of threads for this kernel to work\n\n", nnz); //This guarantees for nnz elements belonging to one small matrix, we have different threads(different gids) doing the compuatation
return;
}
for(int id = gid; id < nnz * npages ; id += gridDim.x * blockDim.x) //uncoalesced accesses when a thread accesses dependencies array
{
const int page_id = id/nnz;
const int loc = id % nnz;
const int start = nz_ptrs[loc];
const int end = nz_ptrs[loc + 1] - 1; // index of the last dependency
// Odd count => trailing diagonal entry (strict-lower element).
const bool has_diag_dependency = (end + 1 - start)% 2 == 1 ? true : false;
int current = start; // cursor into this element's dependency slice
double diag_value = 1; // stays 1 when there is no diagonal dependency
double u_val;
double l_val;
double sum = 0; // accumulates sum of L*U products
bool u_flag = false; // operand-captured flags for the current pair
bool l_flag = false;
bool finished = false;
while(!finished)
{
if( (has_diag_dependency == true && current <= end - 2) || (has_diag_dependency == false && current <= end - 1) )
{
// Current pair: L location first, its U partner next.
const int l_loc = dependencies[current] + page_id * nnz;
const int u_loc = dependencies[current + 1] + page_id * nnz;
// if(ready[l_loc] == true && ready[u_loc] == true)
// {
// l_val = values[l_loc];
// u_val = values[u_loc];
// sum += l_val * u_val;
// current += 2;
// }
// Capture each operand as soon as it becomes ready (independently).
if(l_flag == false && ready[l_loc] == true)
{
l_val = values[l_loc];
l_flag = true;
}
if(u_flag == false && ready[u_loc] == true)
{
u_val = values[u_loc];
u_flag = true;
}
if(l_flag == true && u_flag == true)
{
sum += l_val * u_val;
current += 2;
l_flag = false;
u_flag = false;
}
}
if(has_diag_dependency == true && current == end )
{
// Last dependency: the diagonal divisor.
const int diag_loc = dependencies[end] + page_id * nnz;
if(ready[diag_loc] == true)
{
diag_value = values[diag_loc];
assert(diag_value != 0);
current++;
}
}
if(current == end + 1)
{
// All dependencies consumed: finalize, fence, then publish readiness.
values[loc + page_id * nnz] = (values[loc + page_id * nnz] - sum)/diag_value;
__threadfence();
ready[loc + page_id * nnz] = true;
finished = true;
//printf("\n Now loc: %d is ready", loc);
}
}
}
}
// Host-side debug dump of the flattened dependency graph (approach 3):
// prints, for each nonzero location, its slice of the dependencies array.
void Print_Dep_Graph(const std::vector<int> & dependencies_cpu , const std::vector<int> & nz_ptrs_cpu)
{
    for(int loc = 0; loc < nz_ptrs_cpu.size() - 1 ; loc++)
    {
        printf("\n\n Dependencies for element at loc = %d are: ", loc);
        for(int idx = nz_ptrs_cpu[loc]; idx < nz_ptrs_cpu[loc + 1]; idx++)
        {
            printf("\n %d ", dependencies_cpu[idx]);
        }
    }
}
// Host driver for ILU(0) approach 3:
//   1. copy the (shared) sparsity pattern to the CPU and build the flattened
//      dependency graph there,
//   2. upload the graph (and the diagonal locations) to the device,
//   3. run the busy-wait factorization kernel over all pages.
// `diag_ptrs` is a device buffer of size nrows, filled here as a side effect.
void ComputeILU0Approach3(PagedCSRMatrices & Factored_Pages , int* const diag_ptrs)
{
    //Here the representation of dependency graph is a bit different
    std::vector<int> dependencies_cpu;
    std::vector<int > nz_ptrs_cpu(Factored_Pages.GetNumNz() + 1);
    std::vector<int > diag_ptrs_cpu( Factored_Pages.GetNumRows());
    Factored_Pages.AllocateMemory(LOCATION::CPU);
    Factored_Pages.CopyFromGpuToCpu();
    create_dependency_graph(Factored_Pages, dependencies_cpu, nz_ptrs_cpu, diag_ptrs_cpu);

    // Upload the flattened graph and the diagonal locations.
    int* dependencies = nullptr;
    int* nz_ptrs = nullptr;
    hipMalloc((void**)&dependencies , dependencies_cpu.size() * sizeof(int));
    hipMemcpy(dependencies , dependencies_cpu.data() , dependencies_cpu.size() * sizeof(int) , hipMemcpyHostToDevice );
    hipMalloc((void**)&nz_ptrs , nz_ptrs_cpu.size() * sizeof(int) );
    hipMemcpy( nz_ptrs , nz_ptrs_cpu.data() , nz_ptrs_cpu.size() * sizeof(int) , hipMemcpyHostToDevice );
    hipMemcpy( diag_ptrs , diag_ptrs_cpu.data() , diag_ptrs_cpu.size() * sizeof(int) , hipMemcpyHostToDevice);
    //Print_Dep_Graph(dependencies_cpu , nz_ptrs_cpu);

    // One thread per nonzero per page, grid clamped to the launch limit
    // (the kernel grid-strides over the remainder).
    dim3 block(THREADS_PER_BLOCK);
    int grid_dim = ceil( (double)Factored_Pages.GetNumNz() * (double)Factored_Pages.GetNumPages() /(double)THREADS_PER_BLOCK ) ;
    if(grid_dim > max_possible_grid_dim)
    {
        grid_dim = max_possible_grid_dim;
    }
    dim3 grid( grid_dim );
    bool* ready = nullptr;
    hipMalloc((void**)&ready , Factored_Pages.GetNumPages() * Factored_Pages.GetNumNz()* sizeof(bool) );
    hipMemset( ready , false, Factored_Pages.GetNumPages() * Factored_Pages.GetNumNz()* sizeof(bool) );
    hipLaunchKernelGGL(( compute_ilu_0_approach3_legacy_kernel) , dim3(grid) , dim3(block) , 0, 0, Factored_Pages.GetNumPages(), Factored_Pages.GetNumRows(), Factored_Pages.GetNumNz(),
    Factored_Pages.GetPtrToGpuValues(), dependencies_cpu.size() ,dependencies, nz_ptrs , ready);
    hipFree(dependencies);
    hipFree(nz_ptrs);
    hipFree(ready);
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------//
//APPROACH-4
// Build, on the host, the dependency list representation used by approach 4.
// Walking pivot rows top-down, for every row below the pivot:
//   - the entry in the pivot column yields a (loc, diag_loc) division pair and
//     starts a new "diagonal" group (its offset is pushed to diag_starters);
//   - entries right of the pivot column yield (loc, loc_col_ele) update pairs.
// `new_era` records, per pivot row that produced work, the first index into
// diag_starters belonging to that row. diag_ptrs is (re)built with the CSR
// location of every diagonal entry.
void create_dependency_list(PagedCSRMatrices & Factored_Pages , std::vector<int> & dependencies, std::vector<int> & diag_starters, std::vector<int> & new_era, std::vector<int> & diag_ptrs)
{
    const int nrows = Factored_Pages.GetNumRows();
    const int* const row_ptrs = Factored_Pages.GetPtrToCpuRowPtrs();
    const int* const col_idxs = Factored_Pages.GetPtrToCpuColInd();
    // Fix: temp_arr_curr_row below is a fixed-size stack buffer indexed up to
    // nrows-1; without this guard a larger matrix silently smashes the stack.
    assert(nrows <= MAX_NUM_ROWS);
    diag_ptrs = std::vector<int>(nrows);
    int d_start_ptr_to_dependencies_arr = 0; // running length of `dependencies`
    int era_start_ptr_to_diag_starters = 0;  // running length of `diag_starters`
    new_era.push_back(0);
    // Dense map column -> CSR location for the current pivot row (-1 = absent).
    // Hoisted out of the loop: it is fully reinitialized per pivot row.
    int temp_arr_curr_row[MAX_NUM_ROWS];
    for(int current_row = 0; current_row < nrows ; current_row++)
    {
        for(int i = 0; i < nrows; i++)
        {
            temp_arr_curr_row[i] = -1;
        }
        for(int j = row_ptrs[current_row]; j < row_ptrs[current_row + 1]; j++)
        {
            if(col_idxs[j] == current_row)
            {
                diag_ptrs[current_row] = j;
            }
            temp_arr_curr_row[col_idxs[j]] = j;
        }
        for(int row_below = current_row + 1; row_below < nrows; row_below++)
        {
            const int start = row_ptrs[row_below];
            const int end = row_ptrs[row_below + 1];
            int loc_row_ele = -1; // location of this row's entry in the pivot column
            for(int loc = start; loc < end; loc++)
            {
                int col = col_idxs[loc];
                if(col < current_row)
                {
                    continue;
                }
                else if(col == current_row)
                {
                    // Pivot-column entry: single dependency — division by the
                    // pivot row's diagonal element.
                    const int loc_diag_ele = temp_arr_curr_row[current_row];
                    dependencies.push_back(loc);
                    dependencies.push_back(loc_diag_ele);
                    diag_starters.push_back(d_start_ptr_to_dependencies_arr);
                    d_start_ptr_to_dependencies_arr += 2;
                    era_start_ptr_to_diag_starters++;
                    loc_row_ele = loc;
                }
                else
                {
                    if(loc_row_ele == -1) // pivot-column entry missing: row is untouched by this pivot
                    {
                        break;
                    }
                    else
                    {
                        // Update pair: this entry depends on the pivot row's
                        // element in the same column (if present).
                        const int loc_col_ele = temp_arr_curr_row[col];
                        if(loc_col_ele == -1)
                        {
                            continue;
                        }
                        dependencies.push_back(loc);
                        dependencies.push_back(loc_col_ele);
                        d_start_ptr_to_dependencies_arr += 2;
                    }
                }
            }
        }
        if(new_era[new_era.size() - 1] < era_start_ptr_to_diag_starters) // equal => this pivot row produced no dependencies at all
        {
            new_era.push_back(era_start_ptr_to_diag_starters);
        }
    }
    // Sentinel: total length of the dependencies array.
    diag_starters.push_back(d_start_ptr_to_dependencies_arr);
}
//Debug helper: prints the approach-4 dependency structures to stdout.
//Takes the vectors by const reference (the original non-const refs were never
//written to) and iterates with range-for to avoid signed/unsigned comparisons.
void Print_Dep_List(const std::vector<int> & dependencies_cpu, const std::vector<int> & diag_starters_cpu, const std::vector<int> & new_era_cpu)
{
std::cout << "\n\n dependencies: " << std::endl;
for(const int dep : dependencies_cpu)
{
std::cout << dep << " ";
}
std::cout << "\n\n diag starters(ptrs to dependencies array): " << std::endl;
for(const int ds : diag_starters_cpu)
{
std::cout << ds << " ";
}
std::cout << "\n\n new era(ptrs to diag starters array): " << std::endl;
for(const int era : new_era_cpu)
{
std::cout << era << " ";
}
}
//Applies all row updates of one "era" of the approach-4 schedule. Updates inside an
//era are mutually independent; ordering across eras is enforced by the caller's
//__syncthreads() between calls. One warp handles one row update (one diag starter);
//warps of the block stride over the era's work items.
//values        - value array of ONE page (caller already offset by page_id * nnz)
//dependencies  - flat (target, partner) location pairs; first pair is (target, diagonal)
//diag_starters - offsets into `dependencies` delimiting each row update
//era_array     - offsets into `diag_starters` delimiting each era
__device__ void modify_elements_in_an_era(double* const values, const int* const dependencies, const int* const diag_starters,
const int* const era_array ,const int era_idx )
{
int start_idx_in_diag_starters = era_array[era_idx];
int end_idx_in_diag_starters = era_array[era_idx + 1];
const int warp_id = threadIdx.x / WARP_SIZE;
const int id_within_warp = threadIdx.x % WARP_SIZE;
const int total_num_warps_in_block = blockDim.x / WARP_SIZE;
for(int idx_in_diag_starters = start_idx_in_diag_starters + warp_id; idx_in_diag_starters < end_idx_in_diag_starters ; idx_in_diag_starters += total_num_warps_in_block)
{
int start_idx_in_dep_arr = diag_starters[idx_in_diag_starters];
int end_idx_in_dep_arr = diag_starters[idx_in_diag_starters + 1];
//Multiplier L(i,k); written by lane 0 in the first chunk, broadcast to the warp below.
double row_ele;
//Lanes cooperatively read consecutive dependency entries, WARP_SIZE at a time.
for(int i = start_idx_in_dep_arr + id_within_warp; i < end_idx_in_dep_arr; i += WARP_SIZE)
{
const int loc = dependencies[i];
//NOTE(review): __activemask() returns whichever lanes happen to be converged here;
//an explicitly computed participation mask would be safer under Volta+ independent
//thread scheduling -- confirm this matches the intended lane set.
auto mask = __activemask();
//loc_1 = the dependency entry held by the next lane (the partner of a pair).
int loc_1 = __shfl_sync(mask, loc, id_within_warp + 1); //For id_within_warp = 31, the result is undefined! But that is not used, so no worries!
if(i == start_idx_in_dep_arr)
{
//First pair of a row update: divide the target by the diagonal of the pivot row.
double diag_val = values[loc_1];
assert(diag_val != 0);
values[loc ] /= diag_val;
row_ele = values[loc ];
}
__syncwarp(mask);
row_ele = __shfl_sync( mask , row_ele, 0);
//Even lanes own the "target" entry of each subsequent pair (odd lanes hold the
//partner/multiplicand, fetched above via the shuffle). Skip the first pair (diagonal).
if(i > start_idx_in_dep_arr + 1 && id_within_warp % 2 == 0 && id_within_warp <= 30)
{
//loc_m1 = dependencies[i + 1];
values[loc] -= values[loc_1] * row_ele;
}
}
// if(id_within_warp == 0)
// {
// double diag_val = values[dependencies[start_idx_in_dep_arr + 1] ];
// assert(diag_val != 0);
// values[dependencies[start_idx_in_dep_arr] ] /= diag_val;
// row_ele = values[dependencies[start_idx_in_dep_arr]];
// for(int i = start_idx_in_dep_arr + 2 ; i < end_idx_in_dep_arr - 1; i += 2)
// {
// values[dependencies[i] ] -= values[dependencies[i + 1]] * row_ele;
// }
// }
}
}
//ILU(0) approach 4: one thread block factorizes one page (small matrix) of the batch.
//Eras must run strictly in order; all threads of the block synchronize after each era.
//(Staging `values` or `dependencies` in shared memory was tried and removed: the
//dependency array is too large and staging values measured slower.)
//`dep_length` is currently unused but kept for interface stability.
__global__ void compute_ilu_0_approach4_kernel(const int npages, const int nnz, double* const values, const int* const dependencies, const int* const diag_starters,
const int* const era_array , const int era_arr_length , const int dep_length)
{
const int page_id = blockIdx.x;
if(page_id >= npages)
{
return; //uniform per block, so the __syncthreads below stays safe
}
double* const page_values = values + page_id * nnz;
const int num_eras = era_arr_length - 1;
for(int era = 0; era < num_eras; era++)
{
modify_elements_in_an_era(page_values, dependencies, diag_starters, era_array, era);
__syncthreads(); //all updates of this era must land before the next era starts
}
}
//Host driver for ILU(0) approach 4: builds the era-based dependency schedule on the
//CPU, uploads it, launches one thread block per page, then releases the buffers.
//`diag_ptrs` (device memory, one int per row) is filled with the diagonal locations.
//NOTE(review): unlike ComputeILU0Approach3, this path does not call
//AllocateMemory(LOCATION::CPU) before CopyFromGpuToCpu -- confirm the copy allocates.
void ComputeILU0Approach4(PagedCSRMatrices & Factored_Pages, int* const diag_ptrs )
{
std::vector<int> host_dependencies;
std::vector<int> host_diag_starters;
std::vector<int> host_new_era;
std::vector<int> host_diag_ptrs;
Factored_Pages.CopyFromGpuToCpu();
create_dependency_list(Factored_Pages, host_dependencies, host_diag_starters, host_new_era, host_diag_ptrs);
//Allocates a device buffer sized to `src` and copies the host data into it.
auto upload = [](const std::vector<int> & src) -> int* {
int* dst = nullptr;
hipMalloc((void**)&dst, src.size() * sizeof(int));
hipMemcpy(dst, src.data(), src.size() * sizeof(int), hipMemcpyHostToDevice);
return dst;
};
int* dependencies = upload(host_dependencies);
int* diag_starters = upload(host_diag_starters);
int* new_era = upload(host_new_era);
hipMemcpy(diag_ptrs, host_diag_ptrs.data(), host_diag_ptrs.size() * sizeof(int), hipMemcpyHostToDevice);
dim3 block(THREADS_PER_BLOCK);
dim3 grid(Factored_Pages.GetNumPages()); //one thread block per small matrix in batch
hipLaunchKernelGGL(( compute_ilu_0_approach4_kernel), dim3(grid), dim3(block), 0 , 0, Factored_Pages.GetNumPages(), Factored_Pages.GetNumNz(), Factored_Pages.GetPtrToGpuValues(),
dependencies, diag_starters, new_era, host_new_era.size(), host_dependencies.size());
hipFree(dependencies);
hipFree(diag_starters);
hipFree(new_era);
}
//-------------------------------------------------------------------------------------------------------------------------------------------------------------------//
} //unnamed namespace
//-------------------------------- calling function for all small pieces ----------------------------------------------------------------------------------------
void ILU_0_Factorization_Gpu(const PagedCSRMatrices & A_pages , PagedCSRMatrices & L_pages, PagedCSRMatrices & U_pages, const int approach_num)
{
//hipProfilerStart();
// std::cout << "\n\nORIGINAL MATRIX: " << std::endl;
// PrintPagedCSRMatrix(A_pages);
//first assert matrix is square
assert(A_pages.GetNumCols() == A_pages.GetNumRows());
PagedCSRMatrices Factored_Pages;
//We would want to use copy assignment here... or even a copy constructor. implement it later...
//copy A to F
Copy_Gpu_PagedCSRMatrices(A_pages , Factored_Pages);
//SortCSRMatrix(Factored_Pages); if unsorted, pls sort the paged matrix befoe proceeding. (All these matrices are already sorted.(sorted while storing))
int* diag_info = nullptr;
hipMalloc((void**)&diag_info, sizeof(int) * Factored_Pages.GetNumRows());
int num_missing_diagonal_eles = Count_Missing_Diagonal_Elements(Factored_Pages , diag_info);
if(num_missing_diagonal_eles > 0)
{
PagedCSRMatrices New_Factored_Pages;
Add_Missing_Diagonal_Elements(New_Factored_Pages, Factored_Pages, diag_info , num_missing_diagonal_eles);
Copy_Gpu_PagedCSRMatrices(New_Factored_Pages , Factored_Pages); //TODO: avoid an extra copy here
}
// std::cout << "\n\nMATRIX AFTER ADDITION OF DIAGONAL ELEMENTS: " << std::endl;
// PrintPagedCSRMatrix(Factored_Pages);
//continue to use Factored_pages here...
hipProfilerStart();
if(approach_num == 1)
{
Find_locations_of_diagonal_elements(Factored_Pages, diag_info);
//std::cout << "\n\nLocn of diagonal elements:" << std::endl;
//print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
//hipDeviceSynchronize();
ComputeILU0Approach1(Factored_Pages , diag_info);
}
else if(approach_num == 2)
{
Find_locations_of_diagonal_elements(Factored_Pages, diag_info);
//std::cout << "\n\nLocn of diagonal elements:" << std::endl;
//print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
//hipDeviceSynchronize();
ComputeILU0Approach2(Factored_Pages , diag_info);
//ComputeILU0Approach2_SingleMatrix(Factored_Pages, diag_info);
}
else if(approach_num == 3)
{
ComputeILU0Approach3(Factored_Pages , diag_info);
// std::cout << "\n\nLocn of diagonal elements:" << std::endl;
// print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
}
else if(approach_num == 4)
{
ComputeILU0Approach4(Factored_Pages , diag_info);
// std::cout << "\n\nLocn of diagonal elements:" << std::endl;
// print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
}
else
{
printf("\n NOT IMPLEMENTED\n");
}
hipProfilerStop();
// std::cout << "\n\nFACTORIZED MATRIX(ILU(0)): " << std::endl;
// PrintPagedCSRMatrix(Factored_Pages);
Update_row_pointers_L_and_U_and_Allocate_Memory(Factored_Pages , diag_info, L_pages, U_pages);
Fill_L_and_U_col_idxs_and_vals(Factored_Pages, L_pages, U_pages);
// std::cout << "\n\nMATRIX L: " << std::endl;
// PrintPagedCSRMatrix(L_pages);
// std::cout << "\n\nMATRIX U: " << std::endl;
// PrintPagedCSRMatrix(U_pages);
hipFree(diag_info);
hipDeviceSynchronize(); //for timing purpose
//hipProfilerStop();
}
//TODO:
//Parallelize Prefix Sum
| bfc621a63b1e5e33c5fd9a38dfb1f8151f1c1f43.cu | #include<iostream>
#include<stdio.h>
#include<vector>
#include<array>
#include<cassert>
#include<chrono>
#include<cmath>
#include "cuda_profiler_api.h"
#include "matrix.h"
#include "ReadWriteData.h"
#include "header.h"
#include "factorization.h"
#include "ILU_0.h"
namespace{
const int max_possible_grid_dim = 65536;
//---------------------------------------------------------------------------------------------------------------------------------------------------------
//APPROACH-1
//Gathers the stored entries of row curr_row_index at columns >= curr_row_index into a
//dense scratch array (current_row_elements_arr[col] = value, 0 where no entry exists).
//Must be called by every thread of the block: it contains a __syncthreads().
__device__ void fill_partial_current_row_array(const int nrows, const int curr_row_index, double* const current_row_elements_arr, const int* const row_ptrs,
const int* const col_idxs, const double* const page_values, const int* const diag_ptrs)
{
const int first_loc = diag_ptrs[curr_row_index]; //diagonal marks the first location of interest
const int last_loc = row_ptrs[curr_row_index + 1];
//Zero the dense slots this row can touch (columns curr_row_index .. nrows-1).
for(int col = curr_row_index + threadIdx.x; col < nrows; col += blockDim.x)
{
current_row_elements_arr[col] = 0;
}
__syncthreads(); //all slots must be zeroed before they are scattered into
//Scatter the stored nonzeros into their dense column positions.
for(int loc = first_loc + threadIdx.x; loc < last_loc; loc += blockDim.x)
{
current_row_elements_arr[col_idxs[loc]] = page_values[loc];
}
}
//Eliminates column curr_row_index from every row below it: for each row i below with
//an entry at (i, curr_row_index), stores the multiplier L(i,k) = F(i,k)/F(k,k) in place
//and subtracts L(i,k) * F(k,j) from every F(i,j) with j > k. One warp per row below.
//column_elements_array_for_current_row is the dense gather produced by
//fill_partial_current_row_array. Must be called by all threads of the block
//(contains __syncthreads()).
__device__ void modify_rows_below_curr_row(const int nrows, const int curr_row_index,const double* const column_elements_array_for_current_row, const int* const row_ptrs,
const int* const col_idxs, double* const page_values, const int* const diag_ptrs)
{
const int warp_id = threadIdx.x / WARP_SIZE;
const int id_within_warp = threadIdx.x % WARP_SIZE;
const int total_num_warps_in_block = blockDim.x / WARP_SIZE;
//Per-row multiplier L(i,k), shared so all lanes of the owning warp can read it.
__shared__ double row_ele_arr[MAX_NUM_ROWS];
//initialize it with zeroes
for(int i = threadIdx.x + curr_row_index + 1; i < nrows ; i += blockDim.x)
{
row_ele_arr[i] = 0;
}
__syncthreads();
//one warp per row
for(int row_below_index = warp_id + curr_row_index + 1; row_below_index < nrows ; row_below_index += total_num_warps_in_block )
{
for(int i = id_within_warp + row_ptrs[row_below_index] ; i < row_ptrs[row_below_index + 1]; i += WARP_SIZE)
{
const int col_index = col_idxs[i];
if(col_index == curr_row_index)
{
//This lane owns the pivot-column entry: compute and store the multiplier.
double diag_ele = page_values[diag_ptrs[curr_row_index]];
assert(diag_ele != 0);
double row_ele = page_values[i] / diag_ele;
row_ele_arr[row_below_index] = row_ele;
page_values[i] = row_ele;
}
//Warp barrier so lanes updating columns > curr_row_index see the multiplier
//written above (columns are sorted, so the writer is in this or an earlier chunk).
__syncwarp(__activemask()); //else a warning
if(col_index > curr_row_index)
{
double col_ele = column_elements_array_for_current_row[col_index];
page_values[i] -= row_ele_arr[row_below_index] * col_ele;
}
}
}
}
//ILU(0) approach 1: block-synchronous right-looking factorization. Blocks grid-stride
//over the pages of the batch; within a page, rows are eliminated one after another
//with a block-wide barrier between the gather and update phases.
//Note: staging row_ptrs/col_idxs/values in shared memory was tried and measured
//slower, so all CSR data is read from global memory; only the dense gather of the
//current row lives in shared memory.
__global__ void compute_ilu_0_approach1_kernel(const int npages, const int nrows, const int nnz, const int* const row_ptrs, const int* const col_idxs,
double* const values, const int* const diag_ptrs)
{
__shared__ double current_row_elements_arr[MAX_NUM_ROWS];
for(int page_id = blockIdx.x ; page_id < npages; page_id += gridDim.x)
{
double* const page_values = values + nnz * page_id;
for(int curr_row = 0; curr_row < nrows; curr_row++)
{
//Phase 1: gather row curr_row (columns >= curr_row) into dense form.
fill_partial_current_row_array(nrows, curr_row, current_row_elements_arr, row_ptrs, col_idxs, page_values, diag_ptrs);
__syncthreads();
//Phase 2: eliminate column curr_row from all rows below.
modify_rows_below_curr_row(nrows, curr_row, current_row_elements_arr, row_ptrs, col_idxs, page_values, diag_ptrs);
__syncthreads();
}
}
}
//Launches the approach-1 kernel: one thread block per small matrix in the batch.
//`diag_ptrs` must already hold the diagonal locations (device memory).
void ComputeILU0Approach1(PagedCSRMatrices & Factored_Pages , const int* const diag_ptrs)
{
const dim3 block(THREADS_PER_BLOCK);
const dim3 grid(Factored_Pages.GetNumPages());
compute_ilu_0_approach1_kernel<<< grid, block >>>(
Factored_Pages.GetNumPages(), Factored_Pages.GetNumRows(), Factored_Pages.GetNumNz(),
Factored_Pages.GetPtrToGpuRowPtrs(), Factored_Pages.GetPtrToGpuColInd(),
Factored_Pages.GetPtrToGpuValues(), diag_ptrs);
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------//
//APPROACH-2
//Asynchronous (busy-wait) ILU(0) for a SINGLE matrix: one thread per nonzero.
//Each thread spins until every value its formula depends on has been published
//(flagged in `ready`), then computes F(i,j) = (F(i,j) - sum_k L(i,k)*U(k,j)) / U(j,j)
//(the divide applies only when i > j) and publishes its own value.
//`values` and `ready` are volatile so repeated polling re-reads global memory;
//__threadfence() orders the value write before the ready-flag write.
//Requires at least nnz threads in the grid (no grid-stride loop here).
__global__ void single_matrix_case_legacy_kernel(const int nrows, const int nnz, const int* const row_ptrs, const int* const col_idxs, volatile double* const values,
const int* const diag_ptrs, volatile bool* const ready)
{
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
const int loc = gid;
if(loc >= nnz)
return;
//Note: I am not using one warp per row as we need a unique thread for each element.
const int col_index = col_idxs[loc];
int row_index;
//find the row_index of this element (linear scan over row_ptrs)
for(int a = 0; a < nrows + 1; a++) //uncoalesced memory accesses
{
if(row_ptrs[a] > loc)
{
row_index = a - 1;
break;
}
}
// for(int a = 0; a < nrows + 1; a++)
// {
// if(row_ptrs[a] >= loc + 1)
// {
// row = a- 1;
// }
// } //eqvt. to the code above
//F_i_j is the element of interest here, L_i_k,U_k_j pairs for k = 0 to min(i,j)-1 and diagonal U_j_j if i > j
double sum = 0;
double diag_val = 1;
bool finished = false;
const int k_max = min(row_index , col_index) - 1;
//Cursor over this row's entries that may act as L(i,k) factors.
int maybe_l_loc = row_ptrs[row_index];
//Upper-triangular elements need no diagonal division, so their flag starts true.
bool diag_flag = row_index > col_index ? false : true;
bool sum_flag = false;
int current_corr_u_loc;
bool current_corr_u_flag = false;
//all memory accesses are uncoalesced here
// int tmp_counter = 0;
//Spin until both the L*U sum and (if needed) the diagonal are available.
while(!finished )
{
//tmp_counter++;
// printf("\n line: %d, thread: %d, counter: %d , diag_flag: %d , sum_flag: %d , maybe_l_loc : %d , row_ptrs[row_index + 1]: %d \n", __LINE__ , threadIdx.x, tmp_counter, diag_flag, sum_flag, maybe_l_loc, row_ptrs[row_index + 1]);
if(maybe_l_loc < row_ptrs[row_index + 1])
{
const int col = col_idxs[maybe_l_loc]; ////uncoalesced memory accesses when accessing col_idxs[]
if(col > k_max)
{
//Past the last admissible k: this entry cannot be an L factor; skip it.
maybe_l_loc++;
// printf("\n line: %d, thread: %d, counter: %d , diag_flag: %d , sum_flag: %d , maybe_l_loc : %d , row_ptrs[row_index + 1]: %d \n", __LINE__ , threadIdx.x, tmp_counter, diag_flag, sum_flag, maybe_l_loc, row_ptrs[row_index + 1]);
continue;
}
if(current_corr_u_flag == true)
{
//Matching U(k, col_index) already located; wait for both operands to be ready.
if(ready[maybe_l_loc] == true && ready[current_corr_u_loc] == true)
{
sum += values[maybe_l_loc] * values[current_corr_u_loc];
maybe_l_loc++;
current_corr_u_flag = false;
}
}
else
{
//Search row k for the U entry in our column.
int maybe_u_loc = row_ptrs[col];
for(; maybe_u_loc < row_ptrs[col + 1]; maybe_u_loc++) //uncoalesced memory accesses when accessing col_idxs[]
{
if(col_idxs[maybe_u_loc] == col_index)
{
current_corr_u_flag = true;
current_corr_u_loc = maybe_u_loc;
if(ready[maybe_l_loc] == true && ready[current_corr_u_loc] == true)
{
sum += values[maybe_l_loc] * values[current_corr_u_loc];
maybe_l_loc++;
current_corr_u_flag = false;
}
break;
}
}
if(maybe_u_loc == row_ptrs[col + 1]) //that means no corr. u entry is there
{
maybe_l_loc++;
}
}
}
else
{
//Cursor ran off the end of the row: the L*U sum is complete.
sum_flag = true;
}
if(diag_flag == false)
{
//Strictly-lower element: also wait for the pivot diagonal U(j,j).
const int diag_loc = diag_ptrs[col_index];
if(ready[diag_loc] == true)
{
diag_val = values[diag_loc];
diag_flag = true;
}
}
// printf("\n line: %d, thread: %d, counter: %d , diag_flag: %d , sum_flag: %d , maybe_l_loc : %d , row_ptrs[row_index + 1]: %d \n", __LINE__ , threadIdx.x, tmp_counter, diag_flag, sum_flag, maybe_l_loc, row_ptrs[row_index + 1]);
if(diag_flag == true && sum_flag == true )
{
values[loc] = (values[loc] - sum)/diag_val;
__threadfence(); //publish the value before raising the ready flag
ready[loc] = true;
finished = true;
// printf("\n line: %d, thread: %d, counter: %d , now loc: %d is ready!\n", __LINE__ , threadIdx.x, tmp_counter, loc);
}
}
}
//Launches the legacy single-matrix busy-wait kernel (one thread per nonzero) on the
//first page of the batch. `ready` holds one "value published" flag per nonzero.
void ComputeILU0Approach2_SingleMatrix(PagedCSRMatrices & Factored_Pages , const int* const diag_ptrs)
{
const int nnz = Factored_Pages.GetNumNz();
const dim3 block(THREADS_PER_BLOCK);
const dim3 grid((int)ceil((double)nnz / (double)THREADS_PER_BLOCK));
bool* ready = nullptr;
cudaMalloc((void**)&ready, nnz * sizeof(bool));
cudaMemset(ready, false, nnz * sizeof(bool)); //all values start unpublished
single_matrix_case_legacy_kernel<<< grid , block >>>(Factored_Pages.GetNumRows(), nnz, Factored_Pages.GetPtrToGpuRowPtrs(),
Factored_Pages.GetPtrToGpuColInd(), Factored_Pages.GetPtrToGpuValues(), diag_ptrs, ready);
cudaDeviceSynchronize(); //kernel must finish before the flag array is freed
cudaFree(ready);
}
//Node of a per-nonzero dependency list (device-heap allocated, see
//create_dependency_graph_for_ilu0_computation). For each nonzero, its list holds the
//value-array locations that must be computed before the nonzero itself can be.
struct dependency{
int location; //index into the (per page) values array this entry depends on
bool is_diagonal; //true: single divide-by-diagonal dependency; false: one half of an L*U product pair
struct dependency* next; //next node, or nullptr at the end of the list
};
typedef struct dependency dependency;
//Pushes new_dependency onto the front of the singly linked list rooted at *graph_element.
__device__ void insert_dependency(dependency** graph_element , dependency* new_dependency)
{
new_dependency->next = *graph_element;
*graph_element = new_dependency;
}
//Builds, on the device, the per-nonzero dependency lists for approach 2: for each
//nonzero (i,j), every (L(i,k), U(k,j)) location pair with k <= min(i,j)-1, plus the
//diagonal location U(j,j) when i > j. Nodes come from the in-kernel device heap
//(malloc), so they must later be released by deallocate_graph_mem. One warp per row;
//lanes stride over the row's nonzeros. The CSR structure is shared by all pages, so
//one graph serves the whole batch.
__global__ void create_dependency_graph_for_ilu0_computation(const int nrows, const int* const row_ptrs, const int* const col_idxs,
const int* const diag_ptrs, dependency ** graph)
{
//we use one warp per row
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
const int warp_id = gid/WARP_SIZE;
const int id_within_warp = gid % WARP_SIZE;
const int total_num_warps = (gridDim.x * blockDim.x)/WARP_SIZE;
for(int row_index = warp_id ; row_index < nrows; row_index += total_num_warps)
{
const int row_start = row_ptrs[row_index];
const int row_end = row_ptrs[row_index + 1];
for(int loc = row_start + id_within_warp; loc < row_end ; loc += WARP_SIZE)
{
graph[loc] = nullptr; //empty list until dependencies are pushed below
const int col_index = col_idxs[loc];
const int k_max = min(row_index, col_index) - 1;
//the thread concerned for the particular element at: row_index, col_index does all this.--> but this is inefficent as that thread first searches for possible L_val, for each L_val,
//again searches for U_val. (Lot of uncoalesced accesses)
for(int maybe_l_loc = row_start ; maybe_l_loc < loc; maybe_l_loc++) //use loc instead of row_end as the matrix is sorted
{
const int k = col_idxs[maybe_l_loc]; //this should definitely be less than col_index, but we want to make sure it is less than or equal to k_max
if(k > k_max)
{
continue;
}
//find corresponfing U at position: k,col_index
for(int maybe_u_loc = row_ptrs[k]; maybe_u_loc < row_ptrs[k+1]; maybe_u_loc++)
{
if(col_idxs[maybe_u_loc] == col_index )
{
//Push the (L, U) pair; nodes end up adjacent at the head of the list.
dependency* dep_node_l = (dependency*)malloc(sizeof(dependency));
assert(dep_node_l != nullptr);
dep_node_l->location = maybe_l_loc;
dep_node_l->is_diagonal = false;
dep_node_l->next = nullptr;
insert_dependency( &graph[loc] , dep_node_l );
dependency* dep_node_u = (dependency*)malloc(sizeof(dependency));
assert(dep_node_u != nullptr);
dep_node_u->location = maybe_u_loc;
dep_node_u->is_diagonal = false;
dep_node_u->next = nullptr;
insert_dependency( &graph[loc] , dep_node_u );
}
}
}
if(row_index > col_index)
{
//Strictly-lower element: also depends on the pivot diagonal for the final divide.
//Pushed last, so it sits at the head and is consumed first by the compute kernel.
const int diag_loc = diag_ptrs[col_index];
dependency* dep_node_diag = (dependency*)malloc(sizeof(dependency));
assert(dep_node_diag != nullptr);
dep_node_diag->location = diag_loc;
dep_node_diag->is_diagonal = true;
dep_node_diag->next = nullptr;
insert_dependency( &graph[loc] , dep_node_diag );
}
}
}
}
//Frees every device-heap dependency node reachable from graph[0..nnz-1].
//Grid-stride loop; one thread walks and frees one list at a time. The nodes were
//allocated with in-kernel malloc, so they must be released with in-kernel free.
__global__ void deallocate_graph_mem(dependency** graph, const int nnz)
{
const int stride = gridDim.x * blockDim.x;
for(int loc = threadIdx.x + blockIdx.x * blockDim.x; loc < nnz; loc += stride)
{
dependency* node = graph[loc];
while(node != nullptr)
{
dependency* next_node = node->next; //grab the successor before freeing
free(node);
node = next_node;
}
}
}
//Debug kernel: thread 0 walks and prints every dependency list (device printf).
__global__ void print_dependency_graph(dependency** graph, const int nnz)
{
if(threadIdx.x != 0)
{
return;
}
for(int loc = 0; loc < nnz; loc++)
{
printf("\n\n\n Dependencies for element at location: %d are as follows: ", loc);
for(dependency* node = graph[loc]; node != nullptr; node = node->next)
{
printf("\n%d" , node->location);
}
}
}
//Host-side debug helper: dumps the whole dependency graph via a single GPU thread.
void PrintGraph(dependency** graph, const PagedCSRMatrices & Factored_Pages)
{
const int nnz = Factored_Pages.GetNumNz();
print_dependency_graph<<< 1, 1 >>>(graph, nnz);
cudaDeviceSynchronize(); //wait so the device printf output is flushed before returning
}
//Asynchronous (busy-wait) batched ILU(0), approach 2: one thread per nonzero per page.
//Each thread walks its nonzero's dependency list (shared across pages), spinning on the
//`ready` flags until operands are published, accumulating sum = sum(L*U) and grabbing
//the pivot diagonal when present, then publishes (value - sum)/diag. `values`/`ready`
//are volatile so polling re-reads global memory; __threadfence() orders the value
//write before the flag write. Requires at least nnz threads per "round" of the
//grid-stride loop so every element of a page has its own thread.
__global__ void compute_ilu_0_approach2_legacy_kernel(const int npages, const int nrows, const int nnz, volatile double* const values, dependency** graph, volatile bool* const ready)
{
const int gid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = gridDim.x * blockDim.x;
if(num_threads < nnz)
{
printf("\n\n Sorry, we need atleast %d number of threads for this kernel to work\n\n", nnz); //This guarantees for nnz elements belonging to one small matrix, we have different threads(different gids) doing the compuatation
return;
}
for(int id = gid; id < nnz * npages ; id += gridDim.x * blockDim.x) //uncoalesced accesses when a thread accesses dependency locations
{
const int page_id = id/nnz;
const int loc = id % nnz;
dependency* address = graph[loc];
double diag_value = 1; //stays 1 for upper-triangular elements (no divide)
double u_val;
double l_val;
double sum = 0;
bool u_flag = false;
bool l_flag = false;
bool finished = false;
int diag_loc;
int u_loc;
int l_loc;
while(!finished)
{
//The diagonal node (if any) sits at the head of the list.
if(address != nullptr && address->is_diagonal == true)
{
diag_loc = address->location;
if(ready[diag_loc + page_id * nnz] == true)
{
diag_value = values[diag_loc + page_id * nnz];
assert(diag_value != 0);
address = address->next;
}
}
//Remaining nodes come in (U, L) pairs; consume a pair once both are published.
if(address != nullptr && address->is_diagonal == false )
{
u_loc = address->location;
l_loc = (address->next)->location;
// if(ready[u_loc + page_id * nnz] == true && ready[l_loc + page_id * nnz] == true)
// {
// double u_val = values[u_loc + page_id * nnz];
// double l_val = values[l_loc + page_id * nnz];
// sum += u_val * l_val;
// address = (address->next)->next;
// }
//Each operand is latched as soon as it becomes ready, independently of the other.
if(u_flag == false && ready[u_loc + page_id * nnz] == true)
{
u_val = values[u_loc + page_id * nnz];
u_flag = true;
}
if(l_flag == false && ready[l_loc + page_id * nnz] == true)
{
l_val = values[l_loc + page_id * nnz];
l_flag = true;
}
if(u_flag == true && l_flag == true)
{
sum += u_val * l_val;
u_flag = false;
l_flag = false;
address = (address->next)->next;
}
}
if(address == nullptr)
{
//All dependencies consumed: compute, publish, and flag this element as ready.
values[loc + page_id * nnz] = (values[loc + page_id * nnz] - sum)/diag_value;
__threadfence();
ready[loc + page_id * nnz] = true;
finished = true;
//printf("\n Now loc: %d is ready", loc);
}
}
}
}
//Host driver for approach 2: builds the linked-list dependency graph on the device,
//runs the busy-wait factorization kernel over all pages, then frees the graph nodes
//(in-kernel free) and the device buffers.
void ComputeILU0Approach2(PagedCSRMatrices & Factored_Pages , const int* const diag_ptrs)
{
const int nnz = Factored_Pages.GetNumNz();
const int npages = Factored_Pages.GetNumPages();
dim3 block(THREADS_PER_BLOCK);
//Step 1: build the dependency graph, one warp per row.
const int warps_per_block = THREADS_PER_BLOCK/WARP_SIZE;
dim3 build_grid((int)ceil((double)Factored_Pages.GetNumRows() / (double)warps_per_block));
dependency** graph = nullptr;
cudaMalloc((void**)&graph, sizeof(dependency*) * nnz); //array of list heads, one per nonzero
create_dependency_graph_for_ilu0_computation<<< build_grid, block >>>(Factored_Pages.GetNumRows(), Factored_Pages.GetPtrToGpuRowPtrs(), Factored_Pages.GetPtrToGpuColInd(),
diag_ptrs, graph);
// PrintGraph(graph, Factored_Pages);
//Step 2: factorize; the kernel needs at least nnz threads, clamp the grid otherwise.
int compute_grid_dim = (int)ceil((double)nnz * (double)npages / (double)THREADS_PER_BLOCK);
if(compute_grid_dim > max_possible_grid_dim)
{
compute_grid_dim = max_possible_grid_dim;
}
bool* ready = nullptr;
cudaMalloc((void**)&ready, npages * nnz * sizeof(bool));
cudaMemset(ready, false, npages * nnz * sizeof(bool)); //all values start unpublished
compute_ilu_0_approach2_legacy_kernel<<< dim3(compute_grid_dim), block >>>(npages, Factored_Pages.GetNumRows(), nnz,
Factored_Pages.GetPtrToGpuValues(), graph, ready);
//Step 3: release the device-heap nodes, then the pointer/flag arrays.
dim3 free_grid((int)ceil((double)nnz / (double)THREADS_PER_BLOCK));
deallocate_graph_mem<<< free_grid, block >>>(graph, nnz);
cudaFree(graph);
cudaFree(ready);
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------//
//APPROACH-3
//CPU construction of the flattened dependency graph for approach 3.
//For each nonzero location `loc` (in CSR order) it appends, into `dependencies`,
//the (L, U) location pairs it depends on, followed by the pivot diagonal location
//when the element is strictly lower-triangular. nz_ptrs[loc]..nz_ptrs[loc+1] delimit
//that element's slice; an odd slice length therefore implies a trailing diagonal
//dependency (the approach-3 kernel relies on this). diag_ptrs is filled as a side
//effect and must be consumed only after the row containing the diagonal was visited.
//nz_ptrs must be pre-sized to nnz+1 and diag_ptrs to nrows by the caller.
//Assumes sorted CSR columns and a present diagonal in every row.
void create_dependency_graph(PagedCSRMatrices & Factored_Pages, std::vector<int> & dependencies , std::vector<int> & nz_ptrs, std::vector<int> & diag_ptrs)
{
const int nrows = Factored_Pages.GetNumRows();
const int* const row_ptrs = Factored_Pages.GetPtrToCpuRowPtrs();
const int* const col_idxs = Factored_Pages.GetPtrToCpuColInd();
nz_ptrs[0] = 0;
for(int row_index = 0; row_index < nrows ; row_index++ )
{
const int row_start = row_ptrs[row_index];
const int row_end = row_ptrs[row_index + 1];
for(int loc = row_start; loc < row_end; loc++)
{
int num_dependencies = 0;
const int col_index = col_idxs[loc];
if(row_index == col_index)
{
diag_ptrs[row_index] = loc;
}
const int k_max = std::min(row_index , col_index) - 1;
for(int maybe_l_loc = row_start; maybe_l_loc < loc; maybe_l_loc++) //use loc instead of row_end as the matrix is sorted
{
const int k = col_idxs[maybe_l_loc];
if(k > k_max)
{
continue;
}
//find corresponding u at position k,col_index
for(int maybe_u_loc = row_ptrs[k]; maybe_u_loc < row_ptrs[k + 1]; maybe_u_loc++)
{
if(col_idxs[maybe_u_loc] == col_index)
{
dependencies.push_back(maybe_l_loc);
dependencies.push_back(maybe_u_loc);
num_dependencies += 2;
}
}
}
if(row_index > col_index)
{
const int diag_loc = diag_ptrs[col_index]; //diag_ptrs[col_index] has correct value as it has been found when doing stuff for previous rows as col_index < row_index here
dependencies.push_back(diag_loc);
num_dependencies++;
}
//Prefix-sum style: slice for `loc` ends where the next one begins.
nz_ptrs[loc + 1] = nz_ptrs[loc] + num_dependencies;
}
}
}
//Asynchronous (busy-wait) batched ILU(0), approach 3: like approach 2, but the
//dependency graph is the flat (dependencies, nz_ptrs) representation built on the CPU.
//Each element's dependency slice holds (L, U) location pairs and, when the slice
//length is odd, a trailing pivot-diagonal location. One thread per nonzero per page
//spins on the `ready` flags, accumulates sum = sum(L*U), then publishes
//(value - sum)/diag. volatile + __threadfence() provide the publish ordering.
//Requires at least nnz threads so each element of a page has its own thread.
__global__ void compute_ilu_0_approach3_legacy_kernel(const int npages, const int nrows, const int nnz, volatile double* const values, const int dep_length,
const int* const dependencies, const int* const nz_ptrs, volatile bool* const ready)
{
const int gid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = gridDim.x * blockDim.x;
if(num_threads < nnz)
{
printf("\n\n Sorry, we need atleast %d number of threads for this kernel to work\n\n", nnz); //This guarantees for nnz elements belonging to one small matrix, we have different threads(different gids) doing the compuatation
return;
}
for(int id = gid; id < nnz * npages ; id += gridDim.x * blockDim.x) //uncoalesced accesses when a thread accesses dependencies array
{
const int page_id = id/nnz;
const int loc = id % nnz;
const int start = nz_ptrs[loc];
const int end = nz_ptrs[loc + 1] - 1; //inclusive index of the last dependency
//Odd slice length means the final entry is the pivot diagonal (see graph builder).
const bool has_diag_dependency = (end + 1 - start)% 2 == 1 ? true : false;
int current = start; //cursor over this element's dependency slice
double diag_value = 1; //stays 1 when no diagonal dependency (upper-triangular element)
double u_val;
double l_val;
double sum = 0;
bool u_flag = false;
bool l_flag = false;
bool finished = false;
while(!finished)
{
//Consume the next (L, U) pair once both operands are published.
if( (has_diag_dependency == true && current <= end - 2) || (has_diag_dependency == false && current <= end - 1) )
{
const int l_loc = dependencies[current] + page_id * nnz;
const int u_loc = dependencies[current + 1] + page_id * nnz;
// if(ready[l_loc] == true && ready[u_loc] == true)
// {
// l_val = values[l_loc];
// u_val = values[u_loc];
// sum += l_val * u_val;
// current += 2;
// }
//Each operand is latched as soon as it becomes ready, independently of the other.
if(l_flag == false && ready[l_loc] == true)
{
l_val = values[l_loc];
l_flag = true;
}
if(u_flag == false && ready[u_loc] == true)
{
u_val = values[u_loc];
u_flag = true;
}
if(l_flag == true && u_flag == true)
{
sum += l_val * u_val;
current += 2;
l_flag = false;
u_flag = false;
}
}
//All pairs consumed; wait for the trailing pivot diagonal if there is one.
if(has_diag_dependency == true && current == end )
{
const int diag_loc = dependencies[end] + page_id * nnz;
if(ready[diag_loc] == true)
{
diag_value = values[diag_loc];
assert(diag_value != 0);
current++;
}
}
if(current == end + 1)
{
//All dependencies consumed: compute, publish, and flag this element as ready.
values[loc + page_id * nnz] = (values[loc + page_id * nnz] - sum)/diag_value;
__threadfence();
ready[loc + page_id * nnz] = true;
finished = true;
//printf("\n Now loc: %d is ready", loc);
}
}
}
}
//Debug helper: prints, for every nonzero location, the dependency locations recorded
//in the flat approach-3 graph.
//Fix: the original loop condition `loc < nz_ptrs_cpu.size() - 1` underflowed when
//nz_ptrs_cpu was empty (size_t wrap-around), causing out-of-bounds reads; iterating
//while `loc + 1 < size()` is safe for empty input and also avoids the signed/unsigned
//comparison of the old `int` counter.
void Print_Dep_Graph(const std::vector<int> & dependencies_cpu , const std::vector<int> & nz_ptrs_cpu)
{
for(size_t loc = 0; loc + 1 < nz_ptrs_cpu.size() ; loc++)
{
const int start = nz_ptrs_cpu[loc];
const int end = nz_ptrs_cpu[loc + 1];
printf("\n\n Dependencies for element at loc = %d are: ", (int)loc);
for(int i = start; i < end; i++)
{
printf("\n %d ", dependencies_cpu[i]);
}
}
}
//Host driver for approach 3: builds the flat dependency graph on the CPU, uploads it,
//and runs the busy-wait factorization kernel over all pages. Also fills `diag_ptrs`
//(device memory, one int per row) with the diagonal locations as a side effect.
void ComputeILU0Approach3(PagedCSRMatrices & Factored_Pages , int* const diag_ptrs)
{
//Here the representation of dependency graph is a bit different (flat arrays, not linked lists).
const int nnz = Factored_Pages.GetNumNz();
const int npages = Factored_Pages.GetNumPages();
std::vector<int> dependencies_cpu;
std::vector<int> nz_ptrs_cpu(nnz + 1);
std::vector<int> diag_ptrs_cpu(Factored_Pages.GetNumRows());
Factored_Pages.AllocateMemory(LOCATION::CPU);
Factored_Pages.CopyFromGpuToCpu();
create_dependency_graph(Factored_Pages, dependencies_cpu, nz_ptrs_cpu, diag_ptrs_cpu);
//Print_Dep_Graph(dependencies_cpu , nz_ptrs_cpu);
//Upload the graph and the diagonal locations.
int* dependencies = nullptr;
cudaMalloc((void**)&dependencies, dependencies_cpu.size() * sizeof(int));
cudaMemcpy(dependencies, dependencies_cpu.data(), dependencies_cpu.size() * sizeof(int), cudaMemcpyHostToDevice);
int* nz_ptrs = nullptr;
cudaMalloc((void**)&nz_ptrs, nz_ptrs_cpu.size() * sizeof(int));
cudaMemcpy(nz_ptrs, nz_ptrs_cpu.data(), nz_ptrs_cpu.size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(diag_ptrs, diag_ptrs_cpu.data(), diag_ptrs_cpu.size() * sizeof(int), cudaMemcpyHostToDevice);
//The kernel needs at least nnz threads; clamp the grid to the supported maximum.
int grid_dim = (int)ceil((double)nnz * (double)npages / (double)THREADS_PER_BLOCK);
if(grid_dim > max_possible_grid_dim)
{
grid_dim = max_possible_grid_dim;
}
bool* ready = nullptr;
cudaMalloc((void**)&ready, npages * nnz * sizeof(bool));
cudaMemset(ready, false, npages * nnz * sizeof(bool)); //all values start unpublished
compute_ilu_0_approach3_legacy_kernel<<< dim3(grid_dim), dim3(THREADS_PER_BLOCK) >>>(npages, Factored_Pages.GetNumRows(), nnz,
Factored_Pages.GetPtrToGpuValues(), dependencies_cpu.size(), dependencies, nz_ptrs, ready);
cudaFree(dependencies);
cudaFree(nz_ptrs);
cudaFree(ready);
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------//
//APPROACH-4
// Builds, on the CPU, the flattened dependency metadata used by ILU(0)
// approach 4.
//
// For every element below the diagonal (the "row element" in column
// `current_row` of some `row_below`), the factorization must (a) divide it by
// the diagonal of `current_row`, then (b) update every element to its right
// in `row_below` using the matching element of `current_row`.  These steps
// are emitted as (target_loc, source_loc) pairs into `dependencies`:
//
//   dependencies  : flat list of pairs; the first pair of each group is the
//                   division (row element, diagonal), subsequent pairs are
//                   the updates (element, matching current-row element)
//   diag_starters : offsets into `dependencies`, one per group, plus a final
//                   end sentinel
//   new_era       : offsets into `diag_starters`; all groups of one era are
//                   independent and may run concurrently
//   diag_ptrs     : resized to nrows; location of each row's diagonal element
void create_dependency_list(PagedCSRMatrices & Factored_Pages , std::vector<int> & dependencies, std::vector<int> & diag_starters, std::vector<int> & new_era, std::vector<int> & diag_ptrs)
{
    const int nrows = Factored_Pages.GetNumRows();
    const int* const row_ptrs = Factored_Pages.GetPtrToCpuRowPtrs();
    const int* const col_idxs = Factored_Pages.GetPtrToCpuColInd();
    diag_ptrs = std::vector<int>(nrows);
    int d_start_ptr_to_dependencies_arr = 0;
    int era_start_ptr_to_diag_starters = 0;
    new_era.push_back(0);
    // Scratch map: column index -> location of that column within the current
    // row (-1 if absent).  Sized dynamically so that matrices with
    // nrows > MAX_NUM_ROWS no longer overflow the previous fixed-size
    // int[MAX_NUM_ROWS] stack buffer.
    std::vector<int> temp_arr_curr_row(nrows, -1);
    for(int current_row = 0; current_row < nrows ; current_row++)
    {
        // Reset and rebuild the scratch map for the current row.
        temp_arr_curr_row.assign(nrows, -1);
        for(int j = row_ptrs[current_row]; j < row_ptrs[current_row + 1]; j++)
        {
            if(col_idxs[j] == current_row)
            {
                diag_ptrs[current_row] = j;
            }
            temp_arr_curr_row[col_idxs[j]] = j;
        }
        for(int row_below = current_row + 1; row_below < nrows; row_below++)
        {
            const int start = row_ptrs[row_below];
            const int end = row_ptrs[row_below + 1];
            int loc_row_ele = -1;   // location of row_below's element in column current_row
            for(int loc = start; loc < end; loc++)
            {
                int col = col_idxs[loc];
                if(col < current_row)
                {
                    continue;
                }
                else if(col == current_row)
                {
                    //Now there's only one dependency here--> that is divison by the diag element in the current row
                    //find loc_diag_ele
                    const int loc_diag_ele = temp_arr_curr_row[current_row];
                    dependencies.push_back(loc);
                    dependencies.push_back(loc_diag_ele);
                    diag_starters.push_back(d_start_ptr_to_dependencies_arr);
                    d_start_ptr_to_dependencies_arr += 2;
                    era_start_ptr_to_diag_starters++;
                    loc_row_ele = loc;
                }
                else
                {
                    if(loc_row_ele == -1) //So if that row_ele is missing, then the whole row is not modified
                    {
                        break;
                    }
                    else
                    {
                        // find loc_col_ele; skip fill-in positions (ILU(0) keeps the pattern)
                        const int loc_col_ele = temp_arr_curr_row[col];
                        if(loc_col_ele == -1)
                        {
                            continue;
                        }
                        dependencies.push_back(loc);
                        dependencies.push_back(loc_col_ele);
                        d_start_ptr_to_dependencies_arr += 2;
                    }
                }
            }
        }
        if(new_era[new_era.size() - 1] < era_start_ptr_to_diag_starters) //If both are equal, then that means there were no dependencies at all for the current row.
        {
            new_era.push_back(era_start_ptr_to_diag_starters);
        }
    }
    // End sentinel so diag_starters[i+1] is always a valid group end.
    diag_starters.push_back(d_start_ptr_to_dependencies_arr);
}
// Debug helper: dumps the three approach-4 dependency arrays to stdout
// (the flat dependency pairs, the group offsets into them, and the era
// offsets into the group array).
void Print_Dep_List(std::vector<int> & dependencies_cpu, std::vector<int> & diag_starters_cpu, std::vector<int> & new_era_cpu)
{
    std::cout << "\n\n dependencies: " << std::endl;
    for(const int entry : dependencies_cpu)
    {
        std::cout << entry << " ";
    }
    std::cout << "\n\n diag starters(ptrs to dependencies array): " << std::endl;
    for(const int entry : diag_starters_cpu)
    {
        std::cout << entry << " ";
    }
    std::cout << "\n\n new era(ptrs to diag starters array): " << std::endl;
    for(const int entry : new_era_cpu)
    {
        std::cout << entry << " ";
    }
}
// Processes one "era" of independent dependency groups: each warp of the
// block takes one group (one sub-diagonal row element plus its row updates).
//
// `dependencies` holds (target_loc, source_loc) pairs (see
// create_dependency_list): the first pair of a group divides the row element
// by the current row's diagonal; each later pair subtracts
// row_ele * source from its target.  Within the warp, even lanes hold target
// locations and odd lanes hold the paired source locations; `loc_1` pulls the
// partner lane's location via shuffle.
//
// NOTE(review): the participant mask is taken from __activemask(), which
// returns whichever lanes happen to be converged at that instant rather than
// the intended set of participants; under independent thread scheduling an
// explicitly computed mask would be safer — confirm on target architectures.
__device__ void modify_elements_in_an_era(double* const values, const int* const dependencies, const int* const diag_starters,
    const int* const era_array ,const int era_idx )
{
    // Range of groups belonging to this era.
    int start_idx_in_diag_starters = era_array[era_idx];
    int end_idx_in_diag_starters = era_array[era_idx + 1];
    const int warp_id = threadIdx.x / WARP_SIZE;
    const int id_within_warp = threadIdx.x % WARP_SIZE;
    const int total_num_warps_in_block = blockDim.x / WARP_SIZE;
    // Warps stride over the era's groups.
    for(int idx_in_diag_starters = start_idx_in_diag_starters + warp_id; idx_in_diag_starters < end_idx_in_diag_starters ; idx_in_diag_starters += total_num_warps_in_block)
    {
        int start_idx_in_dep_arr = diag_starters[idx_in_diag_starters];
        int end_idx_in_dep_arr = diag_starters[idx_in_diag_starters + 1];
        // Lane 0 computes the divided row element on the first stride
        // iteration; later iterations re-broadcast its cached value.
        double row_ele;
        for(int i = start_idx_in_dep_arr + id_within_warp; i < end_idx_in_dep_arr; i += WARP_SIZE)
        {
            const int loc = dependencies[i];
            auto mask = __activemask();
            int loc_1 = __shfl_sync(mask, loc, id_within_warp + 1); //For id_within_warp = 31, the result is undefined! But that is not used, so no worries!
            if(i == start_idx_in_dep_arr)
            {
                // First pair of the group: divide the row element by the
                // diagonal (held by the next lane's loc).
                double diag_val = values[loc_1];
                assert(diag_val != 0);
                values[loc ] /= diag_val;
                row_ele = values[loc ];
            }
            __syncwarp(mask);   // the division must complete before the broadcast
            row_ele = __shfl_sync( mask , row_ele, 0);
            // Even lanes past the first pair apply the row update; lane 31 is
            // excluded because its shuffled loc_1 is undefined.
            if(i > start_idx_in_dep_arr + 1 && id_within_warp % 2 == 0 && id_within_warp <= 30)
            {
                //loc_m1 = dependencies[i + 1];
                values[loc] -= values[loc_1] * row_ele;
            }
        }
        // if(id_within_warp == 0)
        // {
        //     double diag_val = values[dependencies[start_idx_in_dep_arr + 1] ];
        //     assert(diag_val != 0);
        //     values[dependencies[start_idx_in_dep_arr] ] /= diag_val;
        //     row_ele = values[dependencies[start_idx_in_dep_arr]];
        //     for(int i = start_idx_in_dep_arr + 2 ; i < end_idx_in_dep_arr - 1; i += 2)
        //     {
        //         values[dependencies[i] ] -= values[dependencies[i + 1]] * row_ele;
        //     }
        // }
    }
}
// ILU(0) approach 4: one thread block factorizes one page (one small matrix)
// of the batch.  Eras are processed strictly in order; the block-wide barrier
// after each era guarantees all of its updates are visible before the next
// era starts.  (Shared-memory staging of values/dependencies was tried and
// found slower / too large, hence plain global-memory access.)
__global__ void compute_ilu_0_approach4_kernel(const int npages, const int nnz, double* const values, const int* const dependencies, const int* const diag_starters,
    const int* const era_array , const int era_arr_length , const int dep_length)
{
    const int page_id = blockIdx.x;
    if(page_id >= npages)
        return;   // uniform per block: blockIdx.x is the same for every thread
    double* const page_values = values + page_id * nnz;
    const int num_eras = era_arr_length - 1;
    for(int era = 0; era < num_eras; era++)
    {
        modify_elements_in_an_era(page_values, dependencies, diag_starters, era_array, era);
        __syncthreads();   // all updates of this era must land before the next
    }
}
// ILU(0) "approach 4": builds the flattened dependency-pair representation on
// the CPU (see create_dependency_list), uploads it, and launches one thread
// block per page; within a block, eras run sequentially and the groups of an
// era run warp-parallel.
//
// Factored_Pages : batched CSR matrices; GPU values are factorized in place.
// diag_ptrs      : device buffer of size GetNumRows(); overwritten with the
//                  location of each row's diagonal element.
//
// NOTE(review): CUDA API return codes are not checked in this function.
void ComputeILU0Approach4(PagedCSRMatrices & Factored_Pages, int* const diag_ptrs )
{
    std::vector<int> dependencies_cpu;
    std::vector<int> diag_starters_cpu;
    std::vector<int> new_era_cpu;
    std::vector<int> diag_ptrs_cpu;
    // Dependency analysis needs the sparsity pattern on the host.
    Factored_Pages.CopyFromGpuToCpu();
    create_dependency_list(Factored_Pages, dependencies_cpu, diag_starters_cpu, new_era_cpu, diag_ptrs_cpu );
    // Upload the three metadata arrays plus the diagonal locations.
    int* dependencies = nullptr;
    int* diag_starters = nullptr;
    int* new_era = nullptr;
    cudaMalloc( (void**)&dependencies , dependencies_cpu.size() * sizeof(int) );
    cudaMemcpy( dependencies , dependencies_cpu.data(), dependencies_cpu.size() * sizeof(int) , cudaMemcpyHostToDevice );
    cudaMalloc( (void**)&diag_starters, diag_starters_cpu.size() * sizeof(int) );
    cudaMemcpy( diag_starters , diag_starters_cpu.data(), diag_starters_cpu.size() * sizeof(int) , cudaMemcpyHostToDevice );
    cudaMalloc( (void**)&new_era, new_era_cpu.size() * sizeof(int) );
    cudaMemcpy( new_era, new_era_cpu.data() , new_era_cpu.size() * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( diag_ptrs , diag_ptrs_cpu.data(), diag_ptrs_cpu.size() * sizeof(int), cudaMemcpyHostToDevice);
    // Print_Dep_List(dependencies_cpu, diag_starters_cpu, new_era_cpu);
    dim3 block(THREADS_PER_BLOCK);
    dim3 grid(Factored_Pages.GetNumPages());
    // Shared-memory variants (values or dependencies in shared memory) were
    // tried and disabled; see the commented alternatives.
    // const int dynamic_shared_mem_size = Factored_Pages.GetNumNz() * sizeof(double);
    // const int dynamic_shared_mem_size = dependencies_cpu.size() * sizeof(int);
    const int dynamic_shared_mem_size = 0;
    compute_ilu_0_approach4_kernel<<< grid, block, dynamic_shared_mem_size >>>(Factored_Pages.GetNumPages(), Factored_Pages.GetNumNz(), Factored_Pages.GetPtrToGpuValues(),
        dependencies, diag_starters, new_era, new_era_cpu.size(), dependencies_cpu.size()); // one thread block per small matrix in batch
    // cudaFree synchronizes with the preceding kernel before releasing memory.
    cudaFree(dependencies);
    cudaFree(diag_starters);
    cudaFree(new_era);
}
//-------------------------------------------------------------------------------------------------------------------------------------------------------------------//
} //unnamed namespace
//-------------------------------- calling function for all small pieces ----------------------------------------------------------------------------------------
// Entry point for the batched ILU(0) factorization on the GPU.
//
// Copies A into a working matrix, inserts any missing diagonal entries
// (ILU(0) requires a full diagonal), runs one of four factorization
// strategies selected by `approach_num`, and finally splits the in-place
// factorization into separate L and U matrices.
//
// A_pages      : input batch (must be square; asserted below)
// L_pages/U_pages : outputs; allocated/filled by the split helpers
// approach_num : 1..4 selects the factorization kernel strategy
void ILU_0_Factorization_Gpu(const PagedCSRMatrices & A_pages , PagedCSRMatrices & L_pages, PagedCSRMatrices & U_pages, const int approach_num)
{
    //cudaProfilerStart();
    // std::cout << "\n\nORIGINAL MATRIX: " << std::endl;
    // PrintPagedCSRMatrix(A_pages);
    //first assert matrix is square
    assert(A_pages.GetNumCols() == A_pages.GetNumRows());
    PagedCSRMatrices Factored_Pages;
    //We would want to use copy assignment here... or even a copy constructor. implement it later...
    //copy A to F
    Copy_Gpu_PagedCSRMatrices(A_pages , Factored_Pages);
    //SortCSRMatrix(Factored_Pages); if unsorted, pls sort the paged matrix befoe proceeding. (All these matrices are already sorted.(sorted while storing))
    // diag_info holds one int per row; first used as a missing-diagonal
    // indicator, later reused for diagonal element locations.
    int* diag_info = nullptr;
    cudaMalloc((void**)&diag_info, sizeof(int) * Factored_Pages.GetNumRows());
    int num_missing_diagonal_eles = Count_Missing_Diagonal_Elements(Factored_Pages , diag_info);
    if(num_missing_diagonal_eles > 0)
    {
        // ILU(0) divides by diagonal entries, so insert explicit (zero-pattern)
        // diagonal elements where the input had none.
        PagedCSRMatrices New_Factored_Pages;
        Add_Missing_Diagonal_Elements(New_Factored_Pages, Factored_Pages, diag_info , num_missing_diagonal_eles);
        Copy_Gpu_PagedCSRMatrices(New_Factored_Pages , Factored_Pages); //TODO: avoid an extra copy here
    }
    // std::cout << "\n\nMATRIX AFTER ADDITION OF DIAGONAL ELEMENTS: " << std::endl;
    // PrintPagedCSRMatrix(Factored_Pages);
    //continue to use Factored_pages here...
    // Profile only the factorization itself, not the setup above.
    cudaProfilerStart();
    if(approach_num == 1)
    {
        Find_locations_of_diagonal_elements(Factored_Pages, diag_info);
        //std::cout << "\n\nLocn of diagonal elements:" << std::endl;
        //print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
        //cudaDeviceSynchronize();
        ComputeILU0Approach1(Factored_Pages , diag_info);
    }
    else if(approach_num == 2)
    {
        Find_locations_of_diagonal_elements(Factored_Pages, diag_info);
        //std::cout << "\n\nLocn of diagonal elements:" << std::endl;
        //print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
        //cudaDeviceSynchronize();
        ComputeILU0Approach2(Factored_Pages , diag_info);
        //ComputeILU0Approach2_SingleMatrix(Factored_Pages, diag_info);
    }
    else if(approach_num == 3)
    {
        // Approaches 3 and 4 fill diag_info themselves during dependency analysis.
        ComputeILU0Approach3(Factored_Pages , diag_info);
        // std::cout << "\n\nLocn of diagonal elements:" << std::endl;
        // print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
    }
    else if(approach_num == 4)
    {
        ComputeILU0Approach4(Factored_Pages , diag_info);
        // std::cout << "\n\nLocn of diagonal elements:" << std::endl;
        // print_kernel<<< 1, 1 >>>(Factored_Pages.GetNumRows(), diag_info);
    }
    else
    {
        printf("\n NOT IMPLEMENTED\n");
    }
    cudaProfilerStop();
    // std::cout << "\n\nFACTORIZED MATRIX(ILU(0)): " << std::endl;
    // PrintPagedCSRMatrix(Factored_Pages);
    // Split the in-place L\U storage into the two output matrices.
    Update_row_pointers_L_and_U_and_Allocate_Memory(Factored_Pages , diag_info, L_pages, U_pages);
    Fill_L_and_U_col_idxs_and_vals(Factored_Pages, L_pages, U_pages);
    // std::cout << "\n\nMATRIX L: " << std::endl;
    // PrintPagedCSRMatrix(L_pages);
    // std::cout << "\n\nMATRIX U: " << std::endl;
    // PrintPagedCSRMatrix(U_pages);
    cudaFree(diag_info);
    cudaDeviceSynchronize(); //for timing purpose
    //cudaProfilerStop();
}
//TODO:
//Parallelize Prefix Sum
|
69bbea2f995c8b28a1d001d4fb27d29f57a5ead8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zbajac_csr.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_c
#define BLOCKSIZE 256
// Block-asynchronous Jacobi with `localiters` local sweeps (HIP version).
// The matrix is split into a diagonal-block part D and an off-diagonal part
// R (both CSR); each thread owns one row.  After one global sweep, additional
// sweeps iterate on the block-local solution kept in shared memory, reusing
// the frozen off-diagonal contribution v = b - R*x.
//
// Assumes the columns of each diagonal block lie within the owning thread
// block's index range (colD[i] - inddiag indexes local_x), and that
// valD[start] is the diagonal scaling entry of the row — TODO confirm against
// the D-matrix construction.
//
// NOTE(review): __syncthreads() below sits inside `if (index < n)`; in the
// tail block, threads with index >= n skip the barrier while the rest wait —
// confirm this is benign on the targeted architectures.
__global__ void
magma_cbajac_csr_ls_kernel(int localiters, int n,
    magmaFloatComplex * valD,
    magma_index_t * rowD,
    magma_index_t * colD,
    magmaFloatComplex * valR,
    magma_index_t * rowR,
    magma_index_t * colR,
    const magmaFloatComplex * __restrict__ b,
    magmaFloatComplex * x )
{
    int inddiag = blockIdx.x*blockDim.x;   // first row index owned by this block
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int i, j, start, end;
    if (index < n) {
        start = rowR[index];
        end = rowR[index+1];
        magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
        magmaFloatComplex bl, tmp = zero, v = zero;
        // PRECISION_c is defined here, so the __ldg branch is compiled out.
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        bl = __ldg( b+index );
#else
        bl = b[index];
#endif
        // v = R*x for this row (off-diagonal contribution).
        #pragma unroll
        for( i=start; i<end; i++ )
            v += valR[i] * x[ colR[i] ];
        start = rowD[index];
        end = rowD[index+1];
        // tmp = D*x for this row.
        #pragma unroll
        for( i=start; i<end; i++ )
            tmp += valD[i] * x[ colD[i] ];
        v = bl - v;   // frozen for all local iterations below
        /* add more local iterations */
        __shared__ magmaFloatComplex local_x[ BLOCKSIZE ];
        local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
        __syncthreads();
        // Remaining localiters-1 sweeps use only the block-local iterate.
        #pragma unroll
        for( j=0; j<localiters-1; j++ )
        {
            tmp = zero;
            #pragma unroll
            for( i=start; i<end; i++ )
                tmp += valD[i] * local_x[ colD[i] - inddiag];
            local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
        }
        x[index] = local_x[threadIdx.x];
    }
}
// Single block-Jacobi sweep (HIP version): one thread per row computes
// x_new = x + (b - R*x - D*x) / valD[start], where D holds the diagonal
// blocks and R the off-diagonal part (both CSR).  valD[start] is used as the
// row's diagonal scaling entry — presumably the first entry of the row in D;
// TODO confirm against the D-matrix construction.
__global__ void
magma_cbajac_csr_kernel(
    int n,
    magmaFloatComplex * valD,
    magma_index_t * rowD,
    magma_index_t * colD,
    magmaFloatComplex * valR,
    magma_index_t * rowR,
    magma_index_t * colR,
    magmaFloatComplex * b,
    magmaFloatComplex * x )
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int i, start, end;
    if (index < n) {
        magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
        magmaFloatComplex bl, tmp = zero, v = zero;
        // PRECISION_c is defined here, so the __ldg branch is compiled out.
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        bl = __ldg( b+index );
#else
        bl = b[index];
#endif
        // v = R*x for this row (off-diagonal contribution).
        start = rowR[index];
        end = rowR[index+1];
        #pragma unroll
        for( i=start; i<end; i++ )
            v += valR[i] * x[ colR[i] ];
        v = bl - v;
        // tmp = D*x for this row.
        start = rowD[index];
        end = rowD[index+1];
        #pragma unroll
        for( i=start; i<end; i++ )
            tmp += valD[i] * x[ colD[i] ];
        x[index] = x[index] + ( v - tmp ) / (valD[start]);
    }
}
/**
Purpose
-------
This routine is a block-asynchronous Jacobi iteration performing s
local Jacobi-updates within the block. Input format is two CSR matrices,
one containing the diagonal blocks, one containing the rest.
Arguments
---------
@param[in]
localiters magma_int_t
number of local Jacobi-like updates
@param[in]
D magma_c_matrix
input matrix with diagonal blocks
@param[in]
R magma_c_matrix
input matrix with non-diagonal parts
@param[in]
b magma_c_matrix
RHS
@param[in]
x magma_c_matrix*
iterate/solution
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbajac_csr(
magma_int_t localiters,
magma_c_matrix D,
magma_c_matrix R,
magma_c_matrix b,
magma_c_matrix *x,
magma_queue_t queue )
{
int blocksize1 = BLOCKSIZE;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( D.num_rows, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
if ( R.nnz > 0 ) {
if ( localiters == 1 )
hipLaunchKernelGGL(( magma_cbajac_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
else
hipLaunchKernelGGL(( magma_cbajac_csr_ls_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
}
else {
printf("error: all elements in diagonal block.\n");
}
return MAGMA_SUCCESS;
}
| 69bbea2f995c8b28a1d001d4fb27d29f57a5ead8.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zbajac_csr.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_c
#define BLOCKSIZE 256
// Block-asynchronous Jacobi with `localiters` local sweeps (CUDA version;
// identical to the HIP port above).  Each thread owns one row; after one
// global sweep, additional sweeps iterate on the block-local solution in
// shared memory, reusing the frozen off-diagonal contribution v = b - R*x.
//
// Assumes diagonal-block columns lie within the owning thread block's index
// range (colD[i] - inddiag indexes local_x) and that valD[start] is the
// row's diagonal scaling entry — TODO confirm against D's construction.
//
// NOTE(review): __syncthreads() below sits inside `if (index < n)`; in the
// tail block, threads with index >= n skip the barrier — confirm benign.
__global__ void
magma_cbajac_csr_ls_kernel(int localiters, int n,
    magmaFloatComplex * valD,
    magma_index_t * rowD,
    magma_index_t * colD,
    magmaFloatComplex * valR,
    magma_index_t * rowR,
    magma_index_t * colR,
    const magmaFloatComplex * __restrict__ b,
    magmaFloatComplex * x )
{
    int inddiag = blockIdx.x*blockDim.x;   // first row index owned by this block
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int i, j, start, end;
    if (index < n) {
        start = rowR[index];
        end = rowR[index+1];
        magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
        magmaFloatComplex bl, tmp = zero, v = zero;
        // PRECISION_c is defined here, so the __ldg branch is compiled out.
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        bl = __ldg( b+index );
#else
        bl = b[index];
#endif
        // v = R*x (off-diagonal contribution for this row).
        #pragma unroll
        for( i=start; i<end; i++ )
            v += valR[i] * x[ colR[i] ];
        start = rowD[index];
        end = rowD[index+1];
        // tmp = D*x for this row.
        #pragma unroll
        for( i=start; i<end; i++ )
            tmp += valD[i] * x[ colD[i] ];
        v = bl - v;   // frozen for all local iterations below
        /* add more local iterations */
        __shared__ magmaFloatComplex local_x[ BLOCKSIZE ];
        local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
        __syncthreads();
        // Remaining localiters-1 sweeps use only the block-local iterate.
        #pragma unroll
        for( j=0; j<localiters-1; j++ )
        {
            tmp = zero;
            #pragma unroll
            for( i=start; i<end; i++ )
                tmp += valD[i] * local_x[ colD[i] - inddiag];
            local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
        }
        x[index] = local_x[threadIdx.x];
    }
}
// Single block-Jacobi sweep (CUDA version): one thread per row computes
// x_new = x + (b - R*x - D*x) / valD[dstart], where D holds the diagonal
// blocks and R the off-diagonal part (both CSR).  valD[dstart] is used as
// the row's diagonal scaling entry.
__global__ void
magma_cbajac_csr_kernel(
    int n,
    magmaFloatComplex * valD,
    magma_index_t * rowD,
    magma_index_t * colD,
    magmaFloatComplex * valR,
    magma_index_t * rowR,
    magma_index_t * colR,
    magmaFloatComplex * b,
    magmaFloatComplex * x )
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= n)
        return;
    const magmaFloatComplex c_zero = MAGMA_C_MAKE(0.0, 0.0);
    magmaFloatComplex rhs;
    // PRECISION_c is defined here, so the __ldg branch is compiled out.
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
    rhs = __ldg( b+row );
#else
    rhs = b[row];
#endif
    // Off-diagonal contribution: v = (R*x) for this row.
    magmaFloatComplex v = c_zero;
    #pragma unroll
    for( int k = rowR[row]; k < rowR[row+1]; k++ )
        v += valR[k] * x[ colR[k] ];
    v = rhs - v;
    // Diagonal-block contribution: tmp = (D*x) for this row.
    magmaFloatComplex tmp = c_zero;
    const int dstart = rowD[row];
    const int dend   = rowD[row+1];
    #pragma unroll
    for( int k = dstart; k < dend; k++ )
        tmp += valD[k] * x[ colD[k] ];
    // Jacobi update scaled by the row's diagonal entry.
    x[row] = x[row] + ( v - tmp ) / (valD[dstart]);
}
/**
Purpose
-------
This routine is a block-asynchronous Jacobi iteration performing s
local Jacobi-updates within the block. Input format is two CSR matrices,
one containing the diagonal blocks, one containing the rest.
Arguments
---------
@param[in]
localiters magma_int_t
number of local Jacobi-like updates
@param[in]
D magma_c_matrix
input matrix with diagonal blocks
@param[in]
R magma_c_matrix
input matrix with non-diagonal parts
@param[in]
b magma_c_matrix
RHS
@param[in]
x magma_c_matrix*
iterate/solution
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbajac_csr(
magma_int_t localiters,
magma_c_matrix D,
magma_c_matrix R,
magma_c_matrix b,
magma_c_matrix *x,
magma_queue_t queue )
{
int blocksize1 = BLOCKSIZE;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( D.num_rows, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
if ( R.nnz > 0 ) {
if ( localiters == 1 )
magma_cbajac_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
else
magma_cbajac_csr_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
}
else {
printf("error: all elements in diagonal block.\n");
}
return MAGMA_SUCCESS;
}
|
58b86aea105d74382adccdc11962e60b43027638.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "interp_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
// Batched 2D basis interpolation kernel (HIP version).  Thread layout:
// blockDim.x = MAXPQ lanes work on one element, blockDim.y = ntcol elements
// per block; blockIdx.x strides over the batch.  Dynamic shared memory holds
// the P*Q interp matrix sT followed by one P*MAXPQ scratch slab per y-slice.
//
// NOTE(review): the tail guard `if (elem_id >= nelem) return;` depends on ty,
// so in a partial last block some y-slices may skip the block-wide syncs
// inside readU_2d / after the device op — confirm this is benign for the
// launch configurations produced by the driver.
template<typename T, int NCOMP, int P, int Q, int MAXPQ>
static __global__ void
magma_interp_2d_kernel(
    const T *dT, magma_trans_t transT,
    const T *dU, const int estrdU, const int cstrdU,
    T *dV, const int estrdV, const int cstrdV, const int nelem)
{
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int elem_id = (blockIdx.x * blockDim.y) + ty;
    if (elem_id >= nelem) return;
    T rU[1][NCOMP][P] = { make_zero<T>() };    // for a non fused operator DIM is always 1
    T rV[1][NCOMP][Q] = { make_zero<T>() };    // for a non fused operator DIM is always 1
    T rTmp = make_zero<T>();
    // shift global memory pointers by elem stride
    dU += elem_id * estrdU;
    dV += elem_id * estrdV;
    // assign shared memory pointers: [ sT (P*Q) | per-y-slice scratch (P*MAXPQ) ]
    T* sT = (T*)(shared_data);
    T* sTmp = sT + P*Q;
    sTmp += ty * (P * MAXPQ);
    // read T -- a single y-slice loads the shared interp matrix for the block
    if (ty == 0) {
        dread_T_gm2sm<P, Q>(tx, transT, dT, sT);
    }
    // read V if transT is magmaTrans (existing V values are needed in that mode)
    if (transT == MagmaTrans) {
        readV_2d<T, Q, 1, NCOMP, Q, 0>(dV, cstrdV, rV, tx);
    }
    // read U -- there is a sync at the end of this function
    readU_2d<T, P, 1, NCOMP, P, 0>(dU, cstrdU, rU, sTmp, tx);
    // no sync needed here -- readU_2d already syncs at the end
    magma_interp_2d_device<T, 1, 1, NCOMP, P, Q, P, Q>(sT, transT, rU, rV, tx, rTmp, sTmp);
    __syncthreads();
    // write V
    writeV_2d<T, Q, 1, NCOMP, Q, 0>(dV, cstrdV, rV, tx);
}
//////////////////////////////////////////////////////////////////////////////////////////
// Computes the launch configuration for magma_interp_2d_kernel and launches
// it (HIP version).  Packs ntcol = maxthreads / MAXPQ elements per block,
// sizes the dynamic shared memory (sT plus one scratch slab per y-slice),
// and opts in to larger-than-default dynamic shared memory when the runtime
// supports it.  Returns 0 on success, 1 when the configuration exceeds the
// device limits or the launch fails.
//
// (TORCH_HIP_VERSION is the hipify rewrite of the original CUDA_VERSION guard.)
template<typename T, int NCOMP, int P, int Q>
static magma_int_t
magma_interp_2d_kernel_driver(
    const T *dT, magma_trans_t transT,
    const T *dU, magma_int_t estrdU, magma_int_t cstrdU,
    T *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    magma_device_t device;
    magma_getdevice( &device );
    magma_int_t shmem_max, nthreads_max;
    const int MAXPQ = maxpq(P,Q);
    magma_int_t nthreads = MAXPQ;
    // Number of elements handled per block (y-dimension of the block).
    magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
    magma_int_t shmem = 0;
    shmem += P*Q *sizeof(T);  // for sT
    shmem += ntcol * ( P*MAXPQ*sizeof(T) );  // for reforming rU we need PxP, and for the intermediate output we need PxQ
    hipDeviceGetAttribute (&nthreads_max, hipDeviceAttributeMaxThreadsPerBlock, device);
#if TORCH_HIP_VERSION >= 9000
    hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeSharedMemPerBlockOptin, device);
    if (shmem <= shmem_max) {
        // Opt in to dynamic shared memory beyond the default per-block limit.
        hipFuncSetAttribute(magma_interp_2d_kernel<T,NCOMP,P,Q,MAXPQ>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem);
    }
#else
    hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeMaxSharedMemoryPerBlock, device);
#endif // TORCH_HIP_VERSION >= 9000
    if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
        return 1;  // launch failed
    }
    else {
        magma_int_t nblocks = (nelem + ntcol-1) / ntcol;  // ceil-div over the batch
        dim3 threads(nthreads, ntcol, 1);
        dim3 grid(nblocks, 1, 1);
        hipLaunchKernelGGL(( magma_interp_2d_kernel<T,NCOMP,P,Q,MAXPQ>), dim3(grid), dim3(threads), shmem, magma_queue_get_cuda_stream(queue),
            dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem);
        // Peek (non-clearing) at the launch status so sticky errors remain visible.
        return (hipPeekAtLastError() == hipSuccess) ? 0 : 1;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////
// Runtime-to-compile-time dispatch over the number of components (1..3):
// selects the NCOMP template instantiation of the kernel driver.
// Returns 1 (launch failed) for unsupported component counts.
template<int P, int Q>
static magma_int_t
magma_interp_2d_ncomp(
    magma_int_t ncomp,
    const CeedScalar *dT, magma_trans_t transT,
    const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
    CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    magma_int_t launch_failed = 0;
    switch (ncomp) {
        case 1:
            launch_failed = magma_interp_2d_kernel_driver<CeedScalar,1,P,Q>
                (dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 2:
            launch_failed = magma_interp_2d_kernel_driver<CeedScalar,2,P,Q>
                (dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 3:
            launch_failed = magma_interp_2d_kernel_driver<CeedScalar,3,P,Q>
                (dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        default: launch_failed = 1;
    }
    return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Runtime-to-compile-time dispatch over Q (1..10): selects the Q template
// parameter, then forwards to the ncomp dispatcher.  Returns 1 (launch
// failed) for unsupported Q.
// NOTE(review): despite the "1d" in its name, this function dispatches the
// 2D path (magma_interp_2d_ncomp); the name is kept because callers depend
// on it.
template<int P>
static magma_int_t
magma_interp_1d_ncomp_q(
    magma_int_t Q, magma_int_t ncomp,
    const CeedScalar *dT, magma_trans_t transT,
    const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
    CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    magma_int_t launch_failed = 0;
    switch (Q) {
        case 1:
            launch_failed = magma_interp_2d_ncomp<P, 1>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 2:
            launch_failed = magma_interp_2d_ncomp<P, 2>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 3:
            launch_failed = magma_interp_2d_ncomp<P, 3>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 4:
            launch_failed = magma_interp_2d_ncomp<P, 4>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 5:
            launch_failed = magma_interp_2d_ncomp<P, 5>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 6:
            launch_failed = magma_interp_2d_ncomp<P, 6>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 7:
            launch_failed = magma_interp_2d_ncomp<P, 7>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 8:
            launch_failed = magma_interp_2d_ncomp<P, 8>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 9:
            launch_failed = magma_interp_2d_ncomp<P, 9>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 10:
            launch_failed = magma_interp_2d_ncomp<P,10>
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        default: launch_failed = 1;
    }
    return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Runtime-to-compile-time dispatch over P (1..10): selects the P template
// parameter, then forwards to the Q dispatcher.  Returns 1 (launch failed)
// for unsupported P.
static magma_int_t
magma_interp_2d_ncomp_q_p(
    magma_int_t P, magma_int_t Q, magma_int_t ncomp,
    const CeedScalar *dT, magma_trans_t transT,
    const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
    CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    magma_int_t launch_failed = 0;
    switch (P) {
        case 1:
            launch_failed = magma_interp_1d_ncomp_q< 1>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 2:
            launch_failed = magma_interp_1d_ncomp_q< 2>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 3:
            launch_failed = magma_interp_1d_ncomp_q< 3>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 4:
            launch_failed = magma_interp_1d_ncomp_q< 4>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 5:
            launch_failed = magma_interp_1d_ncomp_q< 5>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 6:
            launch_failed = magma_interp_1d_ncomp_q< 6>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 7:
            launch_failed = magma_interp_1d_ncomp_q< 7>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 8:
            launch_failed = magma_interp_1d_ncomp_q< 8>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 9:
            launch_failed = magma_interp_1d_ncomp_q< 9>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        case 10:
            launch_failed = magma_interp_1d_ncomp_q<10>
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
            break;
        default: launch_failed = 1;
    }
    return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_interp_2d(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dT, CeedTransposeMode tmode,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
launch_failed = magma_interp_2d_ncomp_q_p(
P, Q, ncomp,
dT, transT,
dU, estrdU, cstrdU,
dV, estrdV, cstrdV,
nelem, maxthreads, queue);
return launch_failed;
}
| 58b86aea105d74382adccdc11962e60b43027638.cu | // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <cuda.h> // for CUDA_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "interp_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
// Batched 2D basis interpolation kernel (CUDA version; identical to the HIP
// port above).  Thread layout: blockDim.x = MAXPQ lanes per element,
// blockDim.y = ntcol elements per block; blockIdx.x strides over the batch.
// Dynamic shared memory holds the P*Q interp matrix sT followed by one
// P*MAXPQ scratch slab per y-slice.
//
// NOTE(review): the tail guard `if (elem_id >= nelem) return;` depends on ty,
// so in a partial last block some y-slices may skip the block-wide syncs
// inside readU_2d / after the device op — confirm this is benign for the
// launch configurations produced by the driver.
template<typename T, int NCOMP, int P, int Q, int MAXPQ>
static __global__ void
magma_interp_2d_kernel(
    const T *dT, magma_trans_t transT,
    const T *dU, const int estrdU, const int cstrdU,
    T *dV, const int estrdV, const int cstrdV, const int nelem)
{
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int elem_id = (blockIdx.x * blockDim.y) + ty;
    if (elem_id >= nelem) return;
    T rU[1][NCOMP][P] = { make_zero<T>() };    // for a non fused operator DIM is always 1
    T rV[1][NCOMP][Q] = { make_zero<T>() };    // for a non fused operator DIM is always 1
    T rTmp = make_zero<T>();
    // shift global memory pointers by elem stride
    dU += elem_id * estrdU;
    dV += elem_id * estrdV;
    // assign shared memory pointers: [ sT (P*Q) | per-y-slice scratch (P*MAXPQ) ]
    T* sT = (T*)(shared_data);
    T* sTmp = sT + P*Q;
    sTmp += ty * (P * MAXPQ);
    // read T -- a single y-slice loads the shared interp matrix for the block
    if (ty == 0) {
        dread_T_gm2sm<P, Q>(tx, transT, dT, sT);
    }
    // read V if transT is magmaTrans (existing V values are needed in that mode)
    if (transT == MagmaTrans) {
        readV_2d<T, Q, 1, NCOMP, Q, 0>(dV, cstrdV, rV, tx);
    }
    // read U -- there is a sync at the end of this function
    readU_2d<T, P, 1, NCOMP, P, 0>(dU, cstrdU, rU, sTmp, tx);
    // no sync needed here -- readU_2d already syncs at the end
    magma_interp_2d_device<T, 1, 1, NCOMP, P, Q, P, Q>(sT, transT, rU, rV, tx, rTmp, sTmp);
    __syncthreads();
    // write V
    writeV_2d<T, Q, 1, NCOMP, Q, 0>(dV, cstrdV, rV, tx);
}
//////////////////////////////////////////////////////////////////////////////////////////
// Computes the launch configuration (threads per element, elements per block,
// dynamic shared memory) for magma_interp_2d_kernel and launches it on the
// queue's CUDA stream. Returns 0 on success, 1 when the configuration exceeds
// device limits or the launch itself fails.
template<typename T, int NCOMP, int P, int Q>
static magma_int_t
magma_interp_2d_kernel_driver(
    const T *dT, magma_trans_t transT,
    const T *dU, magma_int_t estrdU, magma_int_t cstrdU,
          T *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    magma_device_t device;
    magma_getdevice( &device );
    magma_int_t shmem_max, nthreads_max;
    const int MAXPQ = maxpq(P,Q);
    magma_int_t nthreads = MAXPQ;
    // pack as many elements per block as maxthreads allows (at least one)
    magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
    magma_int_t shmem = 0;
    shmem += P*Q *sizeof(T);  // for sT
    shmem += ntcol * ( P*MAXPQ*sizeof(T) );  // for reforming rU we need PxP, and for the intermediate output we need PxQ
    cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device);
#if CUDA_VERSION >= 9000
    cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
    if (shmem <= shmem_max) {
        // opt in to the larger (>48KB) dynamic shared memory limit when available
        cudaFuncSetAttribute(magma_interp_2d_kernel<T,NCOMP,P,Q,MAXPQ>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem);
    }
#else
    cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device);
#endif // CUDA_VERSION >= 9000
    if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
        return 1;    // launch failed
    }
    else {
        // ceil-div: enough blocks to cover all elements at ntcol per block
        magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
        dim3 threads(nthreads, ntcol, 1);
        dim3 grid(nblocks, 1, 1);
        magma_interp_2d_kernel<T,NCOMP,P,Q,MAXPQ><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>>
        (dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem);
        return (cudaPeekAtLastError() == cudaSuccess) ? 0 : 1;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////
// Runtime dispatch on the number of components; only ncomp in {1,2,3} is
// supported. Returns the driver's status, or 1 for an unsupported ncomp.
template<int P, int Q>
static magma_int_t
magma_interp_2d_ncomp(
    magma_int_t ncomp,
    const CeedScalar *dT, magma_trans_t transT,
    const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
    CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    switch (ncomp) {
        case 1:
            return magma_interp_2d_kernel_driver<CeedScalar,1,P,Q>
                (dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
        case 2:
            return magma_interp_2d_kernel_driver<CeedScalar,2,P,Q>
                (dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
        case 3:
            return magma_interp_2d_kernel_driver<CeedScalar,3,P,Q>
                (dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue);
        default:
            return 1;  // unsupported component count
    }
}
//////////////////////////////////////////////////////////////////////////////////////////
// Runtime dispatch on the quadrature size Q (1..10 supported); returns 1 for
// any other Q. NOTE: despite the "1d" in the name, this forwards to the 2-D
// component dispatcher.
template<int P>
static magma_int_t
magma_interp_1d_ncomp_q(
    magma_int_t Q, magma_int_t ncomp,
    const CeedScalar *dT, magma_trans_t transT,
    const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
    CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    // One case per supported Q; each instantiates the templated path directly.
    #define MAGMA_INTERP_CASE_Q(QVAL)                                                               \
        case QVAL:                                                                                  \
            return magma_interp_2d_ncomp<P, QVAL>                                                   \
                (ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue)
    switch (Q) {
        MAGMA_INTERP_CASE_Q( 1);
        MAGMA_INTERP_CASE_Q( 2);
        MAGMA_INTERP_CASE_Q( 3);
        MAGMA_INTERP_CASE_Q( 4);
        MAGMA_INTERP_CASE_Q( 5);
        MAGMA_INTERP_CASE_Q( 6);
        MAGMA_INTERP_CASE_Q( 7);
        MAGMA_INTERP_CASE_Q( 8);
        MAGMA_INTERP_CASE_Q( 9);
        MAGMA_INTERP_CASE_Q(10);
        default:
            return 1;  // unsupported Q
    }
    #undef MAGMA_INTERP_CASE_Q
}
//////////////////////////////////////////////////////////////////////////////////////////
// Runtime dispatch on the basis size P (1..10 supported); returns 1 for any
// other P. Forwards to the Q dispatcher.
static magma_int_t
magma_interp_2d_ncomp_q_p(
    magma_int_t P, magma_int_t Q, magma_int_t ncomp,
    const CeedScalar *dT, magma_trans_t transT,
    const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
    CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    // One case per supported P; each instantiates the templated path directly.
    #define MAGMA_INTERP_CASE_P(PVAL)                                                                  \
        case PVAL:                                                                                     \
            return magma_interp_1d_ncomp_q<PVAL>                                                       \
                (Q, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, maxthreads, queue)
    switch (P) {
        MAGMA_INTERP_CASE_P( 1);
        MAGMA_INTERP_CASE_P( 2);
        MAGMA_INTERP_CASE_P( 3);
        MAGMA_INTERP_CASE_P( 4);
        MAGMA_INTERP_CASE_P( 5);
        MAGMA_INTERP_CASE_P( 6);
        MAGMA_INTERP_CASE_P( 7);
        MAGMA_INTERP_CASE_P( 8);
        MAGMA_INTERP_CASE_P( 9);
        MAGMA_INTERP_CASE_P(10);
        default:
            return 1;  // unsupported P
    }
    #undef MAGMA_INTERP_CASE_P
}
//////////////////////////////////////////////////////////////////////////////////////////
// Public entry point: 2-D tensor interpolation over nelem elements.
// Maps the libCEED transpose mode onto MAGMA's and forwards to the P/Q/ncomp
// dispatcher. Returns 0 on success, nonzero for unsupported sizes or a
// failed launch.
extern "C" magma_int_t
magma_interp_2d(
    magma_int_t P, magma_int_t Q, magma_int_t ncomp,
    const CeedScalar *dT, CeedTransposeMode tmode,
    const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU,
    CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV,
    magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
    const magma_trans_t transT =
        (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
    return magma_interp_2d_ncomp_q_p(P, Q, ncomp,
                                     dT, transT,
                                     dU, estrdU, cstrdU,
                                     dV, estrdV, cstrdV,
                                     nelem, maxthreads, queue);
}
|
37b6110e0f1a4823940c6845accb0ac5673f4962.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Matrix multiply device code
#include <assert.h>
#include <math.h>
#include <algorithm>
#include "utils.h"
#include "types.h"
using namespace std;
#define BLOCK_SIZE 16
// Blocked matrix multiply C = A * B for square N x N row-major matrices.
// Launch contract: blockDim = (BLOCK_SIZE, BLOCK_SIZE); each thread block
// produces a (4*BLOCK_SIZE) x (4*BLOCK_SIZE) tile of C, so every thread
// accumulates a 4x4 group of outputs (Cij[0..15]).
// NOTE(review): the literal offsets 16/32/48 below assume BLOCK_SIZE == 16
// (as #defined above) and would silently break if BLOCK_SIZE changed.
__global__ void matMul(int N, _DOUBLE_ *C, _DOUBLE_ *A, _DOUBLE_ *B) {
    // Shared tiles: four stacked A row-tiles and four adjacent B column-tiles.
    __shared__ _DOUBLE_ As0[4*BLOCK_SIZE][BLOCK_SIZE], Bs0[BLOCK_SIZE][4*BLOCK_SIZE];
    int ty = threadIdx.y;
    int tx = threadIdx.x;
    // tile-group origin for this block (4 tiles per block in each dimension)
    int by0 = 4*blockIdx.y;
    int bx0 = 4*blockIdx.x;
    // the four C rows this thread contributes to
    int I0 = by0*BLOCK_SIZE+ty;
    int I1 = (by0+1)*BLOCK_SIZE+ty;
    int I2 = (by0+2)*BLOCK_SIZE+ty;
    int I3 = (by0+3)*BLOCK_SIZE+ty;
    // the four C columns this thread contributes to
    int J0 = bx0*BLOCK_SIZE+tx;
    int J1 = (bx0+1)*BLOCK_SIZE+tx;
    int J2 = (bx0+2)*BLOCK_SIZE+tx;
    int J3 = (bx0+3)*BLOCK_SIZE+tx;
    _DOUBLE_ Cij[16] = {0};  // 4x4 register accumulators
    // N & (N-1) == 0 iff N is a power of two: the grid then divides N exactly
    // and all bounds checks can be skipped.
    if (!(N&(N-1))) { // powers of 2
        int nBLOCK_SIZE = N/BLOCK_SIZE;
        #pragma unroll 4
        for (unsigned int kk=0;kk<nBLOCK_SIZE;++kk) {
            // stage this k-slab: 4 A tiles (rows) and 4 B tiles (columns)
            As0[ty][tx] = A[I0*N + kk*BLOCK_SIZE+tx];
            As0[ty+16][tx] = A[I1*N + kk*BLOCK_SIZE+tx];
            As0[ty+32][tx] = A[I2*N + kk*BLOCK_SIZE+tx];
            As0[ty+48][tx] = A[I3*N + kk*BLOCK_SIZE+tx];
            Bs0[ty][tx] = B[(kk*BLOCK_SIZE+ty)*N + J0];
            Bs0[ty][tx+16] = B[(kk*BLOCK_SIZE+ty)*N + J1];
            Bs0[ty][tx+32] = B[(kk*BLOCK_SIZE+ty)*N + J2];
            Bs0[ty][tx+48] = B[(kk*BLOCK_SIZE+ty)*N + J3];
            __syncthreads();  // tiles fully loaded before use
            #pragma unroll 16
            for (unsigned int k=0;k<BLOCK_SIZE;++k) {
                // 4x4 outer-product accumulation from the staged tiles
                Cij[0] += As0[ty][k] * Bs0[k][tx];
                Cij[1] += As0[ty][k] * Bs0[k][tx+16];
                Cij[2] += As0[ty][k] * Bs0[k][tx+32];
                Cij[3] += As0[ty][k] * Bs0[k][tx+48];
                Cij[4] += As0[ty+16][k] * Bs0[k][tx];
                Cij[5] += As0[ty+16][k] * Bs0[k][tx+16];
                Cij[6] += As0[ty+16][k] * Bs0[k][tx+32];
                Cij[7] += As0[ty+16][k] * Bs0[k][tx+48];
                Cij[8] += As0[ty+32][k] * Bs0[k][tx];
                Cij[9] += As0[ty+32][k] * Bs0[k][tx+16];
                Cij[10] += As0[ty+32][k] * Bs0[k][tx+32];
                Cij[11] += As0[ty+32][k] * Bs0[k][tx+48];
                Cij[12] += As0[ty+48][k] * Bs0[k][tx];
                Cij[13] += As0[ty+48][k] * Bs0[k][tx+16];
                Cij[14] += As0[ty+48][k] * Bs0[k][tx+32];
                Cij[15] += As0[ty+48][k] * Bs0[k][tx+48];
            }
            __syncthreads();  // finish reads before next slab overwrites tiles
        }
        // unguarded writeback: valid because the grid exactly tiles N
        C[I0*N+J0] = Cij[0];
        C[I0*N+J1] = Cij[1];
        C[I0*N+J2] = Cij[2];
        C[I0*N+J3] = Cij[3];
        C[I1*N+J0] = Cij[4];
        C[I1*N+J1] = Cij[5];
        C[I1*N+J2] = Cij[6];
        C[I1*N+J3] = Cij[7];
        C[I2*N+J0] = Cij[8];
        C[I2*N+J1] = Cij[9];
        C[I2*N+J2] = Cij[10];
        C[I2*N+J3] = Cij[11];
        C[I3*N+J0] = Cij[12];
        C[I3*N+J1] = Cij[13];
        C[I3*N+J2] = Cij[14];
        C[I3*N+J3] = Cij[15];
    }
    else { // non-powers of 2 (with boundary checks)
        // one extra slab; out-of-range loads are replaced by zeros below
        int nBLOCK_SIZE = (N/BLOCK_SIZE) + 1;
        #pragma unroll 4
        for (unsigned int kk=0;kk<nBLOCK_SIZE;++kk) {
            // zero-pad loads that fall outside the matrix
            As0[ty][tx] = (I0<N && (kk*BLOCK_SIZE+tx) < N) ? A[I0*N + kk*BLOCK_SIZE+tx] : 0;
            As0[ty+16][tx] = (I1<N && (kk*BLOCK_SIZE+tx) < N) ? A[I1*N + kk*BLOCK_SIZE+tx] : 0;
            As0[ty+32][tx] = (I2<N && (kk*BLOCK_SIZE+tx) < N) ? A[I2*N + kk*BLOCK_SIZE+tx] : 0;
            As0[ty+48][tx] = (I3<N && (kk*BLOCK_SIZE+tx) < N) ? A[I3*N + kk*BLOCK_SIZE+tx] : 0;
            Bs0[ty][tx] = ((kk*BLOCK_SIZE+ty) < N && J0<N) ? B[(kk*BLOCK_SIZE+ty)*N + J0] : 0;
            Bs0[ty][tx+16] = ((kk*BLOCK_SIZE+ty) < N && J1<N) ? B[(kk*BLOCK_SIZE+ty)*N + J1] : 0;
            Bs0[ty][tx+32] = ((kk*BLOCK_SIZE+ty) < N && J2<N) ? B[(kk*BLOCK_SIZE+ty)*N + J2] : 0;
            Bs0[ty][tx+48] = ((kk*BLOCK_SIZE+ty) < N && J3<N) ? B[(kk*BLOCK_SIZE+ty)*N + J3] : 0;
            __syncthreads();
            #pragma unroll 16
            for (unsigned int k=0;k<BLOCK_SIZE;++k) {
                Cij[0] += As0[ty][k] * Bs0[k][tx];
                Cij[1] += As0[ty][k] * Bs0[k][tx+16];
                Cij[2] += As0[ty][k] * Bs0[k][tx+32];
                Cij[3] += As0[ty][k] * Bs0[k][tx+48];
                Cij[4] += As0[ty+16][k] * Bs0[k][tx];
                Cij[5] += As0[ty+16][k] * Bs0[k][tx+16];
                Cij[6] += As0[ty+16][k] * Bs0[k][tx+32];
                Cij[7] += As0[ty+16][k] * Bs0[k][tx+48];
                Cij[8] += As0[ty+32][k] * Bs0[k][tx];
                Cij[9] += As0[ty+32][k] * Bs0[k][tx+16];
                Cij[10] += As0[ty+32][k] * Bs0[k][tx+32];
                Cij[11] += As0[ty+32][k] * Bs0[k][tx+48];
                Cij[12] += As0[ty+48][k] * Bs0[k][tx];
                Cij[13] += As0[ty+48][k] * Bs0[k][tx+16];
                Cij[14] += As0[ty+48][k] * Bs0[k][tx+32];
                Cij[15] += As0[ty+48][k] * Bs0[k][tx+48];
            }
            __syncthreads();
        }
        // guarded writeback: only in-bounds outputs are stored
        if(I0<N && J0<N)
            C[I0*N+J0] = Cij[0];
        if(I0<N && J1<N)
            C[I0*N+J1] = Cij[1];
        if(I0<N && J2<N)
            C[I0*N+J2] = Cij[2];
        if(I0<N && J3<N)
            C[I0*N+J3] = Cij[3];
        if(I1<N && J0<N)
            C[I1*N+J0] = Cij[4];
        if(I1<N && J1<N)
            C[I1*N+J1] = Cij[5];
        if(I1<N && J2<N)
            C[I1*N+J2] = Cij[6];
        if(I1<N && J3<N)
            C[I1*N+J3] = Cij[7];
        if(I2<N && J0<N)
            C[I2*N+J0] = Cij[8];
        if(I2<N && J1<N)
            C[I2*N+J1] = Cij[9];
        if(I2<N && J2<N)
            C[I2*N+J2] = Cij[10];
        if(I2<N && J3<N)
            C[I2*N+J3] = Cij[11];
        if(I3<N && J0<N)
            C[I3*N+J0] = Cij[12];
        if(I3<N && J1<N)
            C[I3*N+J1] = Cij[13];
        if(I3<N && J2<N)
            C[I3*N+J2] = Cij[14];
        if(I3<N && J3<N)
            C[I3*N+J3] = Cij[15];
    }
}
| 37b6110e0f1a4823940c6845accb0ac5673f4962.cu | // Matrix multiply device code
#include <assert.h>
#include <math.h>
#include <algorithm>
#include "utils.h"
#include "types.h"
using namespace std;
#define BLOCK_SIZE 16
// Blocked matrix multiply C = A * B for square N x N row-major matrices.
// Launch contract: blockDim = (BLOCK_SIZE, BLOCK_SIZE); each thread block
// produces a (4*BLOCK_SIZE) x (4*BLOCK_SIZE) tile of C, so every thread
// accumulates a 4x4 group of outputs (Cij[0..15]).
// NOTE(review): the literal offsets 16/32/48 below assume BLOCK_SIZE == 16
// (as #defined above) and would silently break if BLOCK_SIZE changed.
__global__ void matMul(int N, _DOUBLE_ *C, _DOUBLE_ *A, _DOUBLE_ *B) {
    // Shared tiles: four stacked A row-tiles and four adjacent B column-tiles.
    __shared__ _DOUBLE_ As0[4*BLOCK_SIZE][BLOCK_SIZE], Bs0[BLOCK_SIZE][4*BLOCK_SIZE];
    int ty = threadIdx.y;
    int tx = threadIdx.x;
    // tile-group origin for this block (4 tiles per block in each dimension)
    int by0 = 4*blockIdx.y;
    int bx0 = 4*blockIdx.x;
    // the four C rows this thread contributes to
    int I0 = by0*BLOCK_SIZE+ty;
    int I1 = (by0+1)*BLOCK_SIZE+ty;
    int I2 = (by0+2)*BLOCK_SIZE+ty;
    int I3 = (by0+3)*BLOCK_SIZE+ty;
    // the four C columns this thread contributes to
    int J0 = bx0*BLOCK_SIZE+tx;
    int J1 = (bx0+1)*BLOCK_SIZE+tx;
    int J2 = (bx0+2)*BLOCK_SIZE+tx;
    int J3 = (bx0+3)*BLOCK_SIZE+tx;
    _DOUBLE_ Cij[16] = {0};  // 4x4 register accumulators
    // N & (N-1) == 0 iff N is a power of two: the grid then divides N exactly
    // and all bounds checks can be skipped.
    if (!(N&(N-1))) { // powers of 2
        int nBLOCK_SIZE = N/BLOCK_SIZE;
        #pragma unroll 4
        for (unsigned int kk=0;kk<nBLOCK_SIZE;++kk) {
            // stage this k-slab: 4 A tiles (rows) and 4 B tiles (columns)
            As0[ty][tx] = A[I0*N + kk*BLOCK_SIZE+tx];
            As0[ty+16][tx] = A[I1*N + kk*BLOCK_SIZE+tx];
            As0[ty+32][tx] = A[I2*N + kk*BLOCK_SIZE+tx];
            As0[ty+48][tx] = A[I3*N + kk*BLOCK_SIZE+tx];
            Bs0[ty][tx] = B[(kk*BLOCK_SIZE+ty)*N + J0];
            Bs0[ty][tx+16] = B[(kk*BLOCK_SIZE+ty)*N + J1];
            Bs0[ty][tx+32] = B[(kk*BLOCK_SIZE+ty)*N + J2];
            Bs0[ty][tx+48] = B[(kk*BLOCK_SIZE+ty)*N + J3];
            __syncthreads();  // tiles fully loaded before use
            #pragma unroll 16
            for (unsigned int k=0;k<BLOCK_SIZE;++k) {
                // 4x4 outer-product accumulation from the staged tiles
                Cij[0] += As0[ty][k] * Bs0[k][tx];
                Cij[1] += As0[ty][k] * Bs0[k][tx+16];
                Cij[2] += As0[ty][k] * Bs0[k][tx+32];
                Cij[3] += As0[ty][k] * Bs0[k][tx+48];
                Cij[4] += As0[ty+16][k] * Bs0[k][tx];
                Cij[5] += As0[ty+16][k] * Bs0[k][tx+16];
                Cij[6] += As0[ty+16][k] * Bs0[k][tx+32];
                Cij[7] += As0[ty+16][k] * Bs0[k][tx+48];
                Cij[8] += As0[ty+32][k] * Bs0[k][tx];
                Cij[9] += As0[ty+32][k] * Bs0[k][tx+16];
                Cij[10] += As0[ty+32][k] * Bs0[k][tx+32];
                Cij[11] += As0[ty+32][k] * Bs0[k][tx+48];
                Cij[12] += As0[ty+48][k] * Bs0[k][tx];
                Cij[13] += As0[ty+48][k] * Bs0[k][tx+16];
                Cij[14] += As0[ty+48][k] * Bs0[k][tx+32];
                Cij[15] += As0[ty+48][k] * Bs0[k][tx+48];
            }
            __syncthreads();  // finish reads before next slab overwrites tiles
        }
        // unguarded writeback: valid because the grid exactly tiles N
        C[I0*N+J0] = Cij[0];
        C[I0*N+J1] = Cij[1];
        C[I0*N+J2] = Cij[2];
        C[I0*N+J3] = Cij[3];
        C[I1*N+J0] = Cij[4];
        C[I1*N+J1] = Cij[5];
        C[I1*N+J2] = Cij[6];
        C[I1*N+J3] = Cij[7];
        C[I2*N+J0] = Cij[8];
        C[I2*N+J1] = Cij[9];
        C[I2*N+J2] = Cij[10];
        C[I2*N+J3] = Cij[11];
        C[I3*N+J0] = Cij[12];
        C[I3*N+J1] = Cij[13];
        C[I3*N+J2] = Cij[14];
        C[I3*N+J3] = Cij[15];
    }
    else { // non-powers of 2 (with boundary checks)
        // one extra slab; out-of-range loads are replaced by zeros below
        int nBLOCK_SIZE = (N/BLOCK_SIZE) + 1;
        #pragma unroll 4
        for (unsigned int kk=0;kk<nBLOCK_SIZE;++kk) {
            // zero-pad loads that fall outside the matrix
            As0[ty][tx] = (I0<N && (kk*BLOCK_SIZE+tx) < N) ? A[I0*N + kk*BLOCK_SIZE+tx] : 0;
            As0[ty+16][tx] = (I1<N && (kk*BLOCK_SIZE+tx) < N) ? A[I1*N + kk*BLOCK_SIZE+tx] : 0;
            As0[ty+32][tx] = (I2<N && (kk*BLOCK_SIZE+tx) < N) ? A[I2*N + kk*BLOCK_SIZE+tx] : 0;
            As0[ty+48][tx] = (I3<N && (kk*BLOCK_SIZE+tx) < N) ? A[I3*N + kk*BLOCK_SIZE+tx] : 0;
            Bs0[ty][tx] = ((kk*BLOCK_SIZE+ty) < N && J0<N) ? B[(kk*BLOCK_SIZE+ty)*N + J0] : 0;
            Bs0[ty][tx+16] = ((kk*BLOCK_SIZE+ty) < N && J1<N) ? B[(kk*BLOCK_SIZE+ty)*N + J1] : 0;
            Bs0[ty][tx+32] = ((kk*BLOCK_SIZE+ty) < N && J2<N) ? B[(kk*BLOCK_SIZE+ty)*N + J2] : 0;
            Bs0[ty][tx+48] = ((kk*BLOCK_SIZE+ty) < N && J3<N) ? B[(kk*BLOCK_SIZE+ty)*N + J3] : 0;
            __syncthreads();
            #pragma unroll 16
            for (unsigned int k=0;k<BLOCK_SIZE;++k) {
                Cij[0] += As0[ty][k] * Bs0[k][tx];
                Cij[1] += As0[ty][k] * Bs0[k][tx+16];
                Cij[2] += As0[ty][k] * Bs0[k][tx+32];
                Cij[3] += As0[ty][k] * Bs0[k][tx+48];
                Cij[4] += As0[ty+16][k] * Bs0[k][tx];
                Cij[5] += As0[ty+16][k] * Bs0[k][tx+16];
                Cij[6] += As0[ty+16][k] * Bs0[k][tx+32];
                Cij[7] += As0[ty+16][k] * Bs0[k][tx+48];
                Cij[8] += As0[ty+32][k] * Bs0[k][tx];
                Cij[9] += As0[ty+32][k] * Bs0[k][tx+16];
                Cij[10] += As0[ty+32][k] * Bs0[k][tx+32];
                Cij[11] += As0[ty+32][k] * Bs0[k][tx+48];
                Cij[12] += As0[ty+48][k] * Bs0[k][tx];
                Cij[13] += As0[ty+48][k] * Bs0[k][tx+16];
                Cij[14] += As0[ty+48][k] * Bs0[k][tx+32];
                Cij[15] += As0[ty+48][k] * Bs0[k][tx+48];
            }
            __syncthreads();
        }
        // guarded writeback: only in-bounds outputs are stored
        if(I0<N && J0<N)
            C[I0*N+J0] = Cij[0];
        if(I0<N && J1<N)
            C[I0*N+J1] = Cij[1];
        if(I0<N && J2<N)
            C[I0*N+J2] = Cij[2];
        if(I0<N && J3<N)
            C[I0*N+J3] = Cij[3];
        if(I1<N && J0<N)
            C[I1*N+J0] = Cij[4];
        if(I1<N && J1<N)
            C[I1*N+J1] = Cij[5];
        if(I1<N && J2<N)
            C[I1*N+J2] = Cij[6];
        if(I1<N && J3<N)
            C[I1*N+J3] = Cij[7];
        if(I2<N && J0<N)
            C[I2*N+J0] = Cij[8];
        if(I2<N && J1<N)
            C[I2*N+J1] = Cij[9];
        if(I2<N && J2<N)
            C[I2*N+J2] = Cij[10];
        if(I2<N && J3<N)
            C[I2*N+J3] = Cij[11];
        if(I3<N && J0<N)
            C[I3*N+J0] = Cij[12];
        if(I3<N && J1<N)
            C[I3*N+J1] = Cij[13];
        if(I3<N && J2<N)
            C[I3*N+J2] = Cij[14];
        if(I3<N && J3<N)
            C[I3*N+J3] = Cij[15];
    }
}
|
7e202a9d27862da1fa82c162b249beb5d0e0f10c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
/* Round x up to the nearest power of two. Returns x unchanged when it is
   already a power of two; note that x == 0 wraps around and yields 0. */
unsigned int nextPow2( unsigned int x ) {
  unsigned int v = x - 1u;
  /* smear the highest set bit into every lower position */
  for (unsigned int shift = 1u; shift <= 16u; shift <<= 1) {
    v |= v >> shift;
  }
  return v + 1u;
}
/* find out # of threads and # thread blocks for a particular kernel */
/* Choose launch dimensions for reduction kernel `whichKernel` over n items.
   Kernels 0-2 assign one thread per element; kernels >= 3 assign one thread
   per two elements. Kernel 5 additionally caps the total block count at
   maxBlocks. Results are written through the `blocks`/`threads` refs. */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
  if (whichKernel < 3) {
    /* one thread per element */
    threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
    blocks  = (n + threads - 1) / threads;
  } else {
    /* one thread per pair of elements */
    threads = (n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads;
    blocks  = (n + threads * 2 - 1) / (threads * 2);
  }
  /* kernel 5 limits the grid and lets each thread loop over the remainder */
  if (whichKernel == 5 && blocks > maxBlocks) {
    blocks = maxBlocks;
  }
}
/* special type of reduction to account for floating point error */
/* Kahan-compensated serial sum of data[0..n-1]; the compensation term keeps
   low-order bits that a naive float accumulation would lose. Requires n >= 1. */
dtype reduce_cpu(dtype *data, int n) {
  dtype acc  = data[0];
  dtype comp = (dtype)0.0;           /* running compensation */
  for (int idx = 1; idx < n; idx++) {
    dtype adjusted = data[idx] - comp;
    dtype next     = acc + adjusted;
    comp = (next - acc) - adjusted;  /* recovers what the add just dropped */
    acc  = next;
  }
  return acc;
}
// Block-level sum reduction: each block accumulates its grid-strided slice of
// g_idata into shared memory, tree-reduces it, and writes one partial sum to
// g_odata[bid]. Requires blockDim.x <= MAX_THREADS.
// NOTE(review): the tail below 64 threads uses a volatile pointer with no
// barriers, i.e. it relies on pre-Volta implicit warp-synchronous execution;
// on CC 7.0+ this needs __syncwarp() -- verify on the target hardware.
// NOTE(review): the blockDim.x < 64 branches read scratch entries beyond
// blockDim.x that were never initialized (e.g. scratch[32..47] when
// blockDim.x == 32) -- looks like a latent bug for small launches; confirm.
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
  // linear block id over a possibly 2-D grid
  unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
  unsigned int i = bid * blockDim.x + threadIdx.x;
  unsigned int numThread = blockDim.x * gridDim.x;  // grid-stride step
  __shared__ dtype scratch[MAX_THREADS];
  volatile dtype *wScratch = scratch;  // volatile view for the warp tail
  // first load; zero-fill when this thread has no element
  if (i < n){
    scratch[threadIdx.x] = g_idata[i];
  }
  else{
    scratch[threadIdx.x] = 0.0;
  }
  i += numThread;
  // grid-stride accumulation of any remaining elements
  while (i < n){
    scratch[threadIdx.x] += g_idata[i];
    i += numThread;
  }
  if (blockDim.x >= 64){
    __syncthreads ();
    // tree reduction down to 64 entries, synchronizing each level
    for(unsigned int s = blockDim.x >> 1 ; s > 32; s = s >> 1) {
      if(threadIdx.x < s){
        scratch[threadIdx.x] += scratch[threadIdx.x + s];
      }
      __syncthreads ();
    }
    // last warp finishes without barriers (warp-synchronous assumption)
    if(threadIdx.x < 32){
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 32];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 16];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 8];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
    }
  }
  else if (blockDim.x >= 32){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 16];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 8];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else if (blockDim.x >= 16){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 8];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else if (blockDim.x >= 8){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else if (blockDim.x >= 4){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else {
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  // one partial sum per block
  if(threadIdx.x == 0) {
    g_odata[bid] = wScratch[0];
  }
}
// Benchmark driver (HIP): fills N random floats, reduces them on the GPU with
// kernel5 (multi-pass until one value remains), times it against the Kahan
// CPU reference, and prints bandwidth plus a SUCCESS/FAILURE verdict.
// Optional argv[1] overrides the element count N (default N_).
// NOTE(review): the final pass launches kernel5(d_odata, d_odata, s) in
// place -- blocks read entries other blocks may be overwriting; presumably
// benign for this grid shape, but verify.
// NOTE(review): abs() on a float difference may resolve to the integer
// overload (only stdlib.h is included), truncating sub-1.0 differences to
// zero -- fabs() would be safer; confirm intent.
int
main(int argc, char** argv)
{
  int i;
  /* data structure: h_odata/h_cpu are scalars (final sums), not arrays */
  dtype *h_idata, h_odata, h_cpu;
  dtype *d_idata, *d_odata;
  /* timer */
  struct stopwatch_t* timer = NULL;
  long double t_kernel_5, t_cpu;
  /* which kernel are we running */
  int whichKernel;
  /* number of threads and thread blocks */
  int threads, blocks;
  int N;
  if(argc > 1) {
    N = atoi (argv[1]);
    printf("N: %d\n", N);
  } else {
    N = N_;
    printf("N: %d\n", N);
  }
  /* kernel 5: multiple adds per thread, capped block count */
  whichKernel = 5;
  getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
                          blocks, threads);
  /* initialize timer */
  stopwatch_init ();
  timer = stopwatch_create ();
  /* allocate memory: one partial sum per block on the device */
  h_idata = (dtype*) malloc (N * sizeof (dtype));
  CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
  CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
  /* Initialize array with small values to limit accumulation error */
  srand48(time(NULL));
  for(i = 0; i < N; i++) {
    h_idata[i] = drand48() / 100000;
  }
  CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
                               hipMemcpyHostToDevice));
  /* ================================================== */
  /* GPU kernel */
  dim3 gb(blocks, 1, 1);
  dim3 tb(threads, 1, 1);
  //printf("threads:%d blocks:%d", threads, blocks);
  /* warm up (excluded from timing) */
  hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
  hipDeviceSynchronize ();
  stopwatch_start (timer);
  /* execute kernel: first pass over the input ... */
  hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
  int s = blocks;
  /* ... then keep reducing the partial sums until one value remains */
  while(s > 1) {
    threads = 0;
    blocks = 0;
    getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
                            blocks, threads);
    //printf("threads:%d blocks:%d", threads, blocks);
    dim3 gb(blocks, 1, 1);
    dim3 tb(threads, 1, 1);
    hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
    s = (s + threads * 2 - 1) / (threads * 2);
  }
  hipDeviceSynchronize ();
  t_kernel_5 = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
  double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
  fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
  /* copy result back from GPU (only the first element holds the total) */
  CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
                               hipMemcpyDeviceToHost));
  /* ================================================== */
  /* ================================================== */
  /* CPU kernel (Kahan-compensated reference) */
  stopwatch_start (timer);
  h_cpu = reduce_cpu (h_idata, N);
  t_cpu = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
           t_cpu);
  /* ================================================== */
  if(abs (h_odata - h_cpu) > 1e-5) {
    fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
  } else {
    printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
  }
  return 0;
}
| 7e202a9d27862da1fa82c162b249beb5d0e0f10c.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
/* Smallest power of two that is >= x (x itself when already a power of two;
   x == 0 wraps and yields 0). */
unsigned int nextPow2( unsigned int x ) {
  unsigned int r = x - 1u;
  unsigned int s = 1u;
  /* propagate the top set bit down through all 32 positions */
  while (s < 32u) {
    r |= r >> s;
    s *= 2u;
  }
  return r + 1u;
}
/* find out # of threads and # thread blocks for a particular kernel */
/* Compute grid/block dimensions for reduction kernel `whichKernel` over n
   items: kernels >= 3 cover two elements per thread, earlier kernels one.
   Kernel 5 also caps the grid at maxBlocks. Outputs via blocks/threads. */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
  if (whichKernel >= 3) {
    /* each thread covers two elements */
    threads = (n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads;
    blocks  = (n + threads * 2 - 1) / (threads * 2);
  } else {
    /* each thread covers one element */
    threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
    blocks  = (n + threads - 1) / threads;
  }
  /* kernel 5 bounds the grid; its threads loop over the extra work */
  if (whichKernel == 5 && blocks > maxBlocks) {
    blocks = maxBlocks;
  }
}
/* special type of reduction to account for floating point error */
/* Serial Kahan summation of data[0..n-1]; compensates for float roundoff so
   the host reference stays accurate. Requires n >= 1. */
dtype reduce_cpu(dtype *data, int n) {
  dtype total = data[0];
  dtype carry = (dtype)0.0;             /* lost low-order bits so far */
  for (int k = 1; k < n; k++) {
    dtype corrected = data[k] - carry;
    dtype updated   = total + corrected;
    carry = (updated - total) - corrected;
    total = updated;
  }
  return total;
}
// Block-level sum reduction: each block accumulates its grid-strided slice of
// g_idata into shared memory, tree-reduces it, and writes one partial sum to
// g_odata[bid]. Requires blockDim.x <= MAX_THREADS.
// NOTE(review): the tail below 64 threads uses a volatile pointer with no
// barriers, i.e. it relies on pre-Volta implicit warp-synchronous execution;
// on CC 7.0+ this needs __syncwarp() -- verify on the target hardware.
// NOTE(review): the blockDim.x < 64 branches read scratch entries beyond
// blockDim.x that were never initialized (e.g. scratch[32..47] when
// blockDim.x == 32) -- looks like a latent bug for small launches; confirm.
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
  // linear block id over a possibly 2-D grid
  unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
  unsigned int i = bid * blockDim.x + threadIdx.x;
  unsigned int numThread = blockDim.x * gridDim.x;  // grid-stride step
  __shared__ dtype scratch[MAX_THREADS];
  volatile dtype *wScratch = scratch;  // volatile view for the warp tail
  // first load; zero-fill when this thread has no element
  if (i < n){
    scratch[threadIdx.x] = g_idata[i];
  }
  else{
    scratch[threadIdx.x] = 0.0;
  }
  i += numThread;
  // grid-stride accumulation of any remaining elements
  while (i < n){
    scratch[threadIdx.x] += g_idata[i];
    i += numThread;
  }
  if (blockDim.x >= 64){
    __syncthreads ();
    // tree reduction down to 64 entries, synchronizing each level
    for(unsigned int s = blockDim.x >> 1 ; s > 32; s = s >> 1) {
      if(threadIdx.x < s){
        scratch[threadIdx.x] += scratch[threadIdx.x + s];
      }
      __syncthreads ();
    }
    // last warp finishes without barriers (warp-synchronous assumption)
    if(threadIdx.x < 32){
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 32];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 16];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 8];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
      wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
    }
  }
  else if (blockDim.x >= 32){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 16];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 8];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else if (blockDim.x >= 16){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 8];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else if (blockDim.x >= 8){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 4];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else if (blockDim.x >= 4){
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 2];
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  else {
    wScratch[threadIdx.x] += wScratch[threadIdx.x + 1];
  }
  // one partial sum per block
  if(threadIdx.x == 0) {
    g_odata[bid] = wScratch[0];
  }
}
// Benchmark driver (CUDA): fills N random floats, reduces them on the GPU
// with kernel5 (multi-pass until one value remains), times it against the
// Kahan CPU reference, and prints bandwidth plus a SUCCESS/FAILURE verdict.
// Optional argv[1] overrides the element count N (default N_).
// Fixes vs. previous revision:
//  - cudaThreadSynchronize() is deprecated (removed in recent toolkits);
//    replaced with cudaDeviceSynchronize().
//  - the verdict used abs() on a float difference, which resolves to the
//    integer overload (only stdlib.h is included) and truncates sub-1.0
//    differences to zero; replaced with an explicit float absolute value.
//  - launch errors are now surfaced via cudaGetLastError(), and host/device
//    allocations are released before exit.
int
main(int argc, char** argv)
{
  int i;
  /* data structure: h_odata/h_cpu are scalars (final sums), not arrays */
  dtype *h_idata, h_odata, h_cpu;
  dtype *d_idata, *d_odata;
  /* timer */
  struct stopwatch_t* timer = NULL;
  long double t_kernel_5, t_cpu;
  /* which kernel are we running */
  int whichKernel;
  /* number of threads and thread blocks */
  int threads, blocks;
  int N;
  if(argc > 1) {
    N = atoi (argv[1]);
    printf("N: %d\n", N);
  } else {
    N = N_;
    printf("N: %d\n", N);
  }
  /* kernel 5: multiple adds per thread, capped block count */
  whichKernel = 5;
  getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
                          blocks, threads);
  /* initialize timer */
  stopwatch_init ();
  timer = stopwatch_create ();
  /* allocate memory: one partial sum per block on the device */
  h_idata = (dtype*) malloc (N * sizeof (dtype));
  CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
  CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
  /* Initialize array with small values to limit accumulation error */
  srand48(time(NULL));
  for(i = 0; i < N; i++) {
    h_idata[i] = drand48() / 100000;
  }
  CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
                                cudaMemcpyHostToDevice));
  /* ================================================== */
  /* GPU kernel */
  dim3 gb(blocks, 1, 1);
  dim3 tb(threads, 1, 1);
  /* warm up (excluded from timing) */
  kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
  CUDA_CHECK_ERROR (cudaGetLastError ());
  CUDA_CHECK_ERROR (cudaDeviceSynchronize ());
  stopwatch_start (timer);
  /* execute kernel: first pass over the input ... */
  kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
  int s = blocks;
  /* ... then keep reducing the partial sums until one value remains */
  while(s > 1) {
    threads = 0;
    blocks = 0;
    getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
                            blocks, threads);
    dim3 gb(blocks, 1, 1);
    dim3 tb(threads, 1, 1);
    kernel5 <<<gb, tb>>> (d_odata, d_odata, s);
    s = (s + threads * 2 - 1) / (threads * 2);
  }
  CUDA_CHECK_ERROR (cudaGetLastError ());
  CUDA_CHECK_ERROR (cudaDeviceSynchronize ());
  t_kernel_5 = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
  double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
  fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
  /* copy result back from GPU (only the first element holds the total) */
  CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
                                cudaMemcpyDeviceToHost));
  /* ================================================== */
  /* ================================================== */
  /* CPU kernel (Kahan-compensated reference) */
  stopwatch_start (timer);
  h_cpu = reduce_cpu (h_idata, N);
  t_cpu = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
           t_cpu);
  /* ================================================== */
  /* float-safe absolute difference (avoid the integer abs() overload) */
  dtype diff = h_odata - h_cpu;
  if (diff < 0) diff = -diff;
  if(diff > 1e-5) {
    fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
  } else {
    printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
  }
  /* release resources */
  CUDA_CHECK_ERROR (cudaFree (d_idata));
  CUDA_CHECK_ERROR (cudaFree (d_odata));
  free (h_idata);
  return 0;
}
|
440b04c6289e66e2d211cb013b9f61ef7e6455b5.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file dct2_fft2_cuda_kernel.cu
* @author Zixuan Jiang, Jiaqi Gu
* @date Apr 2019
* @brief Refernece: Byeong Lee, "A new algorithm to compute the discrete cosine Transform,"
* in IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 32, no. 6, pp. 1243-1245, December 1984.
* The preprocess and postprocess of 2d dct and 2d idct are discussed in the original paper.
* idct(idxst(x)) and idxst(idct(x)) are similar to the idct2d(x),
* except tiny modifications on preprocessing and postprocessing
*/
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "utility/src/Msg.h"
#include "utility/src/ComplexNumber.cuh"
#define TPB (16)
DREAMPLACE_BEGIN_NAMESPACE
// Row-major flattened offset of element (hid, wid) in a matrix whose rows
// hold N entries.
inline __device__ int INDEX(const int hid, const int wid, const int N)
{
    return wid + hid * N;
}
// dct2_fft2
// Input permutation for the 2-D DCT-via-FFT path (Lee's algorithm, see the
// file header): each thread copies one entry x[hid][wid] of the M x N input
// into a permuted slot of y, chosen by the parities of hid and wid. The
// destination is addressed with row stride halfN (the caller passes N/2);
// the buffer holds the same M*N elements overall.
// Launch contract: 2-D grid/blocks covering at least M x N threads.
template <typename T>
__global__ void dct2dPreprocess(const T *x, T *y, const int M, const int N, const int halfN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N)
    {
        int index;
        // bit1 = row is even, bit0 = column is even
        int cond = (((hid & 1) == 0) << 1) | ((wid & 1) == 0);
        switch (cond)
        {
        case 0:
            // odd row, odd column
            index = INDEX(2 * M - (hid + 1), N - (wid + 1) / 2, halfN);
            break;
        case 1:
            // odd row, even column
            index = INDEX(2 * M - (hid + 1), wid / 2, halfN);
            break;
        case 2:
            // even row, odd column
            index = INDEX(hid, N - (wid + 1) / 2, halfN);
            break;
        case 3:
            // even row, even column
            index = INDEX(hid, wid / 2, halfN);
            break;
        default:
            break;
        }
        y[index] = x[INDEX(hid, wid, N)];
    }
}
// Host launcher for dct2dPreprocess: one thread per entry of the M x N input,
// organized as TPB x TPB blocks on the default stream (no dynamic shared
// memory). Passes halfN = N / 2 through to the kernel.
template <typename T>
void dct2dPreprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 block(TPB, TPB, 1);
    const dim3 grid((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((dct2dPreprocess<T>), grid, block, 0, 0, x, y, M, N, N / 2);
}
template <typename T, typename TComplex>
// Combine the half-spectrum V (complex output of a 2D real FFT, row stride
// halfN + 1) with the twiddle factors expkM / expkN to produce the M x N real
// DCT coefficients y. Each thread of the halfM x halfN grid writes the outputs
// of its butterfly; spectra on the hid == 0 / wid == 0 axes have no mirror
// partner and use the dedicated formulas below. Scale factors are precomputed
// on the host as 2/(M*N) and 4/(M*N).
__global__ void __launch_bounds__(TPB * TPB, 8) dct2dPostprocess(const TComplex *V, T *y, const int M, const int N,
                                                                 const int halfM, const int halfN, const T two_over_MN, const T four_over_MN,
                                                                 const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit 1: hid != 0; bit 0: wid != 0
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four axis-crossing points
        {
            y[0] = V[0].x * four_over_MN;
            y[halfN] = RealPartOfMul(expkN[halfN], V[halfN]) * four_over_MN;
            y[INDEX(halfM, 0, N)] = expkM[halfM].x * V[INDEX(halfM, 0, halfN + 1)].x * four_over_MN;
            y[INDEX(halfM, halfN, N)] = expkM[halfM].x * RealPartOfMul(expkN[halfN], V[INDEX(halfM, halfN, halfN + 1)]) * four_over_MN;
            break;
        }
        case 1: // hid == 0: rows 0 and halfM of the spectrum
        {
            ComplexType<T> tmp;
            tmp = V[wid];
            y[wid] = RealPartOfMul(expkN[wid], tmp) * four_over_MN;
            y[N - wid] = -ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
            tmp = V[INDEX(halfM, wid, halfN + 1)];
            y[INDEX(halfM, wid, N)] = expkM[halfM].x * RealPartOfMul(expkN[wid], tmp) * four_over_MN;
            y[INDEX(halfM, N - wid, N)] = -expkM[halfM].x * ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
            break;
        }
        case 2: // wid == 0: columns 0 and halfN of the spectrum
        {
            ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
            tmp1 = V[INDEX(hid, 0, halfN + 1)];
            tmp2 = V[INDEX(M - hid, 0, halfN + 1)];
            tmp_up.x = expkM[hid].x * (tmp1.x + tmp2.x) + expkM[hid].y * (tmp2.y - tmp1.y);
            tmp_down.x = -expkM[hid].y * (tmp1.x + tmp2.x) + expkM[hid].x * (tmp2.y - tmp1.y);
            y[INDEX(hid, 0, N)] = tmp_up.x * two_over_MN;
            y[INDEX(M - hid, 0, N)] = tmp_down.x * two_over_MN;
            tmp1 = complexAdd(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
            tmp2 = complexSubtract(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
            tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
            tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
            tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
            tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
            y[INDEX(hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_up) * two_over_MN;
            y[INDEX(M - hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_down) * two_over_MN;
            break;
        }
        case 3: // interior points: full 4-way butterfly with rows hid and M - hid
        {
            ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
            tmp1 = complexAdd(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
            tmp2 = complexSubtract(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
            tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
            tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
            tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
            tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
            y[INDEX(hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_up) * two_over_MN;
            y[INDEX(M - hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_down) * two_over_MN;
            y[INDEX(hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_up) * two_over_MN;
            y[INDEX(M - hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_down) * two_over_MN;
            break;
        }
        default:
            assert(0);
            break;
        }
    }
}
template <typename T>
// Launch dct2dPostprocess over the halfM x halfN butterfly grid; x is
// reinterpreted as the complex FFT output, scale factors precomputed here.
void dct2dPostprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                  const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const int halfM = M / 2;
    const int halfN = N / 2;
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((halfN + TPB - 1) / TPB, (halfM + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((dct2dPostprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       (ComplexType<T> *)x, y, M, N, halfM, halfN,
                       (T)(2. / (M * N)), (T)(4. / (M * N)),
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// idct2_fft2
template <typename T, typename TComplex>
// Build the conjugated half-spectrum (row stride halfN + 1) consumed by the
// inverse 2D real FFT from the M x N real input (DCT coefficients). Each
// entry is multiplied by the conjugate of the twiddle factors expkM / expkN.
// Boundary rows/columns (hid == 0, wid == 0) lack mirror partners and use
// the dedicated formulas below.
__global__ void __launch_bounds__(TPB * TPB, 8) idct2_fft2Preprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit 1: hid != 0; bit 0: wid != 0
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four axis-crossing points
        {
            T tmp1;
            TComplex tmp_up;
            output[0].x = input[0];
            output[0].y = 0;
            tmp1 = input[halfN];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
            tmp1 = input[INDEX(halfM, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0: rows 0 and halfM
        {
            TComplex tmp_up;
            tmp_up.x = input[wid];
            tmp_up.y = input[N - wid];
            output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
            T tmp1 = input[INDEX(halfM, wid, N)];
            T tmp2 = input[INDEX(halfM, N - wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0: columns 0 and halfN
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            tmp1 = input[INDEX(hid, 0, N)];
            tmp3 = input[INDEX(M - hid, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp3;
            tmp_down.x = tmp3;
            tmp_down.y = tmp1;
            output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
            output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
            tmp1 = input[INDEX(hid, halfN, N)];
            tmp3 = input[INDEX(M - hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior points: combine the four mirrored input samples
        {
            T tmp1 = input[INDEX(hid, wid, N)];
            T tmp2 = input[INDEX(hid, N - wid, N)];
            T tmp3 = input[INDEX(M - hid, wid, N)];
            T tmp4 = input[INDEX(M - hid, N - wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default:
            assert(0);
            break;
        }
    }
}
template <typename T>
// Launch idct2_fft2Preprocess over the (M/2) x (N/2) butterfly grid; y is
// reinterpreted as the complex half-spectrum buffer.
void idct2_fft2PreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((idct2_fft2Preprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
template <typename T>
// Undo the pre-FFT permutation after the inverse transform and rescale by MN:
// even output rows/columns come from the top/left halves of x, odd ones from
// the mirrored bottom/right halves.
__global__ void idct2_fft2Postprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N)
    {
        const int row = (hid < M / 2) ? (hid << 1) : (((M - hid) << 1) - 1);
        const int col = (wid < N / 2) ? (wid << 1) : (((N - wid) << 1) - 1);
        y[INDEX(row, col, N)] = x[INDEX(hid, wid, N)] * MN;
    }
}
template <typename T>
// Launch idct2_fft2Postprocess with one thread per element of the M x N output.
void idct2_fft2PostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((idct2_fft2Postprocess<T>), blocks, threads, 0, 0, x, y, M, N, N / 2, M * N);
}
// idct_idxst
// Adapted from idct2d_preprocess(). The only change is the reordered input
// if (wid != 0)
// new_input[hid][wid] = input[hid][N - wid];
// else
// new_input[hid][0] = 0
template <typename T, typename TComplex>
// Same half-spectrum construction as idct2_fft2Preprocess, but with the input
// columns implicitly reordered as input[hid][N - wid] and column 0 treated as
// zero (see the comment above this kernel), so that idct(idxst(x)) can reuse
// the idct2 FFT path.
__global__ void __launch_bounds__(TPB * TPB, 8) idct_idxstPreprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit 1: hid != 0; bit 0: wid != 0
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: column-0 entries are zeroed
        {
            T tmp1;
            TComplex tmp_up;
            output[0].x = 0;
            output[0].y = 0;
            tmp1 = input[halfN];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
            output[INDEX(halfM, 0, halfN + 1)].x = 0;
            output[INDEX(halfM, 0, halfN + 1)].y = 0;
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0: rows 0 and halfM, columns read mirrored
        {
            TComplex tmp_up;
            tmp_up.x = input[N - wid];
            tmp_up.y = input[wid];
            output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
            T tmp1 = input[INDEX(halfM, N - wid, N)];
            T tmp2 = input[INDEX(halfM, wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0: zeroed column 0, column halfN as in idct2
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            output[INDEX(hid, 0, halfN + 1)].x = 0;
            output[INDEX(hid, 0, halfN + 1)].y = 0;
            output[INDEX(M - hid, 0, halfN + 1)].x = 0;
            output[INDEX(M - hid, 0, halfN + 1)].y = 0;
            tmp1 = input[INDEX(hid, halfN, N)];
            tmp3 = input[INDEX(M - hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior points: four mirrored samples, columns swapped vs idct2
        {
            T tmp1 = input[INDEX(hid, N - wid, N)];
            T tmp2 = input[INDEX(hid, wid, N)];
            T tmp3 = input[INDEX(M - hid, N - wid, N)];
            T tmp4 = input[INDEX(M - hid, wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default:
            assert(0);
            break;
        }
    }
}
template <typename T>
// Launch idct_idxstPreprocess over the (M/2) x (N/2) butterfly grid.
void idct_idxstPreprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                      const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((idct_idxstPreprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adapted from idct2d_postprocess() with changes on sign and scale
// if (wid % 2 == 1)
// new_output[hid][wid] = -output[hid][wid];
// else
// new_output[hid][wid] = output[hid][wid];
template <typename T>
// Undo the pre-FFT permutation, rescale by MN, and negate odd output columns
// (outputs landing in an odd column, i.e. wid >= N/2, carry a minus sign).
__global__ void idct_idxstPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N)
    {
        const bool mirrorCol = wid >= N / 2;
        const int row = (hid < M / 2) ? (hid << 1) : (((M - hid) << 1) - 1);
        const int col = mirrorCol ? (((N - wid) << 1) - 1) : (wid << 1);
        const T v = x[INDEX(hid, wid, N)] * MN;
        y[INDEX(row, col, N)] = mirrorCol ? -v : v;
    }
}
template <typename T>
// Launch idct_idxstPostprocess with one thread per element of the M x N output.
void idct_idxstPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((idct_idxstPostprocess<T>), blocks, threads, 0, 0, x, y, M, N, N / 2, M * N);
}
// idxst_idct
// Adapted from idct2d_preprocess(). The only change is the reordered input
// if (hid != 0)
// new_input[hid][wid] = input[M - hid][wid];
// else
// new_input[0][wid] = 0
template <typename T, typename TComplex>
// Same half-spectrum construction as idct2_fft2Preprocess, but with the input
// rows implicitly reordered as input[M - hid][wid] and row 0 treated as zero
// (see the comment above this kernel), so that idxst(idct(x)) can reuse the
// idct2 FFT path.
__global__ void __launch_bounds__(TPB * TPB, 8) idxst_idctPreprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit 1: hid != 0; bit 0: wid != 0
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: row-0 entries are zeroed
        {
            T tmp1;
            TComplex tmp_up;
            output[0].x = 0;
            output[0].y = 0;
            output[halfN].x = 0;
            output[halfN].y = 0;
            tmp1 = input[INDEX(halfM, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0: zeroed row 0, row halfM as in idct2
        {
            output[wid].x = 0;
            output[wid].y = 0;
            TComplex tmp_up;
            T tmp1 = input[INDEX(halfM, wid, N)];
            T tmp2 = input[INDEX(halfM, N - wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0: columns 0 and halfN, rows read mirrored
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            tmp1 = input[INDEX(M - hid, 0, N)];
            tmp3 = input[INDEX(hid, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp3;
            tmp_down.x = tmp3;
            tmp_down.y = tmp1;
            output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
            output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
            tmp1 = input[INDEX(M - hid, halfN, N)];
            tmp3 = input[INDEX(hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior points: four mirrored samples, rows swapped vs idct2
        {
            T tmp1 = input[INDEX(M - hid, wid, N)];
            T tmp2 = input[INDEX(M - hid, N - wid, N)];
            T tmp3 = input[INDEX(hid, wid, N)];
            T tmp4 = input[INDEX(hid, N - wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default:
            assert(0);
            break;
        }
    }
}
template <typename T>
// Launch idxst_idctPreprocess over the (M/2) x (N/2) butterfly grid.
void idxst_idctPreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((idxst_idctPreprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adapted from idct2d_postprocess() with changes on sign and scale
// if (hid % 2 == 1)
// new_output[hid][wid] = -output[hid][wid];
// else
// new_output[hid][wid] = output[hid][wid];
template <typename T>
// Undo the pre-FFT permutation, rescale by MN, and negate odd output rows
// (outputs landing in an odd row, i.e. hid >= M/2, carry a minus sign).
__global__ void idxst_idctPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N)
    {
        const bool mirrorRow = hid >= M / 2;
        const int row = mirrorRow ? (((M - hid) << 1) - 1) : (hid << 1);
        const int col = (wid < N / 2) ? (wid << 1) : (((N - wid) << 1) - 1);
        const T v = x[INDEX(hid, wid, N)] * MN;
        y[INDEX(row, col, N)] = mirrorRow ? -v : v;
    }
}
template <typename T>
// Launch idxst_idctPostprocess with one thread per element of the M x N output.
void idxst_idctPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((idxst_idctPostprocess<T>), blocks, threads, 0, 0, x, y, M, N, N / 2, M * N);
}
// dct2_fft2
#define REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(type) \
void instantiatedct2dPreprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N) \
{ \
return dct2dPreprocessCudaLauncher<type>( \
x, \
y, \
M, \
N); \
}
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(type) \
void instantiatedct2dPostprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N, \
const type *__restrict__ expkM, \
const type *__restrict__ expkN) \
{ \
return dct2dPostprocessCudaLauncher<type>( \
x, \
y, \
M, \
N, \
expkM, \
expkN); \
}
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct_idxst
#define REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(type) \
void instantiateidct_idxstPreprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N, \
const type *__restrict__ expkM, \
const type *__restrict__ expkN) \
{ \
return idct_idxstPreprocessCudaLauncher<type>( \
x, \
y, \
M, \
N, \
expkM, \
expkN); \
}
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(type) \
void instantiateidct_idxstPostprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N) \
{ \
return idct_idxstPostprocessCudaLauncher<type>( \
x, \
y, \
M, \
N); \
}
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idxst_idct
#define REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(type) \
void instantiateidxst_idctPreprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N, \
const type *__restrict__ expkM, \
const type *__restrict__ expkN) \
{ \
return idxst_idctPreprocessCudaLauncher<type>( \
x, \
y, \
M, \
N, \
expkM, \
expkN); \
}
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(type) \
void instantiateidxst_idctPostprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N) \
{ \
return idxst_idctPostprocessCudaLauncher<type>( \
x, \
y, \
M, \
N); \
}
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct2_fft2
#define REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(type) \
void instantiateidct2_fft2PreprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N, \
const type *__restrict__ expkM, \
const type *__restrict__ expkN) \
{ \
return idct2_fft2PreprocessCudaLauncher<type>( \
x, \
y, \
M, \
N, \
expkM, \
expkN); \
}
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(type) \
void instantiateidct2_fft2PostprocessCudaLauncher( \
const type *x, \
type *y, \
const int M, \
const int N) \
{ \
return idct2_fft2PostprocessCudaLauncher<type>( \
x, \
y, \
M, \
N); \
}
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| 440b04c6289e66e2d211cb013b9f61ef7e6455b5.cu | /**
* @file dct2_fft2_cuda_kernel.cu
* @author Zixuan Jiang, Jiaqi Gu
* @date Apr 2019
 * @brief Reference: Byeong Lee, "A new algorithm to compute the discrete cosine Transform,"
* in IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 32, no. 6, pp. 1243-1245, December 1984.
* The preprocess and postprocess of 2d dct and 2d idct are discussed in the original paper.
* idct(idxst(x)) and idxst(idct(x)) are similar to the idct2d(x),
* except tiny modifications on preprocessing and postprocessing
*/
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
#include "utility/src/Msg.h"
#include "utility/src/ComplexNumber.cuh"
#define TPB (16)
DREAMPLACE_BEGIN_NAMESPACE
// Row-major linear offset of element (hid, wid) in a matrix whose row stride is N.
inline __device__ int INDEX(const int hid, const int wid, const int N)
{
    return wid + N * hid;
}
// dct2_fft2
template <typename T>
// Reorder the M x N real input x into the permuted layout y required by the
// FFT stage of Lee's 2D DCT algorithm. cond packs (row parity, column parity):
// even rows keep their row index, odd rows are mirrored to 2*M - (hid + 1);
// even columns map to wid / 2, odd columns are mirrored to N - (wid + 1) / 2.
// NOTE(review): y is addressed with row stride halfN (= N / 2), so the flat
// target buffer holds M * N elements overall — confirm allocation at call site.
__global__ void dct2dPreprocess(const T *x, T *y, const int M, const int N, const int halfN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N)
    {
        int index;
        // bit 1: row is even; bit 0: column is even
        int cond = (((hid & 1) == 0) << 1) | ((wid & 1) == 0);
        switch (cond)
        {
        case 0: // odd row, odd column
            index = INDEX(2 * M - (hid + 1), N - (wid + 1) / 2, halfN);
            break;
        case 1: // odd row, even column
            index = INDEX(2 * M - (hid + 1), wid / 2, halfN);
            break;
        case 2: // even row, odd column
            index = INDEX(hid, N - (wid + 1) / 2, halfN);
            break;
        case 3: // even row, even column
            index = INDEX(hid, wid / 2, halfN);
            break;
        default:
            break;
        }
        y[index] = x[INDEX(hid, wid, N)];
    }
}
template <typename T>
// Launch dct2dPreprocess with one thread per element of the M x N input.
void dct2dPreprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    dct2dPreprocess<T><<<blocks, threads>>>(x, y, M, N, N / 2);
}
template <typename T, typename TComplex>
// Combine the half-spectrum V (complex output of a 2D real FFT, row stride
// halfN + 1) with the twiddle factors expkM / expkN to produce the M x N real
// DCT coefficients y. Each thread of the halfM x halfN grid writes the outputs
// of its butterfly; spectra on the hid == 0 / wid == 0 axes have no mirror
// partner and use the dedicated formulas below. Scale factors are precomputed
// on the host as 2/(M*N) and 4/(M*N).
__global__ void __launch_bounds__(TPB * TPB, 8) dct2dPostprocess(const TComplex *V, T *y, const int M, const int N,
                                                                 const int halfM, const int halfN, const T two_over_MN, const T four_over_MN,
                                                                 const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit 1: hid != 0; bit 0: wid != 0
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four axis-crossing points
        {
            y[0] = V[0].x * four_over_MN;
            y[halfN] = RealPartOfMul(expkN[halfN], V[halfN]) * four_over_MN;
            y[INDEX(halfM, 0, N)] = expkM[halfM].x * V[INDEX(halfM, 0, halfN + 1)].x * four_over_MN;
            y[INDEX(halfM, halfN, N)] = expkM[halfM].x * RealPartOfMul(expkN[halfN], V[INDEX(halfM, halfN, halfN + 1)]) * four_over_MN;
            break;
        }
        case 1: // hid == 0: rows 0 and halfM of the spectrum
        {
            ComplexType<T> tmp;
            tmp = V[wid];
            y[wid] = RealPartOfMul(expkN[wid], tmp) * four_over_MN;
            y[N - wid] = -ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
            tmp = V[INDEX(halfM, wid, halfN + 1)];
            y[INDEX(halfM, wid, N)] = expkM[halfM].x * RealPartOfMul(expkN[wid], tmp) * four_over_MN;
            y[INDEX(halfM, N - wid, N)] = -expkM[halfM].x * ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
            break;
        }
        case 2: // wid == 0: columns 0 and halfN of the spectrum
        {
            ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
            tmp1 = V[INDEX(hid, 0, halfN + 1)];
            tmp2 = V[INDEX(M - hid, 0, halfN + 1)];
            tmp_up.x = expkM[hid].x * (tmp1.x + tmp2.x) + expkM[hid].y * (tmp2.y - tmp1.y);
            tmp_down.x = -expkM[hid].y * (tmp1.x + tmp2.x) + expkM[hid].x * (tmp2.y - tmp1.y);
            y[INDEX(hid, 0, N)] = tmp_up.x * two_over_MN;
            y[INDEX(M - hid, 0, N)] = tmp_down.x * two_over_MN;
            tmp1 = complexAdd(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
            tmp2 = complexSubtract(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
            tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
            tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
            tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
            tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
            y[INDEX(hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_up) * two_over_MN;
            y[INDEX(M - hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_down) * two_over_MN;
            break;
        }
        case 3: // interior points: full 4-way butterfly with rows hid and M - hid
        {
            ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
            tmp1 = complexAdd(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
            tmp2 = complexSubtract(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
            tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
            tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
            tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
            tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
            y[INDEX(hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_up) * two_over_MN;
            y[INDEX(M - hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_down) * two_over_MN;
            y[INDEX(hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_up) * two_over_MN;
            y[INDEX(M - hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_down) * two_over_MN;
            break;
        }
        default:
            assert(0);
            break;
        }
    }
}
template <typename T>
// Launch dct2dPostprocess over the halfM x halfN butterfly grid; x is
// reinterpreted as the complex FFT output, scale factors precomputed here.
void dct2dPostprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                  const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const int halfM = M / 2;
    const int halfN = N / 2;
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((halfN + TPB - 1) / TPB, (halfM + TPB - 1) / TPB, 1);
    dct2dPostprocess<T, ComplexType<T>><<<blocks, threads>>>(
        (ComplexType<T> *)x, y, M, N, halfM, halfN,
        (T)(2. / (M * N)), (T)(4. / (M * N)),
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// idct2_fft2
template <typename T, typename TComplex>
// Build the conjugated half-spectrum (row stride halfN + 1) consumed by the
// inverse 2D real FFT from the M x N real input (DCT coefficients). Each
// entry is multiplied by the conjugate of the twiddle factors expkM / expkN.
// Boundary rows/columns (hid == 0, wid == 0) lack mirror partners and use
// the dedicated formulas below.
__global__ void __launch_bounds__(TPB * TPB, 8) idct2_fft2Preprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit 1: hid != 0; bit 0: wid != 0
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four axis-crossing points
        {
            T tmp1;
            TComplex tmp_up;
            output[0].x = input[0];
            output[0].y = 0;
            tmp1 = input[halfN];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
            tmp1 = input[INDEX(halfM, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0: rows 0 and halfM
        {
            TComplex tmp_up;
            tmp_up.x = input[wid];
            tmp_up.y = input[N - wid];
            output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
            T tmp1 = input[INDEX(halfM, wid, N)];
            T tmp2 = input[INDEX(halfM, N - wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0: columns 0 and halfN
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            tmp1 = input[INDEX(hid, 0, N)];
            tmp3 = input[INDEX(M - hid, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp3;
            tmp_down.x = tmp3;
            tmp_down.y = tmp1;
            output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
            output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
            tmp1 = input[INDEX(hid, halfN, N)];
            tmp3 = input[INDEX(M - hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior points: combine the four mirrored input samples
        {
            T tmp1 = input[INDEX(hid, wid, N)];
            T tmp2 = input[INDEX(hid, N - wid, N)];
            T tmp3 = input[INDEX(M - hid, wid, N)];
            T tmp4 = input[INDEX(M - hid, N - wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default:
            assert(0);
            break;
        }
    }
}
template <typename T>
// Launch idct2_fft2Preprocess over the (M/2) x (N/2) butterfly grid; y is
// reinterpreted as the complex half-spectrum buffer.
void idct2_fft2PreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    idct2_fft2Preprocess<T, ComplexType<T>><<<blocks, threads>>>(
        x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
template <typename T>
// Undo the pre-FFT permutation after the inverse transform and rescale by MN:
// even output rows/columns come from the top/left halves of x, odd ones from
// the mirrored bottom/right halves.
__global__ void idct2_fft2Postprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N)
    {
        const int row = (hid < M / 2) ? (hid << 1) : (((M - hid) << 1) - 1);
        const int col = (wid < N / 2) ? (wid << 1) : (((N - wid) << 1) - 1);
        y[INDEX(row, col, N)] = x[INDEX(hid, wid, N)] * MN;
    }
}
template <typename T>
// Launch idct2_fft2Postprocess with one thread per element of the M x N output.
void idct2_fft2PostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    idct2_fft2Postprocess<T><<<blocks, threads>>>(x, y, M, N, N / 2, M * N);
}
// idct_idxst
// Adapted from idct2d_preprocess(). The only change is the reordered input
// if (wid != 0)
// new_input[hid][wid] = input[hid][N - wid];
// else
// new_input[hid][0] = 0
template <typename T, typename TComplex>
// Same half-spectrum construction as idct2_fft2Preprocess, but with the input
// columns implicitly reordered as input[hid][N - wid] and column 0 treated as
// zero (see the comment above this kernel), so that idct(idxst(x)) can reuse
// the idct2 FFT path.
__global__ void __launch_bounds__(TPB * TPB, 8) idct_idxstPreprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit 1: hid != 0; bit 0: wid != 0
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: column-0 entries are zeroed
        {
            T tmp1;
            TComplex tmp_up;
            output[0].x = 0;
            output[0].y = 0;
            tmp1 = input[halfN];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
            output[INDEX(halfM, 0, halfN + 1)].x = 0;
            output[INDEX(halfM, 0, halfN + 1)].y = 0;
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0: rows 0 and halfM, columns read mirrored
        {
            TComplex tmp_up;
            tmp_up.x = input[N - wid];
            tmp_up.y = input[wid];
            output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
            T tmp1 = input[INDEX(halfM, N - wid, N)];
            T tmp2 = input[INDEX(halfM, wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0: zeroed column 0, column halfN as in idct2
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            output[INDEX(hid, 0, halfN + 1)].x = 0;
            output[INDEX(hid, 0, halfN + 1)].y = 0;
            output[INDEX(M - hid, 0, halfN + 1)].x = 0;
            output[INDEX(M - hid, 0, halfN + 1)].y = 0;
            tmp1 = input[INDEX(hid, halfN, N)];
            tmp3 = input[INDEX(M - hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior points: four mirrored samples, columns swapped vs idct2
        {
            T tmp1 = input[INDEX(hid, N - wid, N)];
            T tmp2 = input[INDEX(hid, wid, N)];
            T tmp3 = input[INDEX(M - hid, N - wid, N)];
            T tmp4 = input[INDEX(M - hid, wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default:
            assert(0);
            break;
        }
    }
}
template <typename T>
// Launch idct_idxstPreprocess over the (M/2) x (N/2) butterfly grid.
void idct_idxstPreprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                      const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    idct_idxstPreprocess<T, ComplexType<T>><<<blocks, threads>>>(
        x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adpated from idct2d_postprocess() with changes on sign and scale
// if (wid % 2 == 1)
// new_output[hid][wid] = -output[hid][wid];
// else
// new_output[hid][wid] = output[hid][wid];
// Undo the FFT-order permutation and rescale each element by MN.  Per the
// header comment, entries that land in an odd output column are negated
// (those come from the right half of the input).  halfN is unused here; it
// is kept for signature compatibility with the other postprocess kernels.
template <typename T>
__global__ void idct_idxstPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid >= M || wid >= N)
        return;
    const bool topHalf = hid < M / 2;
    const bool leftHalf = wid < N / 2;
    // Top/left halves map to even output rows/columns; bottom/right halves
    // map (mirrored) to odd ones.
    const int row = topHalf ? (hid << 1) : (((M - hid) << 1) - 1);
    const int col = leftHalf ? (wid << 1) : (((N - wid) << 1) - 1);
    const T v = x[INDEX(hid, wid, N)] * MN;
    // Odd output columns (right input half) get the sign flip.
    y[INDEX(row, col, N)] = leftHalf ? v : -v;
}
// Launch idct_idxstPostprocess with one thread per output element.
template <typename T>
void idct_idxstPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    dim3 block(TPB, TPB, 1);
    dim3 grid((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    idct_idxstPostprocess<T><<<grid, block>>>(x, y, M, N, N / 2, M * N);
}
// idxst_idct
// Adpated from idct2d_preprocess(). The only change is the reordered input
// if (hid != 0)
// new_input[hid][wid] = input[M - hid][wid];
// else
// new_input[0][wid] = 0
// Preprocess for idxst_idct: packs the row-reordered real input
// (new_input[h][w] = input[M - h][w], row 0 zeroed -- see the header comment
// above) into the complex spectrum consumed by the inverse FFT, folding in
// the conjugated twiddle factors expkM/expkN.  Each thread covers the 2x2
// group of mirror-symmetric positions (hid, wid), (M-hid, wid) and the
// matching column pair; threads on the hid == 0 / wid == 0 edges also fill
// the boundary rows/columns.  Launched over a halfM x halfN grid; output has
// row stride halfN + 1.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) idxst_idctPreprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // cond bit 1: interior row (hid != 0); bit 0: interior column (wid != 0).
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: corner entries plus the halfM row boundary
        {
            T tmp1;
            TComplex tmp_up;
            // Row 0 of the reordered input is all zeros, so its spectrum
            // entries at columns 0 and halfN are zero.
            output[0].x = 0;
            output[0].y = 0;
            output[halfN].x = 0;
            output[halfN].y = 0;
            tmp1 = input[INDEX(halfM, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0, wid != 0: zero row-0 entry plus the halfM-row pair
        {
            output[wid].x = 0;
            output[wid].y = 0;
            TComplex tmp_up;
            T tmp1 = input[INDEX(halfM, wid, N)];
            T tmp2 = input[INDEX(halfM, N - wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // hid != 0, wid == 0: column-0 and column-halfN entries for rows hid and M - hid
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            // Row reversal from the reordering: element for row hid reads
            // input row M - hid (and vice versa).
            tmp1 = input[INDEX(M - hid, 0, N)];
            tmp3 = input[INDEX(hid, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp3;
            tmp_down.x = tmp3;
            tmp_down.y = tmp1;
            output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
            output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
            tmp1 = input[INDEX(M - hid, halfN, N)];
            tmp3 = input[INDEX(hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior: the generic 2x2 mirror group
        {
            T tmp1 = input[INDEX(M - hid, wid, N)];
            T tmp2 = input[INDEX(M - hid, N - wid, N)];
            T tmp3 = input[INDEX(hid, wid, N)];
            T tmp4 = input[INDEX(hid, N - wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default:
            // cond is always in [0, 3]; unreachable.
            assert(0);
            break;
        }
    }
}
// Launch idxst_idctPreprocess over the top-left M/2 x N/2 quadrant,
// one thread per (row, column) pair.
template <typename T>
void idxst_idctPreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const int halfM = M / 2;
    const int halfN = N / 2;
    dim3 block(TPB, TPB, 1);
    dim3 grid((halfN + TPB - 1) / TPB, (halfM + TPB - 1) / TPB, 1);
    idxst_idctPreprocess<T, ComplexType<T>><<<grid, block>>>(
        x, (ComplexType<T> *)y, M, N, halfM, halfN,
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adpated from idct2d_postprocess() with changes on sign and scale
// if (hid % 2 == 1)
// new_output[hid][wid] = -output[hid][wid];
// else
// new_output[hid][wid] = output[hid][wid];
// Undo the FFT-order permutation and rescale each element by MN.  Per the
// header comment, entries that land in an odd output row are negated (those
// come from the bottom half of the input).  halfN is unused here; it is kept
// for signature compatibility with the other postprocess kernels.
template <typename T>
__global__ void idxst_idctPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid >= M || wid >= N)
        return;
    const bool topHalf = hid < M / 2;
    const bool leftHalf = wid < N / 2;
    // Top/left halves map to even output rows/columns; bottom/right halves
    // map (mirrored) to odd ones.
    const int row = topHalf ? (hid << 1) : (((M - hid) << 1) - 1);
    const int col = leftHalf ? (wid << 1) : (((N - wid) << 1) - 1);
    const T v = x[INDEX(hid, wid, N)] * MN;
    // Odd output rows (bottom input half) get the sign flip.
    y[INDEX(row, col, N)] = topHalf ? v : -v;
}
// Launch idxst_idctPostprocess with one thread per output element.
template <typename T>
void idxst_idctPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    dim3 block(TPB, TPB, 1);
    dim3 grid((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    idxst_idctPostprocess<T><<<grid, block>>>(x, y, M, N, N / 2, M * N);
}
// dct2_fft2
// Explicit instantiation shims: each REGISTER_* macro emits a non-template
// `instantiate...` overload (one per type) that forwards to the
// corresponding templated launcher, forcing the template to be compiled for
// float and double in this translation unit.
#define REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(type) \
    void instantiatedct2dPreprocessCudaLauncher(       \
        const type *x,                                 \
        type *y,                                       \
        const int M,                                   \
        const int N)                                   \
    {                                                  \
        return dct2dPreprocessCudaLauncher<type>(      \
            x,                                         \
            y,                                         \
            M,                                         \
            N);                                        \
    }
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(double);
// Postprocess for dct2_fft2 additionally takes the twiddle-factor tables.
#define REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(type) \
    void instantiatedct2dPostprocessCudaLauncher(       \
        const type *x,                                  \
        type *y,                                        \
        const int M,                                    \
        const int N,                                    \
        const type *__restrict__ expkM,                 \
        const type *__restrict__ expkN)                 \
    {                                                   \
        return dct2dPostprocessCudaLauncher<type>(      \
            x,                                          \
            y,                                          \
            M,                                          \
            N,                                          \
            expkM,                                      \
            expkN);                                     \
    }
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct_idxst
#define REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(type) \
    void instantiateidct_idxstPreprocessCudaLauncher(       \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN)                     \
    {                                                       \
        return idct_idxstPreprocessCudaLauncher<type>(      \
            x,                                              \
            y,                                              \
            M,                                              \
            N,                                              \
            expkM,                                          \
            expkN);                                         \
    }
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(type) \
    void instantiateidct_idxstPostprocessCudaLauncher(       \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N)                                         \
    {                                                        \
        return idct_idxstPostprocessCudaLauncher<type>(      \
            x,                                               \
            y,                                               \
            M,                                               \
            N);                                              \
    }
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idxst_idct
#define REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(type) \
    void instantiateidxst_idctPreprocessCudaLauncher(       \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN)                     \
    {                                                       \
        return idxst_idctPreprocessCudaLauncher<type>(      \
            x,                                              \
            y,                                              \
            M,                                              \
            N,                                              \
            expkM,                                          \
            expkN);                                         \
    }
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(type) \
    void instantiateidxst_idctPostprocessCudaLauncher(       \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N)                                         \
    {                                                        \
        return idxst_idctPostprocessCudaLauncher<type>(      \
            x,                                               \
            y,                                               \
            M,                                               \
            N);                                              \
    }
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct2_fft2
#define REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(type) \
    void instantiateidct2_fft2PreprocessCudaLauncher(       \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN)                     \
    {                                                       \
        return idct2_fft2PreprocessCudaLauncher<type>(      \
            x,                                              \
            y,                                              \
            M,                                              \
            N,                                              \
            expkM,                                          \
            expkN);                                         \
    }
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(type) \
    void instantiateidct2_fft2PostprocessCudaLauncher(       \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N)                                         \
    {                                                        \
        return idct2_fft2PostprocessCudaLauncher<type>(      \
            x,                                               \
            y,                                               \
            M,                                               \
            N);                                              \
    }
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
e865d3d227e5498a5da58ae6efaefba84d22abd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
%%cu
#include<cuda.h>
#include <iostream>
#include <numeric>
#define SIZE 8
using namespace std;
// In-place parallel tree reduction that leaves the MAXIMUM of `input` in
// input[0].  (The name `sum` is historical; the combining operation is max.)
// Launch with a single block of n/2 threads for an n-element, power-of-two
// sized array.
__global__ void sum(int* input)
{
    const int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads) // still alive?
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size;
            if (input[fst] < input[snd])
                input[fst] = input[snd];
        }
        // Barrier before the next round: later rounds read elements written
        // by other threads, so without this the reduction races (the
        // original code only worked by accident within a single warp).
        // number_of_threads is uniform across the block, so all threads
        // reach this barrier.
        __syncthreads();
        step_size <<= 1;
        number_of_threads >>= 1;
    }
}
// Fill a small array with random values, reduce it to its maximum on the
// GPU, and print the result.  Every HIP call is now checked so a failure is
// reported instead of silently printing garbage.
int main()
{
    int count = SIZE;
    const int size = count * sizeof(int);
    int h[SIZE];
    for (int i = 0; i < count; i++)
    {
        h[i] = rand() % 50;
    }
    for (int i = 0; i < count; i++)
    {
        cout << " " << h[i];
    }
    int* d = nullptr;
    if (hipMalloc(&d, size) != hipSuccess)
    {
        cout << "hipMalloc failed" << endl;
        return 1;
    }
    if (hipMemcpy(d, h, size, hipMemcpyHostToDevice) != hipSuccess)
    {
        cout << "hipMemcpy host-to-device failed" << endl;
        hipFree(d);
        return 1;
    }
    hipLaunchKernelGGL(sum, dim3(1), dim3(count / 2), 0, 0, d);
    // Kernel launches report configuration errors via hipGetLastError.
    if (hipGetLastError() != hipSuccess)
    {
        cout << "kernel launch failed" << endl;
        hipFree(d);
        return 1;
    }
    int result;
    // hipMemcpy is synchronizing, so the kernel has finished by the time the
    // result is read back.
    if (hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost) != hipSuccess)
    {
        cout << "hipMemcpy device-to-host failed" << endl;
        hipFree(d);
        return 1;
    }
    cout << "Max is " << result << endl;
    //getchar();
    hipFree(d);
    return 0;
}
| e865d3d227e5498a5da58ae6efaefba84d22abd4.cu | %%cu
#include<cuda.h>
#include <iostream>
#include <numeric>
#define SIZE 8
using namespace std;
// In-place parallel tree reduction that leaves the MAXIMUM of `input` in
// input[0].  (The name `sum` is historical; the combining operation is max.)
// Launch with a single block of n/2 threads for an n-element, power-of-two
// sized array.
__global__ void sum(int* input)
{
    const int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads) // still alive?
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size;
            if (input[fst] < input[snd])
                input[fst] = input[snd];
        }
        // Barrier before the next round: later rounds read elements written
        // by other threads, so without this the reduction races (the
        // original code only worked by accident within a single warp).
        // number_of_threads is uniform across the block, so all threads
        // reach this barrier.
        __syncthreads();
        step_size <<= 1;
        number_of_threads >>= 1;
    }
}
// Fill a small array with random values, reduce it to its maximum on the
// GPU, and print the result.  Every CUDA call is now checked so a failure is
// reported instead of silently printing garbage.
int main()
{
    int count = SIZE;
    const int size = count * sizeof(int);
    int h[SIZE];
    for (int i = 0; i < count; i++)
    {
        h[i] = rand() % 50;
    }
    for (int i = 0; i < count; i++)
    {
        cout << " " << h[i];
    }
    int* d = nullptr;
    if (cudaMalloc(&d, size) != cudaSuccess)
    {
        cout << "cudaMalloc failed" << endl;
        return 1;
    }
    if (cudaMemcpy(d, h, size, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout << "cudaMemcpy host-to-device failed" << endl;
        cudaFree(d);
        return 1;
    }
    sum <<<1, count/2>>>(d);
    // Kernel launches report configuration errors via cudaGetLastError.
    if (cudaGetLastError() != cudaSuccess)
    {
        cout << "kernel launch failed" << endl;
        cudaFree(d);
        return 1;
    }
    int result;
    // cudaMemcpy is synchronizing, so the kernel has finished by the time
    // the result is read back.
    if (cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
    {
        cout << "cudaMemcpy device-to-host failed" << endl;
        cudaFree(d);
        return 1;
    }
    cout << "Max is " << result << endl;
    //getchar();
    cudaFree(d);
    return 0;
}
|
0714273583b68a4a9ffcdf846d1e3ce4c9c3e0c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudafPHI.cuh"
#include <stdio.h>
#include <hip/hip_runtime.h>
// Report a fatal HIP API failure and terminate the process.  No-op when
// `error` is hipSuccess.  functionMess/opMess identify the calling function
// and the operation that failed; Line is the caller's __LINE__.
static inline void printError(hipError_t error, const char * functionMess, const char * opMess, int Line) {
    if (error == hipSuccess){
        return;
    }
    // Diagnostics go to stderr (the commented-out code in the original also
    // targeted stderr), leaving stdout clean for program output.
    fprintf(stderr, "%s %s Line - %i : %s \n", functionMess, opMess, Line,
            hipGetErrorString(error));
    hipDeviceReset();
    // Exit with a failure status so callers and scripts can detect the
    // error; the previous code exited with 0, signalling success.
    exit(1);
}
// Accumulates Z^T * Z for the n_subjects x 2 design matrix d_Z (stored one
// column after the other: column 0 at d_Z[0..n-1], column 1 at
// d_Z[n..2n-1]) into the 2x2 row-major result ZTZI, using a shared-memory
// block reduction followed by one atomicAdd per entry per block.
// NOTE(review): assumes ZTZI was zeroed before launch -- results are
// accumulated, not assigned; confirm at the call site.  Slots 1 and 2
// receive the same off-diagonal partial sum, so the stored matrix is
// symmetric by construction.
static __global__ void calculate_ZTZ(float * d_Z, float * ZTZI, size_t n_subjects) {
    size_t rowIdx = threadIdx.x + blockDim.x * blockIdx.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the three distinct entries of the
    // symmetric 2x2 product.
    __shared__ float shared_ZTZI_0[BLOCK_SIZE_1];
    __shared__ float shared_ZTZI_2[BLOCK_SIZE_1];
    __shared__ float shared_ZTZI_3[BLOCK_SIZE_1];
    if (rowIdx < n_subjects) {
        shared_ZTZI_0[tIdx] = d_Z[rowIdx] * d_Z[rowIdx];
        shared_ZTZI_2[tIdx] = d_Z[rowIdx + n_subjects] * d_Z[rowIdx];
        shared_ZTZI_3[tIdx] = d_Z[rowIdx + n_subjects] * d_Z[rowIdx + n_subjects];
    }else{
        // Out-of-range threads contribute zero so the tree reduction below
        // stays well-defined.
        shared_ZTZI_0[tIdx] = 0.f;
        shared_ZTZI_2[tIdx] = 0.f;
        shared_ZTZI_3[tIdx] = 0.f;
    }
    __syncthreads();
    // Standard shared-memory tree reduction, halving the active range each
    // step; the barrier sits outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(threadIdx.x < stride && (rowIdx+stride < n_subjects)){
            shared_ZTZI_0[tIdx] += shared_ZTZI_0[tIdx + stride];
            shared_ZTZI_2[tIdx] += shared_ZTZI_2[tIdx + stride];
            shared_ZTZI_3[tIdx] += shared_ZTZI_3[tIdx + stride];
        }
        __syncthreads();
    }
    // Thread 0 of each block commits the block's partial sums globally.
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(&ZTZI[0], shared_ZTZI_0[0]);
        atomicAdd(&ZTZI[1], shared_ZTZI_2[0]);
        atomicAdd(&ZTZI[2], shared_ZTZI_2[0]);
        atomicAdd(&ZTZI[3], shared_ZTZI_3[0]);
    }
}
// In-place inverse of the 2x2 matrix stored row-major in ZTZI as [a b; c d]
// (the name "Inv4by4" refers to the four floats, not a 4x4 matrix).  Run as
// a single thread.
// NOTE(review): the textbook inverse is (1/det)*[d -b; -c a], but this
// writes -c/det into slot 1 and -b/det into slot 2 (swapped).  That is only
// correct because calculate_ZTZ stores identical values in slots 1 and 2
// (symmetric matrix) -- confirm before reusing on non-symmetric input.
// det == 0 is not guarded.
static __global__ void Inv4by4(float * ZTZI) {
    float a = ZTZI[0];
    float b = ZTZI[1];
    float c = ZTZI[2];
    float d = ZTZI[3];
    float det = a * d - b * c;
    ZTZI[0] = d / det;
    ZTZI[1] = -c / det;
    ZTZI[2] = -b / det;
    ZTZI[3] = a / det;
}
// For one voxel, accumulates theta = sum_i F_i^2 * (z_i * ZTZI), where z_i
// is row i of the two-column matrix d_Z and F_i is subject i's value for
// this voxel (squared before use).  Partial sums are reduced in shared
// memory; thread 0 of each block commits them with atomicAdd.
// NOTE(review): d_theta[0..1] must be zeroed before launch -- the caller
// compute_h2 does so with a MemsetAsync on the same stream.
static __global__ void calculate_d_theta(float * d_theta, float * d_F, float * d_Z, float * d_ZTZI, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the two components of theta.
    __shared__ float shared_theta_0[BLOCK_SIZE_1];
    __shared__ float shared_theta_1[BLOCK_SIZE_1];
    if (rowIdx < n_subjects) {
        float d_Z_0 = d_Z[rowIdx];
        float d_Z_1 = d_Z[rowIdx + n_subjects];
        float F = d_F[rowIdx + voxel*n_subjects];
        F = F*F;
        shared_theta_0[tIdx] = F*(d_Z_0*d_ZTZI[0] + d_ZTZI[2]*d_Z_1);
        shared_theta_1[tIdx] = F*(d_Z_0*d_ZTZI[1] + d_ZTZI[3]*d_Z_1);
        // printf("%f \n",shared_theta_0[threadIdx.x]);
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_theta_0[tIdx] = 0.f;
        shared_theta_1[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_theta_0[tIdx] += shared_theta_0[tIdx + stride];
            shared_theta_1[tIdx] += shared_theta_1[tIdx + stride];
        }
        __syncthreads();
    }
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(&d_theta[0], shared_theta_0[0]);
        atomicAdd(&d_theta[1], shared_theta_1[0]);
    }
}
// One thread per subject: computes d_weights[i] = 1 / w_i^2 where
// w_i = t0 + lambda_i * t1, lambda_i being the second column of d_Z and
// t0/t1 the entries of d_theta with negative values clamped to zero.
static __global__ void calculate_weights(float * d_weights, float * d_Z, float * d_theta, size_t n_subjects){
    const size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n_subjects)
        return;
    // Clamp negative estimates to zero (NaN is deliberately left untouched,
    // matching the original comparison direction).
    const float t0 = (d_theta[0] < 0.f) ? 0.f : d_theta[0];
    const float t1 = (d_theta[1] < 0.f) ? 0.f : d_theta[1];
    const float w = t0 + d_Z[idx + n_subjects] * t1;
    d_weights[idx] = 1.f / (w * w);
}
// For one voxel, block-reduces three per-subject sums and commits them with
// one atomicAdd per block:
//   d_A       += w_i            (sum of weights)
//   d_D       += w_i * F_i^2    (weighted sum of squared measurements)
//   d_Sigma_P += F_i^2          (unweighted sum of squared measurements)
// NOTE(review): d_A, d_D and d_Sigma_P must be zeroed before launch -- the
// caller compute_h2 memsets them on the same stream.
static __global__ void calculate_sum_A_D_Sigma_P(float * d_A, float * d_D, float * d_Sigma_P, float * d_weights,
                                                 float * d_F, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the three accumulators.
    __shared__ float shared_A[BLOCK_SIZE_1];
    __shared__ float shared_D[BLOCK_SIZE_1];
    __shared__ float shared_Sigma_P[BLOCK_SIZE_1];
    if(rowIdx < n_subjects){
        float weight = d_weights[rowIdx];
        float F = d_F[voxel*n_subjects + rowIdx];
        F = F*F;
        shared_A[tIdx] = weight;
        shared_D[tIdx] = weight*F;
        shared_Sigma_P[tIdx] = F;
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_A[tIdx] = 0.f;
        shared_D[tIdx] = 0.f;
        shared_Sigma_P[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_A[tIdx] += shared_A[tIdx + stride];
            shared_D[tIdx] += shared_D[tIdx + stride];
            shared_Sigma_P[tIdx] += shared_Sigma_P[tIdx + stride];
        }
        __syncthreads();
    }
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(d_A, shared_A[0]);
        atomicAdd(d_D, shared_D[0]);
        atomicAdd(d_Sigma_P, shared_Sigma_P[0]);
    }
}
// For one voxel, block-reduces three per-subject sums and commits them with
// one atomicAdd per block (lambda_i = second column of d_Z):
//   d_B += w_i * lambda_i
//   d_C += w_i * lambda_i^2
//   d_E += F_i^2 * w_i * lambda_i
// NOTE(review): d_B, d_C and d_E must be zeroed before launch -- the caller
// compute_h2 memsets them on the same stream.
static __global__ void calculate_sum_B_C_E(float * d_B, float * d_C, float * d_E, float * d_weights, float * d_F,
                                           float * d_Z, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the three accumulators.
    __shared__ float shared_B[BLOCK_SIZE_1];
    __shared__ float shared_C[BLOCK_SIZE_1];
    __shared__ float shared_E[BLOCK_SIZE_1];
    if(rowIdx < n_subjects){
        float lambda = d_Z[rowIdx + n_subjects];
        float F = d_F[voxel*n_subjects + rowIdx];
        F = F*F;
        float weight = d_weights[rowIdx];
        shared_B[tIdx] = weight*lambda;
        shared_C[tIdx] = weight*lambda*lambda;
        shared_E[tIdx] = F*weight*lambda;
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_B[tIdx] = 0.f;
        shared_C[tIdx] = 0.f;
        shared_E[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_B[tIdx] += shared_B[tIdx + stride];
            shared_C[tIdx] += shared_C[tIdx + stride];
            shared_E[tIdx] += shared_E[tIdx + stride];
        }
        __syncthreads();
    }
    if((threadIdx.x == 0) && (rowIdx < n_subjects)){
        atomicAdd(d_B, shared_B[0]);
        atomicAdd(d_C, shared_C[0]);
        atomicAdd(d_E, shared_E[0]);
    }
}
// For one voxel, block-reduces the score statistic
//   score = 0.5 * sum_i lambda_i * (F_i^2 / (Sigma_P / n_subjects) - 1)
// where Sigma_P is the sum of F^2 previously produced by
// calculate_sum_A_D_Sigma_P (so Sigma_P/n is the mean of F^2) and lambda_i
// is the second column of d_Z.  Committed with one atomicAdd per block.
// NOTE(review): d_score must be zeroed before launch -- the caller
// compute_h2 memsets it on the same stream.
static __global__ void calculate_score(float * d_score, float * d_Z, float * d_F, float * d_Sigma_P, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    __shared__ float shared_score[BLOCK_SIZE_1];
    if(rowIdx < n_subjects){
        float F = d_F[voxel*n_subjects + rowIdx];
        F = F*F;
        shared_score[tIdx] = 0.5f*d_Z[rowIdx + n_subjects]*(F/(*d_Sigma_P/float(n_subjects)) - 1.f);
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_score[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_score[tIdx] += shared_score[tIdx + stride];
        }
        __syncthreads();
    }
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(d_score, shared_score[0]);
    }
}
// Final single-thread step per voxel: solves the 2x2 system
// [A B; B C] * [sigma_E; sigma_A] = [D; E] by Cramer's rule, clamps negative
// variance components to zero, and stores the heritability estimate
// h2 = sigma_A / (sigma_A + sigma_E), the score indicator, and the updated
// total variance *d_Sigma_P.  d_boolean_score marks voxels with a usable
// positive estimate.
static __global__ void calculate_h2(float * d_h2, float * d_indicator, float * d_Sigma_P, float * d_score, bool * d_boolean_score, float * d_A, float * d_B, float * d_C, float * d_D, float * d_E, size_t voxel){
    d_indicator[voxel] = *d_score;
    float A = *d_A;
    float B = *d_B;
    float C = *d_C;
    float D = *d_D;
    float E = *d_E;
    float sigma_E = (C*D - B*E)/(A*C - B*B);
    float sigma_A = (A*E - B*D)/(A*C - B*B);
    if(sigma_E < 0.f)
        sigma_E = 0.f;
    if(sigma_A < 0.f)
        sigma_A = 0.f;
    float h2r;
    if((sigma_E + sigma_A) == 0.f){
        // Degenerate fit: no variance at all.
        d_h2[voxel] = 0.f;
        d_boolean_score[voxel] = false;
    }else{
        // A negative score statistic forces h2 to zero.  BUGFIX: previously
        // the `h2r = 0.f` assignment was dead code because it was
        // unconditionally overwritten by the ratio on the next line; the
        // `else` restores the evident intent.
        if(*d_score < 0){
            h2r = 0.f;
        }else{
            h2r = sigma_A/(sigma_E + sigma_A);
        }
        if(h2r <= 0.f){
            h2r = 0.f;
            d_boolean_score[voxel] = false;
        }else{
            d_boolean_score[voxel] = true;
        }
        d_h2[voxel] = h2r;
        *d_Sigma_P = sigma_E + sigma_A;
    }
}
/*
static __global__ void calculate_weights_SE(float * d_weights, bool * d_boolean_score, float * d_Z, float * d_h2, float * d_Sigma_P, size_t n_subjects, size_t voxel){
if(d_boolean_score[voxel] == false){
return;
}
size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
if(rowIdx < n_subjects){
float Sigma_A = d_h2[voxel]*(*d_Sigma_P);
float Sigma_E = *d_Sigma_P - Sigma_A;
if(Sigma_A < 0.f)
Sigma_A = 0.f;
if(Sigma_E < 0.f)
Sigma_E = 0.f;
float weight = Sigma_E + d_Z[rowIdx + n_subjects]*Sigma_A;
d_weights[rowIdx] = 1.f/(weight*weight);
}
}
static __global__ void calculate_sum_A_B_C(float * d_A, float * d_B, float * d_C, float * d_weights,
float * d_Z, bool * d_boolean_score, size_t n_subjects, size_t voxel){
if(d_boolean_score[voxel] == false)
return;
size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
size_t tIdx = threadIdx.x;
__shared__ float shared_A[BLOCK_SIZE_1];
__shared__ float shared_B[BLOCK_SIZE_1];
__shared__ float shared_C[BLOCK_SIZE_1];
if(rowIdx < n_subjects){
float weight = d_weights[rowIdx];
float lambda = d_Z[rowIdx + n_subjects];
shared_A[tIdx] = weight;
shared_B[tIdx] = weight*F;
shared_C[tIdx] = F;
}else{
shared_A[tIdx] = 0.f;
shared_B[tIdx] = 0.f;
shared_C[tIdx] = 0.f;
}
__syncthreads();
for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
if(tIdx < stride && (rowIdx+stride < n_subjects)){
shared_A[tIdx] += shared_A[tIdx + stride];
shared_B[tIdx] += shared_B[tIdx + stride];
shared_C[tIdx] += shared_C[tIdx + stride];
}
__syncthreads();
}
if((tIdx == 0) && (rowIdx < n_subjects)){
atomicAdd(d_A, shared_A[0]);
atomicAdd(d_B, shared_B[0]);
atomicAdd(d_C, shared_C[0]);
}
}
*/
// Host driver: for each of n_voxels voxels, runs the per-voxel pipeline
// (theta -> weights -> A/D/Sigma_P -> B/C/E -> score -> h2) on `stream`,
// zeroing every accumulator with hipMemsetAsync before the kernel that
// atomically adds into it.  Results land in d_h2, d_indicator and
// d_boolean_score (one entry per voxel).  Always returns 1.
// NOTE(review): all work is enqueued on a single stream, so kernels for a
// voxel are ordered after its memsets; no host synchronization happens
// here -- the caller must synchronize the stream before reading results.
int compute_h2(float * d_F, float * d_h2, float * d_indicator, bool * d_boolean_score,
               compute_h2_variables vars, aux_variables aux_vars,
               size_t n_subjects, size_t n_voxels, hipStream_t stream) {
    const char * functionName = "compute_h2.cu";
    // One thread per subject for the reduction kernels.
    dim3 blockSize(BLOCK_SIZE_1, 1, 1);
    dim3 gridSize(ceil(float(n_subjects) / float(BLOCK_SIZE_1)), 1, 1);
    // The re-assignments below are redundant with the constructor arguments
    // above; kept as-is.
    blockSize.x = BLOCK_SIZE_1;
    gridSize.x = ceil(float(n_subjects)/float(BLOCK_SIZE_1));
    blockSize.y = 1;
    gridSize.y = 1;
    for(size_t voxel = 0; voxel < n_voxels ; voxel++){
        // theta accumulators must be zero before calculate_d_theta's atomics.
        printError(hipMemsetAsync(vars.d_theta, 0, sizeof(float)*2, stream),
                   functionName, "hipMemset-d_theta", __LINE__);
        hipLaunchKernelGGL(( calculate_d_theta), dim3(gridSize), dim3(blockSize), 0, stream, vars.d_theta, d_F, aux_vars.d_Z, aux_vars.d_ZTZI, voxel, n_subjects);
        hipLaunchKernelGGL(( calculate_weights), dim3(gridSize), dim3(blockSize), 0, stream, vars.d_weights, aux_vars.d_Z, vars.d_theta, n_subjects);
        // Zero the A/D/Sigma_P accumulators for this voxel.
        printError(hipMemsetAsync(vars.d_A, 0, sizeof(float), stream),
                   functionName, "hipMemset-d_A", __LINE__);
        printError(hipMemsetAsync(vars.d_D, 0, sizeof(float), stream),
                   functionName, "hipMemset-d_D", __LINE__);
        printError(hipMemsetAsync(vars.d_Sigma_P, 0, sizeof(float), stream),
                   functionName, "hipMemset-d_Sigma_P", __LINE__);
        hipLaunchKernelGGL(( calculate_sum_A_D_Sigma_P), dim3(gridSize), dim3(blockSize), 0, stream, vars.d_A, vars.d_D, vars.d_Sigma_P,
                           vars.d_weights, d_F, voxel, n_subjects);
        // Zero the B/C/E accumulators for this voxel.
        printError(hipMemsetAsync(vars.d_B, 0, sizeof(float), stream),
                   functionName, "hipMemset-d_B", __LINE__);
        printError(hipMemsetAsync(vars.d_C, 0, sizeof(float), stream),
                   functionName, "hipMemset-d_C", __LINE__);
        printError(hipMemsetAsync(vars.d_E, 0, sizeof(float),stream),
                   functionName, "hipMemset-d_E", __LINE__);
        hipLaunchKernelGGL(( calculate_sum_B_C_E), dim3(gridSize), dim3(blockSize), 0 ,stream, vars.d_B, vars.d_C, vars.d_E, vars.d_weights, d_F, aux_vars.d_Z, voxel, n_subjects);
        printError(hipMemsetAsync(vars.d_score, 0, sizeof(float), stream),
                   functionName, "hipMemset-d_score", __LINE__);
        hipLaunchKernelGGL(( calculate_score), dim3(gridSize), dim3(blockSize), 0, stream, vars.d_score, aux_vars.d_Z, d_F, vars.d_Sigma_P, voxel, n_subjects);
        // Single-thread finalization for this voxel.
        hipLaunchKernelGGL(( calculate_h2), dim3(1), dim3(1), 0, stream, d_h2, d_indicator, vars.d_Sigma_P, vars.d_score, d_boolean_score, vars.d_A, vars.d_B, vars.d_C, vars.d_D, vars.d_E, voxel);
    }
    return 1;
}
| 0714273583b68a4a9ffcdf846d1e3ce4c9c3e0c4.cu | #include "cudafPHI.cuh"
#include <stdio.h>
#include <cuda_runtime.h>
// Report a fatal CUDA API failure and terminate the process.  No-op when
// `error` is cudaSuccess.  functionMess/opMess identify the calling function
// and the operation that failed; Line is the caller's __LINE__.
static inline void printError(cudaError_t error, const char * functionMess, const char * opMess, int Line) {
    if (error == cudaSuccess){
        return;
    }
    // Diagnostics go to stderr (the commented-out code in the original also
    // targeted stderr), leaving stdout clean for program output.
    fprintf(stderr, "%s %s Line - %i : %s \n", functionMess, opMess, Line,
            cudaGetErrorString(error));
    cudaDeviceReset();
    // Exit with a failure status so callers and scripts can detect the
    // error; the previous code exited with 0, signalling success.
    exit(1);
}
// Accumulates Z^T * Z for the n_subjects x 2 design matrix d_Z (stored one
// column after the other: column 0 at d_Z[0..n-1], column 1 at
// d_Z[n..2n-1]) into the 2x2 row-major result ZTZI, using a shared-memory
// block reduction followed by one atomicAdd per entry per block.
// NOTE(review): assumes ZTZI was zeroed before launch -- results are
// accumulated, not assigned; confirm at the call site.  Slots 1 and 2
// receive the same off-diagonal partial sum, so the stored matrix is
// symmetric by construction.
static __global__ void calculate_ZTZ(float * d_Z, float * ZTZI, size_t n_subjects) {
    size_t rowIdx = threadIdx.x + blockDim.x * blockIdx.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the three distinct entries of the
    // symmetric 2x2 product.
    __shared__ float shared_ZTZI_0[BLOCK_SIZE_1];
    __shared__ float shared_ZTZI_2[BLOCK_SIZE_1];
    __shared__ float shared_ZTZI_3[BLOCK_SIZE_1];
    if (rowIdx < n_subjects) {
        shared_ZTZI_0[tIdx] = d_Z[rowIdx] * d_Z[rowIdx];
        shared_ZTZI_2[tIdx] = d_Z[rowIdx + n_subjects] * d_Z[rowIdx];
        shared_ZTZI_3[tIdx] = d_Z[rowIdx + n_subjects] * d_Z[rowIdx + n_subjects];
    }else{
        // Out-of-range threads contribute zero so the tree reduction below
        // stays well-defined.
        shared_ZTZI_0[tIdx] = 0.f;
        shared_ZTZI_2[tIdx] = 0.f;
        shared_ZTZI_3[tIdx] = 0.f;
    }
    __syncthreads();
    // Standard shared-memory tree reduction, halving the active range each
    // step; the barrier sits outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(threadIdx.x < stride && (rowIdx+stride < n_subjects)){
            shared_ZTZI_0[tIdx] += shared_ZTZI_0[tIdx + stride];
            shared_ZTZI_2[tIdx] += shared_ZTZI_2[tIdx + stride];
            shared_ZTZI_3[tIdx] += shared_ZTZI_3[tIdx + stride];
        }
        __syncthreads();
    }
    // Thread 0 of each block commits the block's partial sums globally.
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(&ZTZI[0], shared_ZTZI_0[0]);
        atomicAdd(&ZTZI[1], shared_ZTZI_2[0]);
        atomicAdd(&ZTZI[2], shared_ZTZI_2[0]);
        atomicAdd(&ZTZI[3], shared_ZTZI_3[0]);
    }
}
// In-place inverse of the 2x2 matrix stored row-major in ZTZI as [a b; c d]
// (the name "Inv4by4" refers to the four floats, not a 4x4 matrix).  Run as
// a single thread.
// NOTE(review): the textbook inverse is (1/det)*[d -b; -c a], but this
// writes -c/det into slot 1 and -b/det into slot 2 (swapped).  That is only
// correct because calculate_ZTZ stores identical values in slots 1 and 2
// (symmetric matrix) -- confirm before reusing on non-symmetric input.
// det == 0 is not guarded.
static __global__ void Inv4by4(float * ZTZI) {
    float a = ZTZI[0];
    float b = ZTZI[1];
    float c = ZTZI[2];
    float d = ZTZI[3];
    float det = a * d - b * c;
    ZTZI[0] = d / det;
    ZTZI[1] = -c / det;
    ZTZI[2] = -b / det;
    ZTZI[3] = a / det;
}
// For one voxel, accumulates theta = sum_i F_i^2 * (z_i * ZTZI), where z_i
// is row i of the two-column matrix d_Z and F_i is subject i's value for
// this voxel (squared before use).  Partial sums are reduced in shared
// memory; thread 0 of each block commits them with atomicAdd.
// NOTE(review): d_theta[0..1] must be zeroed before launch -- the caller
// compute_h2 does so with a MemsetAsync on the same stream.
static __global__ void calculate_d_theta(float * d_theta, float * d_F, float * d_Z, float * d_ZTZI, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the two components of theta.
    __shared__ float shared_theta_0[BLOCK_SIZE_1];
    __shared__ float shared_theta_1[BLOCK_SIZE_1];
    if (rowIdx < n_subjects) {
        float d_Z_0 = d_Z[rowIdx];
        float d_Z_1 = d_Z[rowIdx + n_subjects];
        float F = d_F[rowIdx + voxel*n_subjects];
        F = F*F;
        shared_theta_0[tIdx] = F*(d_Z_0*d_ZTZI[0] + d_ZTZI[2]*d_Z_1);
        shared_theta_1[tIdx] = F*(d_Z_0*d_ZTZI[1] + d_ZTZI[3]*d_Z_1);
        // printf("%f \n",shared_theta_0[threadIdx.x]);
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_theta_0[tIdx] = 0.f;
        shared_theta_1[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_theta_0[tIdx] += shared_theta_0[tIdx + stride];
            shared_theta_1[tIdx] += shared_theta_1[tIdx + stride];
        }
        __syncthreads();
    }
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(&d_theta[0], shared_theta_0[0]);
        atomicAdd(&d_theta[1], shared_theta_1[0]);
    }
}
// One thread per subject: computes d_weights[i] = 1 / w_i^2 where
// w_i = t0 + lambda_i * t1, lambda_i being the second column of d_Z and
// t0/t1 the entries of d_theta with negative values clamped to zero.
static __global__ void calculate_weights(float * d_weights, float * d_Z, float * d_theta, size_t n_subjects){
    const size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n_subjects)
        return;
    // Clamp negative estimates to zero (NaN is deliberately left untouched,
    // matching the original comparison direction).
    const float t0 = (d_theta[0] < 0.f) ? 0.f : d_theta[0];
    const float t1 = (d_theta[1] < 0.f) ? 0.f : d_theta[1];
    const float w = t0 + d_Z[idx + n_subjects] * t1;
    d_weights[idx] = 1.f / (w * w);
}
// For one voxel, block-reduces three per-subject sums and commits them with
// one atomicAdd per block:
//   d_A       += w_i            (sum of weights)
//   d_D       += w_i * F_i^2    (weighted sum of squared measurements)
//   d_Sigma_P += F_i^2          (unweighted sum of squared measurements)
// NOTE(review): d_A, d_D and d_Sigma_P must be zeroed before launch -- the
// caller compute_h2 memsets them on the same stream.
static __global__ void calculate_sum_A_D_Sigma_P(float * d_A, float * d_D, float * d_Sigma_P, float * d_weights,
                                                 float * d_F, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the three accumulators.
    __shared__ float shared_A[BLOCK_SIZE_1];
    __shared__ float shared_D[BLOCK_SIZE_1];
    __shared__ float shared_Sigma_P[BLOCK_SIZE_1];
    if(rowIdx < n_subjects){
        float weight = d_weights[rowIdx];
        float F = d_F[voxel*n_subjects + rowIdx];
        F = F*F;
        shared_A[tIdx] = weight;
        shared_D[tIdx] = weight*F;
        shared_Sigma_P[tIdx] = F;
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_A[tIdx] = 0.f;
        shared_D[tIdx] = 0.f;
        shared_Sigma_P[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_A[tIdx] += shared_A[tIdx + stride];
            shared_D[tIdx] += shared_D[tIdx + stride];
            shared_Sigma_P[tIdx] += shared_Sigma_P[tIdx + stride];
        }
        __syncthreads();
    }
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(d_A, shared_A[0]);
        atomicAdd(d_D, shared_D[0]);
        atomicAdd(d_Sigma_P, shared_Sigma_P[0]);
    }
}
// For one voxel, block-reduces three per-subject sums and commits them with
// one atomicAdd per block (lambda_i = second column of d_Z):
//   d_B += w_i * lambda_i
//   d_C += w_i * lambda_i^2
//   d_E += F_i^2 * w_i * lambda_i
// NOTE(review): d_B, d_C and d_E must be zeroed before launch -- the caller
// compute_h2 memsets them on the same stream.
static __global__ void calculate_sum_B_C_E(float * d_B, float * d_C, float * d_E, float * d_weights, float * d_F,
                                           float * d_Z, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    // Per-block partial sums for the three accumulators.
    __shared__ float shared_B[BLOCK_SIZE_1];
    __shared__ float shared_C[BLOCK_SIZE_1];
    __shared__ float shared_E[BLOCK_SIZE_1];
    if(rowIdx < n_subjects){
        float lambda = d_Z[rowIdx + n_subjects];
        float F = d_F[voxel*n_subjects + rowIdx];
        F = F*F;
        float weight = d_weights[rowIdx];
        shared_B[tIdx] = weight*lambda;
        shared_C[tIdx] = weight*lambda*lambda;
        shared_E[tIdx] = F*weight*lambda;
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_B[tIdx] = 0.f;
        shared_C[tIdx] = 0.f;
        shared_E[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_B[tIdx] += shared_B[tIdx + stride];
            shared_C[tIdx] += shared_C[tIdx + stride];
            shared_E[tIdx] += shared_E[tIdx + stride];
        }
        __syncthreads();
    }
    if((threadIdx.x == 0) && (rowIdx < n_subjects)){
        atomicAdd(d_B, shared_B[0]);
        atomicAdd(d_C, shared_C[0]);
        atomicAdd(d_E, shared_E[0]);
    }
}
// For one voxel, block-reduces the score statistic
//   score = 0.5 * sum_i lambda_i * (F_i^2 / (Sigma_P / n_subjects) - 1)
// where Sigma_P is the sum of F^2 previously produced by
// calculate_sum_A_D_Sigma_P (so Sigma_P/n is the mean of F^2) and lambda_i
// is the second column of d_Z.  Committed with one atomicAdd per block.
// NOTE(review): d_score must be zeroed before launch -- the caller
// compute_h2 memsets it on the same stream.
static __global__ void calculate_score(float * d_score, float * d_Z, float * d_F, float * d_Sigma_P, size_t voxel, size_t n_subjects){
    size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
    size_t tIdx = threadIdx.x;
    __shared__ float shared_score[BLOCK_SIZE_1];
    if(rowIdx < n_subjects){
        float F = d_F[voxel*n_subjects + rowIdx];
        F = F*F;
        shared_score[tIdx] = 0.5f*d_Z[rowIdx + n_subjects]*(F/(*d_Sigma_P/float(n_subjects)) - 1.f);
    }else{
        // Out-of-range threads contribute zero to the reduction.
        shared_score[tIdx] = 0.f;
    }
    __syncthreads();
    // Shared-memory tree reduction; barrier is outside the divergent branch.
    for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
        if(tIdx < stride && (rowIdx+stride < n_subjects)){
            shared_score[tIdx] += shared_score[tIdx + stride];
        }
        __syncthreads();
    }
    if((tIdx == 0) && (rowIdx < n_subjects)){
        atomicAdd(d_score, shared_score[0]);
    }
}
// Final single-thread step per voxel: solves the 2x2 system
// [A B; B C] * [sigma_E; sigma_A] = [D; E] by Cramer's rule, clamps negative
// variance components to zero, and stores the heritability estimate
// h2 = sigma_A / (sigma_A + sigma_E), the score indicator, and the updated
// total variance *d_Sigma_P.  d_boolean_score marks voxels with a usable
// positive estimate.
static __global__ void calculate_h2(float * d_h2, float * d_indicator, float * d_Sigma_P, float * d_score, bool * d_boolean_score, float * d_A, float * d_B, float * d_C, float * d_D, float * d_E, size_t voxel){
    d_indicator[voxel] = *d_score;
    float A = *d_A;
    float B = *d_B;
    float C = *d_C;
    float D = *d_D;
    float E = *d_E;
    float sigma_E = (C*D - B*E)/(A*C - B*B);
    float sigma_A = (A*E - B*D)/(A*C - B*B);
    if(sigma_E < 0.f)
        sigma_E = 0.f;
    if(sigma_A < 0.f)
        sigma_A = 0.f;
    float h2r;
    if((sigma_E + sigma_A) == 0.f){
        // Degenerate fit: no variance at all.
        d_h2[voxel] = 0.f;
        d_boolean_score[voxel] = false;
    }else{
        // A negative score statistic forces h2 to zero.  BUGFIX: previously
        // the `h2r = 0.f` assignment was dead code because it was
        // unconditionally overwritten by the ratio on the next line; the
        // `else` restores the evident intent.
        if(*d_score < 0){
            h2r = 0.f;
        }else{
            h2r = sigma_A/(sigma_E + sigma_A);
        }
        if(h2r <= 0.f){
            h2r = 0.f;
            d_boolean_score[voxel] = false;
        }else{
            d_boolean_score[voxel] = true;
        }
        d_h2[voxel] = h2r;
        *d_Sigma_P = sigma_E + sigma_A;
    }
}
/*
static __global__ void calculate_weights_SE(float * d_weights, bool * d_boolean_score, float * d_Z, float * d_h2, float * d_Sigma_P, size_t n_subjects, size_t voxel){
if(d_boolean_score[voxel] == false){
return;
}
size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
if(rowIdx < n_subjects){
float Sigma_A = d_h2[voxel]*(*d_Sigma_P);
float Sigma_E = *d_Sigma_P - Sigma_A;
if(Sigma_A < 0.f)
Sigma_A = 0.f;
if(Sigma_E < 0.f)
Sigma_E = 0.f;
float weight = Sigma_E + d_Z[rowIdx + n_subjects]*Sigma_A;
d_weights[rowIdx] = 1.f/(weight*weight);
}
}
static __global__ void calculate_sum_A_B_C(float * d_A, float * d_B, float * d_C, float * d_weights,
float * d_Z, bool * d_boolean_score, size_t n_subjects, size_t voxel){
if(d_boolean_score[voxel] == false)
return;
size_t rowIdx = threadIdx.x + blockIdx.x*blockDim.x;
size_t tIdx = threadIdx.x;
__shared__ float shared_A[BLOCK_SIZE_1];
__shared__ float shared_B[BLOCK_SIZE_1];
__shared__ float shared_C[BLOCK_SIZE_1];
if(rowIdx < n_subjects){
float weight = d_weights[rowIdx];
float lambda = d_Z[rowIdx + n_subjects];
shared_A[tIdx] = weight;
shared_B[tIdx] = weight*F;
shared_C[tIdx] = F;
}else{
shared_A[tIdx] = 0.f;
shared_B[tIdx] = 0.f;
shared_C[tIdx] = 0.f;
}
__syncthreads();
for(unsigned int stride = BLOCK_SIZE_1/2 ; stride > 0 ; stride >>=1){
if(tIdx < stride && (rowIdx+stride < n_subjects)){
shared_A[tIdx] += shared_A[tIdx + stride];
shared_B[tIdx] += shared_B[tIdx + stride];
shared_C[tIdx] += shared_C[tIdx + stride];
}
__syncthreads();
}
if((tIdx == 0) && (rowIdx < n_subjects)){
atomicAdd(d_A, shared_A[0]);
atomicAdd(d_B, shared_B[0]);
atomicAdd(d_C, shared_C[0]);
}
}
*/
// Per-voxel heritability (h2) estimation driver.
// For each voxel it zeroes the scalar accumulators on `stream`, then enqueues
// the regression/score kernels that reduce into them, and finally launches a
// single-thread kernel that turns the accumulated sums into h2 and the
// significance indicator for that voxel.  All work is asynchronous on
// `stream`; the caller must synchronize before reading d_h2 / d_indicator /
// d_boolean_score.  Returns 1 unconditionally (API errors are reported via
// printError).
int compute_h2(float * d_F, float * d_h2, float * d_indicator, bool * d_boolean_score,
               compute_h2_variables vars, aux_variables aux_vars,
               size_t n_subjects, size_t n_voxels, cudaStream_t stream) {
	const char * functionName = "compute_h2.cu";
	// One thread per subject; exact integer ceil-division avoids the float
	// rounding of ceil(float(n)/float(b)) for large subject counts.
	// (The original also re-assigned blockSize/gridSize fields immediately
	// after constructing them -- redundant, removed.)
	dim3 blockSize(BLOCK_SIZE_1, 1, 1);
	dim3 gridSize((n_subjects + BLOCK_SIZE_1 - 1) / BLOCK_SIZE_1, 1, 1);
	for(size_t voxel = 0; voxel < n_voxels ; voxel++){
		// d_theta holds two floats; memset(0) is a valid zero-fill for float.
		printError(cudaMemsetAsync(vars.d_theta, 0, sizeof(float)*2, stream),
		           functionName, "cudaMemset-d_theta", __LINE__);
		calculate_d_theta<<<gridSize, blockSize, 0, stream>>>(vars.d_theta, d_F, aux_vars.d_Z, aux_vars.d_ZTZI, voxel, n_subjects);
		calculate_weights<<<gridSize, blockSize, 0, stream>>>(vars.d_weights, aux_vars.d_Z, vars.d_theta, n_subjects);
		// Zero the scalar reduction targets before each accumulation kernel.
		printError(cudaMemsetAsync(vars.d_A, 0, sizeof(float), stream),
		           functionName, "cudaMemset-d_A", __LINE__);
		printError(cudaMemsetAsync(vars.d_D, 0, sizeof(float), stream),
		           functionName, "cudaMemset-d_D", __LINE__);
		printError(cudaMemsetAsync(vars.d_Sigma_P, 0, sizeof(float), stream),
		           functionName, "cudaMemset-d_Sigma_P", __LINE__);
		calculate_sum_A_D_Sigma_P<<<gridSize, blockSize, 0, stream>>>(vars.d_A, vars.d_D, vars.d_Sigma_P,
		                                                              vars.d_weights, d_F, voxel, n_subjects);
		printError(cudaMemsetAsync(vars.d_B, 0, sizeof(float), stream),
		           functionName, "cudaMemset-d_B", __LINE__);
		printError(cudaMemsetAsync(vars.d_C, 0, sizeof(float), stream),
		           functionName, "cudaMemset-d_C", __LINE__);
		printError(cudaMemsetAsync(vars.d_E, 0, sizeof(float),stream),
		           functionName, "cudaMemset-d_E", __LINE__);
		calculate_sum_B_C_E<<<gridSize, blockSize, 0 ,stream>>>(vars.d_B, vars.d_C, vars.d_E, vars.d_weights, d_F, aux_vars.d_Z, voxel, n_subjects);
		printError(cudaMemsetAsync(vars.d_score, 0, sizeof(float), stream),
		           functionName, "cudaMemset-d_score", __LINE__);
		calculate_score<<<gridSize, blockSize, 0, stream>>>(vars.d_score, aux_vars.d_Z, d_F, vars.d_Sigma_P, voxel, n_subjects);
		// Final scalar combination runs on a single thread.
		calculate_h2<<<1, 1, 0, stream>>>(d_h2, d_indicator, vars.d_Sigma_P, vars.d_score, d_boolean_score, vars.d_A, vars.d_B, vars.d_C, vars.d_D, vars.d_E, voxel);
	}
	return 1;
}
|
9a93ff266e68b3d8c5ede55ff5c85190a660ce47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "neural_net.h"
#include <time.h>
#include <cstdio>
#include <string>
// Backward pass of softmax + negative-log-likelihood loss (averaged over the
// batch): for each sample, write d(loss)/d(softmax output) at the true-class
// slot only.  NOTE(review): the other output_size-1 entries per sample are
// never written here -- presumably dSO is zero-initialized by the caller;
// confirm.  Launch: 1-D grid, one thread per sample; eps guards division.
template <typename T>
__global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) {
	int sample = blockIdx.x * blockDim.x + threadIdx.x;
	if (sample < batch_size) {
		int label = y[sample];
		T denom = SO[sample * output_size + label] * batch_size + eps;
		dSO[sample * output_size + label] = -1 / denom;
	}
}
// Per-sample cross-entropy loss: loss[i] = -log(p_true + eps), where p_true
// is the softmax probability of the ground-truth class y[i].
// Launch: 1-D grid, one thread per sample.  Result is stored as float
// regardless of T.
template <typename T>
__global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) {
	int sample = blockIdx.x * blockDim.x + threadIdx.x;
	if (sample < batch_size) {
		T prob = O[sample * num_classes + y[sample]];
		loss[sample] = -logf(prob + eps);
	}
}
// Argmax over each sample's class scores: pred_y[i] = index of the largest
// entry in row i of O (first occurrence wins on ties, since the comparison
// is strict).  Launch: 1-D grid, one thread per sample.
template <typename T>
__global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) {
	int sample = blockIdx.x * blockDim.x + threadIdx.x;
	if (sample >= batch_size)
		return;
	const T *row = O + sample * num_classes;
	int best = 0;
	T best_val = row[0];
	for (int c = 1; c < num_classes; c++) {
		if (row[c] > best_val) {
			best_val = row[c];
			best = c;
		}
	}
	pred_y[sample] = best;
}
// Computes the mean cross-entropy loss over the current batch.
// Fills the device buffer `loss` (one float per sample) via the
// computeSoftmaxLoss kernel, copies it to the host, and averages.
// NOTE(review): if the last layer is not SOFTMAX the kernel is skipped and
// stale contents of `loss` are averaged -- confirm callers guarantee a
// softmax final layer.  The blocking hipMemcpy synchronizes the null stream,
// which is also where the kernel was launched, so no explicit sync is needed.
float NeuralNet::computeLoss() {
	if (layer_type[num_layers - 1] == SOFTMAX) {
		// layer_input[num_layers] holds the softmax output of the last layer.
		if (data_type == CUDNN_DATA_FLOAT)
			hipLaunchKernelGGL(( computeSoftmaxLoss<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
		else if (data_type == CUDNN_DATA_DOUBLE)
			hipLaunchKernelGGL(( computeSoftmaxLoss<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
	}
	checkCudaErrors(hipMemcpy(h_loss, loss, batch_size * sizeof(float), hipMemcpyDeviceToHost));
	float total_loss = 0.0;
	for (int i = 0; i < batch_size; i++)
		total_loss += h_loss[i];
	return total_loss / batch_size;
}
// Counts how many predictions in the current batch match the host-side
// labels `y`, writing the result to *correct_count.
// Runs the inferClass argmax kernel on the last layer's input
// (layer_input[num_layers - 1]), copies the predicted classes back, and
// compares.  If data_type is neither float nor double, *correct_count stays 0
// and no device work is done (same as the original behavior).
// The previous version duplicated the D2H copy and counting loop in both
// type branches; only the kernel launch actually depends on the type.
void NeuralNet::compareOutputCorrect(int *correct_count, int *y) {
	*correct_count = 0;
	if (data_type == CUDNN_DATA_FLOAT) {
		float *typecast_O = (float *)layer_input[num_layers - 1];
		hipLaunchKernelGGL(( inferClass<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes);
	}
	else if (data_type == CUDNN_DATA_DOUBLE) {
		double *typecast_O = (double *)layer_input[num_layers - 1];
		hipLaunchKernelGGL(( inferClass<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes);
	}
	else {
		return;  // unknown data type: leave count at zero, as before
	}
	// Blocking copy also synchronizes the null stream the kernel ran on.
	checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), hipMemcpyDeviceToHost));
	for (int i = 0; i < batch_size; i++) {
		if (h_pred_y[i] == y[i])
			*correct_count = *correct_count + 1;
	}
}
// Constructs the network: creates streams and library handles (cuDNN, cuBLAS,
// cuRAND via their HIP equivalents), records layer metadata and per-layer
// input sizes, allocates parameter storage, and runs the vDNN memory-planning
// pass (vDNNOptimize).
// NOTE(review): execution currently stops at the exit(0) marked "remove
// later" below, so the cnmem initialization and pinned-host allocation that
// follow it are dead code until that call is removed.
NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format,
					long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo,
					UpdateRule update_rule) {
	// ---------------------- vDNN start ----------------------
	checkCudaErrors(hipStreamCreate(&stream_compute));
	checkCudaErrors(hipStreamCreate(&stream_memory));
	this->vdnn_type = vdnn_type;
	this->vdnn_conv_algo = vdnn_conv_algo;
	// ---------------------- vDNN end ------------------------
	// create handle
	checkCUDNN(cudnnCreate(&cudnn_handle));
	checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute));
	checkCUBLAS(hipblasCreate(&cublas_handle));
	checkCUBLAS(hipblasSetStream(cublas_handle, stream_compute));
	checkCURAND(hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT));
	checkCURAND(hiprandSetStream(curand_gen, stream_compute));
	checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
	init_free_bytes = free_bytes;
	std::cout << "Free bytes at start: " << free_bytes << std::endl;
	// NOTE(review): batch-norm derivatives are pre-allocated even in vDNN
	// modes (initialized to true here, unlike conv/fc) -- confirm intentional.
	pre_alloc_conv_derivative = false;
	pre_alloc_fc_derivative = false;
	pre_alloc_batch_norm_derivative = true;
	if (vdnn_type == vDNN_NONE) {
		pre_alloc_conv_derivative = true;
		pre_alloc_fc_derivative = true;
		pre_alloc_batch_norm_derivative = true;
	}
	if (data_type == DATA_FLOAT) {
		this->data_type = CUDNN_DATA_FLOAT;
		data_type_size = sizeof(float);
	}
	else if (data_type == DATA_DOUBLE) {
		this->data_type = CUDNN_DATA_DOUBLE;
		data_type_size = sizeof(double);
	}
	if (tensor_format == TENSOR_NCHW)
		this->tensor_format = CUDNN_TENSOR_NCHW;
	else if (tensor_format == TENSOR_NHWC)
		this->tensor_format = CUDNN_TENSOR_NHWC;
	this->batch_size = batch_size;
	this->softmax_eps = softmax_eps;
	this->init_std_dev = init_std_dev;
	num_layers = layers.size();
	// allocation of space for input to each layer
	// (num_layers + 1 slots: layer_input[i] is layer i's input,
	//  layer_input[num_layers] is the final output)
	layer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
	layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int));
	dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
	params = (void **)malloc(num_layers * sizeof(void *));
	LayerDimension prev_output_size;
	LayerDimension current_output_size;
	// First pass: create per-layer parameter objects and record output dims.
	for (int i = 0; i < num_layers; i++) {
		layer_type.push_back(layers[i].type);
		if (layers[i].type == CONV) {
			ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(ConvLayerParams));
			((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format,
															data_type_size, current_output_size, update_rule);
		}
		else if (layers[i].type == FULLY_CONNECTED) {
			FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(FCLayerParams));
			((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type,
														current_output_size, update_rule);
		}
		else if (layers[i].type == DROPOUT) {
			DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(DropoutLayerParams));
			((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size,
																this->tensor_format, current_output_size);
		}
		else if (layers[i].type == BATCHNORM) {
			BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(BatchNormLayerParams));
			((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size,
																current_output_size, update_rule);
		}
		else if (layers[i].type == POOLING) {
			PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
			// BUG FIX: previously allocated sizeof(BatchNormLayerParams) here;
			// the object is used as a PoolingLayerParams, so the wrong struct
			// size risks a heap overflow if PoolingLayerParams is larger.
			params[i] = malloc(sizeof(PoolingLayerParams));
			((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
																batch_size, current_output_size);
		}
		else if (layers[i].type == ACTV) {
			ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(ActivationLayerParams));
			((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
																batch_size, current_output_size);
		}
		else if (layers[i].type == SOFTMAX) {
			SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(SoftmaxLayerParams));
			((SoftmaxLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
																batch_size, current_output_size);
			// std::cout << current_output_size.N << ' ' << current_output_size.C << current_output_size.H << current_output_size.W << std::endl;
		}
		if (i == 0) {
			prev_output_size = current_output_size;
		}
		// incomplete - have to check flatten and check exact dimension
		// else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) {
		// 	std::cout << "Layer " << i << " output and next layer's input size mismatch\n";
		// 	exit(0);
		// }
	}
	// ---------------------- vDNN start ----------------------
	// allocate space in host memory for layers to be transferred
	h_layer_input = (void **)malloc(num_layers * sizeof(void *));
	to_offload = (bool *)malloc(num_layers * sizeof(bool));
	prefetched = (bool *)malloc(num_layers * sizeof(bool));
	// ---------------------- vDNN end ------------------------
	checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
	std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl;
	// allocate space for parameters
	// Exception BatchNorm - looks like it will take lots of space if only FC layers - space taken = size of one input
	// Second pass: allocate parameter storage and record input element counts.
	for (int i = 0; i < num_layers; i++) {
		size_t input_size;
		if (layers[i].type == CONV) {
			ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
			((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
														free_bytes, pre_alloc_conv_derivative);
			input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
			if (i == 0) {
				input_channels = user_params->input_channels;
				input_h = user_params->input_h;
				input_w = user_params->input_w;
			}
		}
		else if (layers[i].type == FULLY_CONNECTED) {
			FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
			((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
														free_bytes, pre_alloc_fc_derivative);
			input_size = batch_size * user_params->input_channels;
			if (i == 0) {
				input_channels = user_params->input_channels;
				input_h = 1;
				input_w = 1;
			}
		}
		else if (layers[i].type == DROPOUT) {
			DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
			((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
		}
		else if (layers[i].type == BATCHNORM) {
			BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
			((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size,
															free_bytes, pre_alloc_batch_norm_derivative);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
		}
		else if (layers[i].type == POOLING) {
			PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
			((PoolingLayerParams *)params[i])->allocateSpace(free_bytes);
			input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
			if (i == 0) {
				input_channels = user_params->input_channels;
				input_h = user_params->input_h;
				input_w = user_params->input_w;
			}
		}
		else if (layers[i].type == ACTV) {
			ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
			((ActivationLayerParams *)params[i])->allocateSpace(free_bytes);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
		}
		else if (layers[i].type == SOFTMAX) {
			SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
			((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;
			// assuming this is last layer, allocate for next layer as well
			// checkCudaErrors(hipMalloc(&layer_input[i + 1], input_size * data_type_size));
			// checkCudaErrors(hipMalloc(&dlayer_input[i + 1], input_size * data_type_size));
			layer_input_size[i + 1] = input_size;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
			if (i == num_layers - 1) {
				num_classes = user_params->channels;
			}
		}
		// do not allocate memory initially
		// checkCudaErrors(hipMalloc(&layer_input[i], input_size * data_type_size));
		// checkCudaErrors(hipMalloc(&dlayer_input[i], input_size * data_type_size));
		// ---------------------- vDNN start ----------------------
		layer_input_size[i] = input_size;
		// ---------------------- vDNN end ------------------------
	}
	checkCudaErrors(hipDeviceSynchronize());
	checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
	std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl;
	// very small - could be allocated initially itself
	checkCudaErrors(hipMalloc((void **)&y, batch_size * sizeof(int)));
	checkCudaErrors(hipMalloc((void **)&pred_y, batch_size * sizeof(int)));
	checkCudaErrors(hipMalloc((void **)&loss, batch_size * sizeof(float)));
	checkCudaErrors(hipMalloc(&one_vec, batch_size * data_type_size));
	if (this->data_type == CUDNN_DATA_FLOAT)
		hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)one_vec, batch_size, 1);
	else
		hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)one_vec, batch_size, 1);
	checkCudaErrors(hipHostMalloc((void **)&h_loss, batch_size * sizeof(float)));
	checkCudaErrors(hipHostMalloc((void **)&h_pred_y, batch_size * sizeof(int)));
	// do not allocate workspace initially
	// allocate space for workspace and also keep track of algo
	// size_t cur_workspace_size;
	// workspace_size = 0;
	// for (int i = 0; i < num_layers; i++) {
	// 	if (layers[i].type == CONV) {
	// 		((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes);
	// 		if (cur_workspace_size > workspace_size)
	// 			workspace_size = cur_workspace_size;
	// 	}
	// }
	// checkCudaErrors(hipMalloc(&workspace, workspace_size));
	// free_bytes = free_bytes - workspace_size;
	checkCudaErrors(hipDeviceSynchronize());
	checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
	// leave 600 MB and use the rest
	std::cout << "Free bytes: " << free_bytes << std::endl;
	// ---------------------- vDNN start ----------------------
	size_t exp_max_consume, max_consume;
	vDNNOptimize(exp_max_consume, max_consume);
	std::cout << "actual_max_consume: " << max_consume << std::endl;
	std::cout << "exp_max_consume: " << exp_max_consume << std::endl;
	std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
	std::cout << "exp_free_bytes(MB): " << (free_bytes - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
	std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes - exp_max_consume)) / (1.0 * 1024 * 1024) << std::endl;
	std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes - max_consume)) / (1.0 * 1024 * 1024) << std::endl;
	// ---------------------- vDNN end ------------------------
	// remove later
	exit(0);
	// ---------------------- vDNN start ----------------------
	free_bytes = max_consume;
	cnmemDevice_t cnmem_device;
	size_t cnmem_stream_memory_size = free_bytes;
	cnmem_device.device = 0;
	cnmem_device.size = cnmem_stream_memory_size;
	cnmem_device.numStreams = 0;
	cnmem_device.streams = NULL;
	cnmem_device.streamSizes = NULL;
	// do not allow call to hipMalloc
	checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
	// ---------------------- vDNN end ------------------------
	// ---------------------- vDNN start ----------------------
	for (int i = 0; i < num_layers; i++) {
		std::cerr << "to_offload[i] " << to_offload[i] << std::endl;
	}
	for (int i = 0; i < num_layers; i++) {
		// allocate pinned memory in host
		if (to_offload[i])
			checkCudaErrors(hipHostMalloc(&h_layer_input[i], layer_input_size[i] * data_type_size));
	}
	// ---------------------- vDNN end ------------------------
	checkCudaErrors(hipDeviceSynchronize());
	size_t temp_free_bytes;
	checkCudaErrors(hipMemGetInfo(&temp_free_bytes, &total_bytes));
	std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl;
	// {
	// 	int n;
	// 	std::cout << "waiting..\n";
	// 	std::cin >> n;
	// }
	// data of time
	checkCudaErrors(hipEventCreate(&start_compute));
	checkCudaErrors(hipEventCreate(&stop_compute));
	checkCudaErrors(hipEventCreate(&start_transfer));
	checkCudaErrors(hipEventCreate(&stop_transfer));
}
// Dry-run of one training iteration's memory behavior: walks the forward and
// backward passes, adding/subtracting each allocation in a CnmemSpace tracker
// (no real device allocation happens here).  Tracks the peak footprint in
// max_consume, writes it to exp_max_consume, then cross-checks the plan
// against the real cnmem allocator via simulateCNMEMMemory.
// Returns false as soon as the tracker reports the budget exceeded.
// The SUB/ADD ordering mirrors the actual alloc/free order of the training
// loop, so statement order here is load-bearing.
bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) {
	CnmemSpace space_tracker(free_bytes);
	max_consume = 0;
	// forward pass
	// allocate space for 1st input
	std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
	space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
	space_tracker.updateMaxConsume(max_consume);
	std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl;
	std::cerr << "Forward pass" << std::endl;
	for (int i = 0; i < num_layers; i++) {
		if (layer_type[i] == SOFTMAX)
			break;
		std::cerr << "Processing layer " << i << std::endl;
		std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
		// output buffer of layer i (== input of layer i+1)
		space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
		std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << std::endl;
		space_tracker.updateMaxConsume(max_consume);
		if (layer_type[i] == CONV) {
			ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
			// conv forward workspace is transient: allocated for the layer,
			// freed right after (SUB then ADD below)
			size_t cur_workspace_size;
			checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard, cur_workspace_size));
			space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
			space_tracker.updateMaxConsume(max_consume);
			if (!space_tracker.isAvailable())
				return false;
			std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl;
			// current layer computation over, deallocate workspace
			space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
			std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl;
		}
		if (!space_tracker.isAvailable())
			return false;
		// deallocate layer input
		// (offloaded layers free their input on device once transferred to host)
		if (to_offload[i]) {
			std::cerr << "deallocating input to " << i << std::endl;
			space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
			std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
		}
	}
	std::cerr << "Backward pass" << std::endl;
	// sanity: final layer's recorded size must match batch_size * num_classes
	if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) {
		std::cout << "Panic!! Using wrong size\n";
		exit(0);
	}
	// backward pass
	// derivative w.r.t. the final output
	space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
	std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl;
	space_tracker.updateMaxConsume(max_consume);
	// std::cerr << "max_consume: " << max_consume << std::endl;
	for (int i = num_layers - 1; i >= 0; i--) {
		// allocate space for previous layer derivative
		std::cerr << "Processing layer " << i << std::endl;
		std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl;
		if (i > 0) {
			// SOFTMAX shares its derivative buffer with the next layer,
			// so no new allocation is simulated for it
			if (layer_type[i] == SOFTMAX)
				continue;
			else {
				space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
				std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl;
				space_tracker.updateMaxConsume(max_consume);
			}
			// std::cerr << "max_consume: " << max_consume << std::endl;
		}
		int layer_to_prefetch = findPrefetchLayer(i);
		// if layer to be prefetched, allocate space for that layer
		if (layer_to_prefetch != -1) {
			std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl;
			space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
			std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl;
			space_tracker.updateMaxConsume(max_consume);
		}
		if (layer_type[i] == CONV) {
			ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
			// backward needs max(filter-grad workspace, data-grad workspace);
			// layer 0 has no data gradient to compute
			size_t cur_filter_workspace_size;
			checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard, cur_filter_workspace_size));
			size_t cur_data_workspace_size = 0;
			if (i > 0)
				checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard, cur_data_workspace_size));
			size_t cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size :cur_data_workspace_size;
			space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
			std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl;
			space_tracker.updateMaxConsume(max_consume);
			// weight/bias gradients are transient unless pre-allocated
			if (!pre_alloc_conv_derivative) {
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
				space_tracker.updateMaxConsume(max_consume);
				std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
			// std::cerr << "max_consume: " << max_consume << std::endl;
			if (!space_tracker.isAvailable())
				return false;
			// current layer computation over, deallocate workspace
			space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
			std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl;
			if (!pre_alloc_conv_derivative) {
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
				std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
		}
		else if (layer_type[i] == FULLY_CONNECTED) {
			FCLayerParams *cur_params = (FCLayerParams *)params[i];
			if (!pre_alloc_fc_derivative) {
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
				space_tracker.updateMaxConsume(max_consume);
				std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
			if (!space_tracker.isAvailable())
				return false;
			if (!pre_alloc_fc_derivative) {
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
				std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
		}
		else if (layer_type[i] == BATCHNORM) {
			BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
			// two buffers: dscale and dbias (same allocation_size each)
			if (!pre_alloc_batch_norm_derivative) {
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
				space_tracker.updateMaxConsume(max_consume);
				std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
			if (!space_tracker.isAvailable())
				return false;
			if (!pre_alloc_batch_norm_derivative) {
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
				std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
		}
		if (!space_tracker.isAvailable())
			return false;
		// deallocate layer output and derivative
		space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
		space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
		std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl;
		// if 1st layer, deallocate input layer also
		if (i == 0) {
			space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
			std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
		}
	}
	// Sanity checks: the tracker should end exactly at zero consumption.
	// NOTE(review): the first message ("more free bytes") for consumed > 0
	// looks inverted relative to its condition -- confirm the intended wording.
	if (space_tracker.getConsumed() > 0)
		std::cerr << "Panic!! more free bytes\n";
	if (space_tracker.getConsumed() != 0)
		std::cerr << "Panic!! bytes not freed properly\n";
	// return true;
	exp_max_consume = max_consume;
	// check with cnmem once
	bool ret_val = simulateCNMEMMemory(max_consume);
	return ret_val;
}
bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) {
size_t init_max_consume = max_consume;
cnmemDevice_t cnmem_device;
size_t t;
checkCudaErrors(hipMemGetInfo(&free_bytes, &t));
std::cout << "free_bytes: " << free_bytes << std::endl;
free_bytes -= 100 * 1024 * 1024;
cnmem_device.device = 0;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
std::string cnmem_memory_state_filename;
if (vdnn_type == vDNN_ALL) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_p.dat";
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_m.dat";
}
}
else if (vdnn_type == vDNN_CONV) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_p.dat";
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_m.dat";
}
}
else if (vdnn_type == vDNN_DYN) {
cnmem_memory_state_filename = "cnmem_dyn.dat";
}
else {
cnmem_memory_state_filename = "cnmem_unknown.dat";
}
FILE *cnmem_memory_state_fptr = fopen(cnmem_memory_state_filename.c_str(), "w");
size_t run_count = 0;
bool out_of_memory = false;
while (true) {
run_count++;
if (max_consume >= free_bytes)
break;
out_of_memory = false;
cnmem_device.size = max_consume;
std::cerr << run_count << ' ' << max_consume << std::endl;
if (max_consume > free_bytes)
std::cerr << "panic!! max_consume > free_bytes\n";
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
resetPrefetched();
fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count);
fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume);
fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "initial state\n");
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL),
layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", 0, layer_input_size[0] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
// forward propagate
for (int i = 0; i < num_layers; i++) {
size_t cur_workspace_size;
void *cur_workspace;
checkCNMEMSim(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (to_offload[i]) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
}
if (out_of_memory) {
checkCNMEM(cnmemFinalize());
if (max_consume < free_bytes)
continue;
else
break;
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL),
layer_input_size[num_layers] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", num_layers, layer_input_size[num_layers] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
}
else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEMSim(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL),
layer_input_size[layer_to_prefetch] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. prefetch layer_input[%d] - size: %lu\n", layer_to_prefetch, layer_input_size[layer_to_prefetch] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->kernel_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->weight_matrix_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n", cur_params->allocation_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n", cur_params->allocation_size * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
continue;
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
if (!pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free dlayer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (i == 0) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEM(cnmemFinalize());
if (out_of_memory) {
if (max_consume < free_bytes)
continue;
else
break;
}
break;
}
free_bytes += 100 * 1024 * 1024;
if (max_consume < free_bytes) {
double exp_size = (init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
double act_size = (max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n", exp_size);
fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n", act_size);
}
else {
fprintf(cnmem_memory_state_fptr, "out of memory\n");
}
fclose(cnmem_memory_state_fptr);
if (max_consume < free_bytes)
return true;
else
return false;
}
// Selects an offload policy (and, for vDNN_DYN, a conv-algo preference) that
// fits in device memory. For the fixed policies (vDNN_ALL / vDNN_CONV /
// vDNN_NONE) the offload set is determined by the type and we only verify
// that the requested conv algorithm preference fits; for vDNN_DYN we greedily
// try progressively more memory-frugal configurations. exp_max_consume /
// max_consume are filled in by simulateNeuralNetworkMemory.
// Exits the process if no configuration is trainable.
void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) {
	bool hard = true, soft = false;

	// Fixed-policy types: the three cases only differ in the offload set.
	if (vdnn_type == vDNN_ALL or vdnn_type == vDNN_CONV or vdnn_type == vDNN_NONE) {
		if (vdnn_type == vDNN_ALL)
			setOffload(OFFLOAD_ALL);
		else if (vdnn_type == vDNN_CONV)
			setOffload(OFFLOAD_CONV);
		else
			setOffload(OFFLOAD_NONE);
		resetPrefetched();
		if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
			if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
				outOfMemory();
		}
		else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
			if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
				outOfMemory();
		}
		return;
	}
	if (vdnn_type == vDNN_DYN) {
		// First check trainability at all: offload everything, memory-optimal algos.
		std::cerr << "vDNN_DYN\n";
		setOffload(NeuralNet::OFFLOAD_ALL);
		resetPrefetched();
		if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
			outOfMemory();
		// Try the fastest configuration first: performance-optimal algos, no offload.
		setOffload(NeuralNet::OFFLOAD_NONE);
		resetPrefetched();
		if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
			std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n";
			return;
		}
		// Keep performance-optimal algos but add offloading (conv-only, then all).
		setOffload(NeuralNet::OFFLOAD_CONV);
		resetPrefetched();
		if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
			std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n";
			return;
		}
		setOffload(NeuralNet::OFFLOAD_ALL);
		resetPrefetched();
		if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
			std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n";
			return;
		}
		// Soft constraint: greedily trade per-layer algo speed for memory.
		setOffload(NeuralNet::OFFLOAD_CONV);
		resetPrefetched();
		if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
			std::cerr << "Choosing GREEDY, CONV OFFLOAD\n";
			return;
		}
		setOffload(NeuralNet::OFFLOAD_ALL);
		resetPrefetched();
		if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
			std::cerr << "Choosing GREEDY, ALL OFFLOAD\n";
			return;
		}
		// Last resort: memory-optimal algorithms everywhere.
		setOffload(NeuralNet::OFFLOAD_CONV);
		resetPrefetched();
		if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
			std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n";
			return;
		}
		setOffload(NeuralNet::OFFLOAD_ALL);
		resetPrefetched();
		if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
			std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n";
			return;
		}
	}
	// Nothing fits (or unknown vdnn_type): give up, matching the original behavior.
	exit(0);
}
// Marks which layers' inputs should be offloaded to host memory during the
// forward pass. OFFLOAD_NONE clears every flag; OFFLOAD_CONV marks only CONV
// layers; OFFLOAD_ALL marks everything except ACTV/SOFTMAX (whose inputs are
// aliased, not separately allocated). For CONV/ALL the last "real"
// (non-SOFTMAX/ACTV) layer is always excluded, since its output feeds the
// loss computation immediately.
void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) {
	if (offload_type == OFFLOAD_NONE) {
		for (int i = 0; i < num_layers; i++)
			to_offload[i] = false;
		return;
	}
	if (offload_type == OFFLOAD_CONV) {
		for (int i = 0; i < num_layers; i++)
			to_offload[i] = (layer_type[i] == CONV);
	}
	else if (offload_type == OFFLOAD_ALL) {
		for (int i = 0; i < num_layers; i++)
			to_offload[i] = !(layer_type[i] == ACTV or layer_type[i] == SOFTMAX);
	}
	else {
		// Unknown type: leave flags untouched (matches original behavior).
		return;
	}
	// Common tail for CONV/ALL: set last non SOFTMAX/ACTV layer to no_offload.
	for (int i = num_layers - 1; i >= 0; i--) {
		if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
			continue;
		to_offload[i] = false;
		break;
	}
}
// Forget all prefetch bookkeeping so the next backward pass re-fetches
// every offloaded layer input from scratch.
void NeuralNet::resetPrefetched() {
	int i = 0;
	while (i < num_layers) {
		prefetched[i] = false;
		i += 1;
	}
}
// Convenience overload: runs a full pass but discards the per-layer
// vDNN lag measurements collected by the main getLoss.
void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) {
	std::vector<float> fwd_lag;
	std::vector<float> bwd_lag;
	this->getLoss(X, y, learning_rate, fwd_lag, bwd_lag, train, correct_count, loss);
}
void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) {
CnmemSpace space_tracker(free_bytes);
// std::cout << "here\n";
// std::cout << "Free bytes: " << free_bytes << std::endl;
for (int i = 0; i < num_layers; i++)
prefetched[i] = false;
checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
checkCudaErrors(hipMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, hipMemcpyHostToDevice));
if (train == true) {
checkCudaErrors(hipMemcpy(this->y, y, batch_size * data_type_size, hipMemcpyHostToDevice));
}
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
for (int i = 0; i < num_layers; i++) {
if (train == false && i == num_layers - 1)
break;
// ---------------------- vDNN start ----------------------
size_t cur_workspace_size;
void *cur_workspace;
// offload if required
if (i > 0 && to_offload[i] && train == true)
checkCudaErrors(hipMemcpyAsync(h_layer_input[i], layer_input[i],
layer_input_size[i] * data_type_size, hipMemcpyDeviceToHost, stream_memory));
checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
// std::cout << "here" << i << std::endl;
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
// computation
checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W,
cur_params->conv_desc, cur_params->fwd_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha,
cur_params->bias_desc, cur_params->b,
&alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Salpha,
(float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1,
&Salpha,
(float *)layer_input[i + 1], cur_params->C_out));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Dalpha,
(double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1,
&Dalpha,
(double *)layer_input[i + 1], cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
// std::cout << "FChere" << i << std::endl;
}
else if (layer_type[i] == DROPOUT) {
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space,
cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (train == true) {
checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->factor,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
}
else {
checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon));
}
}
else if (layer_type[i] == POOLING) {
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == ACTV) {
// std::cout << "Actv\n";
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
if (train == true) {
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(hipDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
if (train == true) {
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
i--;
}
struct timespec start_time, end_time;
checkCudaErrors(hipStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(hipStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
fwd_vdnn_lag.push_back(lag);
}
// std::cout << "EndSynchere" << i << std::endl;
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
if (to_offload[i] && train == true) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (train == false) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// std::cout << "EndSynchere" << i << std::endl;
// ---------------------- vDNN end ------------------------
}
// std::cout << "here" << std::endl;
if (train == false) {
compareOutputCorrect(correct_count, y);
checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size);
return;
}
*scalar_loss = computeLoss();
// ---------------------- vDNN start ----------------------
checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
if (layer_type[num_layers - 1] == SOFTMAX) {
// SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1];
if (data_type == CUDNN_DATA_FLOAT) {
checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float)));
hipLaunchKernelGGL(( softmaxLossBackProp<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, this->y, (float *)layer_input[num_layers],
(float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double)));
hipLaunchKernelGGL(( softmaxLossBackProp<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, this->y, (double *)layer_input[num_layers],
(double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
}
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
}
else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
if (layer_to_prefetch != 0) {
checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory));
}
else {
// std::cout << "transfer here\n";
checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], X,
layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory));
// std::cout << "transfer here\n";
}
}
checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
}
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha,
cur_params->output_tensor, dlayer_input[i + 1],
&beta,
cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_filter_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->filter_desc,
cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha,
cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->input_tensor, dlayer_input[i]));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size,
&Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta,
(float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)one_vec, batch_size,
&Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta,
(double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
}
checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode,
&alpha, &beta,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale,
cur_params->dscale, cur_params->dbias,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
&beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(hipDeviceSynchronize());
struct timespec start_time, end_time;
checkCudaErrors(hipStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(hipStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag);
}
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == BATCHNORM) {
if (train == true and !pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
if (i == 0) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
// ---------------------- vDNN end ------------------------
}
if (space_tracker.getConsumed() != 0) {
std::cout << "Panic!! Space not updated properly\n";
}
// exit(0);
}
// Walks backwards from cur_layer looking for the nearest layer whose input
// was offloaded but has not yet been prefetched; marks it prefetched and
// returns its index. The search stops (returning -1) at the first CONV layer
// that is not itself a pending offload, or when the front of the network is
// reached.
int NeuralNet::findPrefetchLayer(int cur_layer) {
	int i = cur_layer - 1;
	while (i >= 0) {
		if (to_offload[i] && !prefetched[i]) {
			prefetched[i] = true;
			return i;
		}
		if (layer_type[i] == CONV)
			return -1;
		i--;
	}
	return -1;
}
| 9a93ff266e68b3d8c5ede55ff5c85190a660ce47.cu | #include "neural_net.h"
#include <time.h>
#include <cstdio>
#include <string>
// Gradient of the batch-averaged NLL loss w.r.t. the softmax output SO,
// one thread per sample. Only the entry at the true class is written; the
// caller is responsible for zeroing dSO beforehand (it does, via memset).
// eps guards the division when the predicted probability underflows.
template <typename T>
__global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= batch_size)
		return;
	int label = y[idx];
	dSO[idx * output_size + label] = -1 / (SO[idx * output_size + label] * batch_size + eps);
}
// Per-sample negative log-likelihood: loss[i] = -log(p_true + eps), one
// thread per sample. O holds the softmax probabilities laid out row-major
// as [batch_size x num_classes]; y holds integer class labels.
template <typename T>
__global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < batch_size) {
		T p = O[idx * num_classes + y[idx]];
		loss[idx] = -logf(p + eps);
	}
}
// Argmax over each sample's class scores, one thread per sample.
// Ties keep the earliest class index (strict '>' comparison).
template <typename T>
__global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= batch_size)
		return;
	int best = 0;
	T best_val = O[idx * num_classes];
	for (int c = 1; c < num_classes; c++) {
		T v = O[idx * num_classes + c];
		if (v > best_val) {
			best_val = v;
			best = c;
		}
	}
	pred_y[idx] = best;
}
// Computes the mean cross-entropy loss over the current minibatch.
// Launches the per-sample loss kernel (only when the final layer is SOFTMAX),
// copies the per-sample losses back to the host and averages them.
// NOTE(review): if the last layer is not SOFTMAX, `loss` is read uncopied —
// presumably that configuration never occurs; verify against callers.
float NeuralNet::computeLoss() {
	if (layer_type[num_layers - 1] == SOFTMAX) {
		int num_blocks = ceil(1.0 * batch_size / BW);
		if (data_type == CUDNN_DATA_FLOAT)
			computeSoftmaxLoss<float><<<num_blocks, BW>>>((float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
		else if (data_type == CUDNN_DATA_DOUBLE)
			computeSoftmaxLoss<double><<<num_blocks, BW>>>((double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
	}
	// Blocking copy on the default stream also synchronizes with the kernel above.
	checkCudaErrors(cudaMemcpy(h_loss, loss, batch_size * sizeof(float), cudaMemcpyDeviceToHost));
	float total_loss = 0.0;
	for (int i = 0; i < batch_size; i++)
		total_loss += h_loss[i];
	return total_loss / batch_size;
}
// Counts how many predictions of the final layer match the ground-truth host
// labels y, storing the result in *correct_count.
// Runs the type-appropriate argmax kernel on the last layer's output, copies
// the predicted classes to the host, and compares them against y.
// (Refactor: the memcpy + counting loop was duplicated verbatim in the float
// and double branches; it is now shared.)
void NeuralNet::compareOutputCorrect(int *correct_count, int *y) {
	*correct_count = 0;
	if (data_type == CUDNN_DATA_FLOAT) {
		float *typecast_O = (float *)layer_input[num_layers - 1];
		inferClass<float><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes);
	}
	else if (data_type == CUDNN_DATA_DOUBLE) {
		double *typecast_O = (double *)layer_input[num_layers - 1];
		inferClass<double><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes);
	}
	else {
		// Unsupported data type: leave the count at zero (same as before).
		return;
	}
	// Blocking copy on the default stream synchronizes with the kernel above.
	checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), cudaMemcpyDeviceToHost));
	for (int i = 0; i < batch_size; i++) {
		if (h_pred_y[i] == y[i])
			*correct_count = *correct_count + 1;
	}
}
// Builds the network: creates compute/memory streams and the cuDNN, cuBLAS
// and cuRAND handles, sizes every layer, allocates parameter memory, then
// runs the vDNN memory simulation (vDNNOptimize) to decide the offload /
// prefetch policy and initializes the cnmem pool with the simulated maximum
// consumption. Also allocates the small per-batch buffers (labels,
// predictions, losses, ones vector) and pinned host staging buffers for
// offloaded layer inputs.
// Bug fix: the POOLING branch previously allocated sizeof(BatchNormLayerParams)
// for a PoolingLayerParams object (copy-paste error).
// NOTE(review): contains a debug `exit(0)` marked "remove later" below.
NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format,
					long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo,
					UpdateRule update_rule) {
	// ---------------------- vDNN start ----------------------
	// Separate streams so offload/prefetch copies overlap with compute.
	checkCudaErrors(cudaStreamCreate(&stream_compute));
	checkCudaErrors(cudaStreamCreate(&stream_memory));
	this->vdnn_type = vdnn_type;
	this->vdnn_conv_algo = vdnn_conv_algo;
	// ---------------------- vDNN end ------------------------

	// create handles; all library work is bound to the compute stream
	checkCUDNN(cudnnCreate(&cudnn_handle));
	checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute));

	checkCUBLAS(cublasCreate(&cublas_handle));
	checkCUBLAS(cublasSetStream(cublas_handle, stream_compute));

	checkCURAND(curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT));
	checkCURAND(curandSetStream(curand_gen, stream_compute));

	checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
	init_free_bytes = free_bytes;
	std::cout << "Free bytes at start: " << free_bytes << std::endl;

	// Derivative buffers are allocated lazily per-iteration unless vDNN is
	// disabled, in which case everything is pre-allocated up front.
	pre_alloc_conv_derivative = false;
	pre_alloc_fc_derivative = false;
	pre_alloc_batch_norm_derivative = true;
	if (vdnn_type == vDNN_NONE) {
		pre_alloc_conv_derivative = true;
		pre_alloc_fc_derivative = true;
		pre_alloc_batch_norm_derivative = true;
	}

	if (data_type == DATA_FLOAT) {
		this->data_type = CUDNN_DATA_FLOAT;
		data_type_size = sizeof(float);
	}
	else if (data_type == DATA_DOUBLE) {
		this->data_type = CUDNN_DATA_DOUBLE;
		data_type_size = sizeof(double);
	}

	if (tensor_format == TENSOR_NCHW)
		this->tensor_format = CUDNN_TENSOR_NCHW;
	else if (tensor_format == TENSOR_NHWC)
		this->tensor_format = CUDNN_TENSOR_NHWC;

	this->batch_size = batch_size;
	this->softmax_eps = softmax_eps;
	this->init_std_dev = init_std_dev;

	num_layers = layers.size();
	// allocation of space for input to each layer
	// (num_layers + 1 entries: layer_input[i] is the input of layer i,
	// layer_input[num_layers] is the final output)
	layer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
	layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int));
	dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
	params = (void **)malloc(num_layers * sizeof(void *));

	LayerDimension prev_output_size;
	LayerDimension current_output_size;
	// First pass: create and initialize per-layer parameter objects; each
	// initializeValues call also reports the layer's output dimensions.
	for (int i = 0; i < num_layers; i++) {
		layer_type.push_back(layers[i].type);
		if (layers[i].type == CONV) {
			ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(ConvLayerParams));
			((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format,
																data_type_size, current_output_size, update_rule);
		}
		else if (layers[i].type == FULLY_CONNECTED) {
			FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(FCLayerParams));
			((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type,
															current_output_size, update_rule);
		}
		else if (layers[i].type == DROPOUT) {
			DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(DropoutLayerParams));
			((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size,
																this->tensor_format, current_output_size);
		}
		else if (layers[i].type == BATCHNORM) {
			BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(BatchNormLayerParams));
			((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size,
																	current_output_size, update_rule);
		}
		else if (layers[i].type == POOLING) {
			PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
			// Fixed: was malloc(sizeof(BatchNormLayerParams)) — allocate the
			// struct that is actually constructed/used here.
			params[i] = malloc(sizeof(PoolingLayerParams));
			((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
																batch_size, current_output_size);
		}
		else if (layers[i].type == ACTV) {
			ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(ActivationLayerParams));
			((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
																	batch_size, current_output_size);
		}
		else if (layers[i].type == SOFTMAX) {
			SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
			params[i] = malloc(sizeof(SoftmaxLayerParams));
			((SoftmaxLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
																batch_size, current_output_size);
			// std::cout << current_output_size.N << ' ' << current_output_size.C << current_output_size.H << current_output_size.W << std::endl;
		}
		if (i == 0) {
			prev_output_size = current_output_size;
		}
		// incomplete - have to check flatten and check exact dimension
		// else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) {
		// 	std::cout << "Layer " << i << " output and next layer's input size mismatch\n";
		// 	exit(0);
		// }
	}

	// ---------------------- vDNN start ----------------------
	// allocate space in host memory for layers to be transferred
	h_layer_input = (void **)malloc(num_layers * sizeof(void *));
	to_offload = (bool *)malloc(num_layers * sizeof(bool));
	prefetched = (bool *)malloc(num_layers * sizeof(bool));
	// ---------------------- vDNN end ------------------------
	checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
	std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl;
	// Second pass: allocate parameter memory and record each layer's input
	// size (in elements) in layer_input_size.
	// Exception BatchNorm - looks like it will take lots of space if only FC layers - space taken = size of one input
	for (int i = 0; i < num_layers; i++) {
		size_t input_size;
		if (layers[i].type == CONV) {
			ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
			((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
															free_bytes, pre_alloc_conv_derivative);

			input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
			if (i == 0) {
				input_channels = user_params->input_channels;
				input_h = user_params->input_h;
				input_w = user_params->input_w;
			}
		}
		else if (layers[i].type == FULLY_CONNECTED) {
			FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
			((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
														free_bytes, pre_alloc_fc_derivative);
			input_size = batch_size * user_params->input_channels;
			if (i == 0) {
				input_channels = user_params->input_channels;
				input_h = 1;
				input_w = 1;
			}
		}
		else if (layers[i].type == DROPOUT) {
			DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
			((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
		}
		else if (layers[i].type == BATCHNORM) {
			BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
			((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size,
																free_bytes, pre_alloc_batch_norm_derivative);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
		}
		else if (layers[i].type == POOLING) {
			PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
			((PoolingLayerParams *)params[i])->allocateSpace(free_bytes);
			input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
			if (i == 0) {
				input_channels = user_params->input_channels;
				input_h = user_params->input_h;
				input_w = user_params->input_w;
			}
		}
		else if (layers[i].type == ACTV) {
			ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
			((ActivationLayerParams *)params[i])->allocateSpace(free_bytes);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
		}
		else if (layers[i].type == SOFTMAX) {
			SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
			((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes);
			input_size = batch_size * user_params->channels * user_params->h * user_params->w;

			// assuming this is last layer, allocate for next layer as well
			// checkCudaErrors(cudaMalloc(&layer_input[i + 1], input_size * data_type_size));
			// checkCudaErrors(cudaMalloc(&dlayer_input[i + 1], input_size * data_type_size));
			layer_input_size[i + 1] = input_size;
			if (i == 0) {
				input_channels = user_params->channels;
				input_h = user_params->h;
				input_w = user_params->w;
			}
			if (i == num_layers - 1) {
				num_classes = user_params->channels;
			}
		}

		// do not allocate memory initially
		// checkCudaErrors(cudaMalloc(&layer_input[i], input_size * data_type_size));
		// checkCudaErrors(cudaMalloc(&dlayer_input[i], input_size * data_type_size));

		// ---------------------- vDNN start ----------------------
		layer_input_size[i] = input_size;
		// ---------------------- vDNN end ------------------------
	}
	checkCudaErrors(cudaDeviceSynchronize());
	checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
	std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl;
	// very small - could be allocated initially itself
	checkCudaErrors(cudaMalloc((void **)&y, batch_size * sizeof(int)));
	checkCudaErrors(cudaMalloc((void **)&pred_y, batch_size * sizeof(int)));
	checkCudaErrors(cudaMalloc((void **)&loss, batch_size * sizeof(float)));
	checkCudaErrors(cudaMalloc(&one_vec, batch_size * data_type_size));

	// ones vector used as the bias-broadcast multiplicand in GEMM calls
	if (this->data_type == CUDNN_DATA_FLOAT)
		fillValue<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)one_vec, batch_size, 1);
	else
		fillValue<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)one_vec, batch_size, 1);

	// pinned host buffers so the loss/prediction copies can be fast/async
	checkCudaErrors(cudaMallocHost((void **)&h_loss, batch_size * sizeof(float)));
	checkCudaErrors(cudaMallocHost((void **)&h_pred_y, batch_size * sizeof(int)));

	// do not allocate workspace initially
	// allocate space for workspace and also keep track of algo
	// size_t cur_workspace_size;
	// workspace_size = 0;
	// for (int i = 0; i < num_layers; i++) {
	// 	if (layers[i].type == CONV) {
	// 		((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes);
	// 		if (cur_workspace_size > workspace_size)
	// 			workspace_size = cur_workspace_size;
	// 	}
	// }

	// checkCudaErrors(cudaMalloc(&workspace, workspace_size));
	// free_bytes = free_bytes - workspace_size;
	checkCudaErrors(cudaDeviceSynchronize());
	checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));

	// leave 600 MB and use the rest
	std::cout << "Free bytes: " << free_bytes << std::endl;

	// ---------------------- vDNN start ----------------------
	// Simulate the whole training iteration to compute the expected and
	// actual peak memory consumption under the chosen offload policy.
	size_t exp_max_consume, max_consume;
	vDNNOptimize(exp_max_consume, max_consume);
	std::cout << "actual_max_consume: " << max_consume << std::endl;
	std::cout << "exp_max_consume: " << exp_max_consume << std::endl;
	std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
	std::cout << "exp_free_bytes(MB): " << (free_bytes - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
	std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes - exp_max_consume)) / (1.0 * 1024 * 1024) << std::endl;
	std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes - max_consume)) / (1.0 * 1024 * 1024) << std::endl;

	// ---------------------- vDNN end ------------------------

	// remove later
	exit(0);

	// ---------------------- vDNN start ----------------------
	// Size the cnmem pool to exactly the simulated peak; CANNOT_GROW makes
	// any overshoot an explicit failure instead of a silent cudaMalloc.
	free_bytes = max_consume;
	cnmemDevice_t cnmem_device;
	size_t cnmem_stream_memory_size = free_bytes;

	cnmem_device.device = 0;
	cnmem_device.size = cnmem_stream_memory_size;
	cnmem_device.numStreams = 0;
	cnmem_device.streams = NULL;
	cnmem_device.streamSizes = NULL;

	// do not allow call to cudaMalloc
	checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
	// ---------------------- vDNN end ------------------------

	// ---------------------- vDNN start ----------------------
	for (int i = 0; i < num_layers; i++) {
		std::cerr << "to_offload[i] " << to_offload[i] << std::endl;
	}

	for (int i = 0; i < num_layers; i++) {
		// allocate pinned memory in host for every offloaded layer input
		if (to_offload[i])
			checkCudaErrors(cudaMallocHost(&h_layer_input[i], layer_input_size[i] * data_type_size));
	}
	// ---------------------- vDNN end ------------------------
	checkCudaErrors(cudaDeviceSynchronize());
	size_t temp_free_bytes;
	checkCudaErrors(cudaMemGetInfo(&temp_free_bytes, &total_bytes));
	std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl;
	// {
	// 	int n;
	// 	std::cout << "waiting..\n";
	// 	std::cin >> n;
	// }

	// events used to time compute vs. transfer phases
	checkCudaErrors(cudaEventCreate(&start_compute));
	checkCudaErrors(cudaEventCreate(&stop_compute));

	checkCudaErrors(cudaEventCreate(&start_transfer));
	checkCudaErrors(cudaEventCreate(&stop_transfer));
}
// Dry-runs one training iteration (forward + backward) against a CnmemSpace
// accounting object to estimate peak device-memory consumption under the
// current offload policy, without allocating anything on the GPU.
// Every updateSpace(SUB, ...) models an allocation and must be mirrored by a
// matching updateSpace(ADD, ...) modelling the free — the exact ordering of
// these calls is what makes the estimate correct.
// Outputs: exp_max_consume = analytically tracked peak; max_consume is then
// refined by simulateCNMEMMemory (actual cnmem pool simulation).
// Returns false as soon as the tracker reports insufficient memory.
// NOTE(review): the log labels say "(MB)" but print getConsumed() directly —
// units depend on CnmemSpace's internals; confirm before trusting the logs.
// NOTE(review): findPrefetchLayer mutates `prefetched`, so callers reset it
// (resetPrefetched) before/after simulation — verify at call sites.
bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) {
	CnmemSpace space_tracker(free_bytes);
	max_consume = 0;
	// forward pass
	// allocate space for 1st input
	std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
	space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
	space_tracker.updateMaxConsume(max_consume);
	std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl;

	std::cerr << "Forward pass" << std::endl;
	for (int i = 0; i < num_layers; i++) {
		// SOFTMAX output shares the previous layer's buffer; nothing more to model
		if (layer_type[i] == SOFTMAX)
			break;

		std::cerr << "Processing layer " << i << std::endl;

		std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
		// model allocation of this layer's output
		space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
		std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << std::endl;
		space_tracker.updateMaxConsume(max_consume);

		if (layer_type[i] == CONV) {
			ConvLayerParams *cur_params = (ConvLayerParams *)params[i];

			// workspace for the forward conv algorithm (chosen under algo_pref/hard)
			size_t cur_workspace_size;
			checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard, cur_workspace_size));
			space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
			space_tracker.updateMaxConsume(max_consume);

			if (!space_tracker.isAvailable())
				return false;
			std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl;

			// current layer computation over, deallocate workspace
			space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
			std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl;
		}

		if (!space_tracker.isAvailable())
			return false;
		// deallocate layer input (it has been offloaded to the host)
		if (to_offload[i]) {
			std::cerr << "deallocating input to " << i << std::endl;
			space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
			std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
		}
	}

	std::cerr << "Backward pass" << std::endl;
	// sanity check: final derivative buffer must match the softmax output size
	if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) {
		std::cout << "Panic!! Using wrong size\n";
		exit(0);
	}
	// backward pass
	// allocate the derivative of the final output
	space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
	std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl;
	space_tracker.updateMaxConsume(max_consume);
	// std::cerr << "max_consume: " << max_consume << std::endl;
	for (int i = num_layers - 1; i >= 0; i--) {
		// allocate space for previous layer derivative
		std::cerr << "Processing layer " << i << std::endl;
		std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl;
		if (i > 0) {
			// SOFTMAX backward reuses the next layer's derivative buffer
			if (layer_type[i] == SOFTMAX)
				continue;
			else {
				space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
				std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl;
				space_tracker.updateMaxConsume(max_consume);
			}
			// std::cerr << "max_consume: " << max_consume << std::endl;
		}
		int layer_to_prefetch = findPrefetchLayer(i);
		// if layer to be prefetched, allocate space for that layer
		if (layer_to_prefetch != -1) {
			std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl;
			space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
			std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl;
			space_tracker.updateMaxConsume(max_consume);
		}

		if (layer_type[i] == CONV) {
			ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
			// one workspace serves both backward passes: take the larger of
			// the filter-gradient and data-gradient workspaces
			size_t cur_filter_workspace_size;
			checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard, cur_filter_workspace_size));
			size_t cur_data_workspace_size = 0;
			if (i > 0)
				checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard, cur_data_workspace_size));

			size_t cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;

			space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
			std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl;
			space_tracker.updateMaxConsume(max_consume);

			if (!pre_alloc_conv_derivative) {
				// transient dW and db buffers
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
				space_tracker.updateMaxConsume(max_consume);
				std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}

			// std::cerr << "max_consume: " << max_consume << std::endl;
			if (!space_tracker.isAvailable())
				return false;
			// current layer computation over, deallocate workspace
			space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
			std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl;

			if (!pre_alloc_conv_derivative) {
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
				std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
		}
		else if (layer_type[i] == FULLY_CONNECTED) {
			FCLayerParams *cur_params = (FCLayerParams *)params[i];

			if (!pre_alloc_fc_derivative) {
				// transient dW and db buffers
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
				space_tracker.updateMaxConsume(max_consume);
				std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}

			if (!space_tracker.isAvailable())
				return false;

			if (!pre_alloc_fc_derivative) {
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
				std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
		}
		else if (layer_type[i] == BATCHNORM) {
			BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];

			if (!pre_alloc_batch_norm_derivative) {
				// transient dscale and dbias buffers
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
				space_tracker.updateMaxConsume(max_consume);
				std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}

			if (!space_tracker.isAvailable())
				return false;

			if (!pre_alloc_batch_norm_derivative) {
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
				std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
			}
		}

		if (!space_tracker.isAvailable())
			return false;
		// deallocate layer output and derivative
		space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
		space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
		std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl;
		// if 1st layer, deallocate input layer also
		if (i == 0) {
			space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
			std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
		}
	}
	// after a complete iteration every SUB must be balanced by an ADD
	if (space_tracker.getConsumed() > 0)
		std::cerr << "Panic!! more free bytes\n";
	if (space_tracker.getConsumed() != 0)
		std::cerr << "Panic!! bytes not freed properly\n";
	// return true;

	exp_max_consume = max_consume;
	// check with cnmem once: refines max_consume to account for pool
	// fragmentation that the simple counter above cannot see
	bool ret_val = simulateCNMEMMemory(max_consume);
	return ret_val;
}
// Replays the same iteration against a real cnmem pool sized at max_consume,
// growing max_consume (via the checkCNMEMSim macro's out-of-memory path) and
// retrying until the whole iteration fits or max_consume exceeds free_bytes.
// Every pool state transition is logged to a cnmem_*.dat file for offline
// inspection. On success, max_consume holds the smallest pool size found to
// work; returns true iff that size fits within free_bytes.
// NOTE(review): checkCNMEMSim embeds control flow — its 5th argument is a
// statement (`break` / `checkCNMEM(cnmemFinalize()); continue`) executed on
// simulated OOM, which is what drives the retry loop. Do not reorder calls.
bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) {

	size_t init_max_consume = max_consume;
	cnmemDevice_t cnmem_device;

	size_t t;
	checkCudaErrors(cudaMemGetInfo(&free_bytes, &t));
	std::cout << "free_bytes: " << free_bytes << std::endl;
	// reserve a 100 MB safety margin during simulation (restored below)
	free_bytes -= 100 * 1024 * 1024;

	cnmem_device.device = 0;
	cnmem_device.numStreams = 0;
	cnmem_device.streams = NULL;
	cnmem_device.streamSizes = NULL;

	// pick the memory-state dump file name from the vDNN configuration
	std::string cnmem_memory_state_filename;
	if (vdnn_type == vDNN_ALL) {
		if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
			cnmem_memory_state_filename = "cnmem_all_p.dat";
		}
		else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
			cnmem_memory_state_filename = "cnmem_all_m.dat";
		}
	}
	else if (vdnn_type == vDNN_CONV) {
		if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
			cnmem_memory_state_filename = "cnmem_conv_p.dat";
		}
		else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
			cnmem_memory_state_filename = "cnmem_conv_m.dat";
		}
	}
	else if (vdnn_type == vDNN_DYN) {
		cnmem_memory_state_filename = "cnmem_dyn.dat";
	}
	else {
		cnmem_memory_state_filename = "cnmem_unknown.dat";
	}
	FILE *cnmem_memory_state_fptr = fopen(cnmem_memory_state_filename.c_str(), "w");

	size_t run_count = 0;
	bool out_of_memory = false;
	// retry loop: each pass initializes a pool of size max_consume and replays
	// the iteration; on simulated OOM, checkCNMEMSim has already bumped
	// max_consume, so the next pass tries the larger pool
	while (true) {
		run_count++;
		if (max_consume >= free_bytes)
			break;
		out_of_memory = false;
		cnmem_device.size = max_consume;
		std::cerr << run_count << ' ' << max_consume << std::endl;
		if (max_consume > free_bytes)
			std::cerr << "panic!! max_consume > free_bytes\n";
		checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));

		// prefetched[] was mutated by findPrefetchLayer in earlier passes
		resetPrefetched();
		fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");
		fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count);
		fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume);
		fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");

		fprintf(cnmem_memory_state_fptr, "initial state\n");
		cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

		checkCNMEMSim(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL),
						layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);

		fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", 0, layer_input_size[0] * data_type_size);
		cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

		// forward propagate: mirror the real iteration's alloc/free order
		for (int i = 0; i < num_layers; i++) {
			size_t cur_workspace_size;
			void *cur_workspace;

			checkCNMEMSim(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL),
							layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);

			fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
			cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

			if (layer_type[i] == CONV) {
				// std::cout << "conv\n";
				ConvLayerParams *cur_params = (ConvLayerParams *)params[i];

				cur_workspace_size = cur_params->fwd_workspace_size;
				checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
								cur_workspace_size, max_consume, free_bytes, break, out_of_memory);

				fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
				cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

			}

			if (layer_type[i] == CONV) {
				checkCNMEMSim(cnmemFree(cur_workspace, NULL),
								cur_workspace_size, max_consume, free_bytes, break, out_of_memory);

				fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
				cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
			}

			// offloaded inputs are freed from the pool after the layer runs
			if (to_offload[i]) {
				checkCNMEMSim(cnmemFree(layer_input[i], NULL),
								layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);

				fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
				cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
			}

			// ACTV/SOFTMAX are fused with the previous layer: skip their slot
			if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
				i = i + 1;
			}

		}

		if (out_of_memory) {
			checkCNMEM(cnmemFinalize());
			if (max_consume < free_bytes)
				continue;	// retry with the enlarged pool
			else
				break;		// cannot grow further
		}

		// derivative of the final (softmax) output
		checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL),
						layer_input_size[num_layers] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);

		fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", num_layers, layer_input_size[num_layers] * data_type_size);
		cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

		// backward pass replay
		for (int i = num_layers - 1; i >= 0; i--) {
			// ---------------------- vDNN start ----------------------
			size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
			void *cur_workspace;

			if (i > 0) {
				if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
					// in-place backward: share the next derivative buffer
					dlayer_input[i] = dlayer_input[i + 1];
				}
				else {
					int layer_to_prefetch = findPrefetchLayer(i);
					if (layer_to_prefetch != -1) {
						checkCNMEMSim(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL),
										layer_input_size[layer_to_prefetch] * data_type_size, max_consume, free_bytes, break, out_of_memory);

						fprintf(cnmem_memory_state_fptr, "after alloc. prefetch layer_input[%d] - size: %lu\n", layer_to_prefetch, layer_input_size[layer_to_prefetch] * data_type_size);
						cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

					}

					checkCNMEMSim(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL),
									layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);

					fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
					cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
				}
			}

			if (layer_type[i] == CONV) {
				// std::cout << "here\n";
				ConvLayerParams *cur_params = (ConvLayerParams *)params[i];

				// allocate space for derivative
				if (!pre_alloc_conv_derivative) {
					if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
						break;

					fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->kernel_size * data_type_size);
					fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
					cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
				}

				// single workspace sized for the larger of the two backward algos
				cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
				if (i > 0)
					cur_data_workspace_size = cur_params->bwd_data_workspace_size;
				else
					cur_data_workspace_size = 0;
				cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;

				checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
								cur_workspace_size, max_consume, free_bytes, break, out_of_memory);

				fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
				cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

			}

			else if (layer_type[i] == FULLY_CONNECTED) {
				FCLayerParams *cur_params = (FCLayerParams *)params[i];

				if (!pre_alloc_fc_derivative) {
					if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
						break;

					fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->weight_matrix_size * data_type_size);
					fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
					cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
				}
			}

			else if (layer_type[i] == BATCHNORM) {
				BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];

				if (!pre_alloc_batch_norm_derivative) {
					if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
						break;

					fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n", cur_params->allocation_size * data_type_size);
					fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n", cur_params->allocation_size * data_type_size);
					cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
				}
			}

			else if (layer_type[i] == SOFTMAX) {
				// std::cout << "compute here\n";
				// cur_params is unused here; SOFTMAX backward needs no extra memory
				SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
				continue;
			}

			if (layer_type[i] == CONV) {
				checkCNMEMSim(cnmemFree(cur_workspace, NULL),
								cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
				fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
				cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);

				if (!pre_alloc_conv_derivative) {
					ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
					cur_params->cnmemFreeDerivatives(NULL);
					fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
					cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
				}
			}
			else if (layer_type[i] == FULLY_CONNECTED) {
				if (!pre_alloc_fc_derivative) {
					FCLayerParams *cur_params = (FCLayerParams *)params[i];
					cur_params->cnmemFreeDerivatives(NULL);
					fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
					cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
				}
			}
			else if (layer_type[i] == BATCHNORM) {
				if (!pre_alloc_batch_norm_derivative) {
					BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
					cur_params->cnmemFreeDerivatives(NULL);
					fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
					cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
				}
			}

			// this layer's output and its derivative are no longer needed
			checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL),
							layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
			fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
			cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
			checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL),
							layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
			fprintf(cnmem_memory_state_fptr, "after free dlayer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
			cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
			if (i == 0) {
				checkCNMEMSim(cnmemFree(layer_input[i], NULL),
								layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
				fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
				cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
			}
		}

		checkCNMEM(cnmemFinalize());
		if (out_of_memory) {
			if (max_consume < free_bytes)
				continue;	// retry with the enlarged pool
			else
				break;
		}
		break;	// full iteration fit in the pool — done
	}
	// restore the safety margin removed at the top
	free_bytes += 100 * 1024 * 1024;
	if (max_consume < free_bytes) {
		double exp_size = (init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
		double act_size = (max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
		fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n", exp_size);
		fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n", act_size);
	}
	else {
		fprintf(cnmem_memory_state_fptr, "out of memory\n");
	}

	fclose(cnmem_memory_state_fptr);
	if (max_consume < free_bytes)
		return true;
	else
		return false;
}
void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) {
bool hard = true, soft = false;
// if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available
if (vdnn_type == vDNN_ALL) {
setOffload(OFFLOAD_ALL);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
else if (vdnn_type == vDNN_CONV) {
setOffload(OFFLOAD_CONV);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
else if (vdnn_type == vDNN_NONE) {
setOffload(OFFLOAD_NONE);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
if (vdnn_type == vDNN_DYN) {
// check for trainability
std::cerr << "vDNN_DYN\n";
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if(!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
// check if work with fastest algo and no offload, if so, select it and return
setOffload(NeuralNet::OFFLOAD_NONE);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n";
return;
}
// check if conv offload and fastest algo works, then check if all offload and fastest algo works
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n";
return;
}
// optimize using greedy algo memory usage while improving performance
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, ALL OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if(simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n";
return;
}
}
exit(0);
}
void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) {
if (offload_type == OFFLOAD_NONE) {
for (int i = 0; i < num_layers; i++)
to_offload[i] = false;
}
else if (offload_type == OFFLOAD_CONV) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == CONV)
to_offload[i] = true;
else
to_offload[i] = false;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
else if (offload_type == OFFLOAD_ALL) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX)
to_offload[i] = false;
else
to_offload[i] = true;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
}
void NeuralNet::resetPrefetched() {
for (int i = 0; i < num_layers; i++)
prefetched[i] = false;
}
void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) {
std::vector<float> t1, t2;
this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss);
}
void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) {
CnmemSpace space_tracker(free_bytes);
// std::cout << "here\n";
// std::cout << "Free bytes: " << free_bytes << std::endl;
for (int i = 0; i < num_layers; i++)
prefetched[i] = false;
checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
checkCudaErrors(cudaMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, cudaMemcpyHostToDevice));
if (train == true) {
checkCudaErrors(cudaMemcpy(this->y, y, batch_size * data_type_size, cudaMemcpyHostToDevice));
}
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
for (int i = 0; i < num_layers; i++) {
if (train == false && i == num_layers - 1)
break;
// ---------------------- vDNN start ----------------------
size_t cur_workspace_size;
void *cur_workspace;
// offload if required
if (i > 0 && to_offload[i] && train == true)
checkCudaErrors(cudaMemcpyAsync(h_layer_input[i], layer_input[i],
layer_input_size[i] * data_type_size, cudaMemcpyDeviceToHost, stream_memory));
checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
// std::cout << "here" << i << std::endl;
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
// computation
checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W,
cur_params->conv_desc, cur_params->fwd_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha,
cur_params->bias_desc, cur_params->b,
&alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Salpha,
(float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1,
&Salpha,
(float *)layer_input[i + 1], cur_params->C_out));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Dalpha,
(double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1,
&Dalpha,
(double *)layer_input[i + 1], cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
// std::cout << "FChere" << i << std::endl;
}
else if (layer_type[i] == DROPOUT) {
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space,
cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (train == true) {
checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->factor,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
}
else {
checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon));
}
}
else if (layer_type[i] == POOLING) {
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == ACTV) {
// std::cout << "Actv\n";
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
if (train == true) {
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(cudaDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
if (train == true) {
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
i--;
}
struct timespec start_time, end_time;
checkCudaErrors(cudaStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(cudaStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
fwd_vdnn_lag.push_back(lag);
}
// std::cout << "EndSynchere" << i << std::endl;
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
if (to_offload[i] && train == true) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (train == false) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// std::cout << "EndSynchere" << i << std::endl;
// ---------------------- vDNN end ------------------------
}
// std::cout << "here" << std::endl;
if (train == false) {
compareOutputCorrect(correct_count, y);
checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size);
return;
}
*scalar_loss = computeLoss();
// ---------------------- vDNN start ----------------------
checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
if (layer_type[num_layers - 1] == SOFTMAX) {
// SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1];
if (data_type == CUDNN_DATA_FLOAT) {
checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float)));
softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (float *)layer_input[num_layers],
(float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double)));
softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (double *)layer_input[num_layers],
(double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
}
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
}
else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
if (layer_to_prefetch != 0) {
checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory));
}
else {
// std::cout << "transfer here\n";
checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], X,
layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory));
// std::cout << "transfer here\n";
}
}
checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
}
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha,
cur_params->output_tensor, dlayer_input[i + 1],
&beta,
cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_filter_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->filter_desc,
cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha,
cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->input_tensor, dlayer_input[i]));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size,
&Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta,
(float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)one_vec, batch_size,
&Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta,
(double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
}
checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode,
&alpha, &beta,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale,
cur_params->dscale, cur_params->dbias,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
&beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(cudaDeviceSynchronize());
struct timespec start_time, end_time;
checkCudaErrors(cudaStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(cudaStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag);
}
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == BATCHNORM) {
if (train == true and !pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
if (i == 0) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
// ---------------------- vDNN end ------------------------
}
if (space_tracker.getConsumed() != 0) {
std::cout << "Panic!! Space not updated properly\n";
}
// exit(0);
}
int NeuralNet::findPrefetchLayer(int cur_layer) {
for (int i = cur_layer - 1; i >= 0; i--) {
if (to_offload[i] && !prefetched[i]) {
prefetched[i] = true;
return i;
}
else if (layer_type[i] == CONV) {
return -1;
}
}
return -1;
}
|
c4bc38bc853a20af904445ce14bf10bd7d41e3c7.hip | // !!! This is a file automatically generated by hipify!!!
/*
#include "PlaceSDPrimitive.cuh"
inline SDModification*
PlaceSDPrimitive::copyToDevice()
{
PlaceSDPrimitive* deviceMod;
hipMalloc((void **)&deviceMod, sizeof(PlaceSDPrimitive));
hipMemcpy(deviceMod, this, sizeof(PlaceSDPrimitive), hipMemcpyHostToDevice);
return deviceMod;
}
inline float
PlaceSDPrimitive::modify(float originalDistance, float modifierDistance)
{
fminf(originalDistance, modifierDistance);
}
*/ | c4bc38bc853a20af904445ce14bf10bd7d41e3c7.cu | /*
#include "PlaceSDPrimitive.cuh"
inline SDModification*
PlaceSDPrimitive::copyToDevice()
{
PlaceSDPrimitive* deviceMod;
cudaMalloc((void **)&deviceMod, sizeof(PlaceSDPrimitive));
cudaMemcpy(deviceMod, this, sizeof(PlaceSDPrimitive), cudaMemcpyHostToDevice);
return deviceMod;
}
inline float
PlaceSDPrimitive::modify(float originalDistance, float modifierDistance)
{
fminf(originalDistance, modifierDistance);
}
*/ |
cc666fc7d0b616cd0cb1ecf27cc210f30b5d3160.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Computes a step in the integration changing only p */
__global__ void step_type2(long n, double a, double *r_gpu, double *p_gpu, double *f_gpu)
{
long tid;
tid=threadIdx.x+blockIdx.x*blockDim.x;
while (tid<n)
{
p_gpu[tid]=p_gpu[tid]+a*f_gpu[tid];
tid+=blockDim.x*gridDim.x;
}
return;
}
| cc666fc7d0b616cd0cb1ecf27cc210f30b5d3160.cu | /* Computes a step in the integration changing only p */
__global__ void step_type2(long n, double a, double *r_gpu, double *p_gpu, double *f_gpu)
{
long tid;
tid=threadIdx.x+blockIdx.x*blockDim.x;
while (tid<n)
{
p_gpu[tid]=p_gpu[tid]+a*f_gpu[tid];
tid+=blockDim.x*gridDim.x;
}
return;
}
|
d4aa391b88ef75a08d865e2f36c6d8bd8c7bc1ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorSort.cuh"
void THCudaLongTensor_fillSliceWithIndex(THCState* state,
THCudaLongTensor* t,
int dim) {
int64_t dims = THCudaLongTensor__nDimension(state, t);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCudaLongTensor_nElement(state, t);
int64_t sliceSize = THCudaLongTensor_size(state, t, dim);
ptrdiff_t numSlices = inElements / sliceSize;
dim3 grid;
if (!THC_getGridFromTiles(numSlices, grid)) {
THError("Slice to fill with indices is too large");
}
int64_t maxThreads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
int64_t numThreads = sliceSize;
if (numThreads > maxThreads) {
numThreads = maxThreads;
}
dim3 block(numThreads);
#define FILL_INDEX(T, DIM) \
hipLaunchKernelGGL(( fillSliceWithIndex<T, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
info, numSlices, sliceSize, info.strides[collapseDim])
if (THCTensor_canUse32BitIndexMath(state, t)) {
TensorInfo<int64_t, uint32_t> info =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
if (info.isContiguous()) {
FILL_INDEX(unsigned int, -2);
} else {
if (info.dims == 1) {
FILL_INDEX(unsigned int, 1);
} else if (info.dims == 2) {
FILL_INDEX(unsigned int, 2);
} else {
FILL_INDEX(unsigned int, -1);
}
}
} else {
TensorInfo<int64_t, uint64_t> info =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
// catch-all implementation
FILL_INDEX(uint64_t, -1);
}
#undef FILL_INDEX
THCudaCheck(hipGetLastError());
}
| d4aa391b88ef75a08d865e2f36c6d8bd8c7bc1ad.cu | #include "THCTensorSort.cuh"
void THCudaLongTensor_fillSliceWithIndex(THCState* state,
THCudaLongTensor* t,
int dim) {
int64_t dims = THCudaLongTensor__nDimension(state, t);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCudaLongTensor_nElement(state, t);
int64_t sliceSize = THCudaLongTensor_size(state, t, dim);
ptrdiff_t numSlices = inElements / sliceSize;
dim3 grid;
if (!THC_getGridFromTiles(numSlices, grid)) {
THError("Slice to fill with indices is too large");
}
int64_t maxThreads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
int64_t numThreads = sliceSize;
if (numThreads > maxThreads) {
numThreads = maxThreads;
}
dim3 block(numThreads);
#define FILL_INDEX(T, DIM) \
fillSliceWithIndex<T, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
info, numSlices, sliceSize, info.strides[collapseDim])
if (THCTensor_canUse32BitIndexMath(state, t)) {
TensorInfo<int64_t, uint32_t> info =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
if (info.isContiguous()) {
FILL_INDEX(unsigned int, -2);
} else {
if (info.dims == 1) {
FILL_INDEX(unsigned int, 1);
} else if (info.dims == 2) {
FILL_INDEX(unsigned int, 2);
} else {
FILL_INDEX(unsigned int, -1);
}
}
} else {
TensorInfo<int64_t, uint64_t> info =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
// catch-all implementation
FILL_INDEX(uint64_t, -1);
}
#undef FILL_INDEX
THCudaCheck(cudaGetLastError());
}
|
82374c241527ac104da36c00ad3636f1ebb8ea68.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <hip/hip_runtime.h>
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace user_op {
namespace {
// Elementwise ReLU forward: out[i] = max(in[i], 0) for each of the n
// elements visited by CUDA_1D_KERNEL_LOOP.
template<typename T>
__global__ void ReluForwardGpu(int64_t n, const T* in, T* out) {
  const T zero = static_cast<T>(0.0);
  CUDA_1D_KERNEL_LOOP(i, n) {
    const T v = in[i];
    out[i] = (v > zero) ? v : zero;
  }
}
// ReLU backward: propagate the upstream gradient dy where the forward
// output y was positive, zero elsewhere (dx[i] = y[i] > 0 ? dy[i] : 0).
template<typename T>
__global__ void ReluBackwardGpu(int64_t n, const T* y, const T* dy, T* dx) {
const T zero = static_cast<T>(0.0);
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > zero ? dy[i] : zero; }
}
} // namespace
// User-op kernel computing forward ReLU for nv_bfloat16 tensors on GPU.
// Registered below for the "relu" op when "out" has dtype kBFloat16.
class ReluNvBFloat16Kernel final : public OpKernel {
public:
ReluNvBFloat16Kernel() = default;
~ReluNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
// Launches ReluForwardGpu over all elements of "in" on this ctx's CUDA stream.
void Compute(KernelComputeContext* ctx) const override {
const Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t n = in->shape().elem_cnt();
hipLaunchKernelGGL(( ReluForwardGpu<nv_bfloat16>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, reinterpret_cast<const nv_bfloat16*>(in->dptr()),
reinterpret_cast<nv_bfloat16*>(out->mut_dptr()));
}
// Nothing to compute when every output is empty.
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// User-op kernel computing the ReLU gradient for nv_bfloat16 tensors on GPU.
// Registered below for the "relu_grad" op when "dx" has dtype kBFloat16.
class ReluGradNvBFloat16Kernel final : public OpKernel {
public:
ReluGradNvBFloat16Kernel() = default;
~ReluGradNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
// Launches ReluBackwardGpu (dx = y > 0 ? dy : 0) on this ctx's CUDA stream.
void Compute(KernelComputeContext* ctx) const override {
const Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const int64_t n = y->shape().elem_cnt();
hipLaunchKernelGGL(( ReluBackwardGpu<nv_bfloat16>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, reinterpret_cast<const nv_bfloat16*>(y->dptr()),
reinterpret_cast<const nv_bfloat16*>(dy->dptr()),
reinterpret_cast<nv_bfloat16*>(dx->mut_dptr()));
}
// Nothing to compute when every output is empty.
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Register the bfloat16 forward kernel for "relu" on GPU; propose computing
// "out" in place over "in" when the framework allows aliasing.
REGISTER_USER_KERNEL("relu")
.SetCreateFn<ReluNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("out", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
return Maybe<void>::Ok();
});
// Register the bfloat16 backward kernel for "relu_grad" on GPU; propose
// computing "dx" in place over "dy".
REGISTER_USER_KERNEL("relu_grad")
.SetCreateFn<ReluGradNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("dx", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true));
return Maybe<void>::Ok();
});
} // namespace user_op
} // namespace oneflow
#endif // defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
| 82374c241527ac104da36c00ad3636f1ebb8ea68.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <cuda.h>
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace user_op {
namespace {
// Elementwise ReLU forward: out[i] = max(in[i], 0) for each of the n
// elements visited by CUDA_1D_KERNEL_LOOP.
template<typename T>
__global__ void ReluForwardGpu(int64_t n, const T* in, T* out) {
  const T zero = static_cast<T>(0.0);
  CUDA_1D_KERNEL_LOOP(i, n) {
    const T v = in[i];
    out[i] = (v > zero) ? v : zero;
  }
}
// ReLU backward: propagate the upstream gradient dy where the forward
// output y was positive, zero elsewhere (dx[i] = y[i] > 0 ? dy[i] : 0).
template<typename T>
__global__ void ReluBackwardGpu(int64_t n, const T* y, const T* dy, T* dx) {
const T zero = static_cast<T>(0.0);
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > zero ? dy[i] : zero; }
}
} // namespace
// User-op kernel computing forward ReLU for nv_bfloat16 tensors on GPU.
// Registered below for the "relu" op when "out" has dtype kBFloat16.
class ReluNvBFloat16Kernel final : public OpKernel {
public:
ReluNvBFloat16Kernel() = default;
~ReluNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
// Launches ReluForwardGpu over all elements of "in" on this ctx's CUDA stream.
void Compute(KernelComputeContext* ctx) const override {
const Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t n = in->shape().elem_cnt();
ReluForwardGpu<nv_bfloat16>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, reinterpret_cast<const nv_bfloat16*>(in->dptr()),
reinterpret_cast<nv_bfloat16*>(out->mut_dptr()));
}
// Nothing to compute when every output is empty.
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// User-op kernel computing the ReLU gradient for nv_bfloat16 tensors on GPU.
// Registered below for the "relu_grad" op when "dx" has dtype kBFloat16.
class ReluGradNvBFloat16Kernel final : public OpKernel {
public:
ReluGradNvBFloat16Kernel() = default;
~ReluGradNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
// Launches ReluBackwardGpu (dx = y > 0 ? dy : 0) on this ctx's CUDA stream.
void Compute(KernelComputeContext* ctx) const override {
const Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const int64_t n = y->shape().elem_cnt();
ReluBackwardGpu<nv_bfloat16>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, reinterpret_cast<const nv_bfloat16*>(y->dptr()),
reinterpret_cast<const nv_bfloat16*>(dy->dptr()),
reinterpret_cast<nv_bfloat16*>(dx->mut_dptr()));
}
// Nothing to compute when every output is empty.
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Register the bfloat16 forward kernel for "relu" on GPU; propose computing
// "out" in place over "in" when the framework allows aliasing.
REGISTER_USER_KERNEL("relu")
.SetCreateFn<ReluNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("out", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
return Maybe<void>::Ok();
});
// Register the bfloat16 backward kernel for "relu_grad" on GPU; propose
// computing "dx" in place over "dy".
REGISTER_USER_KERNEL("relu_grad")
.SetCreateFn<ReluGradNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("dx", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true));
return Maybe<void>::Ok();
});
} // namespace user_op
} // namespace oneflow
#endif // defined(CUDA_VERSION) && CUDA_VERSION >= 11000
|
34a68bf3676b307cadee2e036fd8c1d5a1068b5c.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
// Print the last recorded CUDA/HIP error (if any) with an optional source
// line, then abort the process. Does nothing when no error is pending.
void checkCUDAError(const char *msg, int line = -1) {
  hipError_t err = hipGetLastError();
  if (err == hipSuccess) {
    return;
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// Buffer to ping-pong positions
glm::vec3 *dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer bit-mixing hash (shift/add/xor constants); used below to seed the
// per-boid RNG deterministically from (index * time). The exact constants
// and operation order define the output, so do not reorder these statements.
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
// Seed a per-thread engine from hash(index * time) so each boid draws a
// deterministic, independent stream for a given time value.
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
// Each component is drawn uniformly from [-1, 1].
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
// One thread per boid: fill arr[index] with a random position scaled by
// `scale`. Threads past N exit immediately.
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 rand = generateRandomVec3(time, index);
  arr[index].x = scale * rand.x;
  arr[index].y = scale * rand.y;
  arr[index].z = scale * rand.z;
}
/**
* Initialize memory, update some globals
*/
// Allocate all device buffers for N boids, scatter them randomly in the
// scene, and derive the uniform-grid parameters. Freed in Boids::endSimulation.
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> >(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
// Cell width is twice the largest rule radius, so a boid's neighborhood
// always fits inside a 2x2x2 block of cells.
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
// gridMinimum starts at the origin; shift it to the grid's minimum corner.
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos2 failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
// Copy each boid's position into the 4-floats-per-boid VBO, scaled by
// -1/s_scale, with w fixed at 1.
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float c_scale = -1.0f / s_scale;
  glm::vec3 p = pos[index];
  float *dst = vbo + 4 * index;
  dst[0] = p.x * c_scale;
  dst[1] = p.y * c_scale;
  dst[2] = p.z * c_scale;
  dst[3] = 1.0f;
}
// Copy each boid's velocity into the 4-floats-per-boid VBO, offset by 0.3
// per component (used as a color), with w fixed at 1.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 v = vel[index];
  float *dst = vbo + 4 * index;
  dst[0] = v.x + 0.3f;
  dst[1] = v.y + 0.3f;
  dst[2] = v.z + 0.3f;
  dst[3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
// Copy positions and velocities (from dev_vel1, the "current" buffer) into
// the OpenGL VBOs, then block until both kernels finish.
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
// Compute the new (unclamped) velocity of boid `iSelf` by applying the three
// flocking rules against all N boids (brute force, O(N) per boid):
//   Rule 1 (cohesion):   steer toward the perceived local center of mass.
//   Rule 2 (separation): steer away from boids closer than rule2Distance.
//   Rule 3 (alignment):  match the velocity of boids within rule3Distance.
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  glm::vec3 thisPos = pos[iSelf];
  glm::vec3 thisVel = vel[iSelf];
  glm::vec3 centroid(0.0f, 0.0f, 0.0f);
  glm::vec3 separate(0.0f, 0.0f, 0.0f);
  glm::vec3 alignment(0.0f, 0.0f, 0.0f);
  int rule1Count = 0;
  int rule3Count = 0;
  for (int x = 0; x < N; x++) {
    if (x == iSelf) continue;
    float distance = glm::distance(thisPos, pos[x]);
    // Rule 1: accumulate positions of neighbors within the cohesion radius.
    if (distance < rule1Distance) {
      centroid += pos[x];
      rule1Count += 1;
    }
    // Rule 2: push away from boids that are too close.
    if (distance < rule2Distance) {
      separate -= pos[x] - thisPos;
    }
    // Rule 3: accumulate velocities of neighbors within the alignment radius.
    if (distance < rule3Distance) {
      alignment += vel[x];
      rule3Count += 1;
    }
  }
  if (rule1Count > 0) {
    centroid /= rule1Count;
    thisVel += (centroid - thisPos) * rule1Scale;
  }
  // Fix: alignment was previously gated on rule 1's neighbor count, which
  // only worked by coincidence because rule1Distance == rule3Distance.
  if (rule3Count > 0) {
    thisVel += alignment * rule3Scale;
  }
  thisVel += separate * rule2Scale;
  return thisVel;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
// One thread per boid: compute the flocking velocity update against all N
// boids, clamp its magnitude to maxSpeed, and store it in vel2.
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 newVel = computeVelocityChange(N, index, pos, vel1);
  // Clamp the speed.
  if (glm::length(newVel) > maxSpeed) {
    newVel = newVel * maxSpeed / glm::length(newVel);
  }
  // Write into vel2 (not vel1): other threads are still reading vel1 this step.
  vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
// One thread per boid: advance its position by vel * dt, then wrap each
// component into [-scene_scale, scene_scale] so boids stay in the scene.
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index] + vel[index] * dt;
  // Wrap around each axis independently.
  for (int c = 0; c < 3; c++) {
    if (thisPos[c] < -scene_scale) {
      thisPos[c] = scene_scale;
    } else if (thisPos[c] > scene_scale) {
      thisPos[c] = -scene_scale;
    }
  }
  pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate into a 1D index (x fastest, z slowest).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
// Label boid `index` with the 1D index of the grid cell containing it, and
// record the identity mapping in `indices` so it can be key-sorted alongside
// the cell labels.
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {  // fix: was `> N`, which let thread N write one past the end
    return;
  }
  // Pointer back into the actual boid data.
  indices[index] = index;
  // Convert the world position to fractional cell coordinates and truncate.
  glm::vec3 thisPosInCells = (pos[index] - gridMin) * inverseCellWidth;
  gridIndices[index] = gridIndex3Dto1D((int)thisPosInCells[0], (int)thisPosInCells[1], (int)thisPosInCells[2], gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fill an N-element int buffer with `value` (used to mark grid cells empty
// with a -1 sentinel before kernIdentifyCellStartEnd runs).
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
// 2.3 - Need to arrange the pos and vel arrays to make the particle array
// 2.3 - Reshuffle pos/vel into cell-sorted order: element `index` of the
// output buffers receives the data of the boid that the sorted
// particleArrayIndices maps it to, so boids in the same cell are contiguous.
__global__ void kernMakePosAndVelCoherent(int N, int *particleArrayIndices,
glm::vec3 *pos1, glm::vec3 *pos2, glm::vec3 *vel1, glm::vec3 *vel2){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {  // fix: was `> N`, which let thread N read/write one past the end
    return;
  }
  int oldIndex = particleArrayIndices[index];
  pos2[index] = pos1[oldIndex];
  vel2[index] = vel1[oldIndex];
}
// For each grid cell, find the [start, end) range it occupies in the sorted
// particleGridIndices array: a cell's range begins wherever the sorted key
// differs from its predecessor. Cells containing no boids keep the -1
// sentinel written by kernResetIntBuffer.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int thisGridIndex = particleGridIndices[index];
  // The first sorted element always opens its cell.
  if (index == 0) {
    gridCellStartIndices[thisGridIndex] = 0;
  }
  // The last sorted element always closes its cell; fix: the original read
  // particleGridIndices[index + 1] here (out of bounds) and never wrote the
  // last cell's end index, so boids in the final occupied cell were skipped.
  if (index == N - 1) {
    gridCellEndIndices[thisGridIndex] = N;
    return;
  }
  int theNextGridIndex = particleGridIndices[index + 1];
  if (theNextGridIndex != thisGridIndex) {
    // Boundary between two cells: close this cell and open the next one.
    // Fix: index the start array by the actual next key (keys can jump by
    // more than one past empty cells) and start at index + 1, not index.
    gridCellEndIndices[thisGridIndex] = index + 1;
    gridCellStartIndices[theNextGridIndex] = index + 1;
  }
}
// Update one boid's velocity using the uniform grid: only boids in the 2x2x2
// block of cells nearest this boid are candidate neighbors (valid because
// cellWidth is twice the largest rule distance). Boid data is accessed
// indirectly through particleArrayIndices (scattered layout).
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {  // fix: was `> N`, which let thread N run one past the end
    return;
  }
  // Identify the grid cell that this particle is in.
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisPosInCells = (thisPos - gridMin) * inverseCellWidth;
  int cellX = (int)thisPosInCells[0];
  int cellY = (int)thisPosInCells[1];
  int cellZ = (int)thisPosInCells[2];
  // Decide, per axis, whether the neighborhood extends toward the minus or
  // plus adjacent cell, based on which half of the cell the boid sits in.
  glm::vec3 shiftedPos = thisPos - (gridMin + cellWidth * glm::vec3(cellX, cellY, cellZ));
  int deltaX = (shiftedPos[0] > cellWidth / 2) ? 1 : -1;
  int deltaY = (shiftedPos[1] > cellWidth / 2) ? 1 : -1;
  int deltaZ = (shiftedPos[2] > cellWidth / 2) ? 1 : -1;
  glm::vec3 thisVel = vel1[index];
  int neighborCount = 0;
  glm::vec3 centroid(0.0f, 0.0f, 0.0f);
  glm::vec3 separate(0.0f, 0.0f, 0.0f);
  glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
  // NOTE(review): unlike computeVelocityChange, this search does not exclude
  // the boid itself from the rules — confirm whether that is intended.
  for (int x = 0; x < 2; x++) {
    int nx = cellX + x * deltaX;
    // Fix: the original only rejected cells past the grid's upper end, so a
    // boid near an edge could index cell -1 (or beyond) out of bounds.
    if (nx < 0 || nx >= gridResolution) continue;
    for (int y = 0; y < 2; y++) {
      int ny = cellY + y * deltaY;
      if (ny < 0 || ny >= gridResolution) continue;
      for (int z = 0; z < 2; z++) {
        int nz = cellZ + z * deltaZ;
        if (nz < 0 || nz >= gridResolution) continue;
        int gridCellToCheck = gridIndex3Dto1D(nx, ny, nz, gridResolution);
        // Cells left at -1 by kernResetIntBuffer are empty.
        if (gridCellStartIndices[gridCellToCheck] == -1) continue;
        for (int b = gridCellStartIndices[gridCellToCheck]; b < gridCellEndIndices[gridCellToCheck]; b++) {
          int otherBoid = particleArrayIndices[b];
          float distance = glm::distance(thisPos, pos[otherBoid]);
          // Rule 1: fly towards the local perceived center of mass.
          if (distance < rule1Distance) {
            centroid += pos[otherBoid];
            neighborCount += 1;
          }
          // Rule 2: keep a minimum distance from other boids.
          if (distance < rule2Distance) {
            separate -= pos[otherBoid] - thisPos;
          }
          // Rule 3: match the velocity of surrounding boids.
          if (distance < rule3Distance) {
            cohesion += vel1[otherBoid];
          }
        }
      }
    }
  }
  if (neighborCount > 0) {
    centroid /= neighborCount;
    thisVel += (centroid - thisPos) * rule1Scale;
    thisVel += cohesion * rule3Scale;
  }
  thisVel += separate * rule2Scale;
  // Clamp the speed before writing into vel2 (vel1 is still being read).
  if (glm::length(thisVel) > maxSpeed) thisVel = thisVel * maxSpeed / glm::length(thisVel);
  vel2[index] = thisVel;
}
// Same neighbor search as the scattered kernel, but pos/vel1 are already
// reshuffled into cell-sorted order, so the start/end indices address the
// boid data directly with no particleArrayIndices indirection.
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {  // fix: was `> N`, which let thread N run one past the end
    return;
  }
  // Identify the grid cell that this particle is in.
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisPosInCells = (thisPos - gridMin) * inverseCellWidth;
  int cellX = (int)thisPosInCells[0];
  int cellY = (int)thisPosInCells[1];
  int cellZ = (int)thisPosInCells[2];
  // Decide, per axis, whether the neighborhood extends toward the minus or
  // plus adjacent cell, based on which half of the cell the boid sits in.
  glm::vec3 shiftedPos = thisPos - (gridMin + cellWidth * glm::vec3(cellX, cellY, cellZ));
  int deltaX = (shiftedPos[0] > cellWidth / 2) ? 1 : -1;
  int deltaY = (shiftedPos[1] > cellWidth / 2) ? 1 : -1;
  int deltaZ = (shiftedPos[2] > cellWidth / 2) ? 1 : -1;
  glm::vec3 thisVel = vel1[index];
  int neighborCount = 0;
  glm::vec3 centroid(0.0f, 0.0f, 0.0f);
  glm::vec3 separate(0.0f, 0.0f, 0.0f);
  glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
  for (int x = 0; x < 2; x++) {
    int nx = cellX + x * deltaX;
    // Fix: the original only rejected cells past the grid's upper end, so a
    // boid near an edge could index cell -1 (or beyond) out of bounds.
    if (nx < 0 || nx >= gridResolution) continue;
    for (int y = 0; y < 2; y++) {
      int ny = cellY + y * deltaY;
      if (ny < 0 || ny >= gridResolution) continue;
      for (int z = 0; z < 2; z++) {
        int nz = cellZ + z * deltaZ;
        if (nz < 0 || nz >= gridResolution) continue;
        int gridCellToCheck = gridIndex3Dto1D(nx, ny, nz, gridResolution);
        // Cells left at -1 by kernResetIntBuffer are empty.
        if (gridCellStartIndices[gridCellToCheck] == -1) continue;
        for (int otherBoid = gridCellStartIndices[gridCellToCheck]; otherBoid < gridCellEndIndices[gridCellToCheck]; otherBoid++) {
          float distance = glm::distance(thisPos, pos[otherBoid]);
          // Rule 1: fly towards the local perceived center of mass.
          if (distance < rule1Distance) {
            centroid += pos[otherBoid];
            neighborCount += 1;
          }
          // Rule 2: keep a minimum distance from other boids.
          if (distance < rule2Distance) {
            separate -= pos[otherBoid] - thisPos;
          }
          // Rule 3: match the velocity of surrounding boids.
          if (distance < rule3Distance) {
            cohesion += vel1[otherBoid];
          }
        }
      }
    }
  }
  if (neighborCount > 0) {
    centroid /= neighborCount;
    thisVel += (centroid - thisPos) * rule1Scale;
    thisVel += cohesion * rule3Scale;
  }
  thisVel += separate * rule2Scale;
  // Clamp the speed before writing into vel2 (vel1 is still being read).
  if (glm::length(thisVel) > maxSpeed) thisVel = thisVel * maxSpeed / glm::length(thisVel);
  vel2[index] = thisVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
// Advance the simulation one step with the O(N^2) brute-force search:
// velocities are computed into vel2, positions advanced from vel2, then the
// velocity buffers are ping-ponged so vel1 is current again.
void Boids::stepSimulationNaive(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2);
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
  // Ping-pong: the freshly written vel2 becomes next step's vel1.
  glm::vec3 *swap = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = swap;
}
// Advance the simulation one step using the uniform grid with scattered
// (unsorted) boid data: label boids with cell indices, sort by cell, find
// each cell's range, then search only neighboring cells per boid.
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
// Set all values to -1 first for easy empty cell identification
dim3 fullBlocksPerGridForCells((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> >(
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
// The freshly written dev_vel2 becomes next step's dev_vel1.
glm::vec3 *temp_vel = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp_vel;
}
// Advance the simulation one step using the uniform grid with coherent boid
// data: same as the scattered path, plus a reshuffle of pos/vel into
// cell-sorted order so the neighbor search reads memory contiguously.
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
// Set all values to -1 first for easy empty cell identification
dim3 fullBlocksPerGridForCells((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernMakePosAndVelCoherent << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices, dev_pos, dev_pos2, dev_vel1, dev_vel2);
// Swap so dev_pos/dev_vel1 point at the coherent (cell-sorted) copies.
glm::vec3 *temp = dev_pos;
dev_pos = dev_pos2;
dev_pos2 = temp;
temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> >(
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos, dev_vel1, dev_vel2);
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
// Second swap: the freshly written velocities become next step's dev_vel1.
temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::endSimulation() {
  // Release every device allocation made in Boids::initSimulation.
  hipFree(dev_pos);
  hipFree(dev_pos2);
  hipFree(dev_vel1);
  hipFree(dev_vel2);
  hipFree(dev_particleArrayIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_gridCellEndIndices);
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // Exercises thrust::sort_by_key on a small key/value set: keys come back
  // ascending and values are permuted alongside them (stability not required).
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  hipMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
  hipMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup: arrays allocated with new[] must be released with delete[]
  // (plain `delete` on a new[] pointer is undefined behavior)
  delete[] intKeys;
  delete[] intValues;
  hipFree(dev_intKeys);
  hipFree(dev_intValues);
  checkCUDAErrorWithLine("hipFree failed!");
}
| 34a68bf3676b307cadee2e036fd8c1d5a1068b5c.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// Buffer to ping-pong positions
glm::vec3 *dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer bit-mixing hash used to decorrelate consecutive (index * time) seeds
// before feeding them to the per-boid RNG. The exact add/xor/shift constants
// define the hash — do not "simplify" them.
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
 * LOOK-1.2 - typical helper for a CUDA kernel.
 * Produces a pseudo-random vec3 with each component uniform in [-1, 1],
 * seeded from the (time, index) pair so every boid gets its own stream.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine engine(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unit(-1, 1);
  return glm::vec3((float)unit(engine), (float)unit(engine), (float)unit(engine));
}
/**
 * LOOK-1.2 - basic CUDA kernel.
 * Fills arr[0..N) with random positions scaled to the starting area.
 * Launch with a 1D grid covering at least N threads.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;  // tail threads of the last block do nothing
  }
  glm::vec3 r = generateRandomVec3(time, index);
  arr[index].x = scale * r.x;
  arr[index].y = scale * r.y;
  arr[index].z = scale * r.z;
}
/**
 * Initialize device memory and grid parameters for an N-boid simulation.
 * Allocates pos/vel ping-pong buffers, randomizes starting positions, and
 * sizes the uniform grid (cell width = 2x the largest rule radius).
 * All buffers allocated here are released in Boids::endSimulation.
 */
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  // LOOK-1.2 - This is basic CUDA memory management and error checking.
  // Don't forget to cudaFree in Boids::endSimulation.
  cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
  cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
  cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
  // LOOK-1.2 - This is a typical CUDA kernel invocation.
  kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> >(1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // LOOK-2.1 computing grid params
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // TODO-2.1 TODO-2.3 - Allocate additional buffers here.
  cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
  cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
  cudaMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos2 failed!");
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent and also surfaces any pending async kernel errors.
  cudaDeviceSynchronize();
  checkCUDAErrorWithLine("initSimulation sync failed!");
}
/******************
* copyBoidsToVBO *
******************/
/**
 * Copy the boid positions into the VBO so that they can be drawn by OpenGL.
 * Writes one vec4 (xyz scaled by -1/s_scale, w = 1) per boid.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float c_scale = -1.0f / s_scale;
  float *out = vbo + 4 * index;
  out[0] = pos[index].x * c_scale;
  out[1] = pos[index].y * c_scale;
  out[2] = pos[index].z * c_scale;
  out[3] = 1.0f;
}
// Copies boid velocities into the VBO, offset by 0.3 per channel (used as a
// color), with w = 1.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float *out = vbo + 4 * index;
  out[0] = vel[index].x + 0.3f;
  out[1] = vel[index].y + 0.3f;
  out[2] = vel[index].z + 0.3f;
  out[3] = 1.0f;
}
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
 * Fills the mapped position and velocity VBO pointers from device state,
 * then blocks until both copies have finished.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
  cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
 * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
 * Brute-force flocking: computes the updated velocity for boid `iSelf` by
 * scanning all N boids and applying the three classic rules:
 *   Rule 1 (cohesion):   steer toward the perceived center of nearby boids.
 *   Rule 2 (separation): steer away from boids closer than rule2Distance.
 *   Rule 3 (alignment):  match the velocity of nearby boids.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  const glm::vec3 selfPos = pos[iSelf];
  glm::vec3 newVel = vel[iSelf];
  glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
  glm::vec3 separation(0.0f, 0.0f, 0.0f);
  glm::vec3 alignment(0.0f, 0.0f, 0.0f);
  int rule1Neighbors = 0;
  for (int other = 0; other < N; other++) {
    if (other == iSelf) {
      continue;  // a boid never reacts to itself
    }
    const float d = glm::distance(selfPos, pos[other]);
    if (d < rule1Distance) {
      perceivedCenter += pos[other];
      rule1Neighbors++;
    }
    if (d < rule2Distance) {
      separation -= pos[other] - selfPos;
    }
    if (d < rule3Distance) {
      alignment += vel[other];
    }
  }
  // NOTE(review): the alignment term is applied (unaveraged) only when rule-1
  // neighbors exist — preserved exactly as the original implementation did it.
  if (rule1Neighbors > 0) {
    perceivedCenter /= rule1Neighbors;
    newVel += (perceivedCenter - selfPos) * rule1Scale;
    newVel += alignment * rule3Scale;
  }
  newVel += separation * rule2Scale;
  return newVel;
}
/**
 * TODO-1.2 implement basic flocking
 * For each of the `N` boids, compute a new velocity from all other boids
 * (brute force), clamp it to maxSpeed, and write it into vel2.
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
  glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 v = computeVelocityChange(N, index, pos, vel1);
  // Clamp the speed.
  float speed = glm::length(v);
  if (speed > maxSpeed) {
    v = v * maxSpeed / speed;
  }
  // Written to vel2 (not vel1) so concurrently-running threads still read a
  // consistent snapshot of the old velocities from vel1.
  vel2[index] = v;
}
/**
 * LOOK-1.2 Since this is pretty trivial, we implemented it for you.
 * For each of the `N` boids, integrate position by velocity over dt and
 * wrap it into the cube [-scene_scale, scene_scale] so boids are never lost.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  // Wrap each axis: falling off one face re-enters at the opposite face.
  p.x = p.x < -scene_scale ? scene_scale : (p.x > scene_scale ? -scene_scale : p.x);
  p.y = p.y < -scene_scale ? scene_scale : (p.y > scene_scale ? -scene_scale : p.y);
  p.z = p.z < -scene_scale ? scene_scale : (p.z > scene_scale ? -scene_scale : p.z);
  pos[index] = p;
}
// LOOK-2.1 Flattens a 3D cell coordinate into a 1D index; x varies fastest,
// then y, then z.
// LOOK-2.3 This layout means neighboring cells along x are contiguous in
// memory — worth considering when ordering neighbor-cell iteration.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
// Labels each boid with the 1D index of its grid cell (gridIndices) and with
// its own index into pos/vel (indices), setting up the later sort-by-cell.
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  // BUGFIX: was `index > N`, which let thread N read/write one element past
  // the end of every buffer.
  if (index >= N) {
    return;
  }
  // Pointer to actual boid data (identity before sorting).
  indices[index] = index;
  // Convert position to cell coordinates, then flatten to a 1D cell index.
  glm::vec3 thisPosInCells = (pos[index] - gridMin) * inverseCellWidth;
  gridIndices[index] = gridIndex3Dto1D((int)thisPosInCells[0], (int)thisPosInCells[1], (int)thisPosInCells[2], gridResolution);
}
// LOOK-2.1 Sets all N entries of intBuffer to `value`. Used with -1 to mark
// grid cells as empty before the start/end ranges are computed.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
// 2.3 - Gathers pos/vel into cell-sorted order: output slot `index` of
// pos2/vel2 receives the data of the boid whose sorted rank is `index`,
// so the coherent neighbor search can index pos/vel directly.
__global__ void kernMakePosAndVelCoherent(int N, int *particleArrayIndices,
  glm::vec3 *pos1, glm::vec3 *pos2, glm::vec3 *vel1, glm::vec3 *vel2){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  // BUGFIX: was `index > N`, an off-by-one that made thread N read/write
  // out of bounds.
  if (index >= N) {
    return;
  }
  int oldIndex = particleArrayIndices[index];
  pos2[index] = pos1[oldIndex];
  vel2[index] = vel1[oldIndex];
}
// TODO-2.1
// Identify the start/end range of each cell in the sorted grid-index array.
// Parallel unrolling of "this index doesn't match the next one, so a cell
// boundary lies between them". End indices are EXCLUSIVE, matching the
// `b < gridCellEndIndices[cell]` loops in the neighbor-search kernels.
// Both buffers must be reset to -1 beforehand so empty cells are detectable.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int thisGridIndex = particleGridIndices[index];
  // The first sorted boid always starts its cell.
  if (index == 0) {
    gridCellStartIndices[thisGridIndex] = 0;
  }
  // BUGFIX: the last boid must close its cell here; the original read
  // particleGridIndices[index + 1] unconditionally (out of bounds for the
  // last thread) and never recorded the final cell's end.
  if (index == N - 1) {
    gridCellEndIndices[thisGridIndex] = N;
    return;
  }
  int nextGridIndex = particleGridIndices[index + 1];
  if (nextGridIndex != thisGridIndex) {
    // BUGFIX: the cell that starts at index + 1 is `nextGridIndex`, not
    // `thisGridIndex + 1` (cells can be skipped entirely when empty), and
    // its start is index + 1, not index.
    gridCellEndIndices[thisGridIndex] = index + 1;
    gridCellStartIndices[nextGridIndex] = index + 1;
  }
}
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce the
// number of boids that need to be checked. Because the cell width is 2x the
// largest rule radius, at most a 2x2x2 block of cells can contain neighbors;
// which octant of its own cell the boid occupies picks the 8-cell block.
// Boid data is "scattered": cell ranges index into particleArrayIndices,
// which in turn indexes pos/vel.
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  // BUGFIX: was `index > N` (off-by-one out-of-bounds access for thread N).
  if (index >= N) {
    return;
  }
  // - Identify the grid cell that this particle is in.
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisPosInCells = (thisPos - gridMin) * inverseCellWidth;
  int cellX = (int)thisPosInCells[0];
  int cellY = (int)thisPosInCells[1];
  int cellZ = (int)thisPosInCells[2];
  // - Identify which cells may contain neighbors: extend toward -1 or +1
  //   along each axis depending on which half of its cell the boid is in.
  glm::vec3 shiftedPos = thisPos - (gridMin + cellWidth * glm::vec3(cellX, cellY, cellZ));
  int deltaX = (shiftedPos[0] > cellWidth * 0.5f) ? 1 : -1;
  int deltaY = (shiftedPos[1] > cellWidth * 0.5f) ? 1 : -1;
  int deltaZ = (shiftedPos[2] > cellWidth * 0.5f) ? 1 : -1;
  // - Accumulate the three flocking rules over the (up to) 8 candidate cells.
  glm::vec3 thisVel = vel1[index];
  int neighborCount = 0;
  glm::vec3 centroid(0.0f, 0.0f, 0.0f);
  glm::vec3 separate(0.0f, 0.0f, 0.0f);
  glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
  for (int x = 0; x < 2; x++){
    for (int y = 0; y < 2; y++){
      for (int z = 0; z < 2; z++){
        int nx = cellX + x * deltaX;
        int ny = cellY + y * deltaY;
        int nz = cellZ + z * deltaZ;
        // BUGFIX: reject cells outside the grid on EVERY axis. The original
        // only rejected flattened indices past the end (and with `>` rather
        // than `>=`), so negative coordinates caused out-of-bounds reads.
        if (nx < 0 || nx >= gridResolution ||
            ny < 0 || ny >= gridResolution ||
            nz < 0 || nz >= gridResolution) continue;
        int gridCellToCheck = gridIndex3Dto1D(nx, ny, nz, gridResolution);
        int start = gridCellStartIndices[gridCellToCheck];
        if (start == -1) continue; // still -1 from the reset pass => empty cell
        int end = gridCellEndIndices[gridCellToCheck]; // exclusive
        for (int b = start; b < end; b++){
          int otherBoid = particleArrayIndices[b];
          // BUGFIX: exclude self, matching the brute-force implementation
          // (the boid is always present in its own cell).
          if (otherBoid == index) continue;
          float distance = glm::distance(thisPos, pos[otherBoid]);
          // Rule 1: fly toward the local perceived center of mass.
          if (distance < rule1Distance){
            centroid += pos[otherBoid];
            neighborCount += 1;
          }
          // Rule 2: keep a small distance from other boids.
          if (distance < rule2Distance){
            separate -= pos[otherBoid] - thisPos;
          }
          // Rule 3: match the velocity of surrounding boids.
          if (distance < rule3Distance){
            cohesion += vel1[otherBoid];
          }
        }
      }
    }
  }
  if (neighborCount > 0){
    centroid /= neighborCount;
    thisVel += (centroid - thisPos) * rule1Scale;
    thisVel += cohesion * rule3Scale;
  }
  thisVel += separate * rule2Scale;
  // - Clamp the speed change before putting the new speed in vel2.
  if (glm::length(thisVel) > maxSpeed) thisVel = thisVel * maxSpeed / glm::length(thisVel);
  vel2[index] = thisVel;
}
// TODO-2.3 - Like kernUpdateVelNeighborSearchScattered, but pos/vel are
// already reshuffled into cell-sorted order, so cell ranges index pos/vel
// directly (one less level of indirection).
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  // BUGFIX: was `index > N` (off-by-one out-of-bounds access for thread N).
  if (index >= N) {
    return;
  }
  // - Identify the grid cell that this particle is in.
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisPosInCells = (thisPos - gridMin) * inverseCellWidth;
  int cellX = (int)thisPosInCells[0];
  int cellY = (int)thisPosInCells[1];
  int cellZ = (int)thisPosInCells[2];
  // - Pick the 2x2x2 neighborhood based on which half of its own cell the
  //   boid occupies along each axis.
  glm::vec3 shiftedPos = thisPos - (gridMin + cellWidth * glm::vec3(cellX, cellY, cellZ));
  int deltaX = (shiftedPos[0] > cellWidth * 0.5f) ? 1 : -1;
  int deltaY = (shiftedPos[1] > cellWidth * 0.5f) ? 1 : -1;
  int deltaZ = (shiftedPos[2] > cellWidth * 0.5f) ? 1 : -1;
  // - Accumulate the three flocking rules over the (up to) 8 candidate cells.
  glm::vec3 thisVel = vel1[index];
  int neighborCount = 0;
  glm::vec3 centroid(0.0f, 0.0f, 0.0f);
  glm::vec3 separate(0.0f, 0.0f, 0.0f);
  glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
  for (int x = 0; x < 2; x++){
    for (int y = 0; y < 2; y++){
      for (int z = 0; z < 2; z++){
        int nx = cellX + x * deltaX;
        int ny = cellY + y * deltaY;
        int nz = cellZ + z * deltaZ;
        // BUGFIX: reject cells outside the grid per-axis; the original only
        // rejected flattened indices past the end (with `>` instead of `>=`),
        // so negative coordinates caused out-of-bounds reads.
        if (nx < 0 || nx >= gridResolution ||
            ny < 0 || ny >= gridResolution ||
            nz < 0 || nz >= gridResolution) continue;
        int gridCellToCheck = gridIndex3Dto1D(nx, ny, nz, gridResolution);
        int start = gridCellStartIndices[gridCellToCheck];
        if (start == -1) continue; // still -1 from the reset pass => empty cell
        int end = gridCellEndIndices[gridCellToCheck]; // exclusive
        for (int b = start; b < end; b++){
          // Coherent layout: sorted rank IS the pos/vel index.
          // BUGFIX: exclude self, matching the brute-force implementation.
          if (b == index) continue;
          float distance = glm::distance(thisPos, pos[b]);
          // Rule 1: fly toward the local perceived center of mass.
          if (distance < rule1Distance){
            centroid += pos[b];
            neighborCount += 1;
          }
          // Rule 2: keep a small distance from other boids.
          if (distance < rule2Distance){
            separate -= pos[b] - thisPos;
          }
          // Rule 3: match the velocity of surrounding boids.
          if (distance < rule3Distance){
            cohesion += vel1[b];
          }
        }
      }
    }
  }
  if (neighborCount > 0){
    centroid /= neighborCount;
    thisVel += (centroid - thisPos) * rule1Scale;
    thisVel += cohesion * rule3Scale;
  }
  thisVel += separate * rule2Scale;
  // - Clamp the speed change before putting the new speed in vel2.
  if (glm::length(thisVel) > maxSpeed) thisVel = thisVel * maxSpeed / glm::length(thisVel);
  vel2[index] = thisVel;
}
/**
 * Step the entire N-body simulation by `dt` seconds using the brute-force
 * O(N^2) neighbor scan.
 */
void Boids::stepSimulationNaive(float dt) {
  dim3 boidBlocks((numObjects + blockSize - 1) / blockSize);
  // Velocities are read from vel1 and written to vel2, then positions are
  // integrated with the fresh velocities.
  kernUpdateVelocityBruteForce << <boidBlocks, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2);
  kernUpdatePos << <boidBlocks, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
  // Ping-pong so dev_vel1 holds the latest velocities for the next step.
  glm::vec3 *swapPtr = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = swapPtr;
}
void Boids::stepSimulationScatteredGrid(float dt) {
  // TODO-2.1 Uniform-grid neighbor search (scattered boid data):
  // label -> sort by cell -> find per-cell ranges -> simulate.
  dim3 boidBlocks((numObjects + blockSize - 1) / blockSize);
  dim3 cellBlocks((gridCellCount + blockSize - 1) / blockSize);
  // Label each boid with its grid cell and its index into pos/vel.
  kernComputeIndices << <boidBlocks, blockSize >> >(numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  // Sort boid indices by grid-cell key; an unstable sort is sufficient.
  thrust::device_ptr<int> cellKeys(dev_particleGridIndices);
  thrust::device_ptr<int> boidIndices(dev_particleArrayIndices);
  thrust::sort_by_key(cellKeys, cellKeys + numObjects, boidIndices);
  // Mark every cell empty (-1), then record each cell's start/end range.
  kernResetIntBuffer << <cellBlocks, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  kernResetIntBuffer << <cellBlocks, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  kernIdentifyCellStartEnd << <boidBlocks, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  // Velocity update via scattered neighbor search, then integrate positions.
  kernUpdateVelNeighborSearchScattered << <boidBlocks, blockSize >> >(
    numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_particleArrayIndices,
    dev_pos, dev_vel1, dev_vel2);
  kernUpdatePos << <boidBlocks, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
  // Ping-pong the velocity buffers so dev_vel1 holds the latest state.
  glm::vec3 *swapPtr = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = swapPtr;
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // TODO-2.3 Uniform-grid neighbor search on cell-coherent data:
  // label -> sort by cell -> find cell ranges -> reshuffle pos/vel -> simulate.
  dim3 boidBlocks((numObjects + blockSize - 1) / blockSize);
  dim3 cellBlocks((gridCellCount + blockSize - 1) / blockSize);
  // Label each boid with its grid cell and its index into pos/vel.
  kernComputeIndices << <boidBlocks, blockSize >> >(numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  // Sort boid indices by grid-cell key; an unstable sort is sufficient.
  thrust::device_ptr<int> cellKeys(dev_particleGridIndices);
  thrust::device_ptr<int> boidIndices(dev_particleArrayIndices);
  thrust::sort_by_key(cellKeys, cellKeys + numObjects, boidIndices);
  // Mark every cell empty (-1), then record each cell's start/end range.
  kernResetIntBuffer << <cellBlocks, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  kernResetIntBuffer << <cellBlocks, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  kernIdentifyCellStartEnd << <boidBlocks, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  // BIG DIFFERENCE vs scattered: gather pos/vel into cell-sorted order so the
  // neighbor search reads memory coherently, then swap the shuffled buffers in.
  kernMakePosAndVelCoherent << <boidBlocks, blockSize >> >(numObjects, dev_particleArrayIndices, dev_pos, dev_pos2, dev_vel1, dev_vel2);
  glm::vec3 *swapPtr = dev_pos;
  dev_pos = dev_pos2;
  dev_pos2 = swapPtr;
  swapPtr = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = swapPtr;
  // Velocity update via coherent neighbor search, then integrate positions.
  kernUpdateVelNeighborSearchCoherent << <boidBlocks, blockSize >> >(
    numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_pos, dev_vel1, dev_vel2);
  kernUpdatePos << <boidBlocks, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
  // Ping-pong the velocity buffers so dev_vel1 holds the latest state.
  swapPtr = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = swapPtr;
}
void Boids::endSimulation() {
  // Release every device allocation made in Boids::initSimulation.
  cudaFree(dev_pos);
  cudaFree(dev_pos2);
  cudaFree(dev_vel1);
  cudaFree(dev_vel2);
  cudaFree(dev_particleArrayIndices);
  cudaFree(dev_particleGridIndices);
  cudaFree(dev_gridCellStartIndices);
  cudaFree(dev_gridCellEndIndices);
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // Exercises thrust::sort_by_key on a small key/value set: keys come back
  // ascending and values are permuted alongside them (stability not required).
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup: arrays allocated with new[] must be released with delete[]
  // (plain `delete` on a new[] pointer is undefined behavior)
  delete[] intKeys;
  delete[] intValues;
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
}
|
d36f341b0000f15490ca6acf6a3e07f213a9f2ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_BEST_KERNEL_CU_
#define _SCAN_BEST_KERNEL_CU_
// Define this to more rigorously avoid bank conflicts,
// even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance
// is lower with ZERO_BANK_CONFLICTS enabled. It is provided
// as an example.
//#define ZERO_BANK_CONFLICTS
// 16 banks on G80
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#ifdef ZERO_BANK_CONFLICTS
// Padding that avoids ALL shared-memory bank conflicts: one extra element
// per NUM_BANKS entries plus one per NUM_BANKS^2 entries.
// FIX: the shifts must be fully parenthesized -- `+` binds tighter than
// `>>`, so the unparenthesized original actually computed
// (index) >> (LOG_NUM_BANKS + ((index) >> (2*LOG_NUM_BANKS))).
#define CONFLICT_FREE_OFFSET(index) (((index) >> LOG_NUM_BANKS) + ((index) >> (2*LOG_NUM_BANKS)))
#else
// Padding that avoids most bank conflicts: one extra element per
// NUM_BANKS entries.
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// Loads one block's two-elements-per-thread window from global memory into
// the bank-conflict-padded shared array s_data, returning by reference the
// indices the rest of the scan reuses: shared indices (ai, bi), global
// indices (mem_ai, mem_bi), and the CONFLICT_FREE_OFFSET paddings
// (bankOffsetA/B).  isNP2 is a compile-time flag: when true, n is not a
// power of two and the second element is zero-padded once bi >= n.
// NOTE(review): the first element is loaded unconditionally -- this assumes
// mem_ai always lies inside g_idata; confirm callers guarantee it.
template <bool isNP2>
__device__ void loadSharedChunkFromMem(float *s_data,
const float *g_idata,
int n, int baseIndex,
int& ai, int& bi,
int& mem_ai, int& mem_bi,
int& bankOffsetA, int& bankOffsetB)
{
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
// Each thread owns elements thid and thid + blockDim.x of the window.
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
// pad values beyond n with zeros
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) // compile-time decision
{
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
}
else
{
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
// Mirror of loadSharedChunkFromMem: writes the scanned shared-memory window
// back to global memory, two elements per thread; when isNP2 is set the
// second store is skipped for bi >= n.  The leading __syncthreads() ensures
// every thread's scan work on s_data has finished before any store begins.
template <bool isNP2>
__device__ void storeSharedChunkToMem(float* g_odata,
const float* s_data,
int n,
int ai, int bi,
int mem_ai, int mem_bi,
int bankOffsetA, int bankOffsetB)
{
__syncthreads();
// write results to global memory
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) // compile-time decision
{
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
else
{
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
// Thread 0 zeroes the last (padded) element of the shared array so the
// down-sweep produces an EXCLUSIVE scan.  When storeSum is set, the block
// total held in that element is first saved to g_blockSums[blockIndex] so a
// later uniformAdd pass can stitch multi-block scans together.
template <bool storeSum>
__device__ void clearLastElement(float* s_data,
float *g_blockSums,
int blockIndex)
{
if (threadIdx.x == 0)
{
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) // compile-time decision
{
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
// Up-sweep (reduce) phase of the Blelloch scan: builds partial sums in
// place up a balanced tree over the 2*blockDim.x shared elements.  Returns
// the stride reached at the top of the tree, which scanRootToLeaves takes
// as its starting point.
// NOTE(review): the halving tree (d >>= 1) assumes blockDim.x is a power
// of two -- confirm launch configurations respect that.
__device__ unsigned int buildSum(float *s_data)
{
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1)
{
// Barrier sits outside the (thid < d) branch, so all threads reach it.
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
// Down-sweep phase of the Blelloch scan: starting from the stride returned
// by buildSum (after clearLastElement zeroed the root), walks back down the
// tree so s_data ends up holding the exclusive prefix scan in place.
__device__ void scanRootToLeaves(float *s_data, unsigned int stride)
{
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2)
{
stride >>= 1;
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
// Classic down-sweep step: left child receives the incoming prefix,
// right child receives prefix + left-subtree sum.
float t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
// Scans one block's shared-memory window end to end: up-sweep, clear the
// last element (saving the block total when storeSum is set), down-sweep.
// A blockIndex of 0 means "use the hardware blockIdx.x" as the index into
// the block-sums array.
template <bool storeSum>
__device__ void prescanBlock(float *data, int blockIndex, float *blockSums)
{
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
// Kernel: exclusive (Blelloch) prefix scan of one 2*blockDim.x-element
// chunk per thread block.
// Compile-time flags:
//   storeSum - also write each block's total into g_blockSums so a later
//              uniformAdd pass can combine multi-block arrays.
//   isNP2    - n is not a power of two; the tail is zero-padded.
// baseIndex == 0 means "derive the chunk base from blockIdx.x".  Requires
// dynamic shared memory (3rd launch parameter) of at least
// (2*blockDim.x + CONFLICT_FREE_OFFSET padding) floats.
template <bool storeSum, bool isNP2>
__global__ void prescan(float *g_odata,
const float *g_idata,
float *g_blockSums,
int n,
int blockIndex,
int baseIndex)
{
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ float s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(s_data, g_idata, n,
(baseIndex == 0) ?
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
}
// Kernel: adds each block's scanned total ("uniform") back into that
// block's chunk of g_data, completing a scan that was split across blocks.
// Thread 0 stages the uniform in shared memory; every thread then applies
// it to its two elements.
// NOTE(review): the first add is not range-checked and the second guard
// compares threadIdx.x + blockDim.x against n rather than the global
// address -- this mirrors the original SDK sample, where n is the element
// count of the (possibly partial) last block; confirm before reusing with
// other launch geometries.
__global__ void uniformAdd(float *g_data,
float *uniforms,
int n,
int blockOffset,
int baseIndex)
{
__shared__ float uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
// Barrier makes thread 0's write to `uni` visible to the whole block.
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
#endif // #ifndef _SCAN_BEST_KERNEL_CU_ | d36f341b0000f15490ca6acf6a3e07f213a9f2ea.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_BEST_KERNEL_CU_
#define _SCAN_BEST_KERNEL_CU_
// Define this to more rigorously avoid bank conflicts,
// even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance
// is lower with ZERO_BANK_CONFLICTS enabled. It is provided
// as an example.
//#define ZERO_BANK_CONFLICTS
// 16 banks on G80
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#ifdef ZERO_BANK_CONFLICTS
// Padding that avoids ALL shared-memory bank conflicts: one extra element
// per NUM_BANKS entries plus one per NUM_BANKS^2 entries.
// FIX: the shifts must be fully parenthesized -- `+` binds tighter than
// `>>`, so the unparenthesized original actually computed
// (index) >> (LOG_NUM_BANKS + ((index) >> (2*LOG_NUM_BANKS))).
#define CONFLICT_FREE_OFFSET(index) (((index) >> LOG_NUM_BANKS) + ((index) >> (2*LOG_NUM_BANKS)))
#else
// Padding that avoids most bank conflicts: one extra element per
// NUM_BANKS entries.
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// Loads one block's two-elements-per-thread window from global memory into
// the bank-conflict-padded shared array s_data, returning by reference the
// indices the rest of the scan reuses: shared indices (ai, bi), global
// indices (mem_ai, mem_bi), and the CONFLICT_FREE_OFFSET paddings
// (bankOffsetA/B).  isNP2 is a compile-time flag: when true, n is not a
// power of two and the second element is zero-padded once bi >= n.
// NOTE(review): the first element is loaded unconditionally -- this assumes
// mem_ai always lies inside g_idata; confirm callers guarantee it.
template <bool isNP2>
__device__ void loadSharedChunkFromMem(float *s_data,
const float *g_idata,
int n, int baseIndex,
int& ai, int& bi,
int& mem_ai, int& mem_bi,
int& bankOffsetA, int& bankOffsetB)
{
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
// Each thread owns elements thid and thid + blockDim.x of the window.
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
// pad values beyond n with zeros
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) // compile-time decision
{
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
}
else
{
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
// Mirror of loadSharedChunkFromMem: writes the scanned shared-memory window
// back to global memory, two elements per thread; when isNP2 is set the
// second store is skipped for bi >= n.  The leading __syncthreads() ensures
// every thread's scan work on s_data has finished before any store begins.
template <bool isNP2>
__device__ void storeSharedChunkToMem(float* g_odata,
const float* s_data,
int n,
int ai, int bi,
int mem_ai, int mem_bi,
int bankOffsetA, int bankOffsetB)
{
__syncthreads();
// write results to global memory
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) // compile-time decision
{
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
else
{
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
// Thread 0 zeroes the last (padded) element of the shared array so the
// down-sweep produces an EXCLUSIVE scan.  When storeSum is set, the block
// total held in that element is first saved to g_blockSums[blockIndex] so a
// later uniformAdd pass can stitch multi-block scans together.
template <bool storeSum>
__device__ void clearLastElement(float* s_data,
float *g_blockSums,
int blockIndex)
{
if (threadIdx.x == 0)
{
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) // compile-time decision
{
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
// Up-sweep (reduce) phase of the Blelloch scan: builds partial sums in
// place up a balanced tree over the 2*blockDim.x shared elements.  Returns
// the stride reached at the top of the tree, which scanRootToLeaves takes
// as its starting point.
// NOTE(review): the halving tree (d >>= 1) assumes blockDim.x is a power
// of two -- confirm launch configurations respect that.
__device__ unsigned int buildSum(float *s_data)
{
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1)
{
// Barrier sits outside the (thid < d) branch, so all threads reach it.
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
// Down-sweep phase of the Blelloch scan: starting from the stride returned
// by buildSum (after clearLastElement zeroed the root), walks back down the
// tree so s_data ends up holding the exclusive prefix scan in place.
__device__ void scanRootToLeaves(float *s_data, unsigned int stride)
{
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2)
{
stride >>= 1;
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
// Classic down-sweep step: left child receives the incoming prefix,
// right child receives prefix + left-subtree sum.
float t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
// Scans one block's shared-memory window end to end: up-sweep, clear the
// last element (saving the block total when storeSum is set), down-sweep.
// A blockIndex of 0 means "use the hardware blockIdx.x" as the index into
// the block-sums array.
template <bool storeSum>
__device__ void prescanBlock(float *data, int blockIndex, float *blockSums)
{
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
// Kernel: exclusive (Blelloch) prefix scan of one 2*blockDim.x-element
// chunk per thread block.
// Compile-time flags:
//   storeSum - also write each block's total into g_blockSums so a later
//              uniformAdd pass can combine multi-block arrays.
//   isNP2    - n is not a power of two; the tail is zero-padded.
// baseIndex == 0 means "derive the chunk base from blockIdx.x".  Requires
// dynamic shared memory (3rd launch parameter) of at least
// (2*blockDim.x + CONFLICT_FREE_OFFSET padding) floats.
template <bool storeSum, bool isNP2>
__global__ void prescan(float *g_odata,
const float *g_idata,
float *g_blockSums,
int n,
int blockIndex,
int baseIndex)
{
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ float s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(s_data, g_idata, n,
(baseIndex == 0) ?
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
}
// Kernel: adds each block's scanned total ("uniform") back into that
// block's chunk of g_data, completing a scan that was split across blocks.
// Thread 0 stages the uniform in shared memory; every thread then applies
// it to its two elements.
// NOTE(review): the first add is not range-checked and the second guard
// compares threadIdx.x + blockDim.x against n rather than the global
// address -- this mirrors the original SDK sample, where n is the element
// count of the (possibly partial) last block; confirm before reusing with
// other launch geometries.
__global__ void uniformAdd(float *g_data,
float *uniforms,
int n,
int blockOffset,
int baseIndex)
{
__shared__ float uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
// Barrier makes thread 0's write to `uni` visible to the whole block.
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
#endif // #ifndef _SCAN_BEST_KERNEL_CU_ |
4eb858cbaeb7c63548ee4507eb6c75453d1b91fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
typedef unsigned int U4;
#ifdef USE_FP32
typedef float T_real;
#else
typedef double T_real;
#endif
#ifndef N_SERIES
#define N_SERIES ( U4 )( 1<<31 )
#endif
#define TRUE_PI "3.141592653589793238462643383279"
#define STEP ( T_real )( 1./N_SERIES )
// Abort the program with a diagnostic when a HIP API call has failed.
// On success this is a no-op; on failure it prints the error string with
// the call site (file/line) and exits.  Use via the HANDLE_ERROR macro,
// which supplies __FILE__/__LINE__.
static void HandleError( hipError_t err,
                         const char *file,
                         int line )
{
    if (err == hipSuccess) {
        return;                       // fast path: nothing to report
    }
    printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#ifdef USE_ATOMIC
// Kernel (USE_ATOMIC variant): grid-stride accumulation of the midpoint
// rule for the integral of 4/(1+x^2) over [0,1].  Each thread sums its
// items into a private shared slot, then atomically folds that slot into
// pi_series[threadIdx.x]; the host finishes the reduction and scales by
// STEP.
// NOTE(review): NTHREADS sizes a __shared__ array, so it must be a
// compile-time macro (e.g. -DNTHREADS=...); the NTHREADS in main() is a
// runtime variable of the same name and cannot be the one used here --
// confirm the build defines the macro.  atomicAdd on double (the default
// T_real) also needs hardware support -- confirm the target architecture.
__global__ void calc_pi( T_real *pi_series ) {
U4 tid = threadIdx.x + blockIdx.x * blockDim.x;
T_real item;
__shared__ T_real pi_cache[NTHREADS];
pi_cache[threadIdx.x] = 0.;
// __syncthreads();
while( tid < N_SERIES ) {
item = ( tid + 0.5 ) * STEP;
item = 4. / (1. + item*item);
pi_cache[threadIdx.x] += item; // No need for atomicAdd here
tid += blockDim.x * gridDim.x;
}
atomicAdd( &pi_series[threadIdx.x], pi_cache[threadIdx.x] );
}
#else
// Kernel (tree-reduction variant): same per-thread series accumulation,
// followed by an in-block halving reduction in shared memory; thread 0
// adds the block total into pi_series[blockIdx.x].
// NOTE(review): the halving loop assumes NTHREADS is a power of two, and
// NTHREADS must be a compile-time macro here too (see the note on the
// USE_ATOMIC variant).
__global__ void calc_pi( T_real *pi_series ) {
U4 tid = threadIdx.x + blockIdx.x * blockDim.x;
T_real item;
__shared__ T_real pi_cache[NTHREADS];
pi_cache[threadIdx.x] = 0.;
while( tid < N_SERIES ) {
item = ( tid + 0.5 ) * STEP;
item = 4. / (1. + item*item);
pi_cache[threadIdx.x] += item; // No need for atomicAdd here
tid += blockDim.x * gridDim.x;
}
__syncthreads();
// Reduction to NBLOCKS elements
int i = NTHREADS/2;
while( i ) {
if ( threadIdx.x < i ) {
pi_cache[threadIdx.x] += pi_cache[i + threadIdx.x];
}
__syncthreads();
i /= 2;
}
if (threadIdx.x == 0)
pi_series[blockIdx.x] += pi_cache[0];
}
#endif
/*
 * Host driver: integrates 4/(1+x^2) over [0,1] with N_SERIES midpoint
 * samples on the GPU to approximate pi, times the whole run with HIP
 * events, prints the result and configuration, and writes the elapsed
 * time to "data.txt".
 *
 * Usage: <prog> <threads-per-block> <num-blocks>
 *
 * Fixes applied to the original:
 *  - the host reduction looped to an undefined SIZE_SERIES; the device
 *    buffer holds NBLOCKS partial sums, so the loop now runs to NBLOCKS.
 *  - file.open( 'data.txt' ) passed a multi-character char literal
 *    instead of the string "data.txt", and is_open() was misspelled
 *    si_open().
 *  - argc is validated before argv[1]/argv[2] are read.
 */
int main ( int argc, char* argv[] ) {
    if ( argc < 3 ) {
        printf( "usage: %s <threads-per-block> <num-blocks>\n", argv[0] );
        return 1;
    }
    U4 NTHREADS = std::stoi(argv[1]);
    U4 NBLOCKS = std::stoi(argv[2]);
    U4 size = NBLOCKS * sizeof( T_real );   // one partial sum per block
    T_real my_pi(0.);
    float elapsed_time;
    // Reduced PI series: host copy and matching device buffer.
    T_real *pi_series = ( T_real* )malloc( size );
    T_real *dev_pi_series;
    hipEvent_t start, stop;
    HANDLE_ERROR( hipEventCreate( &start ) );
    HANDLE_ERROR( hipEventCreate( &stop ) );
    HANDLE_ERROR( hipEventRecord( start, 0 ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_pi_series, size ) );
    HANDLE_ERROR( hipMemset( dev_pi_series, 0, size ) );
    dim3 dimGrid( NBLOCKS, 1, 1 );
    dim3 dimBlocks( NTHREADS, 1, 1 );
    hipLaunchKernelGGL(( calc_pi) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, dev_pi_series );
    HANDLE_ERROR( hipDeviceSynchronize() );
    HANDLE_ERROR( hipMemcpy( pi_series, dev_pi_series, size, hipMemcpyDeviceToHost) );
    // Final reduction on the host over the NBLOCKS per-block partial sums
    // (the original looped over an undefined SIZE_SERIES).
    // NOTE(review): with USE_ATOMIC the kernel accumulates into NTHREADS
    // slots instead -- confirm buffer sizing for that build.
    for (U4 i = 0; i < NBLOCKS; ++i) {
        my_pi += pi_series[i];
    }
    my_pi *= STEP;   // scale the Riemann sum by the integration step
    HANDLE_ERROR( hipEventRecord( stop, 0 ) );
    HANDLE_ERROR( hipEventSynchronize( stop ) );
    HANDLE_ERROR( hipEventElapsedTime( &elapsed_time, start, stop ) );
    HANDLE_ERROR( hipFree( dev_pi_series ) );
    HANDLE_ERROR( hipEventDestroy( start ) );
    HANDLE_ERROR( hipEventDestroy( stop ) );
    free( pi_series );
    printf("\nResult with 30 digits:\n");
    printf("\n PI calculated = %1.30f\n", my_pi);
    printf(" True PI = %s\n", TRUE_PI);
    printf(" ");
    for (int i=1; i!=15; ++i)
        printf(" ");
    printf("^\n");
    printf("================================================\n\n");
    printf("Run with configuration: \n");
    printf(" N thread blocks = %12u\n", NBLOCKS);
    printf("N threads per block = %12u\n", NTHREADS);
    printf(" Series Length = %12u\n", N_SERIES);
    printf(" N Total Threads = %12u\n\n", NTHREADS*NBLOCKS);
    printf(" Elapsed Time = %.3f ms.\n\n", elapsed_time);
    // Write the timing to data.txt (the original passed a multi-char
    // literal to open() and called a nonexistent si_open()).
    std::ofstream file;
    file.open( "data.txt" );
    if( file.is_open() )
        file << elapsed_time << '\n';
    file.close();
    return 0;
}
| 4eb858cbaeb7c63548ee4507eb6c75453d1b91fe.cu | #include <iostream>
#include <fstream>
#include <cuda.h>
typedef unsigned int U4;
#ifdef USE_FP32
typedef float T_real;
#else
typedef double T_real;
#endif
#ifndef N_SERIES
#define N_SERIES ( U4 )( 1<<31 )
#endif
#define TRUE_PI "3.141592653589793238462643383279"
#define STEP ( T_real )( 1./N_SERIES )
// Abort the program with a diagnostic when a CUDA API call has failed.
// On success this is a no-op; on failure it prints the error string with
// the call site (file/line) and exits.  Use via the HANDLE_ERROR macro,
// which supplies __FILE__/__LINE__.
static void HandleError( cudaError_t err,
                         const char *file,
                         int line )
{
    if (err == cudaSuccess) {
        return;                       // fast path: nothing to report
    }
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#ifdef USE_ATOMIC
// Kernel (USE_ATOMIC variant): grid-stride accumulation of the midpoint
// rule for the integral of 4/(1+x^2) over [0,1].  Each thread sums its
// items into a private shared slot, then atomically folds that slot into
// pi_series[threadIdx.x]; the host finishes the reduction and scales by
// STEP.
// NOTE(review): NTHREADS sizes a __shared__ array, so it must be a
// compile-time macro (e.g. -DNTHREADS=...); the NTHREADS in main() is a
// runtime variable of the same name and cannot be the one used here --
// confirm the build defines the macro.  atomicAdd on double (the default
// T_real) requires SM60+ -- confirm the target architecture.
__global__ void calc_pi( T_real *pi_series ) {
U4 tid = threadIdx.x + blockIdx.x * blockDim.x;
T_real item;
__shared__ T_real pi_cache[NTHREADS];
pi_cache[threadIdx.x] = 0.;
// __syncthreads();
while( tid < N_SERIES ) {
item = ( tid + 0.5 ) * STEP;
item = 4. / (1. + item*item);
pi_cache[threadIdx.x] += item; // No need for atomicAdd here
tid += blockDim.x * gridDim.x;
}
atomicAdd( &pi_series[threadIdx.x], pi_cache[threadIdx.x] );
}
#else
// Kernel (tree-reduction variant): same per-thread series accumulation,
// followed by an in-block halving reduction in shared memory; thread 0
// adds the block total into pi_series[blockIdx.x].
// NOTE(review): the halving loop assumes NTHREADS is a power of two, and
// NTHREADS must be a compile-time macro here too (see the note on the
// USE_ATOMIC variant).
__global__ void calc_pi( T_real *pi_series ) {
U4 tid = threadIdx.x + blockIdx.x * blockDim.x;
T_real item;
__shared__ T_real pi_cache[NTHREADS];
pi_cache[threadIdx.x] = 0.;
while( tid < N_SERIES ) {
item = ( tid + 0.5 ) * STEP;
item = 4. / (1. + item*item);
pi_cache[threadIdx.x] += item; // No need for atomicAdd here
tid += blockDim.x * gridDim.x;
}
__syncthreads();
// Reduction to NBLOCKS elements
int i = NTHREADS/2;
while( i ) {
if ( threadIdx.x < i ) {
pi_cache[threadIdx.x] += pi_cache[i + threadIdx.x];
}
__syncthreads();
i /= 2;
}
if (threadIdx.x == 0)
pi_series[blockIdx.x] += pi_cache[0];
}
#endif
/*
 * Host driver: integrates 4/(1+x^2) over [0,1] with N_SERIES midpoint
 * samples on the GPU to approximate pi, times the whole run with CUDA
 * events, prints the result and configuration, and writes the elapsed
 * time to "data.txt".
 *
 * Usage: <prog> <threads-per-block> <num-blocks>
 *
 * Fixes applied to the original:
 *  - the host reduction looped to an undefined SIZE_SERIES; the device
 *    buffer holds NBLOCKS partial sums, so the loop now runs to NBLOCKS.
 *  - file.open( 'data.txt' ) passed a multi-character char literal
 *    instead of the string "data.txt", and is_open() was misspelled
 *    si_open().
 *  - argc is validated before argv[1]/argv[2] are read.
 */
int main ( int argc, char* argv[] ) {
    if ( argc < 3 ) {
        printf( "usage: %s <threads-per-block> <num-blocks>\n", argv[0] );
        return 1;
    }
    U4 NTHREADS = std::stoi(argv[1]);
    U4 NBLOCKS = std::stoi(argv[2]);
    U4 size = NBLOCKS * sizeof( T_real );   // one partial sum per block
    T_real my_pi(0.);
    float elapsed_time;
    // Reduced PI series: host copy and matching device buffer.
    T_real *pi_series = ( T_real* )malloc( size );
    T_real *dev_pi_series;
    cudaEvent_t start, stop;
    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );
    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_pi_series, size ) );
    HANDLE_ERROR( cudaMemset( dev_pi_series, 0, size ) );
    dim3 dimGrid( NBLOCKS, 1, 1 );
    dim3 dimBlocks( NTHREADS, 1, 1 );
    calc_pi <<< dimGrid, dimBlocks >>> ( dev_pi_series );
    HANDLE_ERROR( cudaDeviceSynchronize() );
    HANDLE_ERROR( cudaMemcpy( pi_series, dev_pi_series, size, cudaMemcpyDeviceToHost) );
    // Final reduction on the host over the NBLOCKS per-block partial sums
    // (the original looped over an undefined SIZE_SERIES).
    // NOTE(review): with USE_ATOMIC the kernel accumulates into NTHREADS
    // slots instead -- confirm buffer sizing for that build.
    for (U4 i = 0; i < NBLOCKS; ++i) {
        my_pi += pi_series[i];
    }
    my_pi *= STEP;   // scale the Riemann sum by the integration step
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    HANDLE_ERROR( cudaEventElapsedTime( &elapsed_time, start, stop ) );
    HANDLE_ERROR( cudaFree( dev_pi_series ) );
    HANDLE_ERROR( cudaEventDestroy( start ) );
    HANDLE_ERROR( cudaEventDestroy( stop ) );
    free( pi_series );
    printf("\nResult with 30 digits:\n");
    printf("\n PI calculated = %1.30f\n", my_pi);
    printf(" True PI = %s\n", TRUE_PI);
    printf(" ");
    for (int i=1; i!=15; ++i)
        printf(" ");
    printf("^\n");
    printf("================================================\n\n");
    printf("Run with configuration: \n");
    printf(" N thread blocks = %12u\n", NBLOCKS);
    printf("N threads per block = %12u\n", NTHREADS);
    printf(" Series Length = %12u\n", N_SERIES);
    printf(" N Total Threads = %12u\n\n", NTHREADS*NBLOCKS);
    printf(" Elapsed Time = %.3f ms.\n\n", elapsed_time);
    // Write the timing to data.txt (the original passed a multi-char
    // literal to open() and called a nonexistent si_open()).
    std::ofstream file;
    file.open( "data.txt" );
    if( file.is_open() )
        file << elapsed_time << '\n';
    file.close();
    return 0;
}
|
78445ccc2c06c5c0c0d53124ffb995fcb40fd925.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "MatKernel.hpp"
#if __CUDA_ARCH__ > 200
// Helper macro for an edit-distance-style cell update via PTX video SIMD
// ops.  NOTE(review): it is not used anywhere in this file and references
// MM, RZ and RPP, which are not defined here -- it will only compile where
// those names are in scope; the macro appears vestigial, confirm before
// relying on it.
#define edcellupdate(RR,RP1,RP2,RPP,WUN,TMP) \
asm("vmin4.s32.s32.s32.add" "%0, %1.b3210, %2.b4321, %3;": "=r" (RR) : "r" (RP1), "r" (RP2), "r" (WUN)); \
asm("vadd4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (TMP) : "r" (MM), "r" (RZ), "r" (RR)); \
asm("vmin4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (RR) : "r" (TMP), "r" (RR), "r" (RR));
// One step of the weighted byte-wise mismatch accumulation, written with
// PTX video SIMD (SIMD-within-a-32-bit-word) instructions: tmp = a0 & b0,
// per-byte compared against zero to build a byte mask; c then accumulates
// per byte the min of the weight word w0 and that mask; finally a0 is
// shifted one byte (the .b4321 lane selectors) pulling in the low byte of
// the lookahead word a1.
// NOTE(review): lane semantics inferred from the PTX ISA vset4/vsub4/
// vmin4/vmax4 descriptions -- verify against the ISA doc before modifying.
__device__ void hammingcell(int &a0, int a1, int b0, int w0, int &c, int tmp, int zero) {
asm("and.b32" "%0, %1, %2;": "=r" (tmp) : "r" (a0), "r" (b0));
asm("vset4.s32.s32.eq" "%0, %1, %2, %3;": "=r" (tmp) : "r" (tmp), "r" (zero), "r" (zero));
asm("vsub4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (tmp) : "r" (zero), "r" (tmp), "r" (zero));
asm("vmin4.u32.u32.u32.add" "%0, %1, %2, %3;": "=r" (c) : "r" (w0), "r" (tmp), "r" (c));
asm("vmax4.u32.u32.u32" "%0, %1.b4321, %2.b4321, %3;": "=r" (a0) : "r" (a0), "r" (a1), "r" (a0));
}
// Shifts a0 right by one byte (8 bits); used to advance the spare
// lookahead word as hammingcell consumes one byte alignment per step.
__device__ void rotate1(int &a0) {
asm("shr.b32" "%0, %1, 8;": "=r" (a0) : "r" (a0));
}
// Kernel: weighted Hamming-distance search.  Each threadIdx.y row of a
// 32x32 block handles one of n query strings: the VECLEN*NVEC-word pattern
// b (with per-word byte weights w) is slid one byte at a time across this
// block's TLEN-word chunk of a, and the byte offset with the minimum
// summed mismatch weight is recorded.
// Outputs per string: op = best offset, ow = best (minimum) score.
// Launch geometry: gridDim.x = ceil(n/32), blockDim = (32, 32).
// Requires SM30+ (__shfl_down and PTX video SIMD instructions).
// NOTE(review): legacy mask-less __shfl_down is removed on Volta+
// toolchains (would need __shfl_down_sync there).  Also, both
// __syncthreads() calls sit inside the `< n` guard, which diverges in the
// tail block when n is not a multiple of 32 -- confirm callers always pass
// a multiple of 32 (hammingdists() below launches ceil(n/32) full blocks).
template<int VECLEN, int NVEC, int TLEN>
__global__ void __hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) {
__shared__ int sa[TLEN];
__shared__ int sb[32][VECLEN*NVEC+1];
__shared__ int sw[32][VECLEN*NVEC+1];
__shared__ int sop[32];
__shared__ int sow[32];
register int aa[VECLEN+1];
register int bb[VECLEN];
register int ww[VECLEN];
int i, ioff, ioffmv, ip, tmp, tmp1, j, k, c, cmin, imin;
int zero = 0;
int sid = threadIdx.x + blockDim.x * threadIdx.y;
if (threadIdx.y + blockDim.y * blockIdx.x < n) {
// Load data into shared memory
for (i = 0; i < TLEN/1024; i++) {
sa[sid + i*1024] = a[sid + i*1024 + TLEN*blockIdx.x];
}
for (i = 0; i < VECLEN*NVEC/32; i++) {
sb[threadIdx.y][threadIdx.x + i*blockDim.x] = b[sid + i*1024 + VECLEN*NVEC*blockIdx.x];
sw[threadIdx.y][threadIdx.x + i*blockDim.x] = w[sid + i*1024 + VECLEN*NVEC*blockIdx.x];
}
__syncthreads();
ip = threadIdx.x / NVEC;
ioffmv = (threadIdx.x % NVEC) * VECLEN;
ioff = ioffmv + ip * (TLEN*NVEC/32);
cmin = 0x7fffffff;
imin = -1;
// Load data for this thread into registers
#pragma unroll
for (j = 0; j < VECLEN; j++) {
tmp = j + ioff;
if (tmp < TLEN) {
aa[j] = sa[tmp];
}
bb[j] = sb[threadIdx.y][j + ioffmv];
ww[j] = sw[threadIdx.y][j + ioffmv];
}
// Step through offsets in A string
for (j = 0; j < TLEN*NVEC/8; j++) {
tmp = VECLEN + ioff + j / 4;
if (tmp - ioffmv < TLEN - VECLEN * NVEC) {
// Refresh the lookahead word every 4 byte-steps (one full word).
if (j % 4 == 0) {
aa[VECLEN] = sa[tmp];
}
c = 0;
// Inner loop over the length of the vector in registers
#pragma unroll
for (k = 0; k < VECLEN; k++) {
hammingcell(aa[k], aa[k+1], bb[k], ww[k], c, tmp, zero);
}
rotate1(aa[VECLEN]);
// Need to sum over NVEC to get complete score for a string
#pragma unroll
for (k = 1; k < NVEC; k *= 2) {
tmp = __shfl_down(c, k);
c = c + tmp;
}
// Now compare with the accumulated min
if (c < cmin) {
cmin = c;
imin = 4 * ioff + j;
}
}
}
// Compute the min across groups of NVEC threads in this warp
for (k = NVEC; k < 32; k *= 2) {
tmp = __shfl_down(cmin, k);
tmp1 = __shfl_down(imin, k);
if (tmp < cmin) {
cmin = tmp;
imin = tmp1;
}
}
// Save to shared memory in prep for saving to main memory
if (threadIdx.x == 0) {
sop[threadIdx.y] = imin;
sow[threadIdx.y] = cmin;
}
__syncthreads();
// Save to main memory
if (threadIdx.y == 0) {
op[threadIdx.x + 32*blockIdx.x] = sop[threadIdx.x];
ow[threadIdx.x + 32*blockIdx.x] = sow[threadIdx.x];
}
}
}
// Debug kernel: byte-broadcast compare demo.  For each byte lane k of *a
// (broadcast with the .bkkkk selector), writes to successive elements of d
// the per-byte vset4 "not equal to *b" result.  Intended to be launched
// with a single thread (see veccmp() below).
__global__ void __veccmp(int *a, int *b, int *d) {
int xa = *a;
int xb = *b;
int xc = 0;
int xd = 0;
asm("vset4.s32.s32.ne" "%0, %1.b0000, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d++ = xd;
asm("vset4.s32.s32.ne" "%0, %1.b1111, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d++ = xd;
asm("vset4.s32.s32.ne" "%0, %1.b2222, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d++ = xd;
asm("vset4.s32.s32.ne" "%0, %1.b3333, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d = xd;
}
#else
// Fallback stubs for devices below SM30: the PTX video SIMD instructions
// and warp shuffles used above are unavailable, so these only report the
// limitation at runtime.
__global__ void __veccmp(int *a, int *b, int *d) {
printf("__veccmp() not defined for CUDA Arch < 300\n");
}
template<int VECLEN, int NVEC, int TLEN>
__global__ void __hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) {
printf("__hammingdists() not defined for CUDA Arch < 300\n");
}
#endif
// Host wrapper: runs __veccmp on a single thread.  a, b and d must be
// device pointers.  Always returns 0.
// NOTE(review): no error check after the launch, unlike hammingdists().
int veccmp(int *a, int *b, int *d) {
hipLaunchKernelGGL(( __veccmp), dim3(1),dim3(1), 0, 0, a, b, d);
return 0;
}
// Host wrapper: launches __hammingdists with one 32x32 block per 32
// strings (nb = ceil(n/32)) and the fixed instantiation VECLEN=16,
// NVEC=2, TLEN=1024; synchronizes and returns the last error code
// (hipSuccess == 0).
int hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) {
int nb = 1+((n-1)/32);
dim3 blockdims(32,32,1);
hipLaunchKernelGGL(( __hammingdists<16,2,1024>), dim3(nb),dim3(blockdims), 0, 0, a, b, w, op, ow, n);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
| 78445ccc2c06c5c0c0d53124ffb995fcb40fd925.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "MatKernel.hpp"
#if __CUDA_ARCH__ > 200
// Helper macro for an edit-distance-style cell update via PTX video SIMD
// ops.  NOTE(review): it is not used anywhere in this file and references
// MM, RZ and RPP, which are not defined here -- it will only compile where
// those names are in scope; the macro appears vestigial, confirm before
// relying on it.
#define edcellupdate(RR,RP1,RP2,RPP,WUN,TMP) \
asm("vmin4.s32.s32.s32.add" "%0, %1.b3210, %2.b4321, %3;": "=r" (RR) : "r" (RP1), "r" (RP2), "r" (WUN)); \
asm("vadd4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (TMP) : "r" (MM), "r" (RZ), "r" (RR)); \
asm("vmin4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (RR) : "r" (TMP), "r" (RR), "r" (RR));
// One step of the weighted byte-wise mismatch accumulation, written with
// PTX video SIMD (SIMD-within-a-32-bit-word) instructions: tmp = a0 & b0,
// per-byte compared against zero to build a byte mask; c then accumulates
// per byte the min of the weight word w0 and that mask; finally a0 is
// shifted one byte (the .b4321 lane selectors) pulling in the low byte of
// the lookahead word a1.
// NOTE(review): lane semantics inferred from the PTX ISA vset4/vsub4/
// vmin4/vmax4 descriptions -- verify against the ISA doc before modifying.
__device__ void hammingcell(int &a0, int a1, int b0, int w0, int &c, int tmp, int zero) {
asm("and.b32" "%0, %1, %2;": "=r" (tmp) : "r" (a0), "r" (b0));
asm("vset4.s32.s32.eq" "%0, %1, %2, %3;": "=r" (tmp) : "r" (tmp), "r" (zero), "r" (zero));
asm("vsub4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (tmp) : "r" (zero), "r" (tmp), "r" (zero));
asm("vmin4.u32.u32.u32.add" "%0, %1, %2, %3;": "=r" (c) : "r" (w0), "r" (tmp), "r" (c));
asm("vmax4.u32.u32.u32" "%0, %1.b4321, %2.b4321, %3;": "=r" (a0) : "r" (a0), "r" (a1), "r" (a0));
}
// Shifts a0 right by one byte (8 bits); used to advance the spare
// lookahead word as hammingcell consumes one byte alignment per step.
__device__ void rotate1(int &a0) {
asm("shr.b32" "%0, %1, 8;": "=r" (a0) : "r" (a0));
}
// Kernel: weighted Hamming-distance search.  Each threadIdx.y row of a
// 32x32 block handles one of n query strings: the VECLEN*NVEC-word pattern
// b (with per-word byte weights w) is slid one byte at a time across this
// block's TLEN-word chunk of a, and the byte offset with the minimum
// summed mismatch weight is recorded.
// Outputs per string: op = best offset, ow = best (minimum) score.
// Launch geometry: gridDim.x = ceil(n/32), blockDim = (32, 32).
// Requires SM30+ (__shfl_down and PTX video SIMD instructions).
// NOTE(review): legacy mask-less __shfl_down is removed on Volta+
// toolchains (would need __shfl_down_sync there).  Also, both
// __syncthreads() calls sit inside the `< n` guard, which diverges in the
// tail block when n is not a multiple of 32 -- confirm callers always pass
// a multiple of 32 (hammingdists() below launches ceil(n/32) full blocks).
template<int VECLEN, int NVEC, int TLEN>
__global__ void __hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) {
__shared__ int sa[TLEN];
__shared__ int sb[32][VECLEN*NVEC+1];
__shared__ int sw[32][VECLEN*NVEC+1];
__shared__ int sop[32];
__shared__ int sow[32];
register int aa[VECLEN+1];
register int bb[VECLEN];
register int ww[VECLEN];
int i, ioff, ioffmv, ip, tmp, tmp1, j, k, c, cmin, imin;
int zero = 0;
int sid = threadIdx.x + blockDim.x * threadIdx.y;
if (threadIdx.y + blockDim.y * blockIdx.x < n) {
// Load data into shared memory
for (i = 0; i < TLEN/1024; i++) {
sa[sid + i*1024] = a[sid + i*1024 + TLEN*blockIdx.x];
}
for (i = 0; i < VECLEN*NVEC/32; i++) {
sb[threadIdx.y][threadIdx.x + i*blockDim.x] = b[sid + i*1024 + VECLEN*NVEC*blockIdx.x];
sw[threadIdx.y][threadIdx.x + i*blockDim.x] = w[sid + i*1024 + VECLEN*NVEC*blockIdx.x];
}
__syncthreads();
ip = threadIdx.x / NVEC;
ioffmv = (threadIdx.x % NVEC) * VECLEN;
ioff = ioffmv + ip * (TLEN*NVEC/32);
cmin = 0x7fffffff;
imin = -1;
// Load data for this thread into registers
#pragma unroll
for (j = 0; j < VECLEN; j++) {
tmp = j + ioff;
if (tmp < TLEN) {
aa[j] = sa[tmp];
}
bb[j] = sb[threadIdx.y][j + ioffmv];
ww[j] = sw[threadIdx.y][j + ioffmv];
}
// Step through offsets in A string
for (j = 0; j < TLEN*NVEC/8; j++) {
tmp = VECLEN + ioff + j / 4;
if (tmp - ioffmv < TLEN - VECLEN * NVEC) {
// Refresh the lookahead word every 4 byte-steps (one full word).
if (j % 4 == 0) {
aa[VECLEN] = sa[tmp];
}
c = 0;
// Inner loop over the length of the vector in registers
#pragma unroll
for (k = 0; k < VECLEN; k++) {
hammingcell(aa[k], aa[k+1], bb[k], ww[k], c, tmp, zero);
}
rotate1(aa[VECLEN]);
// Need to sum over NVEC to get complete score for a string
#pragma unroll
for (k = 1; k < NVEC; k *= 2) {
tmp = __shfl_down(c, k);
c = c + tmp;
}
// Now compare with the accumulated min
if (c < cmin) {
cmin = c;
imin = 4 * ioff + j;
}
}
}
// Compute the min across groups of NVEC threads in this warp
for (k = NVEC; k < 32; k *= 2) {
tmp = __shfl_down(cmin, k);
tmp1 = __shfl_down(imin, k);
if (tmp < cmin) {
cmin = tmp;
imin = tmp1;
}
}
// Save to shared memory in prep for saving to main memory
if (threadIdx.x == 0) {
sop[threadIdx.y] = imin;
sow[threadIdx.y] = cmin;
}
__syncthreads();
// Save to main memory
if (threadIdx.y == 0) {
op[threadIdx.x + 32*blockIdx.x] = sop[threadIdx.x];
ow[threadIdx.x + 32*blockIdx.x] = sow[threadIdx.x];
}
}
}
// Debug helper: SIMD byte-wise comparison of two packed 32-bit words using
// the PTX vset4 video instruction (requires CUDA arch >= 300).
// For each byte-broadcast selector (.b0000 .. .b3333) of *a, writes one result
// word to d[0..3]; each result byte appears to be 1 where the broadcast byte
// of *a differs from the corresponding byte of *b — verify against the PTX
// ISA description of vset4 before relying on the exact lane semantics.
// d must point to at least 4 ints of device memory.
__global__ void __veccmp(int *a, int *b, int *d) {
  int xa = *a;
  int xb = *b;
  int xc = 0;   // additive carry-in operand of vset4, kept at zero
  int xd = 0;
  asm("vset4.s32.s32.ne" "%0, %1.b0000, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
  *d++ = xd;
  asm("vset4.s32.s32.ne" "%0, %1.b1111, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
  *d++ = xd;
  asm("vset4.s32.s32.ne" "%0, %1.b2222, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
  *d++ = xd;
  asm("vset4.s32.s32.ne" "%0, %1.b3333, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
  *d = xd;
}
#else
// Fallback for devices without vset4 (CUDA arch < 300): report the missing
// feature instead of computing anything.  d is left untouched.
__global__ void __veccmp(int *a, int *b, int *d) {
  printf("__veccmp() not defined for CUDA Arch < 300\n");
}
// Fallback for devices without warp shuffles (CUDA arch < 300): report the
// missing feature instead of computing anything.  Outputs are left untouched.
template<int VECLEN, int NVEC, int TLEN>
__global__ void __hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) {
  printf("__hammingdists() not defined for CUDA Arch < 300\n");
}
#endif
// Host wrapper for the __veccmp debug kernel: byte-compares the words at a
// and b and writes 4 result words to d (all device pointers).
// Previously this always returned 0 without checking the launch; now it
// synchronizes and returns the CUDA error code (0 on success), matching the
// error-reporting convention of hammingdists() below.
int veccmp(int *a, int *b, int *d) {
  __veccmp<<<1,1>>>(a, b, d);
  // Synchronize so both launch-time and execution-time errors are surfaced.
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}
// Launch the Hamming-distance search over n query strings.
// a/b/w/op/ow are device pointers; see __hammingdists for the data layout.
// Returns the CUDA error code observed after the kernel completes (0 = ok).
int hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) {
  dim3 threads(32, 32, 1);           // 32 queries per block, 32 threads per query row
  int nblocks = 1 + ((n - 1) / 32);  // one block per 32 queries (at least one)
  __hammingdists<16,2,1024><<<nblocks,threads>>>(a, b, w, op, ow, n);
  cudaDeviceSynchronize();
  cudaError_t status = cudaGetLastError();
  return status;
}
|
6c76f5a8721d5c5244116407738d0b3fb24fb102.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "error.h"
#include "kernel.ptx"
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdio.h>
// Benchmark loop (HIP port): repeatedly launches two PTX kernels
// ("get_clock", "get_time") and prints the running average of the value each
// writes back.  Runs forever; everything after the loop is unreachable.
int main()
{
    hipDevice_t device;
    hipCtx_t context;
    hipModule_t module;
    hipFunction_t get_clock;
    hipFunction_t get_time;
    cuErr(hipInit(0));
    cuErr(hipDeviceGet(&device, 0));
    cuErr(hipCtxCreate(&context, 0, device));
    cuErr(hipModuleLoadData(&module, kernel_ptx));
    cuErr(hipModuleGetFunction(&get_clock, module, "get_clock"));
    cuErr(hipModuleGetFunction(&get_time, module, "get_time"));
    unsigned long long h_clock[1];     // host-side landing buffer for one sample
    unsigned long long clock = 0;      // running sum of get_clock samples
    unsigned long long time = 0;       // running sum of get_time samples
    unsigned long long n = 0;          // number of samples taken
    hipDeviceptr_t d_clock;
    // NOTE(review): cuMemAlloc and cuMemcpyDtoH below were NOT converted by
    // hipify (expected hipMemAlloc / hipMemcpyDtoH) — confirm this compiles
    // in a HIP-only build.
    cuErr(cuMemAlloc(&d_clock, sizeof(h_clock[0])));
    while(true)
    {
        {
            void* args[] = { &d_clock };
            cuErr(hipModuleLaunchKernel(get_clock, 1, 1, 1, 1, 1, 1, 0, NULL, (void**)args, NULL));
            cuErr(cuMemcpyDtoH((void*)&h_clock, d_clock, sizeof(h_clock[0])));
            clock += h_clock[0];
        }
        {
            void* args[] = { &d_clock };
            cuErr(hipModuleLaunchKernel(get_time, 1, 1, 1, 1, 1, 1, 0, NULL, (void**)args, NULL));
            cuErr(cuMemcpyDtoH((void*)&h_clock, d_clock, sizeof(h_clock[0])));
            time += h_clock[0];
        }
        n += 1;
        printf("\rClock: %8.2f Time: %8.2f", (double)(clock) / n, (double)(time) / n);
    }
    // Unreachable: the loop above never exits (mixed cudaErr/hipDeviceReset
    // is a hipify artifact).
    cudaErr(hipDeviceReset());
    return 0;
} | 6c76f5a8721d5c5244116407738d0b3fb24fb102.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "error.h"
#include "kernel.ptx"
#include <cuda.h>
#include <time.h>
#include <stdio.h>
// Benchmark loop: repeatedly launches two PTX kernels ("get_clock",
// "get_time") via the CUDA driver API and prints the running average of the
// value each one writes back.  Runs until interrupted; everything after the
// loop is unreachable.  cuErr/cudaErr are error-check macros from error.h.
int main()
{
    CUdevice device;
    CUcontext context;
    CUmodule module;
    CUfunction get_clock;
    CUfunction get_time;
    cuErr(cuInit(0));
    cuErr(cuDeviceGet(&device, 0));
    cuErr(cuCtxCreate(&context, 0, device));
    cuErr(cuModuleLoadData(&module, kernel_ptx));
    cuErr(cuModuleGetFunction(&get_clock, module, "get_clock"));
    cuErr(cuModuleGetFunction(&get_time, module, "get_time"));
    unsigned long long h_clock[1];     // host-side landing buffer for one sample
    unsigned long long clock = 0;      // running sum of get_clock samples
    unsigned long long time = 0;       // running sum of get_time samples
    unsigned long long n = 0;          // number of samples taken
    CUdeviceptr d_clock;
    cuErr(cuMemAlloc(&d_clock, sizeof(h_clock[0])));
    while(true)
    {
        {
            void* args[] = { &d_clock };
            cuErr(cuLaunchKernel(get_clock, 1, 1, 1, 1, 1, 1, 0, NULL, (void**)args, NULL));
            cuErr(cuMemcpyDtoH((void*)&h_clock, d_clock, sizeof(h_clock[0])));
            clock += h_clock[0];
        }
        {
            void* args[] = { &d_clock };
            cuErr(cuLaunchKernel(get_time, 1, 1, 1, 1, 1, 1, 0, NULL, (void**)args, NULL));
            cuErr(cuMemcpyDtoH((void*)&h_clock, d_clock, sizeof(h_clock[0])));
            time += h_clock[0];
        }
        n += 1;
        printf("\rClock: %8.2f Time: %8.2f", (double)(clock) / n, (double)(time) / n);
        // '\r' keeps the output on a single line, but with no '\n' the
        // buffered stdout may never reach the terminal — flush explicitly.
        fflush(stdout);
    }
    // Unreachable: the loop above never exits.
    cudaErr(cudaDeviceReset());
    return 0;
}
e4d6f60fd340a609428918259e6c3fa74998fa5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <sstream>
#include "../cuGameEngine/gdiPlusInit.cuh"
#include "../cuGameEngine/renderWindow.cuh"
#include "../cuGameEngine/cuSurface.cuh"
#include "../cuGameEngine/sdfTextRenderer.cuh"
// Fills `buffer` (width x height, row-major cuPixel) with an animated
// three-axis colour gradient blended between c1 and c2; xOffset/yOffset
// slide the gradient, maxXOffset/maxYOffset bound its travel.
// `map` is a cuGameEngine helper — presumably a linear remap of a value from
// one range onto another (TODO confirm against its definition).
__global__ void render(cuPixel* buffer, int width, int height, float xOffset, float yOffset, float maxXOffset, float maxYOffset, cuPixel c1, cuPixel c2)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard: the launch grid may overhang the surface edges.
    if (x >= 0 && x < width && y >= 0 && y < height)
    {
        buffer[y * width + x].r = map(x + xOffset, -maxXOffset, width + maxXOffset, c1.r, c2.r);
        buffer[y * width + x].g = map(x + y + xOffset + yOffset, -xOffset - yOffset, width + height + maxXOffset + maxYOffset, c1.g, c2.g);
        buffer[y * width + x].b = map(y + yOffset, -maxYOffset, height + maxYOffset, c1.b, c2.b);
    }
}
// Full-screen gradient screensaver (HIP port).  Registers itself as a render
// pipeline effect on a 1024x768 window; every frame, apply() launches the
// `render` kernel with time-animated offsets and colours that cross-fade to a
// new random pair every 150 frames, then overlays FPS/frametime text.
// Escape exits the process.
class screenSaver : public cuEffect
{
private:
    renderWindow wnd;
    sdfTextRenderer renderer{ L"lucidaconsole.fnt", L"lucidaconsole.png" };
    bool colorSwitched = false;     // NOTE(review): never read or written after init
    cuPixel c1 = randColor();       // current gradient endpoint colours
    cuPixel c2 = randColor();
    cuPixel lastC1 = randColor();   // previous colours, blended out over 150 frames
    cuPixel lastC2 = randColor();
    int frameCounter = 0;           // frames since the last colour switch
public:
    screenSaver() : wnd(1024, 768, true, L"Render Test")
    {
        wnd.pipeLine->addEffect(this);
        wnd.inputMgr->key += createBoundHandler(&screenSaver::onKey, this);
    }
    // Enter the window's render loop (blocks until the process exits).
    void run()
    {
        bool isRunning = true;
        wnd.runLoop(true, false, isRunning);
    }
    // Keyboard handler: Escape terminates the whole process.
    void onKey(keyboardEventArgs* e)
    {
        if (e->key == VK_ESCAPE)
        {
            ExitProcess(0);
        }
    }
    // Per-frame effect: render the animated gradient into `out` and draw the
    // FPS overlay.  `in` is only used by calcGrid to size the launch.
    void apply(cuSurface* in, cuSurface* out)
    {
        int64_t width, height;
        dim3 blocks, threads;
        calcGrid(in, out, width, height, blocks, threads);
        clock_t t = clock();
        float maxXOffset = (width / 5.0f);
        float maxYOffset = (height / 5.0f);
        // Slow sinusoidal drift of the gradient centre.
        float xOffset = sin(t / 2500.0f) * maxXOffset;
        float yOffset = sin(t / 2500.0f + 1.0f) * maxYOffset;
        // Cross-fade from the previous colour pair to the current one.
        cuPixel renderC1 = blendColor(c1, lastC1, frameCounter / 150.0f);
        cuPixel renderC2 = blendColor(c2, lastC2, frameCounter / 150.0f);
        if (frameCounter++ == 150)
        {
            lastC1 = c1;
            lastC2 = c2;
            c1 = randColor();
            c2 = randColor();
            frameCounter = 0;
        }
        hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, out->buffer, width, height, xOffset, yOffset, maxXOffset, maxYOffset, renderC1, renderC2);
        std::wstringstream str;
        str << "FPS:\t\t" << wnd.lastFps << "\nFrametime:\t" << wnd.lastTotalTime << "us";
        renderer.renderString(out, str.str(), 4, 4, out->width, 3, cuPixel(255, 255, 255, 255), true);
    }
};
// Entry point: construct the screensaver and enter its render loop.
// run() is not expected to return (the process exits via ExitProcess in
// onKey — TODO confirm runLoop has no other exit path), so the heap
// allocation is never freed.
int main()
{
    //ShowCursor(false);
    auto render = new screenSaver();
    render->run();
} | e4d6f60fd340a609428918259e6c3fa74998fa5b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <sstream>
#include "../cuGameEngine/gdiPlusInit.cuh"
#include "../cuGameEngine/renderWindow.cuh"
#include "../cuGameEngine/cuSurface.cuh"
#include "../cuGameEngine/sdfTextRenderer.cuh"
// Fills `buffer` (width x height, row-major cuPixel) with an animated
// three-axis colour gradient blended between c1 and c2; xOffset/yOffset
// slide the gradient and maxXOffset/maxYOffset bound its travel.
__global__ void render(cuPixel* buffer, int width, int height, float xOffset, float yOffset, float maxXOffset, float maxYOffset, cuPixel c1, cuPixel c2)
{
    // Global pixel coordinates for this thread.
    int px = blockDim.x * blockIdx.x + threadIdx.x;
    int py = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard clause: the launch grid may overhang the surface edges.
    if (px < 0 || px >= width || py < 0 || py >= height) { return; }
    cuPixel& pixel = buffer[py * width + px];
    // Each channel interpolates along a different axis of the surface.
    pixel.r = map(px + xOffset, -maxXOffset, width + maxXOffset, c1.r, c2.r);
    pixel.g = map(px + py + xOffset + yOffset, -xOffset - yOffset, width + height + maxXOffset + maxYOffset, c1.g, c2.g);
    pixel.b = map(py + yOffset, -maxYOffset, height + maxYOffset, c1.b, c2.b);
}
// Full-screen gradient screensaver.  Registers itself as a render pipeline
// effect on a 1024x768 window; every frame, apply() launches the `render`
// kernel with time-animated offsets and colours that cross-fade to a new
// random pair every 150 frames, then overlays FPS/frametime text.
// Escape exits the process.
class screenSaver : public cuEffect
{
private:
    renderWindow wnd;
    sdfTextRenderer renderer{ L"lucidaconsole.fnt", L"lucidaconsole.png" };
    bool colorSwitched = false;     // NOTE(review): never read or written after init
    cuPixel c1 = randColor();       // current gradient endpoint colours
    cuPixel c2 = randColor();
    cuPixel lastC1 = randColor();   // previous colours, blended out over 150 frames
    cuPixel lastC2 = randColor();
    int frameCounter = 0;           // frames since the last colour switch
public:
    screenSaver() : wnd(1024, 768, true, L"Render Test")
    {
        wnd.pipeLine->addEffect(this);
        wnd.inputMgr->key += createBoundHandler(&screenSaver::onKey, this);
    }
    // Enter the window's render loop (blocks until the process exits).
    void run()
    {
        bool isRunning = true;
        wnd.runLoop(true, false, isRunning);
    }
    // Keyboard handler: Escape terminates the whole process.
    void onKey(keyboardEventArgs* e)
    {
        if (e->key == VK_ESCAPE)
        {
            ExitProcess(0);
        }
    }
    // Per-frame effect: render the animated gradient into `out` and draw the
    // FPS overlay.  `in` is only used by calcGrid to size the launch.
    void apply(cuSurface* in, cuSurface* out)
    {
        int64_t width, height;
        dim3 blocks, threads;
        calcGrid(in, out, width, height, blocks, threads);
        clock_t t = clock();
        float maxXOffset = (width / 5.0f);
        float maxYOffset = (height / 5.0f);
        // Slow sinusoidal drift of the gradient centre.
        float xOffset = sin(t / 2500.0f) * maxXOffset;
        float yOffset = sin(t / 2500.0f + 1.0f) * maxYOffset;
        // Cross-fade from the previous colour pair to the current one.
        cuPixel renderC1 = blendColor(c1, lastC1, frameCounter / 150.0f);
        cuPixel renderC2 = blendColor(c2, lastC2, frameCounter / 150.0f);
        if (frameCounter++ == 150)
        {
            lastC1 = c1;
            lastC2 = c2;
            c1 = randColor();
            c2 = randColor();
            frameCounter = 0;
        }
        render<<<blocks, threads>>>(out->buffer, width, height, xOffset, yOffset, maxXOffset, maxYOffset, renderC1, renderC2);
        std::wstringstream str;
        str << "FPS:\t\t" << wnd.lastFps << "\nFrametime:\t" << wnd.lastTotalTime << "us";
        renderer.renderString(out, str.str(), 4, 4, out->width, 3, cuPixel(255, 255, 255, 255), true);
    }
};
// Entry point: construct the screensaver and enter its render loop.
// run() is not expected to return (the process exits via ExitProcess in
// onKey — TODO confirm runLoop has no other exit path), so the heap
// allocation is never freed.
int main()
{
    //ShowCursor(false);
    auto render = new screenSaver();
    render->run();
} |
754ec1698cad9f53d173ac9180e0d59d2cdb3f14.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_utilities.hpp"
#include "cudf/utilities/type_dispatcher.hpp"
#include "detail/column_utilities.hpp"
#include "thrust/iterator/counting_iterator.h"
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <sstream>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <jit/type.h>
#include <thrust/equal.h>
#include <thrust/logical.h>
#include <numeric>
namespace cudf {
namespace test {
namespace {
// Compares column metadata (type, size, nullability, child count) via gtest
// EXPECT macros.  With check_exact_equality=false the checks are relaxed for
// empty columns, whose child structure may legitimately differ.
template <bool check_exact_equality>
struct column_property_comparator {
  void compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs)
  {
    EXPECT_EQ(lhs.type(), rhs.type());
    EXPECT_EQ(lhs.size(), rhs.size());
    // Nullability is only required to match for exact equality on non-empty columns.
    if (lhs.size() > 0 && check_exact_equality) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); }
    // equivalent, but not exactly equal columns can have a different number of children if their
    // sizes are both 0. Specifically, empty string columns may or may not have children.
    if (check_exact_equality || lhs.size() > 0) {
      EXPECT_EQ(lhs.num_children(), rhs.num_children());
    }
  }
  // Non-list element types: only the common property checks apply.
  template <typename T, std::enable_if_t<!std::is_same<T, cudf::list_view>::value>* = nullptr>
  void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
  {
    compare_common(lhs, rhs);
  }
  // List columns: also compare the (sliced) child columns, recursively.
  template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr>
  void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
  {
    compare_common(lhs, rhs);
    cudf::lists_column_view lhs_l(lhs);
    cudf::lists_column_view rhs_l(rhs);
    // recurse
    cudf::type_dispatcher(lhs_l.child().type(),
                          column_property_comparator<check_exact_equality>{},
                          lhs_l.get_sliced_child(0),
                          rhs_l.get_sliced_child(0));
  }
};
// Device predicate: true when row `index` of the two single-column tables
// differs under cudf row equality (row_equality_comparator<true>).
// Used by copy_if to collect the indices of unequal rows.
class corresponding_rows_unequal {
 public:
  corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs) : comp(d_lhs, d_rhs)
  {
  }
  cudf::row_equality_comparator<true> comp;
  __device__ bool operator()(size_type index) { return !comp(index, index); }
};
// Device predicate: true when row `index` is "not equivalent" — exact row
// inequality, except that floating-point values within a 4-ULP relative
// tolerance (mirroring googletest) are still treated as equivalent.
// Requires both tables to contain exactly one column.
class corresponding_rows_not_equivalent {
  table_device_view d_lhs;
  table_device_view d_rhs;

 public:
  corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs)
    : d_lhs(d_lhs), d_rhs(d_rhs), comp(d_lhs, d_rhs)
  {
    CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1,
                 "Unsupported number of columns");
  }
  // Secondary check applied only when exact comparison said "unequal":
  // for floating point, decide whether the difference exceeds the tolerance.
  struct typed_element_not_equivalent {
    template <typename T>
    __device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()(
      column_device_view const& lhs, column_device_view const& rhs, size_type index)
    {
      if (lhs.is_valid(index) and rhs.is_valid(index)) {
        int ulp = 4;  // value taken from google test
        T x = lhs.element<T>(index);
        T y = rhs.element<T>(index);
        // Relative epsilon test, plus an absolute floor at the smallest normal.
        return std::abs(x - y) > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp &&
               std::abs(x - y) >= std::numeric_limits<T>::min();
      } else {
        // if either is null, then the inequality was checked already
        return true;
      }
    }
    template <typename T, typename... Args>
    __device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args... args)
    {
      // Non-floating point inequality is checked already
      return true;
    }
  };
  cudf::row_equality_comparator<true> comp;
  __device__ bool operator()(size_type index)
  {
    if (not comp(index, index)) {
      auto lhs_col = this->d_lhs.column(0);
      auto rhs_col = this->d_rhs.column(0);
      return type_dispatcher(
        lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, index);
    }
    return false;
  }
};
// Reports differing row indices via a failing gtest expectation.
// `differences` holds the indices (device memory) where lhs and rhs diverged;
// when non-empty the EXPECT_EQ against 0 fails and the message lists either
// every differing row (print_all_differences) or just the first one.
// `depth` annotates the message when recursing into nested (list) columns.
void print_differences(thrust::device_vector<int> const& differences,
                       column_view const& lhs,
                       column_view const& rhs,
                       bool print_all_differences,
                       int depth)
{
  if (differences.size() <= 0) { return; }
  std::string depth_str = depth > 0 ? "depth " + std::to_string(depth) + std::string("\n") : "";
  if (print_all_differences) {
    // If there are differences, display them all
    std::ostringstream buffer;
    buffer << depth_str << "differences:" << std::endl;
    // Gather only the differing rows from both columns in one pass.
    cudf::table_view source_table({lhs, rhs});
    fixed_width_column_wrapper<int32_t> diff_column(differences.begin(), differences.end());
    std::unique_ptr<cudf::table> diff_table = cudf::gather(source_table, diff_column);
    // Need to pull back the differences
    std::vector<std::string> h_left_strings  = to_strings(diff_table->get_column(0));
    std::vector<std::string> h_right_strings = to_strings(diff_table->get_column(1));
    for (size_t i = 0; i < differences.size(); ++i) {
      buffer << depth_str << "lhs[" << differences[i] << "] = " << h_left_strings[i] << ", rhs["
             << differences[i] << "] = " << h_right_strings[i] << std::endl;
    }
    EXPECT_EQ(differences.size(), size_t{0}) << buffer.str();
  } else {
    // If there are differences, just display the first one
    int index = differences[0];  // implicit device->host copy of one element
    auto diff_lhs = cudf::detail::slice(lhs, index, index + 1);
    auto diff_rhs = cudf::detail::slice(rhs, index, index + 1);
    std::vector<std::string> h_left_strings  = to_strings(diff_lhs);
    std::vector<std::string> h_right_strings = to_strings(diff_rhs);
    EXPECT_EQ(differences.size(), size_t{0})
      << depth_str << "first difference: "
      << "lhs[" << index << "] = " << to_string(diff_lhs, "") << ", rhs[" << index
      << "] = " << to_string(diff_rhs, "");
  }
}
// non-nested column types
// Value comparison for non-nested column types: collects the indices of rows
// that are unequal (or not-equivalent, for relaxed float comparison) on the
// device, then reports them through print_differences.
template <typename T, bool check_exact_equality>
struct column_comparator_impl {
  void operator()(column_view const& lhs,
                  column_view const& rhs,
                  bool print_all_differences,
                  int depth)
  {
    auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
    auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
    // Exact equality uses plain row comparison; equivalence adds ULP slack
    // for floating-point elements.
    using ComparatorType = std::conditional_t<check_exact_equality,
                                              corresponding_rows_unequal,
                                              corresponding_rows_not_equivalent>;
    // worst case - everything is different
    thrust::device_vector<int> differences(lhs.size());
    auto diff_iter = thrust::copy_if(thrust::device,
                                     thrust::make_counting_iterator(0),
                                     thrust::make_counting_iterator(lhs.size()),
                                     differences.begin(),
                                     ComparatorType(*d_lhs, *d_rhs));
    // shrink back down
    differences.resize(thrust::distance(differences.begin(), diff_iter));
    print_differences(differences, lhs, rhs, print_all_differences, depth);
  }
};
// forward declaration for nested-type recursion.
template <bool check_exact_equality>
struct column_comparator;
// specialization for list columns
// Value comparison for list columns: compares the (slice-normalized) offsets
// and row validity of the two list columns, reports any differences, then
// recurses into the child columns.
template <bool check_exact_equality>
struct column_comparator_impl<list_view, check_exact_equality> {
  void operator()(column_view const& lhs,
                  column_view const& rhs,
                  bool print_all_differences,
                  int depth)
  {
    lists_column_view lhs_l(lhs);
    lists_column_view rhs_l(rhs);
    CUDF_EXPECTS(lhs_l.size() == rhs_l.size(), "List column size mismatch");
    if (lhs_l.size() == 0) { return; }
    // worst case - everything is different
    thrust::device_vector<int> differences(lhs.size());
    // TODO : determine how equals/equivalency should work for columns with divergent underlying
    // data, but equivalent null masks. Example:
    //
    // List<int32_t>:
    // Length : 3
    // Offsets : 0, 3, 5, 5
    // Nulls: 011
    // Children :
    // 1, 2, 3, 4, 5
    //
    // List<int32_t>:
    // Length : 3
    // Offsets : 0, 3, 5, 7
    // Nulls: 011
    // Children :
    // 1, 2, 3, 4, 5, 7, 8
    //
    // These two columns are seemingly equivalent, since their top level rows are the same, with
    // just the last element being null. However, pyArrow will say these are -not- equal and
    // does not appear to have an equivalent() check. So the question is : should we be handling
    // this case when someone calls expect_columns_equivalent()?
    // compare offsets, taking slicing into account

    // left side: rebase offsets so a sliced column starts at 0
    size_type lhs_shift = cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), 0);
    auto lhs_offsets = thrust::make_transform_iterator(
      lhs_l.offsets().begin<size_type>() + lhs_l.offset(),
      [lhs_shift] __device__(size_type offset) { return offset - lhs_shift; });
    auto lhs_valids = thrust::make_transform_iterator(
      thrust::make_counting_iterator(0),
      [mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) {
        return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
      });
    // right side: same normalization
    size_type rhs_shift = cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), 0);
    auto rhs_offsets = thrust::make_transform_iterator(
      rhs_l.offsets().begin<size_type>() + rhs_l.offset(),
      [rhs_shift] __device__(size_type offset) { return offset - rhs_shift; });
    auto rhs_valids = thrust::make_transform_iterator(
      thrust::make_counting_iterator(0),
      [mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) {
        return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
      });
    // Collect indices (over size()+1 offsets) where validity or offsets differ.
    auto diff_iter = thrust::copy_if(
      thrust::device,
      thrust::make_counting_iterator(0),
      thrust::make_counting_iterator(lhs_l.size() + 1),
      differences.begin(),
      [lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, num_rows = lhs_l.size()] __device__(
        size_type index) {
        // last offset has no validity associated with it
        if (index < num_rows - 1) {
          if (lhs_valids[index] != rhs_valids[index]) { return true; }
          // if validity matches -and- is false, we can ignore the actual values. this
          // is technically not checking "equal()", but it's how the non-list code path handles it
          if (!lhs_valids[index]) { return false; }
        }
        return lhs_offsets[index] == rhs_offsets[index] ? false : true;
      });
    // shrink back down
    differences.resize(thrust::distance(differences.begin(), diff_iter));
    print_differences(differences, lhs, rhs, print_all_differences, depth);
    // recurse into the child values with an incremented depth annotation
    auto lhs_child = lhs_l.get_sliced_child(0);
    auto rhs_child = rhs_l.get_sliced_child(0);
    cudf::type_dispatcher(lhs_child.type(),
                          column_comparator<check_exact_equality>{},
                          lhs_child,
                          rhs_child,
                          print_all_differences,
                          depth + 1);
  }
};
// Value comparison for struct columns: compare each child column in turn,
// recursing through the full comparator (properties + values).
template <bool check_exact_equality>
struct column_comparator_impl<struct_view, check_exact_equality> {
  void operator()(column_view const& lhs,
                  column_view const& rhs,
                  bool print_all_differences,
                  int depth)
  {
    std::for_each(thrust::make_counting_iterator(0),
                  thrust::make_counting_iterator(0) + lhs.num_children(),
                  [&](auto i) {
                    cudf::type_dispatcher(lhs.child(i).type(),
                                          column_comparator<check_exact_equality>{},
                                          lhs.child(i),
                                          rhs.child(i),
                                          print_all_differences,
                                          depth + 1);
                  });
  }
};
// Top-level type-dispatched comparator: checks column properties first, then
// delegates value comparison to the type-appropriate column_comparator_impl.
template <bool check_exact_equality>
struct column_comparator {
  template <typename T>
  void operator()(column_view const& lhs,
                  column_view const& rhs,
                  bool print_all_differences,
                  int depth = 0)
  {
    // compare properties
    cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs);
    // compare values
    column_comparator_impl<T, check_exact_equality> comparator{};
    comparator(lhs, rhs, print_all_differences, depth);
  }
};
} // namespace
/**
* @copydoc cudf::test::expect_column_properties_equal
*
*/
void expect_column_properties_equal(column_view const& lhs, column_view const& rhs)
{
  // <true>: require exact property equality (including nullability/children).
  cudf::type_dispatcher(lhs.type(), column_property_comparator<true>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_column_properties_equivalent
*
*/
void expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs)
{
  // <false>: relaxed checks (e.g. empty columns may differ in child structure).
  cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_columns_equal
*
*/
void expect_columns_equal(cudf::column_view const& lhs,
                          cudf::column_view const& rhs,
                          bool print_all_differences)
{
  // <true>: exact element equality (no floating-point tolerance).
  cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_columns_equivalent
*
*/
void expect_columns_equivalent(cudf::column_view const& lhs,
                               cudf::column_view const& rhs,
                               bool print_all_differences)
{
  // <false>: equivalence — floating-point values within 4 ULPs still match.
  cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_equal_buffers
*
*/
void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes)
{
  if (size_bytes > 0) {
    EXPECT_NE(nullptr, lhs);
    EXPECT_NE(nullptr, rhs);
  }
  // Byte-wise comparison executed on the device (both pointers must be
  // device-accessible; thrust::equal of an empty range is trivially true).
  auto typed_lhs = static_cast<char const*>(lhs);
  auto typed_rhs = static_cast<char const*>(rhs);
  EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs));
}
/**
* @copydoc cudf::test::bitmask_to_host
*
*/
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c)
{
  if (c.nullable()) {
    auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
    std::vector<bitmask_type> host_bitmask(num_bitmasks);
    if (c.offset() == 0) {
      // Unsliced column: copy the mask words straight back.
      CUDA_TRY(hipMemcpy(host_bitmask.data(),
                         c.null_mask(),
                         num_bitmasks * sizeof(bitmask_type),
                         hipMemcpyDeviceToHost));
    } else {
      // Sliced column: rebase the mask to bit 0 before copying back.
      auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size());
      CUDA_TRY(hipMemcpy(host_bitmask.data(),
                         mask.data(),
                         num_bitmasks * sizeof(bitmask_type),
                         hipMemcpyDeviceToHost));
    }
    return host_bitmask;
  } else {
    // Non-nullable column: no mask to copy.
    return std::vector<bitmask_type>{};
  }
}
namespace {
// Integral overload: std::to_string is already exact for integer values.
template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
  return std::to_string(value);
}
// Floating-point overload: render with max_digits10 significant digits — the
// minimum precision that guarantees the printed text round-trips back to the
// exact same value.
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
  constexpr auto roundtrip_digits = std::numeric_limits<T>::max_digits10;
  std::ostringstream stream;
  stream << std::setprecision(roundtrip_digits) << value;
  return stream.str();
}
// Unit suffixes appended to duration tick counts by column_view_printer,
// one overload per cudf duration resolution.
static auto duration_suffix(cudf::duration_D) { return " days"; }
static auto duration_suffix(cudf::duration_s) { return " seconds"; }
static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; }
static auto duration_suffix(cudf::duration_us) { return " microseconds"; }
static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; }
// Builds a human-readable type name for a column, expanding nested types
// recursively, e.g. "LIST<INT32>" or "STRUCT<INT32,STRING,>".
// NOTE(review): the struct branch emits a trailing "," before ">" because
// ostream_iterator appends the delimiter after every element.
std::string get_nested_type_str(cudf::column_view const& view)
{
  if (view.type().id() == cudf::type_id::LIST) {
    lists_column_view lcv(view);
    return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">";
  }
  if (view.type().id() == cudf::type_id::STRUCT) {
    std::ostringstream out;
    out << cudf::jit::get_type_name(view.type()) + "<";
    std::transform(view.child_begin(),
                   view.child_end(),
                   std::ostream_iterator<std::string>(out, ","),
                   [&out](auto const col) { return get_nested_type_str(col); });
    out << ">";
    return out.str();
  }
  return cudf::jit::get_type_name(view.type());
}
// Renders the offsets child of a nested (list) column as a delimited string,
// rebasing the values so a sliced column's first offset prints as 0.
template <typename NestedColumnView>
std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ")
{
  column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index);
  CUDF_EXPECTS(offsets.type().id() == type_id::INT32,
               "Column does not appear to be an offsets column");
  CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!");
  size_type output_size = c.size() + 1;  // lists have size()+1 offsets
  // the first offset value to normalize everything against
  size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), 0);
  rmm::device_vector<size_type> shifted_offsets(output_size);
  // normalize the offset values for the column offset
  size_type const* d_offsets = offsets.head<size_type>() + c.offset();
  thrust::transform(
    rmm::exec_policy(0)->on(0),
    d_offsets,
    d_offsets + output_size,
    shifted_offsets.begin(),
    [first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); });
  // Pull back to host and join with the delimiter.
  thrust::host_vector<size_type> h_shifted_offsets(shifted_offsets);
  std::ostringstream buffer;
  for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) {
    buffer << h_shifted_offsets[idx];
    if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; }
  }
  return buffer.str();
}
// Type-dispatched functor that renders a column's rows into `out`, one string
// per row (nested types emit a single multi-line block instead).  `indent`
// is used by the nested-type overloads for pretty-printing.
struct column_view_printer {
  // Numeric columns: one string per row, "NULL" for null entries.
  template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    auto h_data = cudf::test::to_host<Element>(col);
    out.resize(col.size());
    if (col.nullable()) {
      std::transform(thrust::make_counting_iterator(size_type{0}),
                     thrust::make_counting_iterator(col.size()),
                     out.begin(),
                     [&h_data](auto idx) {
                       return bit_is_set(h_data.second.data(), idx)
                                ? numeric_to_string_precise(h_data.first[idx])
                                : std::string("NULL");
                     });
    } else {
      std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
        return numeric_to_string_precise(el);
      });
    }
  }
  template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    //
    // For timestamps, convert timestamp column to column of strings, then
    // call string version
    //
    auto col_as_strings = cudf::strings::from_timestamps(col);
    if (col_as_strings->size() == 0) { return; }
    this->template operator()<cudf::string_view>(*col_as_strings, out, indent);
  }
  // Fixed-point columns: printed via a double conversion.
  // NOTE(review): static_cast<double> + std::to_string (6 fractional digits)
  // may lose precision for large scales — confirm acceptable for test output.
  template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    auto const h_data = cudf::test::to_host<Element>(col);
    out.resize(col.size());
    std::transform(thrust::make_counting_iterator(size_type{0}),
                   thrust::make_counting_iterator(col.size()),
                   out.begin(),
                   [&](auto idx) {
                     auto const d = static_cast<double>(h_data.first[idx]);
                     return std::to_string(d);
                   });
  }
  template <typename Element,
            typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    //
    // Implementation for strings, call special to_host variant
    //
    auto h_data = cudf::test::to_host<std::string>(col);
    out.resize(col.size());
    // An empty mask vector means the column has no nulls: every row is valid.
    std::transform(thrust::make_counting_iterator(size_type{0}),
                   thrust::make_counting_iterator(col.size()),
                   out.begin(),
                   [&h_data](auto idx) {
                     return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
                              ? h_data.first[idx]
                              : std::string("NULL");
                   });
  }
  // Dictionary columns: print the keys, then a marker row, then the indices.
  template <typename Element,
            typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    cudf::dictionary_column_view dictionary(col);
    if (col.size() == 0) return;
    std::vector<std::string> keys    = to_strings(dictionary.keys());
    std::vector<std::string> indices = to_strings({cudf::data_type{cudf::type_id::INT32},
                                                   dictionary.size(),
                                                   dictionary.indices().head<int32_t>(),
                                                   dictionary.null_mask(),
                                                   dictionary.null_count(),
                                                   dictionary.offset()});
    out.insert(out.end(), keys.begin(), keys.end());
    if (!indices.empty()) {
      std::string first = "\x08 : " + indices.front();  // use : as delimiter
      out.push_back(first);                             // between keys and indices
      out.insert(out.end(), indices.begin() + 1, indices.end());
    }
  }
  // Print the tick counts with the units
  template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    auto h_data = cudf::test::to_host<Element>(col);
    out.resize(col.size());
    if (col.nullable()) {
      std::transform(thrust::make_counting_iterator(size_type{0}),
                     thrust::make_counting_iterator(col.size()),
                     out.begin(),
                     [&h_data](auto idx) {
                       return bit_is_set(h_data.second.data(), idx)
                                ? numeric_to_string_precise(h_data.first[idx].count()) +
                                    duration_suffix(h_data.first[idx])
                                : std::string("NULL");
                     });
    } else {
      std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
        return numeric_to_string_precise(el.count()) + duration_suffix(el);
      });
    }
  }
  // List columns: emit a single multi-line description (type, length,
  // offsets, null mask, then the child rendered recursively).
  template <typename Element,
            typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    lists_column_view lcv(col);
    // propage slicing to the child if necessary
    column_view child    = lcv.get_sliced_child(0);
    bool const is_sliced = lcv.offset() > 0 || child.offset() > 0;
    std::string tmp =
      get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent +
      "Length : " + std::to_string(lcv.size()) + "\n" + indent +
      "Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" +
      (lcv.has_nulls() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" +
                           detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n"
                       : "") +
      indent + "Children :\n" +
      (child.type().id() != type_id::LIST && child.has_nulls()
         ? indent + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
         : "") +
      (detail::to_string(child, ", ", indent + " ")) + "\n";
    out.push_back(tmp);
  }
  // Struct columns: emit a single multi-line description with each child
  // rendered on its own line.
  template <typename Element,
            typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr>
  void operator()(cudf::column_view const& col,
                  std::vector<std::string>& out,
                  std::string const& indent)
  {
    structs_column_view view{col};
    std::ostringstream out_stream;
    out_stream << get_nested_type_str(col) << ":\n"
               << indent << "Length : " << view.size() << ":\n";
    if (view.has_nulls()) {
      out_stream << indent << "Null count: " << view.null_count() << "\n"
                 << detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n";
    }
    std::transform(
      view.child_begin(),
      view.child_end(),
      std::ostream_iterator<std::string>(out_stream, "\n"),
      [&](auto child_column) { return detail::to_string(child_column, ", ", indent + " "); });
    out.push_back(out_stream.str());
  }
};
} // namespace
namespace detail {
/**
* @copydoc cudf::test::detail::to_strings
*
*/
std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent)
{
// Dispatch on the column's type; the printer fills one string per element
// (or a single multi-line string for nested types).
std::vector<std::string> result;
cudf::type_dispatcher(col.type(), column_view_printer{}, col, result, indent);
return result;
}
/**
* @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string)
*
* @param indent Indentation for all output
*/
std::string to_string(cudf::column_view const& col,
std::string const& delimiter,
std::string const& indent)
{
// Join the per-element strings with `delimiter`, prefixed by `indent`.
auto const h_data = to_strings(col, indent);
std::ostringstream buffer;
buffer << indent;
for (std::size_t i = 0; i < h_data.size(); ++i) {
if (i != 0) { buffer << delimiter; }
buffer << h_data[i];
}
return buffer.str();
}
/**
* @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string)
*
* @param indent Indentation for all output. See comment in `to_strings` for
* a detailed description.
*/
std::string to_string(std::vector<bitmask_type> const& null_mask,
size_type null_mask_size,
std::string const& indent)
{
// Emit one character per bit, highest row index first, prefixed by `indent`.
std::ostringstream buffer;
buffer << indent;
for (size_type idx = null_mask_size; idx-- > 0;) {
buffer << (cudf::bit_is_set(null_mask.data(), idx) ? '1' : '0');
}
return buffer.str();
}
} // namespace detail
/**
* @copydoc cudf::test::to_strings
*
*/
// Public entry point: convert each element of `col` to a string with no indentation.
std::vector<std::string> to_strings(cudf::column_view const& col)
{
return detail::to_strings(col);
}
/**
* @copydoc cudf::test::to_string(cudf::column_view, std::string)
*
*/
// Public entry point: render the whole column as a single delimited string.
std::string to_string(cudf::column_view const& col, std::string const& delimiter)
{
return detail::to_string(col, delimiter);
}
/**
* @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type)
*
*/
// Public entry point: render a host-side null mask as a 0/1 bit string.
std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size)
{
return detail::to_string(null_mask, null_mask_size);
}
/**
* @copydoc cudf::test::print
*
*/
// Print the column to `os` followed by a newline (and stream flush via endl).
void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter)
{
os << to_string(col, delimiter) << std::endl;
}
/**
* @copydoc cudf::test::validate_host_masks
*
*/
// Compare two host-side bitmasks bit-by-bit over the first
// `number_of_elements` rows; true when every bit matches.
bool validate_host_masks(std::vector<bitmask_type> const& expected_mask,
std::vector<bitmask_type> const& got_mask,
size_type number_of_elements)
{
for (size_type idx = 0; idx < number_of_elements; ++idx) {
if (cudf::bit_is_set(expected_mask.data(), idx) !=
cudf::bit_is_set(got_mask.data(), idx)) {
return false;
}
}
return true;
}
} // namespace test
} // namespace cudf
| 754ec1698cad9f53d173ac9180e0d59d2cdb3f14.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_utilities.hpp"
#include "cudf/utilities/type_dispatcher.hpp"
#include "detail/column_utilities.hpp"
#include "thrust/iterator/counting_iterator.h"
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <sstream>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <jit/type.h>
#include <thrust/equal.h>
#include <thrust/logical.h>
#include <numeric>
namespace cudf {
namespace test {
namespace {
// Checks column metadata (type, size, nullability, child count) with gtest
// EXPECTs. When check_exact_equality is false, only "equivalence" is
// required, which relaxes nullability/child-count checks for empty columns.
template <bool check_exact_equality>
struct column_property_comparator {
void compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
EXPECT_EQ(lhs.type(), rhs.type());
EXPECT_EQ(lhs.size(), rhs.size());
if (lhs.size() > 0 && check_exact_equality) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); }
// equivalent, but not exactly equal columns can have a different number of children if their
// sizes are both 0. Specifically, empty string columns may or may not have children.
if (check_exact_equality || lhs.size() > 0) {
EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
}
// Non-list types: only the common metadata checks apply.
template <typename T, std::enable_if_t<!std::is_same<T, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
compare_common(lhs, rhs);
}
// Lists: also recurse into the (sliced) child columns.
template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
compare_common(lhs, rhs);
cudf::lists_column_view lhs_l(lhs);
cudf::lists_column_view rhs_l(rhs);
// recurse
cudf::type_dispatcher(lhs_l.child().type(),
column_property_comparator<check_exact_equality>{},
lhs_l.get_sliced_child(0),
rhs_l.get_sliced_child(0));
}
};
// Device predicate: true when row `index` of the two single-column tables is
// NOT exactly equal (nulls compared as equal to nulls).
class corresponding_rows_unequal {
public:
corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs) : comp(d_lhs, d_rhs)
{
}
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index) { return !comp(index, index); }
};
// Device predicate: true when row `index` is not "equivalent". Equivalence
// relaxes exact equality for floating-point values using a 4-ULP-style
// relative tolerance; all other types fall back to exact row equality.
class corresponding_rows_not_equivalent {
table_device_view d_lhs;
table_device_view d_rhs;
public:
corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs)
: d_lhs(d_lhs), d_rhs(d_rhs), comp(d_lhs, d_rhs)
{
// Only single-column tables are supported by this comparator.
CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1,
"Unsupported number of columns");
}
struct typed_element_not_equivalent {
template <typename T>
__device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()(
column_device_view const& lhs, column_device_view const& rhs, size_type index)
{
if (lhs.is_valid(index) and rhs.is_valid(index)) {
int ulp = 4; // value taken from google test
T x = lhs.element<T>(index);
T y = rhs.element<T>(index);
// Relative-epsilon comparison, plus an absolute floor of
// numeric_limits<T>::min() so denormal-scale noise is ignored.
return std::abs(x - y) > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp &&
std::abs(x - y) >= std::numeric_limits<T>::min();
} else {
// if either is null, then the inequality was checked already
return true;
}
}
template <typename T, typename... Args>
__device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args... args)
{
// Non-floating point inequality is checked already
return true;
}
};
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index)
{
// Exact comparison first; only on mismatch apply the float tolerance.
if (not comp(index, index)) {
auto lhs_col = this->d_lhs.column(0);
auto rhs_col = this->d_rhs.column(0);
return type_dispatcher(
lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, index);
}
return false;
}
};
// Report differing row indices via gtest failure messages. `differences`
// holds row indices (device memory); either every difference is printed, or
// only the first. No-op when the vector is empty.
void print_differences(thrust::device_vector<int> const& differences,
column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
if (differences.size() <= 0) { return; }
std::string depth_str = depth > 0 ? "depth " + std::to_string(depth) + std::string("\n") : "";
if (print_all_differences) {
// If there are differences, display them all
std::ostringstream buffer;
buffer << depth_str << "differences:" << std::endl;
cudf::table_view source_table({lhs, rhs});
// Gather both columns at the differing indices so they can be stringified.
fixed_width_column_wrapper<int32_t> diff_column(differences.begin(), differences.end());
std::unique_ptr<cudf::table> diff_table = cudf::gather(source_table, diff_column);
// Need to pull back the differences
std::vector<std::string> h_left_strings = to_strings(diff_table->get_column(0));
std::vector<std::string> h_right_strings = to_strings(diff_table->get_column(1));
// differences[i] performs a device->host read per element (debug path, so
// the cost is acceptable).
for (size_t i = 0; i < differences.size(); ++i) {
buffer << depth_str << "lhs[" << differences[i] << "] = " << h_left_strings[i] << ", rhs["
<< differences[i] << "] = " << h_right_strings[i] << std::endl;
}
// Always fails here (differences is non-empty); the message carries the diff.
EXPECT_EQ(differences.size(), size_t{0}) << buffer.str();
} else {
// If there are differences, just display the first one
int index = differences[0];
auto diff_lhs = cudf::detail::slice(lhs, index, index + 1);
auto diff_rhs = cudf::detail::slice(rhs, index, index + 1);
std::vector<std::string> h_left_strings = to_strings(diff_lhs);
std::vector<std::string> h_right_strings = to_strings(diff_rhs);
EXPECT_EQ(differences.size(), size_t{0})
<< depth_str << "first difference: "
<< "lhs[" << index << "] = " << to_string(diff_lhs, "") << ", rhs[" << index
<< "] = " << to_string(diff_rhs, "");
}
}
// non-nested column types
// Value comparison for non-nested column types: collect the indices of rows
// that differ (exactly or equivalently, per check_exact_equality) and report
// them through print_differences.
template <typename T, bool check_exact_equality>
struct column_comparator_impl {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
using ComparatorType = std::conditional_t<check_exact_equality,
corresponding_rows_unequal,
corresponding_rows_not_equivalent>;
// worst case - everything is different
thrust::device_vector<int> differences(lhs.size());
auto diff_iter = thrust::copy_if(thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs.size()),
differences.begin(),
ComparatorType(*d_lhs, *d_rhs));
// shrink back down
differences.resize(thrust::distance(differences.begin(), diff_iter));
print_differences(differences, lhs, rhs, print_all_differences, depth);
}
};
// forward declaration for nested-type recursion.
template <bool check_exact_equality>
struct column_comparator;
// specialization for list columns
// Value comparison for LIST columns: compares row validity and (slice-
// normalized) offsets at this level, then recurses into the child columns.
template <bool check_exact_equality>
struct column_comparator_impl<list_view, check_exact_equality> {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
lists_column_view lhs_l(lhs);
lists_column_view rhs_l(rhs);
CUDF_EXPECTS(lhs_l.size() == rhs_l.size(), "List column size mismatch");
if (lhs_l.size() == 0) { return; }
// worst case - everything is different. The copy_if below iterates over
// size()+1 offset positions, so the buffer must hold size()+1 entries;
// sizing it to size() could overflow by one element.
thrust::device_vector<int> differences(lhs_l.size() + 1);
// TODO : determine how equals/equivalency should work for columns with divergent underlying
// data, but equivalent null masks. Example:
//
// List<int32_t>:
// Length : 3
// Offsets : 0, 3, 5, 5
// Nulls: 011
// Children :
// 1, 2, 3, 4, 5
//
// List<int32_t>:
// Length : 3
// Offsets : 0, 3, 5, 7
// Nulls: 011
// Children :
// 1, 2, 3, 4, 5, 7, 8
//
// These two columns are seemingly equivalent, since their top level rows are the same, with
// just the last element being null. However, pyArrow will say these are -not- equal and
// does not appear to have an equivalent() check. So the question is : should we be handling
// this case when someone calls expect_columns_equivalent()?
// compare offsets, taking slicing into account
// left side
size_type lhs_shift = cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), 0);
auto lhs_offsets = thrust::make_transform_iterator(
lhs_l.offsets().begin<size_type>() + lhs_l.offset(),
[lhs_shift] __device__(size_type offset) { return offset - lhs_shift; });
auto lhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
// right side
size_type rhs_shift = cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), 0);
auto rhs_offsets = thrust::make_transform_iterator(
rhs_l.offsets().begin<size_type>() + rhs_l.offset(),
[rhs_shift] __device__(size_type offset) { return offset - rhs_shift; });
auto rhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
auto diff_iter = thrust::copy_if(
thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs_l.size() + 1),
differences.begin(),
[lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, num_rows = lhs_l.size()] __device__(
size_type index) {
// last offset has no validity associated with it
// NOTE(review): this guard also skips validity for row num_rows - 1;
// it looks like it was meant to be `index < num_rows` -- confirm intent.
if (index < num_rows - 1) {
if (lhs_valids[index] != rhs_valids[index]) { return true; }
// if validity matches -and- is false, we can ignore the actual values. this
// is technically not checking "equal()", but it's how the non-list code path handles it
if (!lhs_valids[index]) { return false; }
}
return lhs_offsets[index] == rhs_offsets[index] ? false : true;
});
// shrink back down
differences.resize(thrust::distance(differences.begin(), diff_iter));
print_differences(differences, lhs, rhs, print_all_differences, depth);
// recurse
auto lhs_child = lhs_l.get_sliced_child(0);
auto rhs_child = rhs_l.get_sliced_child(0);
cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
print_all_differences,
depth + 1);
}
};
// Value comparison for STRUCT columns: compare each child pair recursively.
// NOTE(review): assumes rhs has at least as many children as lhs; the child
// counts are only EXPECTed (not enforced) by the property comparator -- confirm.
template <bool check_exact_equality>
struct column_comparator_impl<struct_view, check_exact_equality> {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
std::for_each(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + lhs.num_children(),
[&](auto i) {
cudf::type_dispatcher(lhs.child(i).type(),
column_comparator<check_exact_equality>{},
lhs.child(i),
rhs.child(i),
print_all_differences,
depth + 1);
});
}
};
// Top-level dispatcher functor: first verifies column properties, then runs
// the type-specific value comparison (recursing for nested types).
template <bool check_exact_equality>
struct column_comparator {
template <typename T>
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth = 0)
{
// compare properties
cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs);
// compare values
column_comparator_impl<T, check_exact_equality> comparator{};
comparator(lhs, rhs, print_all_differences, depth);
}
};
} // namespace
/**
* @copydoc cudf::test::expect_column_properties_equal
*
*/
// Assert (via gtest) that two columns have exactly equal properties.
void expect_column_properties_equal(column_view const& lhs, column_view const& rhs)
{
cudf::type_dispatcher(lhs.type(), column_property_comparator<true>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_column_properties_equivalent
*
*/
// Assert (via gtest) that two columns have equivalent (relaxed) properties.
void expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs)
{
cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_columns_equal
*
*/
// Assert that two columns are exactly equal, element by element.
void expect_columns_equal(cudf::column_view const& lhs,
cudf::column_view const& rhs,
bool print_all_differences)
{
cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_columns_equivalent
*
*/
// Assert that two columns are equivalent (floating-point values compared
// with a ULP-based tolerance rather than bitwise).
void expect_columns_equivalent(cudf::column_view const& lhs,
cudf::column_view const& rhs,
bool print_all_differences)
{
cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_equal_buffers
*
*/
// Assert that two device buffers hold identical bytes. Both pointers must be
// device-accessible; a zero size passes trivially (empty range).
void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes)
{
if (size_bytes > 0) {
EXPECT_NE(nullptr, lhs);
EXPECT_NE(nullptr, rhs);
}
auto typed_lhs = static_cast<char const*>(lhs);
auto typed_rhs = static_cast<char const*>(rhs);
// Byte-wise comparison executed on the device.
EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs));
}
/**
* @copydoc cudf::test::bitmask_to_host
*
*/
// Copy a column's null mask to host memory. For sliced columns the mask is
// first re-based on the device so bit 0 corresponds to the column's first
// row. Returns an empty vector when the column is not nullable.
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c)
{
if (c.nullable()) {
auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
std::vector<bitmask_type> host_bitmask(num_bitmasks);
if (c.offset() == 0) {
// Unsliced: copy the mask words directly.
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
c.null_mask(),
num_bitmasks * sizeof(bitmask_type),
cudaMemcpyDeviceToHost));
} else {
// Sliced: materialize a zero-based copy of the relevant bit range first.
auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size());
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
mask.data(),
num_bitmasks * sizeof(bitmask_type),
cudaMemcpyDeviceToHost));
}
return host_bitmask;
} else {
return std::vector<bitmask_type>{};
}
}
namespace {
// Convert an integral value to its decimal string. std::to_string is exact
// for integers and promotes narrow (8-bit) types so they print as numbers.
template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
return std::to_string(value);
}
// Convert a floating-point value to a string with max_digits10 significant
// digits so the printed value round-trips to the exact same bits.
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
std::ostringstream stream;
stream.precision(std::numeric_limits<T>::max_digits10);
stream << value;
return stream.str();
}
// Human-readable unit suffix for each cudf duration type, selected purely by
// overload resolution on the duration's concrete type.
static auto duration_suffix(cudf::duration_D) { return " days"; }
static auto duration_suffix(cudf::duration_s) { return " seconds"; }
static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; }
static auto duration_suffix(cudf::duration_us) { return " microseconds"; }
static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; }
// Build a type description string, recursing into LIST/STRUCT children,
// e.g. "LIST<INT32>" or "STRUCT<INT32,STRING,>".
std::string get_nested_type_str(cudf::column_view const& view)
{
if (view.type().id() == cudf::type_id::LIST) {
lists_column_view lcv(view);
return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">";
}
if (view.type().id() == cudf::type_id::STRUCT) {
std::ostringstream out;
out << cudf::jit::get_type_name(view.type()) + "<";
// NOTE(review): the ostream_iterator emits "," after every child, so the
// result has a trailing comma before ">" -- presumably acceptable for
// debug output. The `out` capture is unused by the lambda body.
std::transform(view.child_begin(),
view.child_end(),
std::ostream_iterator<std::string>(out, ","),
[&out](auto const col) { return get_nested_type_str(col); });
out << ">";
return out.str();
}
return cudf::jit::get_type_name(view.type());
}
// Render a nested column's offsets as a delimited string, normalized so the
// first offset prints as 0 regardless of slicing. NestedColumnView must
// expose parent(), size(), offset() and an offsets_column_index constant.
template <typename NestedColumnView>
std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ")
{
column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index);
CUDF_EXPECTS(offsets.type().id() == type_id::INT32,
"Column does not appear to be an offsets column");
CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!");
size_type output_size = c.size() + 1;
// the first offset value to normalize everything against
size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), 0);
rmm::device_vector<size_type> shifted_offsets(output_size);
// normalize the offset values for the column offset
size_type const* d_offsets = offsets.head<size_type>() + c.offset();
// Subtract `first` from every offset on the device, then copy to host.
thrust::transform(
rmm::exec_policy(0)->on(0),
d_offsets,
d_offsets + output_size,
shifted_offsets.begin(),
[first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); });
thrust::host_vector<size_type> h_shifted_offsets(shifted_offsets);
std::ostringstream buffer;
for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) {
buffer << h_shifted_offsets[idx];
if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; }
}
return buffer.str();
}
// Type-dispatched functor that converts a column's contents into host-side
// strings (one per element for flat types; one multi-line block for nested
// types). Null rows render as "NULL".
struct column_view_printer {
// Numeric columns: round-trip-precise decimal strings.
template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el);
});
}
}
// Timestamps: converted to a strings column on device, then printed as strings.
template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// For timestamps, convert timestamp column to column of strings, then
// call string version
//
auto col_as_strings = cudf::strings::from_timestamps(col);
if (col_as_strings->size() == 0) { return; }
this->template operator()<cudf::string_view>(*col_as_strings, out, indent);
}
// Fixed-point: printed via a double conversion.
// NOTE(review): null rows are not special-cased here, and std::to_string
// uses fixed 6-digit formatting -- confirm this matches expectations.
template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto const h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&](auto idx) {
auto const d = static_cast<double>(h_data.first[idx]);
return std::to_string(d);
});
}
// Strings: copied to host with their validity; empty mask means all valid.
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// Implementation for strings, call special to_host variant
//
auto h_data = cudf::test::to_host<std::string>(col);
out.resize(col.size());
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? h_data.first[idx]
: std::string("NULL");
});
}
// Dictionaries: print the keys, then the indices (as an INT32 view carrying
// the parent's null mask), separated by a marker row.
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
cudf::dictionary_column_view dictionary(col);
if (col.size() == 0) return;
std::vector<std::string> keys = to_strings(dictionary.keys());
std::vector<std::string> indices = to_strings({cudf::data_type{cudf::type_id::INT32},
dictionary.size(),
dictionary.indices().head<int32_t>(),
dictionary.null_mask(),
dictionary.null_count(),
dictionary.offset()});
out.insert(out.end(), keys.begin(), keys.end());
if (!indices.empty()) {
std::string first = "\x08 : " + indices.front(); // use : as delimiter
out.push_back(first); // between keys and indices
out.insert(out.end(), indices.begin() + 1, indices.end());
}
}
// Print the tick counts with the units
template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx].count()) +
duration_suffix(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el.count()) + duration_suffix(el);
});
}
}
// Lists: one multi-line descriptive string (type, length, offsets, masks,
// then the recursively-printed child).
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
lists_column_view lcv(col);
// propagate slicing to the child if necessary
column_view child = lcv.get_sliced_child(0);
bool const is_sliced = lcv.offset() > 0 || child.offset() > 0;
std::string tmp =
get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent +
"Length : " + std::to_string(lcv.size()) + "\n" + indent +
"Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" +
(lcv.has_nulls() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" +
detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n"
: "") +
indent + "Children :\n" +
(child.type().id() != type_id::LIST && child.has_nulls()
? indent + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
: "") +
(detail::to_string(child, ", ", indent + " ")) + "\n";
out.push_back(tmp);
}
// Structs: header (type/length/null info) followed by each child on its own line.
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
structs_column_view view{col};
std::ostringstream out_stream;
out_stream << get_nested_type_str(col) << ":\n"
<< indent << "Length : " << view.size() << ":\n";
if (view.has_nulls()) {
out_stream << indent << "Null count: " << view.null_count() << "\n"
<< detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n";
}
std::transform(
view.child_begin(),
view.child_end(),
std::ostream_iterator<std::string>(out_stream, "\n"),
[&](auto child_column) { return detail::to_string(child_column, ", ", indent + " "); });
out.push_back(out_stream.str());
}
};
} // namespace
namespace detail {
/**
* @copydoc cudf::test::detail::to_strings
*
*/
std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent)
{
// Dispatch on the column's type; the printer fills one string per element
// (or a single multi-line string for nested types).
std::vector<std::string> result;
cudf::type_dispatcher(col.type(), column_view_printer{}, col, result, indent);
return result;
}
/**
* @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string)
*
* @param indent Indentation for all output
*/
std::string to_string(cudf::column_view const& col,
std::string const& delimiter,
std::string const& indent)
{
// Join the per-element strings with `delimiter`, prefixed by `indent`.
auto const h_data = to_strings(col, indent);
std::ostringstream buffer;
buffer << indent;
for (std::size_t i = 0; i < h_data.size(); ++i) {
if (i != 0) { buffer << delimiter; }
buffer << h_data[i];
}
return buffer.str();
}
/**
* @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string)
*
* @param indent Indentation for all output. See comment in `to_strings` for
* a detailed description.
*/
std::string to_string(std::vector<bitmask_type> const& null_mask,
size_type null_mask_size,
std::string const& indent)
{
// Emit one character per bit, highest row index first, prefixed by `indent`.
std::ostringstream buffer;
buffer << indent;
for (size_type idx = null_mask_size; idx-- > 0;) {
buffer << (cudf::bit_is_set(null_mask.data(), idx) ? '1' : '0');
}
return buffer.str();
}
} // namespace detail
/**
* @copydoc cudf::test::to_strings
*
*/
// Public entry point: convert each element of `col` to a string with no indentation.
std::vector<std::string> to_strings(cudf::column_view const& col)
{
return detail::to_strings(col);
}
/**
* @copydoc cudf::test::to_string(cudf::column_view, std::string)
*
*/
// Public entry point: render the whole column as a single delimited string.
std::string to_string(cudf::column_view const& col, std::string const& delimiter)
{
return detail::to_string(col, delimiter);
}
/**
* @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type)
*
*/
// Public entry point: render a host-side null mask as a 0/1 bit string.
std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size)
{
return detail::to_string(null_mask, null_mask_size);
}
/**
* @copydoc cudf::test::print
*
*/
// Print the column to `os` followed by a newline (and stream flush via endl).
void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter)
{
os << to_string(col, delimiter) << std::endl;
}
/**
* @copydoc cudf::test::validate_host_masks
*
*/
// Compare two host-side bitmasks bit-by-bit over the first
// `number_of_elements` rows; true when every bit matches.
bool validate_host_masks(std::vector<bitmask_type> const& expected_mask,
std::vector<bitmask_type> const& got_mask,
size_type number_of_elements)
{
for (size_type idx = 0; idx < number_of_elements; ++idx) {
if (cudf::bit_is_set(expected_mask.data(), idx) !=
cudf::bit_is_set(got_mask.data(), idx)) {
return false;
}
}
return true;
}
} // namespace test
} // namespace cudf
|
a91ce6a5131c081891a1467c1348cbe2df043aab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Sum-reduce g_idata into one partial sum per block. Each thread first
// accumulates 16 block-strided elements in registers (16x unrolling) and
// stores the sum back into g_idata (modified in place), then a tree
// reduction over the block's first blockDim.x elements produces the block
// result in g_odata[blockIdx.x].
// Assumes blockDim.x is a power of two; tails where idx + 15*blockDim.x >= n
// are skipped by the unroll guard -- presumably n is a multiple of
// blockDim.x * 16 (TODO confirm with the caller).
__global__ void reduceUnrolling16 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 16 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 16;
// unrolling 16
if (idx + 15 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
int c1 = g_idata[idx + 8 * blockDim.x];
int c2 = g_idata[idx + 9 * blockDim.x];
int c3 = g_idata[idx + 10 * blockDim.x];
int c4 = g_idata[idx + 11 * blockDim.x];
int d1 = g_idata[idx + 12 * blockDim.x];
int d2 = g_idata[idx + 13 * blockDim.x];
int d3 = g_idata[idx + 14 * blockDim.x];
int d4 = g_idata[idx + 15 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4 + c1 + c2 + c3 + c4
+ d1 + d2 + d3 + d4;
}
// make the unrolled sums visible to the whole block before reducing
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
} | a91ce6a5131c081891a1467c1348cbe2df043aab.cu | #include "includes.h"
// Sum-reduce g_idata into one partial sum per block. Each thread first
// accumulates 16 block-strided elements in registers (16x unrolling) and
// stores the sum back into g_idata (modified in place), then a tree
// reduction over the block's first blockDim.x elements produces the block
// result in g_odata[blockIdx.x].
// Assumes blockDim.x is a power of two; tails where idx + 15*blockDim.x >= n
// are skipped by the unroll guard -- presumably n is a multiple of
// blockDim.x * 16 (TODO confirm with the caller).
__global__ void reduceUnrolling16 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 16 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 16;
// unrolling 16
if (idx + 15 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
int c1 = g_idata[idx + 8 * blockDim.x];
int c2 = g_idata[idx + 9 * blockDim.x];
int c3 = g_idata[idx + 10 * blockDim.x];
int c4 = g_idata[idx + 11 * blockDim.x];
int d1 = g_idata[idx + 12 * blockDim.x];
int d2 = g_idata[idx + 13 * blockDim.x];
int d3 = g_idata[idx + 14 * blockDim.x];
int d4 = g_idata[idx + 15 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4 + c1 + c2 + c3 + c4
+ d1 + d2 + d3 + d4;
}
// make the unrolled sums visible to the whole block before reducing
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
} |
f4797e79a70e196d175cdafded1fac423173e2cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Babak Poursartip
// 09/28/2020
// warp divergence
#include <iostream>
// Demo kernel: the branch condition is uniform within each warp (warp_id is
// constant across a warp's 32 lanes), so no intra-warp divergence occurs.
// `a`/`b` are intentionally unused -- the kernel exists only to be profiled.
__global__ void code_without_divergence() {
int gid = blockDim.x * blockIdx.x + threadIdx.x;
float a = 0, b = 0;
int warp_id = gid / 32;
if (warp_id % 2 == 0) {
a = 100.0;
b = 50.0;
} else {
a = 200.0;
b = 72.0;
}
}
// Demo kernel: the branch condition alternates per thread, so every warp
// takes both paths (intra-warp divergence). Compare against
// code_without_divergence under a profiler.
__global__ void code_with_divergence() {
int gid = blockDim.x * blockIdx.x + threadIdx.x;
float a = 0, b = 0;
if (gid % 2 == 0) {
a = 100.0;
b = 50.0;
} else {
a = 200.0;
b = 72.0;
}
}
// Launch both demo kernels over 2^22 logical threads so their warp-divergence
// behavior can be compared in a profiler.
int main() {
printf(" starts ...\n");
int size = 1 << 22;
printf("size: %d \n", size);
// ceil-divide so the grid covers all `size` threads
dim3 block(128);
dim3 grid((size + block.x - 1) / block.x);
hipLaunchKernelGGL(( code_without_divergence), dim3(grid), dim3(block), 0, 0, );
hipDeviceSynchronize();
hipLaunchKernelGGL(( code_with_divergence), dim3(grid), dim3(block), 0, 0, );
hipDeviceSynchronize();
// surface launch/execution errors instead of exiting silently
// (the original also issued a redundant third hipDeviceSynchronize here)
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("HIP error: %s\n", hipGetErrorString(err));
return 1;
}
printf(" finished.\n");
return 0;
} | f4797e79a70e196d175cdafded1fac423173e2cd.cu |
// Babak Poursartip
// 09/28/2020
// warp divergence
#include <iostream>
// Demo kernel: the branch condition is uniform within each warp (warp_id is
// constant across a warp's 32 lanes), so no intra-warp divergence occurs.
// `a`/`b` are intentionally unused -- the kernel exists only to be profiled.
__global__ void code_without_divergence() {
int gid = blockDim.x * blockIdx.x + threadIdx.x;
float a = 0, b = 0;
int warp_id = gid / 32;
if (warp_id % 2 == 0) {
a = 100.0;
b = 50.0;
} else {
a = 200.0;
b = 72.0;
}
}
// Demo kernel: the branch condition alternates per thread, so every warp
// takes both paths (intra-warp divergence). Compare against
// code_without_divergence under a profiler.
__global__ void code_with_divergence() {
int gid = blockDim.x * blockIdx.x + threadIdx.x;
float a = 0, b = 0;
if (gid % 2 == 0) {
a = 100.0;
b = 50.0;
} else {
a = 200.0;
b = 72.0;
}
}
printf(" starts ...\n");
int size = 1 << 22;
printf("size: %d \n", size);
dim3 block(128);
dim3 grid((size + block.x - 1) / block.x);
code_without_divergence<<<grid, block>>>();
cudaDeviceSynchronize();
code_with_divergence<<<grid, block>>>();
cudaDeviceSynchronize();
cudaDeviceSynchronize();
printf(" finished.\n");
return 0;
} |
5f16ed03cd3bf5fb435096f68f2bba5060495297.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
// Computes the 1-D grid size needed to cover N work items with
// THREADS_PER_BLOCK threads per block, capped at 65000 blocks. Capping is
// safe because the kernels iterate with CUDA_1D_KERNEL_LOOP (a grid-stride
// loop), so a smaller grid just does more iterations per thread.
inline int GET_BLOCKS(const int N) {
  int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  int max_block_num = 65000;
  // Ternary instead of unqualified min(): the bare min() only exists via the
  // CUDA/HIP headers' global namespace and breaks host-only compilation.
  return optimal_block_num < max_block_num ? optimal_block_num : max_block_num;
}
// Bilinearly samples the (height x width) feature map `bottom_data` at the
// fractional coordinate (y, x). Points more than one pixel outside the map
// return 0; points in [-1, 0] are clamped to the border.
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
                                         const int height, const int width,
                                         scalar_t y, scalar_t x) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return 0;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  // Clamp the 2x2 neighborhood to the last row/column at the far edges.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets inside the cell and their complements.
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  // do bilinear interpolation
  scalar_t lt = bottom_data[y_low * width + x_low];
  scalar_t rt = bottom_data[y_low * width + x_high];
  scalar_t lb = bottom_data[y_high * width + x_low];
  scalar_t rb = bottom_data[y_high * width + x_high];
  scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
  return val;
}
// Forward ROIAlign kernel: one loop iteration per output element (n, c, ph,
// pw); each element averages sample_num_h x sample_num_w bilinear samples
// taken inside its bin. CUDA_1D_KERNEL_LOOP is a grid-stride loop, so any
// grid size covers all `nthreads` elements.
// bottom_rois: one 5-value row per ROI -- [batch_idx, x1, y1, x2, y2];
// coordinates are scaled by spatial_scale, end coordinates treated
// inclusively via the +1.
template <typename scalar_t>
__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data,
                                const scalar_t *bottom_rois,
                                const scalar_t spatial_scale,
                                const int sample_num, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    // Start of this ROI's (batch, channel) plane in the input feature map.
    const scalar_t *offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // Sampling grid: `sample_num` points per axis when positive, otherwise
    // adaptive (one point per unit of bin size).
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    scalar_t output_val = 0;
    for (int iy = 0; iy < sample_num_h; iy++) {
      // Sample at sub-cell centers: offset (i + 0.5) / sample_num per axis.
      const scalar_t y = roi_start_h + ph * bin_size_h +
                         (scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
                             (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x = roi_start_w + pw * bin_size_w +
                           (scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
                               (scalar_t)(sample_num_w);
        scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
                                                      height, width, y, x);
        output_val += val;
      }
    }
    // Average pooling over all samples in the bin.
    output_val /= (sample_num_h * sample_num_w);
    top_data[index] = output_val;
  }
}
// Launches the forward ROIAlign kernel for `num_rois` ROIs on the default
// stream. `features` is the input feature map, `rois` holds one
// [batch_idx, x1, y1, x2, y2] row per ROI, and pooled results are written to
// `output`. Always returns 1; launch errors surface via THCudaCheck.
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
                           const float spatial_scale, const int sample_num,
                           const int channels, const int height,
                           const int width, const int num_rois,
                           const int pooled_height, const int pooled_width,
                           at::Tensor output) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.scalar_type(), "ROIAlignLaucherForward", ([&] {
        // data_ptr<T>() replaces the deprecated Tensor::data<T>() accessor.
        const scalar_t *bottom_data = features.data_ptr<scalar_t>();
        const scalar_t *rois_data = rois.data_ptr<scalar_t>();
        scalar_t *top_data = output.data_ptr<scalar_t>();
        hipLaunchKernelGGL(( ROIAlignForward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, top_data);
      }));
  THCudaCheck(hipGetLastError());
  return 1;
}
// Computes the four bilinear weights (w1..w4) and the integer corner indices
// for fractional sample point (y, x) -- the gradient counterpart of
// bilinear_interpolate. Points more than one pixel outside the map get all
// weights zeroed and all indices set to -1 so callers can skip them.
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width,
                                              scalar_t y, scalar_t x,
                                              scalar_t &w1, scalar_t &w2,
                                              scalar_t &w3, scalar_t &w4,
                                              int &x_low, int &x_high,
                                              int &y_low, int &y_high) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  y_low = (int)y;
  x_low = (int)x;
  // Clamp the 2x2 neighborhood to the last row/column at the far edges.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets inside the cell and their complements.
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}
// Backward ROIAlign kernel: one loop iteration per output-gradient element.
// Each element's gradient is split over the four corners of every sample
// point using the bilinear weights and accumulated into `bottom_diff` with
// atomicAdd (different output elements may touch the same input pixel).
template <typename scalar_t>
__global__ void ROIAlignBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
    const scalar_t spatial_scale, const int sample_num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, scalar_t *bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    // Start of this ROI's (batch, channel) plane in the gradient buffer.
    scalar_t *offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels + c) * height * width;
    int offset_top = (n * channels + c) * pooled_height * pooled_width +
                     ph * pooled_width + pw;
    scalar_t offset_top_diff = top_diff[offset_top];
    // Same sampling grid as the forward pass.
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    // Forward averaged over `count` samples, so each sample gets 1/count.
    const scalar_t count = (scalar_t)(sample_num_h * sample_num_w);
    for (int iy = 0; iy < sample_num_h; iy++) {
      const scalar_t y =
          roi_start_h + ph * bin_size_h +
          (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x =
            roi_start_w + pw * bin_size_w +
            (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w);
        scalar_t w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient<scalar_t>(
            height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
        scalar_t g1 = offset_top_diff * w1 / count;
        scalar_t g2 = offset_top_diff * w2 / count;
        scalar_t g3 = offset_top_diff * w3 / count;
        scalar_t g4 = offset_top_diff * w4 / count;
        // Indices are -1 when the sample was out of range -> skip it.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          atomicAdd(offset_bottom_diff + y_low * width + x_low, g1);
          atomicAdd(offset_bottom_diff + y_low * width + x_high, g2);
          atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
          atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
        }
      }
    }
  }
}
// Launches the backward ROIAlign kernel on the default stream, accumulating
// input gradients into `bottom_grad`. Aborts for double tensors (the kernel
// relies on atomicAdd, guarded here as unsupported for double). Always
// returns 1; launch errors surface via THCudaCheck.
int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                            const float spatial_scale, const int sample_num,
                            const int channels, const int height,
                            const int width, const int num_rois,
                            const int pooled_height, const int pooled_width,
                            at::Tensor bottom_grad) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] {
        // data_ptr<T>() replaces the deprecated Tensor::data<T>() accessor.
        const scalar_t *top_diff = top_grad.data_ptr<scalar_t>();
        const scalar_t *rois_data = rois.data_ptr<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data_ptr<scalar_t>();
        if (sizeof(scalar_t) == sizeof(double)) {
          fprintf(stderr, "double is not supported\n");
          exit(-1);
        }
        hipLaunchKernelGGL(( ROIAlignBackward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
                output_size, top_diff, rois_data, spatial_scale, sample_num,
                channels, height, width, pooled_height, pooled_width,
                bottom_diff);
      }));
  THCudaCheck(hipGetLastError());
  return 1;
}
| 5f16ed03cd3bf5fb435096f68f2bba5060495297.cu | #include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
// Computes the 1-D grid size needed to cover N work items with
// THREADS_PER_BLOCK threads per block, capped at 65000 blocks. Capping is
// safe because the kernels iterate with CUDA_1D_KERNEL_LOOP (a grid-stride
// loop), so a smaller grid just does more iterations per thread.
inline int GET_BLOCKS(const int N) {
  int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  int max_block_num = 65000;
  // Ternary instead of unqualified min(): the bare min() only exists via the
  // CUDA headers' global namespace and breaks host-only compilation.
  return optimal_block_num < max_block_num ? optimal_block_num : max_block_num;
}
// Bilinearly samples the (height x width) feature map `bottom_data` at the
// fractional coordinate (y, x). Points more than one pixel outside the map
// return 0; points in [-1, 0] are clamped to the border.
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
                                         const int height, const int width,
                                         scalar_t y, scalar_t x) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return 0;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  // Clamp the 2x2 neighborhood to the last row/column at the far edges.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets inside the cell and their complements.
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  // do bilinear interpolation
  scalar_t lt = bottom_data[y_low * width + x_low];
  scalar_t rt = bottom_data[y_low * width + x_high];
  scalar_t lb = bottom_data[y_high * width + x_low];
  scalar_t rb = bottom_data[y_high * width + x_high];
  scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
  return val;
}
// Forward ROIAlign kernel: one loop iteration per output element (n, c, ph,
// pw); each element averages sample_num_h x sample_num_w bilinear samples
// taken inside its bin. CUDA_1D_KERNEL_LOOP is a grid-stride loop, so any
// grid size covers all `nthreads` elements.
// bottom_rois: one 5-value row per ROI -- [batch_idx, x1, y1, x2, y2];
// coordinates are scaled by spatial_scale, end coordinates treated
// inclusively via the +1.
template <typename scalar_t>
__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data,
                                const scalar_t *bottom_rois,
                                const scalar_t spatial_scale,
                                const int sample_num, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    // Start of this ROI's (batch, channel) plane in the input feature map.
    const scalar_t *offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // Sampling grid: `sample_num` points per axis when positive, otherwise
    // adaptive (one point per unit of bin size).
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    scalar_t output_val = 0;
    for (int iy = 0; iy < sample_num_h; iy++) {
      // Sample at sub-cell centers: offset (i + 0.5) / sample_num per axis.
      const scalar_t y = roi_start_h + ph * bin_size_h +
                         (scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
                             (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x = roi_start_w + pw * bin_size_w +
                           (scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
                               (scalar_t)(sample_num_w);
        scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
                                                      height, width, y, x);
        output_val += val;
      }
    }
    // Average pooling over all samples in the bin.
    output_val /= (sample_num_h * sample_num_w);
    top_data[index] = output_val;
  }
}
// Launches the forward ROIAlign kernel for `num_rois` ROIs on the default
// stream. `features` is the input feature map, `rois` holds one
// [batch_idx, x1, y1, x2, y2] row per ROI, and pooled results are written to
// `output`. Always returns 1; launch errors surface via THCudaCheck.
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
                           const float spatial_scale, const int sample_num,
                           const int channels, const int height,
                           const int width, const int num_rois,
                           const int pooled_height, const int pooled_width,
                           at::Tensor output) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.scalar_type(), "ROIAlignLaucherForward", ([&] {
        // data_ptr<T>() replaces the deprecated Tensor::data<T>() accessor.
        const scalar_t *bottom_data = features.data_ptr<scalar_t>();
        const scalar_t *rois_data = rois.data_ptr<scalar_t>();
        scalar_t *top_data = output.data_ptr<scalar_t>();
        ROIAlignForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, top_data);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}
// Computes the four bilinear weights (w1..w4) and the integer corner indices
// for fractional sample point (y, x) -- the gradient counterpart of
// bilinear_interpolate. Points more than one pixel outside the map get all
// weights zeroed and all indices set to -1 so callers can skip them.
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width,
                                              scalar_t y, scalar_t x,
                                              scalar_t &w1, scalar_t &w2,
                                              scalar_t &w3, scalar_t &w4,
                                              int &x_low, int &x_high,
                                              int &y_low, int &y_high) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  y_low = (int)y;
  x_low = (int)x;
  // Clamp the 2x2 neighborhood to the last row/column at the far edges.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets inside the cell and their complements.
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}
// Backward ROIAlign kernel: one loop iteration per output-gradient element.
// Each element's gradient is split over the four corners of every sample
// point using the bilinear weights and accumulated into `bottom_diff` with
// atomicAdd (different output elements may touch the same input pixel).
template <typename scalar_t>
__global__ void ROIAlignBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
    const scalar_t spatial_scale, const int sample_num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, scalar_t *bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    // Start of this ROI's (batch, channel) plane in the gradient buffer.
    scalar_t *offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels + c) * height * width;
    int offset_top = (n * channels + c) * pooled_height * pooled_width +
                     ph * pooled_width + pw;
    scalar_t offset_top_diff = top_diff[offset_top];
    // Same sampling grid as the forward pass.
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    // Forward averaged over `count` samples, so each sample gets 1/count.
    const scalar_t count = (scalar_t)(sample_num_h * sample_num_w);
    for (int iy = 0; iy < sample_num_h; iy++) {
      const scalar_t y =
          roi_start_h + ph * bin_size_h +
          (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x =
            roi_start_w + pw * bin_size_w +
            (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w);
        scalar_t w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient<scalar_t>(
            height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
        scalar_t g1 = offset_top_diff * w1 / count;
        scalar_t g2 = offset_top_diff * w2 / count;
        scalar_t g3 = offset_top_diff * w3 / count;
        scalar_t g4 = offset_top_diff * w4 / count;
        // Indices are -1 when the sample was out of range -> skip it.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          atomicAdd(offset_bottom_diff + y_low * width + x_low, g1);
          atomicAdd(offset_bottom_diff + y_low * width + x_high, g2);
          atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
          atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
        }
      }
    }
  }
}
// Launches the backward ROIAlign kernel on the default stream, accumulating
// input gradients into `bottom_grad`. Aborts for double tensors (the kernel
// relies on atomicAdd, guarded here as unsupported for double). Always
// returns 1; launch errors surface via THCudaCheck.
int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                            const float spatial_scale, const int sample_num,
                            const int channels, const int height,
                            const int width, const int num_rois,
                            const int pooled_height, const int pooled_width,
                            at::Tensor bottom_grad) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] {
        // data_ptr<T>() replaces the deprecated Tensor::data<T>() accessor.
        const scalar_t *top_diff = top_grad.data_ptr<scalar_t>();
        const scalar_t *rois_data = rois.data_ptr<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data_ptr<scalar_t>();
        if (sizeof(scalar_t) == sizeof(double)) {
          fprintf(stderr, "double is not supported\n");
          exit(-1);
        }
        ROIAlignBackward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, top_diff, rois_data, spatial_scale, sample_num,
                channels, height, width, pooled_height, pooled_width,
                bottom_diff);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}
|
6077b47ac59f98674f0552f8c55c45a3db802eec.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <omp.h>
/*
* An example of using OpenMP to parallelize the creation of CUDA work in
* multiple streams. This example using n_streams OpenMP threads to launch 4
* kernels in each stream. Note the new pragma introduced, #pragma omp parallel.
*/
#define N 300000
#define NSTREAM 4
// Busy-work kernel (one of four identical copies) used only to keep the GPU
// occupied so stream concurrency can be observed in a profiler; the
// accumulated value is deliberately discarded.
__global__ void kernel_1()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Busy-work kernel (identical to kernel_1); see kernel_1 for rationale.
__global__ void kernel_2()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Busy-work kernel (identical to kernel_1); see kernel_1 for rationale.
__global__ void kernel_3()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Busy-work kernel (identical to kernel_1); see kernel_1 for rationale.
__global__ void kernel_4()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Creates `n_streams` streams, then uses one OpenMP thread per stream to
// enqueue 4 dummy kernels each (depth-first), timing the whole dispatch with
// events. argv[1] = stream count, argv[2] != 0 selects a larger launch
// configuration (512 threads/block, 8 blocks).
int main(int argc, char **argv)
{
    int n_streams = NSTREAM;
    int isize = 1;
    int iblock = 1;
    int bigcase = 0;
    // get argument from command line
    if (argc > 1) n_streams = atoi(argv[1]);
    if (argc > 2) bigcase = atoi(argv[2]);
    float elapsed_time;
    // set up max connectioin
    // NOTE(review): string literal bound to non-const char* (deprecated in
    // C++); SET_ENV/GET_ENV are project macros -- presumed setenv/getenv
    // wrappers, confirm in common.h.
    char *iname = "CUDA_DEVICE_MAX_CONNECTIONS";
    SET_ENV(iname, "32", 1);
    char *ivalue = GET_ENV(iname);
    //printf ("%s = %s\n", iname, ivalue);
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
    CHECK(hipSetDevice(dev));
    // check if device support hyper-q
    if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
    {
        if (deviceProp.concurrentKernels == 0)
        {
            printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
            printf("> CUDA kernel runs will be serialized\n");
        }
        else
        {
            printf("> GPU does not support HyperQ\n");
            printf("> CUDA kernel runs will have limited concurrency\n");
        }
    }
    printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
           deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    // Allocate and initialize an array of stream handles
    hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof(hipStream_t));
    for (int i = 0; i < n_streams; i++)
    {
        CHECK(hipStreamCreate(&(streams[i])));
    }
    // run kernel with more threads
    if (bigcase == 1)
    {
        iblock = 512;
        isize = 1 << 12;
    }
    // set up execution configuration
    dim3 block(iblock);
    dim3 grid(isize / iblock);
    //printf("> grid %d block %d\n", grid.x, block.x);
    // creat events
    hipEvent_t start, stop;
    CHECK(hipEventCreate(&start));
    CHECK(hipEventCreate(&stop));
    // record start event
    CHECK(hipEventRecord(start, 0));
    // dispatch job with depth first ordering using OpenMP
    omp_set_num_threads(n_streams);
    // Each OpenMP thread owns streams[i] and enqueues all four kernels into
    // it, so work from different streams can overlap on the device.
    #pragma omp parallel
    {
        int i = omp_get_thread_num();
        hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
        hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
        hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], );
        hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], );
    }
    // record stop event
    CHECK(hipEventRecord(stop, 0));
    CHECK(hipEventSynchronize(stop));
    // calculate elapsed time
    CHECK(hipEventElapsedTime(&elapsed_time, start, stop));
    printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f);
    // release all stream
    for (int i = 0; i < n_streams; i++)
    {
        CHECK(hipStreamDestroy(streams[i]));
    }
    free(streams);
    // destroy events
    CHECK(hipEventDestroy(start));
    CHECK(hipEventDestroy(stop));
    // reset device
    CHECK(hipDeviceReset());
    return 0;
}
| 6077b47ac59f98674f0552f8c55c45a3db802eec.cu | #include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <omp.h>
/*
* An example of using OpenMP to parallelize the creation of CUDA work in
* multiple streams. This example using n_streams OpenMP threads to launch 4
* kernels in each stream. Note the new pragma introduced, #pragma omp parallel.
*/
#define N 300000
#define NSTREAM 4
// Busy-work kernel (one of four identical copies) used only to keep the GPU
// occupied so stream concurrency can be observed in a profiler; the
// accumulated value is deliberately discarded.
__global__ void kernel_1()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Busy-work kernel (identical to kernel_1); see kernel_1 for rationale.
__global__ void kernel_2()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Busy-work kernel (identical to kernel_1); see kernel_1 for rationale.
__global__ void kernel_3()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Busy-work kernel (identical to kernel_1); see kernel_1 for rationale.
__global__ void kernel_4()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; ++iter)
    {
        acc += tan(0.1) * tan(0.1);
    }
    (void)acc;  // intentionally unused
}
// Creates `n_streams` streams, then uses one OpenMP thread per stream to
// enqueue 4 dummy kernels each (depth-first), timing the whole dispatch with
// events. argv[1] = stream count, argv[2] != 0 selects a larger launch
// configuration (512 threads/block, 8 blocks).
int main(int argc, char **argv)
{
    int n_streams = NSTREAM;
    int isize = 1;
    int iblock = 1;
    int bigcase = 0;
    // get argument from command line
    if (argc > 1) n_streams = atoi(argv[1]);
    if (argc > 2) bigcase = atoi(argv[2]);
    float elapsed_time;
    // set up max connectioin
    // NOTE(review): string literal bound to non-const char* (deprecated in
    // C++); SET_ENV/GET_ENV are project macros -- presumed setenv/getenv
    // wrappers, confirm in common.h.
    char *iname = "CUDA_DEVICE_MAX_CONNECTIONS";
    SET_ENV(iname, "32", 1);
    char *ivalue = GET_ENV(iname);
    //printf ("%s = %s\n", iname, ivalue);
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
    CHECK(cudaSetDevice(dev));
    // check if device support hyper-q
    if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
    {
        if (deviceProp.concurrentKernels == 0)
        {
            printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
            printf("> CUDA kernel runs will be serialized\n");
        }
        else
        {
            printf("> GPU does not support HyperQ\n");
            printf("> CUDA kernel runs will have limited concurrency\n");
        }
    }
    printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
           deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    // Allocate and initialize an array of stream handles
    cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof(cudaStream_t));
    for (int i = 0; i < n_streams; i++)
    {
        CHECK(cudaStreamCreate(&(streams[i])));
    }
    // run kernel with more threads
    if (bigcase == 1)
    {
        iblock = 512;
        isize = 1 << 12;
    }
    // set up execution configuration
    dim3 block(iblock);
    dim3 grid(isize / iblock);
    //printf("> grid %d block %d\n", grid.x, block.x);
    // creat events
    cudaEvent_t start, stop;
    CHECK(cudaEventCreate(&start));
    CHECK(cudaEventCreate(&stop));
    // record start event
    CHECK(cudaEventRecord(start, 0));
    // dispatch job with depth first ordering using OpenMP
    omp_set_num_threads(n_streams);
    // Each OpenMP thread owns streams[i] and enqueues all four kernels into
    // it, so work from different streams can overlap on the device.
    #pragma omp parallel
    {
        int i = omp_get_thread_num();
        kernel_1<<<grid, block, 0, streams[i]>>>();
        kernel_2<<<grid, block, 0, streams[i]>>>();
        kernel_3<<<grid, block, 0, streams[i]>>>();
        kernel_4<<<grid, block, 0, streams[i]>>>();
    }
    // record stop event
    CHECK(cudaEventRecord(stop, 0));
    CHECK(cudaEventSynchronize(stop));
    // calculate elapsed time
    CHECK(cudaEventElapsedTime(&elapsed_time, start, stop));
    printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f);
    // release all stream
    for (int i = 0; i < n_streams; i++)
    {
        CHECK(cudaStreamDestroy(streams[i]));
    }
    free(streams);
    // destroy events
    CHECK(cudaEventDestroy(start));
    CHECK(cudaEventDestroy(stop));
    // reset device
    CHECK(cudaDeviceReset());
    return 0;
}
|
01be6fa286b7fbaf6e7041b27152a59c002b2258.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include "cuda_utils.h"
#include "logging.h"
#include "common.hpp"
#include "utils.h"
#include "calibrator.h"
#include "yolov5_lib.h"
using namespace cv;
#define USE_FP16 // comment out this if want to use FP32
#define DEVICE 0 // GPU id
#define NMS_THRESH 0.4
#define CONF_THRESH 0.5
#define BATCH_SIZE 1
// stuff we know about the network and the input/output blobs
static const int INPUT_H = Yolo::INPUT_H;
static const int INPUT_W = Yolo::INPUT_W;
static const int CLASS_NUM = Yolo::CLASS_NUM;
static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1; // we assume the yololayer outputs no more than MAX_OUTPUT_BBOX_COUNT boxes that conf >= 0.1
const char* INPUT_BLOB_NAME = "data";
const char* OUTPUT_BLOB_NAME = "prob";
static Logger gLogger;
// All per-engine state for one YOLOv5 TensorRT instance; created by
// yolov5_trt_create() and released by yolov5_trt_destroy().
typedef struct{
    float *data;    // host-side input staging buffer (planar CHW floats)
    float *prob;    // host-side raw network output buffer
    IRuntime *runtime;               // TensorRT runtime owning the engine
    ICudaEngine *engine;             // deserialized inference engine
    IExecutionContext *exe_context;  // context used for enqueue()
    void* buffers[2];       // device buffers, indexed by inputIndex/outputIndex
    hipStream_t cuda_stream;  // stream used for async copies + inference
    int inputIndex;         // binding index of INPUT_BLOB_NAME
    int outputIndex;        // binding index of OUTPUT_BLOB_NAME
    char result_json_str[16384];  // result string returned by yolov5_trt_detect()
}Yolov5TRTContext;
// Copies `input` to the device, enqueues one inference batch on `stream`,
// copies the result back, and blocks until the batch has finished, so
// `output` is valid on return.
static void doInference(IExecutionContext& context, hipStream_t& stream, void **buffers, float* input, float* output, int batchSize) {
    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CUDA_CHECK(hipMemcpyAsync(buffers[0], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), hipMemcpyHostToDevice, stream));
    context.enqueue(batchSize, buffers, stream, nullptr);
    CUDA_CHECK(hipMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), hipMemcpyDeviceToHost, stream));
    // Fix: check the synchronize result like the surrounding runtime calls.
    CUDA_CHECK(hipStreamSynchronize(stream));
}
// Loads a serialized TensorRT engine from `engine_name` and builds a fully
// initialized inference context (host staging buffers, device buffers, and a
// stream). Returns an opaque Yolov5TRTContext* handle, or NULL if the engine
// file cannot be read. Release the handle with yolov5_trt_destroy().
void * yolov5_trt_create(const char * engine_name)
{
    size_t size = 0;
    char *trtModelStream = NULL;
    Yolov5TRTContext * trt_ctx = NULL;
    trt_ctx = new Yolov5TRTContext();
    std::ifstream file(engine_name, std::ios::binary);
    printf("yolov5_trt_create ... \n");
    if (file.good()) {
        // Read the whole serialized engine into memory.
        file.seekg(0, file.end);
        size = file.tellg();
        file.seekg(0, file.beg);
        trtModelStream = new char[size];
        assert(trtModelStream);
        file.read(trtModelStream, size);
        file.close();
    } else {
        // Fix: the original leaked `trt_ctx` on this early-exit path.
        delete trt_ctx;
        return NULL;
    }
    trt_ctx->data = new float[BATCH_SIZE * 3 * INPUT_H * INPUT_W];
    trt_ctx->prob = new float[BATCH_SIZE * OUTPUT_SIZE];
    trt_ctx->runtime = createInferRuntime(gLogger);
    assert(trt_ctx->runtime != nullptr);
    printf("yolov5_trt_create cuda engine... \n");
    trt_ctx->engine = trt_ctx->runtime->deserializeCudaEngine(trtModelStream, size);
    assert(trt_ctx->engine != nullptr);
    trt_ctx->exe_context = trt_ctx->engine->createExecutionContext();
    delete[] trtModelStream;
    assert(trt_ctx->engine->getNbBindings() == 2);
    // In order to bind the buffers, we need to know the names of the input and
    // output tensors. Indices are guaranteed to be < IEngine::getNbBindings().
    trt_ctx->inputIndex = trt_ctx->engine->getBindingIndex(INPUT_BLOB_NAME);
    trt_ctx->outputIndex = trt_ctx->engine->getBindingIndex(OUTPUT_BLOB_NAME);
    assert(trt_ctx->inputIndex == 0);
    assert(trt_ctx->outputIndex == 1);
    // Create GPU buffers on device
    printf("yolov5_trt_create buffer ... \n");
    CUDA_CHECK(hipMalloc(&trt_ctx->buffers[trt_ctx->inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof(float)));
    CUDA_CHECK(hipMalloc(&trt_ctx->buffers[trt_ctx->outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof(float)));
    // Create stream
    printf("yolov5_trt_create stream ... \n");
    CUDA_CHECK(hipStreamCreate(&trt_ctx->cuda_stream));
    printf("yolov5_trt_create done ... \n");
    return (void *)trt_ctx;
}
// Runs one detection pass over `img` and returns a pointer to a result string
// owned by the context (overwritten by the next call on the same handle; not
// safe for concurrent calls). NOTE(review): despite the field name the
// payload is not strict JSON -- each object is encoded as
// "(class,x1,y1,x2,y2)". An empty input image yields an empty string.
const char * yolov5_trt_detect(void *h, cv::Mat &img, float threshold)
{
    Yolov5TRTContext *trt_ctx;
    int i;
    int delay_preprocess;
    int delay_infer;
    trt_ctx = (Yolov5TRTContext *)h;
    trt_ctx->result_json_str[0] = 0;
    if (img.empty()) return trt_ctx->result_json_str;
    auto start0 = std::chrono::system_clock::now();
    //printf("yolov5_trt_detect start preprocess img \n");
    cv::Mat pr_img = preprocess_img(img, INPUT_H, INPUT_W);
    //printf("yolov5_trt_detect start convert img to float\n");
    // letterbox BGR to RGB
    // Convert interleaved uchar BGR to planar CHW float RGB in [0, 1].
    i = 0;
    for (int row = 0; row < INPUT_H; ++row) {
        uchar* uc_pixel = pr_img.data + row * pr_img.step;
        for (int col = 0; col < INPUT_W; ++col) {
            trt_ctx->data[i] = (float)uc_pixel[2] / 255.0;
            trt_ctx->data[i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
            trt_ctx->data[i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
            uc_pixel += 3;
            ++i;
        }
    }
    auto end0 = std::chrono::system_clock::now();
    delay_preprocess = std::chrono::duration_cast<std::chrono::milliseconds>(end0 - start0).count();
    // Run inference
    //printf("yolov5_trt_detect start do inference\n");
    auto start = std::chrono::system_clock::now();
    doInference(*trt_ctx->exe_context, trt_ctx->cuda_stream, trt_ctx->buffers, trt_ctx->data, trt_ctx->prob, BATCH_SIZE);
    auto end = std::chrono::system_clock::now();
    delay_infer = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
    std::cout <<"delay_proress:" << delay_preprocess << "ms, " << "delay_infer:" << delay_infer << "ms" << std::endl;
    //printf("yolov5_trt_detect start do process infer result \n");
    int fcount = 1;
    int str_len;
    // Non-maximum suppression over the raw output; `threshold` is the
    // confidence cutoff, NMS_THRESH the IoU cutoff.
    std::vector<std::vector<Yolo::Detection>> batch_res(1);
    auto& res = batch_res[0];
    nms(res, &trt_ctx->prob[0], threshold, NMS_THRESH);
    sprintf(trt_ctx->result_json_str,
            "{\"delay_preprocess\": %d,"
            "\"delay_infer\": %d,"
            "\"num_det\":%d, \"objects\":[", delay_preprocess, delay_infer, (int) res.size());
    str_len = strlen(trt_ctx->result_json_str);
    i = 0;
    // Append one "(class,x1,y1,x2,y2)" tuple per detection, mapped back to
    // original image coordinates via get_rect().
    for(i = 0 ; i < res.size(); i++){
        int x1, y1, x2, y2;
        int class_id;
        cv::Rect r = get_rect(img, res[i].bbox);
        x1 = r.x;
        y1 = r.y;
        x2 = r.x + r.width;
        y2 = r.y + r.height;
        class_id = (int)res[i].class_id;
        if (0 == i){
            sprintf(trt_ctx->result_json_str + str_len, "(%d,%d,%d,%d,%d)", class_id, x1, y1, x2, y2);
        }else {
            sprintf(trt_ctx->result_json_str + str_len, ",(%d,%d,%d,%d,%d)", class_id, x1, y1, x2, y2);
        }
        str_len = strlen(trt_ctx->result_json_str);
        // Stop well before the 16384-byte buffer to leave room for the tail.
        if (str_len >= 16300)
            break;
    }
    sprintf(trt_ctx->result_json_str + str_len, "]}");
    return trt_ctx->result_json_str;
}
// Releases everything allocated by yolov5_trt_create(): the stream, device
// buffers, TensorRT objects and host staging arrays. The handle must not be
// used after this call.
void yolov5_trt_destroy(void *h)
{
    Yolov5TRTContext *trt_ctx;
    trt_ctx = (Yolov5TRTContext *)h;
    if (trt_ctx == NULL) return;  // tolerate a NULL handle
    // Release stream and buffers
    hipStreamDestroy(trt_ctx->cuda_stream);
    CUDA_CHECK(hipFree(trt_ctx->buffers[trt_ctx->inputIndex]));
    CUDA_CHECK(hipFree(trt_ctx->buffers[trt_ctx->outputIndex]));
    // Destroy the engine
    trt_ctx->exe_context->destroy();
    trt_ctx->engine->destroy();
    trt_ctx->runtime->destroy();
    // Fix: data/prob were allocated with new[], so they must be released with
    // delete[] (plain delete on a new[] pointer is undefined behavior).
    delete[] trt_ctx->data;
    delete[] trt_ctx->prob;
    delete trt_ctx;
} | 01be6fa286b7fbaf6e7041b27152a59c002b2258.cu | #include <iostream>
#include <chrono>
#include "cuda_utils.h"
#include "logging.h"
#include "common.hpp"
#include "utils.h"
#include "calibrator.h"
#include "yolov5_lib.h"
using namespace cv;
#define USE_FP16 // comment out this if want to use FP32
#define DEVICE 0 // GPU id
#define NMS_THRESH 0.4
#define CONF_THRESH 0.5
#define BATCH_SIZE 1
// stuff we know about the network and the input/output blobs
static const int INPUT_H = Yolo::INPUT_H;
static const int INPUT_W = Yolo::INPUT_W;
static const int CLASS_NUM = Yolo::CLASS_NUM;
static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1; // we assume the yololayer outputs no more than MAX_OUTPUT_BBOX_COUNT boxes that conf >= 0.1
const char* INPUT_BLOB_NAME = "data";
const char* OUTPUT_BLOB_NAME = "prob";
static Logger gLogger;
typedef struct{
float *data;
float *prob;
IRuntime *runtime;
ICudaEngine *engine;
IExecutionContext *exe_context;
void* buffers[2];
cudaStream_t cuda_stream;
int inputIndex;
int outputIndex;
char result_json_str[16384];
}Yolov5TRTContext;
static void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* input, float* output, int batchSize) {
// DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
context.enqueue(batchSize, buffers, stream, nullptr);
CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
cudaStreamSynchronize(stream);
}
void * yolov5_trt_create(const char * engine_name)
{
size_t size = 0;
char *trtModelStream = NULL;
Yolov5TRTContext * trt_ctx = NULL;
trt_ctx = new Yolov5TRTContext();
std::ifstream file(engine_name, std::ios::binary);
printf("yolov5_trt_create ... \n");
if (file.good()) {
file.seekg(0, file.end);
size = file.tellg();
file.seekg(0, file.beg);
trtModelStream = new char[size];
assert(trtModelStream);
file.read(trtModelStream, size);
file.close();
}else
return NULL;
trt_ctx->data = new float[BATCH_SIZE * 3 * INPUT_H * INPUT_W];
trt_ctx->prob = new float[BATCH_SIZE * OUTPUT_SIZE];
trt_ctx->runtime = createInferRuntime(gLogger);
assert(trt_ctx->runtime != nullptr);
printf("yolov5_trt_create cuda engine... \n");
trt_ctx->engine = trt_ctx->runtime->deserializeCudaEngine(trtModelStream, size);
assert(trt_ctx->engine != nullptr);
trt_ctx->exe_context = trt_ctx->engine->createExecutionContext();
delete[] trtModelStream;
assert(trt_ctx->engine->getNbBindings() == 2);
// In order to bind the buffers, we need to know the names of the input and output tensors.
// Note that indices are guaranteed to be less than IEngine::getNbBindings()
trt_ctx->inputIndex = trt_ctx->engine->getBindingIndex(INPUT_BLOB_NAME);
trt_ctx->outputIndex = trt_ctx->engine->getBindingIndex(OUTPUT_BLOB_NAME);
assert(trt_ctx->inputIndex == 0);
assert(trt_ctx->outputIndex == 1);
// Create GPU buffers on device
printf("yolov5_trt_create buffer ... \n");
CUDA_CHECK(cudaMalloc(&trt_ctx->buffers[trt_ctx->inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof(float)));
CUDA_CHECK(cudaMalloc(&trt_ctx->buffers[trt_ctx->outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof(float)));
// Create stream
printf("yolov5_trt_create stream ... \n");
CUDA_CHECK(cudaStreamCreate(&trt_ctx->cuda_stream));
printf("yolov5_trt_create done ... \n");
return (void *)trt_ctx;
}
const char * yolov5_trt_detect(void *h, cv::Mat &img, float threshold)
{
Yolov5TRTContext *trt_ctx;
int i;
int delay_preprocess;
int delay_infer;
trt_ctx = (Yolov5TRTContext *)h;
trt_ctx->result_json_str[0] = 0;
if (img.empty()) return trt_ctx->result_json_str;
auto start0 = std::chrono::system_clock::now();
//printf("yolov5_trt_detect start preprocess img \n");
cv::Mat pr_img = preprocess_img(img, INPUT_H, INPUT_W);
//printf("yolov5_trt_detect start convert img to float\n");
// letterbox BGR to RGB
i = 0;
for (int row = 0; row < INPUT_H; ++row) {
uchar* uc_pixel = pr_img.data + row * pr_img.step;
for (int col = 0; col < INPUT_W; ++col) {
trt_ctx->data[i] = (float)uc_pixel[2] / 255.0;
trt_ctx->data[i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
trt_ctx->data[i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
uc_pixel += 3;
++i;
}
}
auto end0 = std::chrono::system_clock::now();
delay_preprocess = std::chrono::duration_cast<std::chrono::milliseconds>(end0 - start0).count();
// Run inference
//printf("yolov5_trt_detect start do inference\n");
auto start = std::chrono::system_clock::now();
doInference(*trt_ctx->exe_context, trt_ctx->cuda_stream, trt_ctx->buffers, trt_ctx->data, trt_ctx->prob, BATCH_SIZE);
auto end = std::chrono::system_clock::now();
delay_infer = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout <<"delay_proress:" << delay_preprocess << "ms, " << "delay_infer:" << delay_infer << "ms" << std::endl;
//printf("yolov5_trt_detect start do process infer result \n");
int fcount = 1;
int str_len;
std::vector<std::vector<Yolo::Detection>> batch_res(1);
auto& res = batch_res[0];
nms(res, &trt_ctx->prob[0], threshold, NMS_THRESH);
sprintf(trt_ctx->result_json_str,
"{\"delay_preprocess\": %d,"
"\"delay_infer\": %d,"
"\"num_det\":%d, \"objects\":[", delay_preprocess, delay_infer, (int) res.size());
str_len = strlen(trt_ctx->result_json_str);
i = 0;
for(i = 0 ; i < res.size(); i++){
int x1, y1, x2, y2;
int class_id;
cv::Rect r = get_rect(img, res[i].bbox);
x1 = r.x;
y1 = r.y;
x2 = r.x + r.width;
y2 = r.y + r.height;
class_id = (int)res[i].class_id;
if (0 == i){
sprintf(trt_ctx->result_json_str + str_len, "(%d,%d,%d,%d,%d)", class_id, x1, y1, x2, y2);
}else {
sprintf(trt_ctx->result_json_str + str_len, ",(%d,%d,%d,%d,%d)", class_id, x1, y1, x2, y2);
}
str_len = strlen(trt_ctx->result_json_str);
if (str_len >= 16300)
break;
}
sprintf(trt_ctx->result_json_str + str_len, "]}");
return trt_ctx->result_json_str;
}
void yolov5_trt_destroy(void *h)
{
Yolov5TRTContext *trt_ctx;
trt_ctx = (Yolov5TRTContext *)h;
// Release stream and buffers
cudaStreamDestroy(trt_ctx->cuda_stream);
CUDA_CHECK(cudaFree(trt_ctx->buffers[trt_ctx->inputIndex]));
CUDA_CHECK(cudaFree(trt_ctx->buffers[trt_ctx->outputIndex]));
// Destroy the engine
trt_ctx->exe_context->destroy();
trt_ctx->engine->destroy();
trt_ctx->runtime->destroy();
delete trt_ctx->data;
delete trt_ctx->prob;
delete trt_ctx;
} |
9351381577e5f3ccd8e7bb9f147e7b4961692dfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "sha256.cu"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/io.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", msg, \
hipGetErrorString(__err), __FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
int num_passwords = 19922147;
int password_length = 30;
char* host_passwords;
int* host_password_lengths;
int* host_start_indexes;
char* device_passwords;
int* device_password_lengths;
int* device_start_indexes;
unsigned char* device_targets;
__device__ int device_num_targets;
__device__ int device_num_passwords;
__device__ size_t device_password_file_size;
__device__ void calculate_hash(unsigned char* pass_cleartext,
unsigned char* hash, int length) {
SHA256_CTX ctx;
sha256_init(&ctx);
sha256_update(&ctx, pass_cleartext, length);
sha256_final(&ctx, hash);
}
__global__ void compare_hashes(char* hashes, int* lengths, int* start_indexes,
unsigned char* targets) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int numThreads = blockDim.x * gridDim.x;
int num_to_calculate = device_num_passwords / numThreads;
num_to_calculate += 1;
if (id == 1) {
printf("Device num: %d increment: %d \n", device_num_passwords,
numThreads);
}
bool ran = false;
int test = 0;
int i;
for (i = id; i < device_num_passwords; i += numThreads) {
int length = lengths[i];
int start = start_indexes[i];
if ((start + length) < device_password_file_size &&
i < device_num_passwords) {
if (length > 15306) {
printf("Thread id: %d Length at %d is over much %d \n", id, i,
length);
length = 5;
}
unsigned char pass_cleartext[15306];
memcpy(pass_cleartext, &hashes[start], length);
pass_cleartext[length] = '\0';
unsigned char hash[30];
for (int e = 0; e < 25; e++){
unsigned char pass_cleartext_with_salt[15306] = "";
if (e<10){
pass_cleartext_with_salt[0] = '0';
pass_cleartext_with_salt[1] = (48+e);
} else {
int first_int = (e/10);
pass_cleartext_with_salt[0] = (48+first_int);
pass_cleartext_with_salt[1] = (48 + (e-(first_int*10)));
}
for (int r = 0; r < length; r++){
pass_cleartext_with_salt[r+2] = pass_cleartext[r];
}
calculate_hash(pass_cleartext_with_salt, hash, length+2);
for (int k = 0; k < device_num_targets; k++) {
bool found = true;
for (int j = 0; j < 32; j++) {
if (hash[j] != targets[k * 32 + j]) {
found = false;
break;
}
}
if (found) {
printf("Thread %d found it! The password is %s \nsalt is: %s \n", id,
pass_cleartext, pass_cleartext_with_salt);
}
}
}
} else {
printf("Thread %d went over!: %d\n", id, i);
}
}
}
int main() {
FILE* fp = fopen("targetsShort.txt", "r");
const char* file_name =
"/datadrive/cracklist/top100/passwords_one_line.txt";
FILE* length_file =
fopen("/datadrive/cracklist/top100/password_lengths.txt", "r");
// Calculate file size
int fd = open(file_name, O_RDONLY);
size_t password_file_size;
password_file_size = lseek(fd, 0, SEEK_END);
printf("%zu \n", password_file_size);
host_password_lengths = (int*)malloc(num_passwords * sizeof(int));
host_start_indexes = (int*)malloc(num_passwords * sizeof(int));
host_passwords =
(char*)mmap(0, password_file_size, PROT_READ, MAP_PRIVATE, fd, 0);
// Copy all the lengths into host_password_lengths
int i = 0;
int counter = 0;
int start = 0;
fscanf(length_file, "%d", &i);
host_password_lengths[counter] = i;
host_start_indexes[counter] = 0;
start += i;
counter++;
while (!feof(length_file) && counter < num_passwords) {
fscanf(length_file, "%d", &i);
host_password_lengths[counter] = i;
host_start_indexes[counter] = start;
start += i;
counter++;
}
fclose(length_file);
int host_num_targets = 0;
int ch = 0;
while (!feof(fp)) {
ch = fgetc(fp);
if (ch == '\n') {
host_num_targets++;
}
}
unsigned char* host_targets =
(unsigned char*)malloc(32 * host_num_targets * sizeof(unsigned char));
char* pos;
char str[65];
fseek(fp, 0, SEEK_SET);
for (int i = 0; i < host_num_targets; i++) {
if (fgets(str, 100, fp) != NULL) {
printf("New hash: %s \n", str);
pos = str;
int count;
for (count = 0; count < 32; count++) {
sscanf(pos, "%2hhx", &host_targets[i * 32 + count]);
pos += 2;
}
}
}
hipMemcpyToSymbol(device_password_file_size, &password_file_size,
sizeof(size_t));
cudaCheckErrors("After hipMalloc -3");
hipMemcpyToSymbol(device_num_targets, &host_num_targets, sizeof(int));
cudaCheckErrors("After hipMalloc -2");
hipMemcpyToSymbol(device_num_passwords, &num_passwords, sizeof(int));
cudaCheckErrors("After hipMalloc -1.5");
hipMalloc((void**)&device_targets,
host_num_targets * 32 * sizeof(unsigned char));
cudaCheckErrors("After hipMalloc -1");
hipMemcpy(device_targets, host_targets,
32 * host_num_targets * sizeof(unsigned char),
hipMemcpyHostToDevice);
cudaCheckErrors("After hipMemcpy -1");
hipMalloc((void**)&device_password_lengths, num_passwords * sizeof(int));
cudaCheckErrors("After hipMalloc 0");
hipMemcpy(device_password_lengths, host_password_lengths,
num_passwords * sizeof(int), hipMemcpyHostToDevice);
cudaCheckErrors("After hipMemcpy 0");
hipMalloc((void**)&device_start_indexes, num_passwords * sizeof(int));
cudaCheckErrors("After hipMalloc 0.5");
hipMemcpy(device_start_indexes, host_start_indexes,
num_passwords * sizeof(int), hipMemcpyHostToDevice);
cudaCheckErrors("After hipMemcpy 0.5");
hipMalloc((void**)&device_passwords, password_file_size * sizeof(char));
cudaCheckErrors("After hipMalloc 1");
hipMemcpy(device_passwords, host_passwords,
password_file_size * sizeof(char), hipMemcpyHostToDevice);
cudaCheckErrors("After hipMemcpy 1");
long numGrid = num_passwords / 1024;
numGrid += 1;
dim3 dimGrid(32);
dim3 dimBlock(1024);
double n_threads = dimGrid.x * dimBlock.x;
printf("%d threads in each grid. Each thread calculating %f hashes \n",
numGrid, num_passwords / n_threads);
/* Timing */
hipEvent_t start_time, stop;
float elapsedTime;
hipEventCreate(&start_time);
hipEventRecord(start_time, 0);
hipLaunchKernelGGL(( compare_hashes), dim3(dimGrid), dim3(dimBlock), 0, 0, device_passwords,
device_password_lengths,
device_start_indexes, device_targets);
cudaCheckErrors("After kernel run ");
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start_time, stop);
printf("Elapsed time: %f\n", elapsedTime);
hipFree(device_passwords);
cudaCheckErrors("After free 1 ");
hipFree(device_start_indexes);
cudaCheckErrors("After free 2 ");
hipFree(device_password_lengths);
cudaCheckErrors("After free 3 ");
free(host_start_indexes);
free(host_password_lengths);
return EXIT_SUCCESS;
}
| 9351381577e5f3ccd8e7bb9f147e7b4961692dfa.cu | #include "sha256.cu"
#include <cuda.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/io.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", msg, \
cudaGetErrorString(__err), __FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
int num_passwords = 19922147;
int password_length = 30;
char* host_passwords;
int* host_password_lengths;
int* host_start_indexes;
char* device_passwords;
int* device_password_lengths;
int* device_start_indexes;
unsigned char* device_targets;
__device__ int device_num_targets;
__device__ int device_num_passwords;
__device__ size_t device_password_file_size;
__device__ void calculate_hash(unsigned char* pass_cleartext,
unsigned char* hash, int length) {
SHA256_CTX ctx;
sha256_init(&ctx);
sha256_update(&ctx, pass_cleartext, length);
sha256_final(&ctx, hash);
}
__global__ void compare_hashes(char* hashes, int* lengths, int* start_indexes,
unsigned char* targets) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int numThreads = blockDim.x * gridDim.x;
int num_to_calculate = device_num_passwords / numThreads;
num_to_calculate += 1;
if (id == 1) {
printf("Device num: %d increment: %d \n", device_num_passwords,
numThreads);
}
bool ran = false;
int test = 0;
int i;
for (i = id; i < device_num_passwords; i += numThreads) {
int length = lengths[i];
int start = start_indexes[i];
if ((start + length) < device_password_file_size &&
i < device_num_passwords) {
if (length > 15306) {
printf("Thread id: %d Length at %d is over much %d \n", id, i,
length);
length = 5;
}
unsigned char pass_cleartext[15306];
memcpy(pass_cleartext, &hashes[start], length);
pass_cleartext[length] = '\0';
unsigned char hash[30];
for (int e = 0; e < 25; e++){
unsigned char pass_cleartext_with_salt[15306] = "";
if (e<10){
pass_cleartext_with_salt[0] = '0';
pass_cleartext_with_salt[1] = (48+e);
} else {
int first_int = (e/10);
pass_cleartext_with_salt[0] = (48+first_int);
pass_cleartext_with_salt[1] = (48 + (e-(first_int*10)));
}
for (int r = 0; r < length; r++){
pass_cleartext_with_salt[r+2] = pass_cleartext[r];
}
calculate_hash(pass_cleartext_with_salt, hash, length+2);
for (int k = 0; k < device_num_targets; k++) {
bool found = true;
for (int j = 0; j < 32; j++) {
if (hash[j] != targets[k * 32 + j]) {
found = false;
break;
}
}
if (found) {
printf("Thread %d found it! The password is %s \nsalt is: %s \n", id,
pass_cleartext, pass_cleartext_with_salt);
}
}
}
} else {
printf("Thread %d went over!: %d\n", id, i);
}
}
}
int main() {
FILE* fp = fopen("targetsShort.txt", "r");
const char* file_name =
"/datadrive/cracklist/top100/passwords_one_line.txt";
FILE* length_file =
fopen("/datadrive/cracklist/top100/password_lengths.txt", "r");
// Calculate file size
int fd = open(file_name, O_RDONLY);
size_t password_file_size;
password_file_size = lseek(fd, 0, SEEK_END);
printf("%zu \n", password_file_size);
host_password_lengths = (int*)malloc(num_passwords * sizeof(int));
host_start_indexes = (int*)malloc(num_passwords * sizeof(int));
host_passwords =
(char*)mmap(0, password_file_size, PROT_READ, MAP_PRIVATE, fd, 0);
// Copy all the lengths into host_password_lengths
int i = 0;
int counter = 0;
int start = 0;
fscanf(length_file, "%d", &i);
host_password_lengths[counter] = i;
host_start_indexes[counter] = 0;
start += i;
counter++;
while (!feof(length_file) && counter < num_passwords) {
fscanf(length_file, "%d", &i);
host_password_lengths[counter] = i;
host_start_indexes[counter] = start;
start += i;
counter++;
}
fclose(length_file);
int host_num_targets = 0;
int ch = 0;
while (!feof(fp)) {
ch = fgetc(fp);
if (ch == '\n') {
host_num_targets++;
}
}
unsigned char* host_targets =
(unsigned char*)malloc(32 * host_num_targets * sizeof(unsigned char));
char* pos;
char str[65];
fseek(fp, 0, SEEK_SET);
for (int i = 0; i < host_num_targets; i++) {
if (fgets(str, 100, fp) != NULL) {
printf("New hash: %s \n", str);
pos = str;
int count;
for (count = 0; count < 32; count++) {
sscanf(pos, "%2hhx", &host_targets[i * 32 + count]);
pos += 2;
}
}
}
cudaMemcpyToSymbol(device_password_file_size, &password_file_size,
sizeof(size_t));
cudaCheckErrors("After cudaMalloc -3");
cudaMemcpyToSymbol(device_num_targets, &host_num_targets, sizeof(int));
cudaCheckErrors("After cudaMalloc -2");
cudaMemcpyToSymbol(device_num_passwords, &num_passwords, sizeof(int));
cudaCheckErrors("After cudaMalloc -1.5");
cudaMalloc((void**)&device_targets,
host_num_targets * 32 * sizeof(unsigned char));
cudaCheckErrors("After cudaMalloc -1");
cudaMemcpy(device_targets, host_targets,
32 * host_num_targets * sizeof(unsigned char),
cudaMemcpyHostToDevice);
cudaCheckErrors("After cudaMemcpy -1");
cudaMalloc((void**)&device_password_lengths, num_passwords * sizeof(int));
cudaCheckErrors("After cudaMalloc 0");
cudaMemcpy(device_password_lengths, host_password_lengths,
num_passwords * sizeof(int), cudaMemcpyHostToDevice);
cudaCheckErrors("After cudaMemcpy 0");
cudaMalloc((void**)&device_start_indexes, num_passwords * sizeof(int));
cudaCheckErrors("After cudaMalloc 0.5");
cudaMemcpy(device_start_indexes, host_start_indexes,
num_passwords * sizeof(int), cudaMemcpyHostToDevice);
cudaCheckErrors("After cudaMemcpy 0.5");
cudaMalloc((void**)&device_passwords, password_file_size * sizeof(char));
cudaCheckErrors("After cudaMalloc 1");
cudaMemcpy(device_passwords, host_passwords,
password_file_size * sizeof(char), cudaMemcpyHostToDevice);
cudaCheckErrors("After cudaMemcpy 1");
long numGrid = num_passwords / 1024;
numGrid += 1;
dim3 dimGrid(32);
dim3 dimBlock(1024);
double n_threads = dimGrid.x * dimBlock.x;
printf("%d threads in each grid. Each thread calculating %f hashes \n",
numGrid, num_passwords / n_threads);
/* Timing */
cudaEvent_t start_time, stop;
float elapsedTime;
cudaEventCreate(&start_time);
cudaEventRecord(start_time, 0);
compare_hashes<<<dimGrid, dimBlock>>>(device_passwords,
device_password_lengths,
device_start_indexes, device_targets);
cudaCheckErrors("After kernel run ");
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start_time, stop);
printf("Elapsed time: %f\n", elapsedTime);
cudaFree(device_passwords);
cudaCheckErrors("After free 1 ");
cudaFree(device_start_indexes);
cudaCheckErrors("After free 2 ");
cudaFree(device_password_lengths);
cudaCheckErrors("After free 3 ");
free(host_start_indexes);
free(host_password_lengths);
return EXIT_SUCCESS;
}
|
27d40a45b60123b7779f6588dae7f9c574b7600b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
#include "opt_2dhisto.h"
__global__ void opt_2dhistoKernel(uint32_t *input_device, int input_size, uint32_t *device_bins)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
if (device_bins[input_device[i]] <255)
{
atomicAdd(&(device_bins[input_device[i]]), 1);
}
i += stride;
__syncthreads();
}
void opt_2dhisto(uint32_t *input, int height, int width, uint32_t *bins)
{
// size of the input image
int size = height * width;
dim3 dimBlock(BLOCK_SIZE, 1,1);
dim3 dimGrid( ((height * width + dimBlock.x - 1) / dimBlock.x) ,1,1);
hipMemset(bins, 0, sizeof(uint32_t) * HISTO_WIDTH);
int num_kernel_calls = (dimGrid.x)/GRID_SIZE_MAX;
for(int i=0; i<num_kernel_calls; i++)
{
hipLaunchKernelGGL(( opt_2dhistoKernel), dim3(GRID_SIZE_MAX), dim3(dimBlock), 0, 0, input, GRID_SIZE_MAX * BLOCK_SIZE, bins);
hipDeviceSynchronize();
// pass the next set of inputs (pointer, so just add the address)
input += GRID_SIZE_MAX * BLOCK_SIZE;
// keep track of elements left to compute
// to be used when only one kernel call to be done
size -= GRID_SIZE_MAX * BLOCK_SIZE;
dimGrid.x -= GRID_SIZE_MAX;
}
hipLaunchKernelGGL(( opt_2dhistoKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input, size, bins);
hipDeviceSynchronize();
}
uint8_t *AllocateDeviceMemory(int histo_width, int histo_height, int size_of_element)
{
uint8_t *t_memory;
hipMalloc((void **)&t_memory, histo_width * histo_height * size_of_element);
return t_memory;
}
void* AllocateDevice(size_t size){
void* ret;
hipMalloc(&ret, size);
return ret;
}
void CopyToDevice(uint32_t *device, uint32_t *host, uint32_t input_height, uint32_t input_width, int size_of_element)
{
const size_t x_size_padded = (input_width + 128) & 0xFFFFFF80;
size_t row_size = input_width * size_of_element;
for(int i=0; i<input_height; i++)
{
hipMemcpy(device, host, row_size, hipMemcpyHostToDevice);
device += input_width;
host += (x_size_padded);
}
}
void CopyToHost(uint32_t *host, uint32_t *device, int size)
{
hipMemcpy(host,device, size, hipMemcpyDeviceToHost);
for(int i = 0; i < HISTO_WIDTH * HISTO_HEIGHT; i++)
host[i] = host[i]>255?255:host[i];
}
void cuda_memset(uint32_t *ptr, uint32_t value, uint32_t byte_count)
{
hipMemset((void *)ptr, value, (size_t)byte_count);
}
void FreeDevice(void* D_device){
hipFree(D_device);
}
/* Include below the implementation of any other functions you need */
| 27d40a45b60123b7779f6588dae7f9c574b7600b.cu | #include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
#include "opt_2dhisto.h"
__global__ void opt_2dhistoKernel(uint32_t *input_device, int input_size, uint32_t *device_bins)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
if (device_bins[input_device[i]] <255)
{
atomicAdd(&(device_bins[input_device[i]]), 1);
}
i += stride;
__syncthreads();
}
void opt_2dhisto(uint32_t *input, int height, int width, uint32_t *bins)
{
// size of the input image
int size = height * width;
dim3 dimBlock(BLOCK_SIZE, 1,1);
dim3 dimGrid( ((height * width + dimBlock.x - 1) / dimBlock.x) ,1,1);
cudaMemset(bins, 0, sizeof(uint32_t) * HISTO_WIDTH);
int num_kernel_calls = (dimGrid.x)/GRID_SIZE_MAX;
for(int i=0; i<num_kernel_calls; i++)
{
opt_2dhistoKernel<<<GRID_SIZE_MAX, dimBlock>>>(input, GRID_SIZE_MAX * BLOCK_SIZE, bins);
cudaDeviceSynchronize();
// pass the next set of inputs (pointer, so just add the address)
input += GRID_SIZE_MAX * BLOCK_SIZE;
// keep track of elements left to compute
// to be used when only one kernel call to be done
size -= GRID_SIZE_MAX * BLOCK_SIZE;
dimGrid.x -= GRID_SIZE_MAX;
}
opt_2dhistoKernel<<<dimGrid, dimBlock>>>(input, size, bins);
cudaDeviceSynchronize();
}
uint8_t *AllocateDeviceMemory(int histo_width, int histo_height, int size_of_element)
{
uint8_t *t_memory;
cudaMalloc((void **)&t_memory, histo_width * histo_height * size_of_element);
return t_memory;
}
void* AllocateDevice(size_t size){
void* ret;
cudaMalloc(&ret, size);
return ret;
}
void CopyToDevice(uint32_t *device, uint32_t *host, uint32_t input_height, uint32_t input_width, int size_of_element)
{
const size_t x_size_padded = (input_width + 128) & 0xFFFFFF80;
size_t row_size = input_width * size_of_element;
for(int i=0; i<input_height; i++)
{
cudaMemcpy(device, host, row_size, cudaMemcpyHostToDevice);
device += input_width;
host += (x_size_padded);
}
}
void CopyToHost(uint32_t *host, uint32_t *device, int size)
{
cudaMemcpy(host,device, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < HISTO_WIDTH * HISTO_HEIGHT; i++)
host[i] = host[i]>255?255:host[i];
}
void cuda_memset(uint32_t *ptr, uint32_t value, uint32_t byte_count)
{
cudaMemset((void *)ptr, value, (size_t)byte_count);
}
void FreeDevice(void* D_device){
cudaFree(D_device);
}
/* Include below the implementation of any other functions you need */
|
3ae34f40082dd97eeb86ecf501917cc37400b0b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_calculate_sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *dev_array_sums = NULL;
hipMalloc(&dev_array_sums, XSIZE*YSIZE);
unsigned int array_size = 1;
double *dev_block_sums = NULL;
hipMalloc(&dev_block_sums, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernel_calculate_sum), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_array_sums,array_size,dev_block_sums);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_calculate_sum), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_array_sums,array_size,dev_block_sums);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_calculate_sum), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_array_sums,array_size,dev_block_sums);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3ae34f40082dd97eeb86ecf501917cc37400b0b3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_calculate_sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *dev_array_sums = NULL;
cudaMalloc(&dev_array_sums, XSIZE*YSIZE);
unsigned int array_size = 1;
double *dev_block_sums = NULL;
cudaMalloc(&dev_block_sums, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_calculate_sum<<<gridBlock,threadBlock>>>(dev_array_sums,array_size,dev_block_sums);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_calculate_sum<<<gridBlock,threadBlock>>>(dev_array_sums,array_size,dev_block_sums);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_calculate_sum<<<gridBlock,threadBlock>>>(dev_array_sums,array_size,dev_block_sums);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5781ee4fa6ef075187476725b281a7f66b68bc88.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2017 Richard Forster
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*/
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "StatusCUDA.h"
#include "CUDAGraph.h"
#include "cached_allocator.h"
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <fstream>
extern cached_allocator alloc;
__global__ void setHashKey(StatusCUDA status, int limit)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= limit)
return;
//Reuse hash_idx so we don't have to allocate more memory for the initializion
status.hash_idx[idx] = status.neighbourSource[status.startOfNeighbours[idx]];
}
__device__ float getDegreeForNodeAt(StatusCUDA &status, int i, int °Idx)
{
//***New degree computation***
float weight = 0;
for (int j = 0; j < status.nrOfNeighbours[i]; ++j)
{
int *temp = status.startOfNeighbours + i;
int temp2 = j;
float value = status.neighbourWeights[status.startOfNeighbours[i] + j];
if (value == 0)
value = 1;
if (status.neighbourSource[status.startOfNeighbours[i] + j] == status.neighbourTarget[status.startOfNeighbours[i] + j])
value *= 2;
weight += value;
}
//Will be in a hash table, the idx will come from there
degIdx = status.comSize[i];
return weight;
}
__device__ float getEdgeWeight(StatusCUDA &status, int node1, int node2, int &loopIdx)
{
//***Proposed new getEdgeWeight currently with erronous indexing***
float weight = 0;
for (int j = 0; j < status.nrOfNeighbours[node1]; ++j)
{
if (status.neighbourSource[status.startOfNeighbours[node1] + j] == status.neighbourTarget[status.startOfNeighbours[node2] + j])
weight = status.neighbourWeights[status.startOfNeighbours[node1] + j];
}
return weight;
}
__global__ void initCommsKernel(StatusCUDA status,/* CUDAGraph graph,*/ int limit)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= limit)
return;
status.degrees[i] = 0;
status.gdegrees[i] = 0;
status.loops[i] = 0;
status.internals[i] = 0;
__syncthreads();
//status.node[i] = graph.node[i];
status.hash_idx[i] = i;
status.nodesToCom[i] = i;
status.nodesToComPrev[i] = i;
status.nodesToComNext[i] = i;
}
// Computes each node's degree and self-loop weight. Isolated nodes
// (nrOfNeighbours == 0) are skipped and keep the zeros written by
// initCommsKernel.
__global__ void initValuesKernel(StatusCUDA status,/* CUDAGraph graph,*/ int limit)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= limit)
return;
// degIdx is an in/out argument: getDegreeForNodeAt overwrites it with
// status.comSize[i], and the degree is stored at that index below.
int degIdx = i;
float deg = 0;
if (status.nrOfNeighbours[i] == 0)
return;
deg = getDegreeForNodeAt(status, i, degIdx);
//if (deg < 0)
// throw("A node has a negative degree. Use positive weights.");
status.degrees[degIdx] = deg;
status.gdegrees[degIdx] = deg;
// loopIdx is passed by reference but getEdgeWeight never modifies it, so
// loops and internals are both written at index i here.
int loopIdx = i;
status.loops[loopIdx] = getEdgeWeight(status, i, i, loopIdx);
status.internals[i] = status.loops[i];
}
// Host-side initialization (HIP build): fills per-node community state,
// builds the CUDPP hash table mapping node ids to dense indices, then
// computes initial degrees and self-loop weights on the device.
void initCUDA(StatusCUDA &status, CUDAGraph &graph)
{
int threads = 512;
// Ceiling division so a partially filled last block still covers all nodes.
int blocks = graph.nodesSize / threads;
if (graph.nodesSize % threads != 0)
++blocks;
status.keys = graph.nodesSize;
clock_t t1 = clock();
initCommsKernel << <blocks, threads >> > (status, graph.nodesSize);
hipDeviceSynchronize();
clock_t t2 = clock();
//std::cout << "initCommsKernel " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
t1 = clock();
//hipMemcpy(status.node, graph.node, sizeof(int) * graph.nodesSize, hipMemcpyDeviceToDevice);
t2 = clock();
//std::cout << "memcpy " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
//int *temp = new int[status.keys];
t1 = clock();
// Insert node ids as keys with their current hash_idx (identity, set by
// initCommsKernel) as values.
cudppHashInsert(status.hash_table_handle, status.node,
status.hash_idx, status.keys);
hipDeviceSynchronize();
//std::cout << hipGetErrorName(hipGetLastError()) << std::endl;
t2 = clock();
cudppHashRetrieve(status.hash_table_handle, status.node,
status.hash_idx, status.keys);
hipDeviceSynchronize();
//std::cout << "hashinsert " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
t1 = clock();
// Overwrite hash_idx with each node's first-neighbour source id...
setHashKey << <blocks, threads >> > (status, graph.nodesSize);
hipDeviceSynchronize();
t2 = clock();
//hipMemcpy(temp, status.hash_idx, sizeof(int) * graph.nodesSize, hipMemcpyDeviceToHost);
//std::cout << "sethashkey " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
t1 = clock();
// ...then translate those ids into dense indices; result lands in comSize.
cudppHashRetrieve(status.hash_table_handle, status.hash_idx,
status.comSize, status.keys);
hipDeviceSynchronize();
t2 = clock();
//std::cout << "hash retrieve " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
t1 = clock();
initValuesKernel << <blocks, threads >> > (status, graph.nodesSize);
hipDeviceSynchronize();
t2 = clock();
static int go = 0;
//if (go == 0)
//{
// float *nr = new float[graph.nodesSize];
// hipMemcpy(nr, status.degrees, sizeof(float) * graph.nodesSize, hipMemcpyDeviceToHost);
// std::ofstream out("init_degrees.txt");
// for (int i = 0; i < graph.nodesSize; ++i)
// out << nr[i] << std::endl;
// out.close();
// ++go;
//}
//std::cout << "initValuesKernel " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
} | 5781ee4fa6ef075187476725b281a7f66b68bc88.cu | /*
* Copyright 2017 Richard Forster
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*/
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "StatusCUDA.h"
#include "CUDAGraph.h"
#include "cached_allocator.h"
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <fstream>
extern cached_allocator alloc;
// For each node idx < limit, stores the source node id of its first incident
// edge into status.hash_idx[idx].
// NOTE(review): assumes every node has at least one neighbour; for an
// isolated node, startOfNeighbours[idx] points into another node's range --
// confirm against how the adjacency arrays are built.
__global__ void setHashKey(StatusCUDA status, int limit)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= limit)
return;
//Reuse hash_idx so we don't have to allocate more memory for the initialization
status.hash_idx[idx] = status.neighbourSource[status.startOfNeighbours[idx]];
}
// Computes the weighted degree of node i by summing its incident edge weights.
// A stored weight of 0 marks an unweighted edge and counts as 1; self-loops
// (source == target) count twice, per the usual graph-degree convention.
// degIdx is an out-parameter: it is overwritten with status.comSize[i], which
// the caller then uses as the index at which to store this degree.
// Fix: the reference parameter was garbled as "°Idx" (a mis-decoded "&deg"
// HTML entity); restored to "&degIdx", the name the body assigns to.
// Unused debug locals (temp, temp2) removed.
__device__ float getDegreeForNodeAt(StatusCUDA &status, int i, int &degIdx)
{
  //***New degree computation***
  float weight = 0;
  // Hoist the neighbour-range start out of the loop.
  const int start = status.startOfNeighbours[i];
  for (int j = 0; j < status.nrOfNeighbours[i]; ++j)
  {
    float value = status.neighbourWeights[start + j];
    if (value == 0)
      value = 1;
    if (status.neighbourSource[start + j] == status.neighbourTarget[start + j])
      value *= 2;
    weight += value;
  }
  //Will be in a hash table, the idx will come from there
  degIdx = status.comSize[i];
  return weight;
}
// Returns the stored weight of an edge between node1 and node2, or 0 if no
// match is found. loopIdx is accepted by reference but never modified here.
// NOTE(review): the target side is read at startOfNeighbours[node2] + j while
// j iterates over node1's neighbour list, so for node1 != node2 the two
// sides index different adjacency ranges. The only visible call site passes
// node1 == node2 (self-loop lookup), where both coincide -- confirm before
// reusing for distinct nodes.
__device__ float getEdgeWeight(StatusCUDA &status, int node1, int node2, int &loopIdx)
{
//***Proposed new getEdgeWeight currently with erroneous indexing***
float weight = 0;
// Scans node1's whole neighbour list; the last match wins (no early break).
for (int j = 0; j < status.nrOfNeighbours[node1]; ++j)
{
if (status.neighbourSource[status.startOfNeighbours[node1] + j] == status.neighbourTarget[status.startOfNeighbours[node2] + j])
weight = status.neighbourWeights[status.startOfNeighbours[node1] + j];
}
return weight;
}
// One thread per node: puts every node in its own singleton community
// (nodesToCom* = i) and zeroes its degree/loop/internal accumulators.
__global__ void initCommsKernel(StatusCUDA status,/* CUDAGraph graph,*/ int limit)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i >= limit)
    return;
  status.degrees[i] = 0;
  status.gdegrees[i] = 0;
  status.loops[i] = 0;
  status.internals[i] = 0;
  // Fix: a __syncthreads() here was removed. Each thread touches only its own
  // element i, so no barrier is needed -- and a barrier placed after the
  // divergent early return above is undefined behaviour, because threads with
  // i >= limit never reach it.
  //status.node[i] = graph.node[i];
  status.hash_idx[i] = i;
  status.nodesToCom[i] = i;
  status.nodesToComPrev[i] = i;
  status.nodesToComNext[i] = i;
}
// Computes each node's degree and self-loop weight. Isolated nodes
// (nrOfNeighbours == 0) are skipped and keep the zeros written by
// initCommsKernel.
__global__ void initValuesKernel(StatusCUDA status,/* CUDAGraph graph,*/ int limit)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= limit)
return;
// degIdx is an in/out argument: getDegreeForNodeAt overwrites it with
// status.comSize[i], and the degree is stored at that index below.
int degIdx = i;
float deg = 0;
if (status.nrOfNeighbours[i] == 0)
return;
deg = getDegreeForNodeAt(status, i, degIdx);
//if (deg < 0)
// throw("A node has a negative degree. Use positive weights.");
status.degrees[degIdx] = deg;
status.gdegrees[degIdx] = deg;
// loopIdx is passed by reference but getEdgeWeight never modifies it, so
// loops and internals are both written at index i here.
int loopIdx = i;
status.loops[loopIdx] = getEdgeWeight(status, i, i, loopIdx);
status.internals[i] = status.loops[i];
}
// Host-side initialization (CUDA build): fills per-node community state,
// builds the CUDPP hash table mapping node ids to dense indices, then
// computes initial degrees and self-loop weights on the device.
// Fix: the two deprecated cudaThreadSynchronize() calls were replaced with
// cudaDeviceSynchronize() (identical semantics; the old name is deprecated).
void initCUDA(StatusCUDA &status, CUDAGraph &graph)
{
  int threads = 512;
  // Ceiling division so a partially filled last block still covers all nodes.
  int blocks = graph.nodesSize / threads;
  if (graph.nodesSize % threads != 0)
    ++blocks;
  status.keys = graph.nodesSize;
  clock_t t1 = clock();
  initCommsKernel << <blocks, threads >> > (status, graph.nodesSize);
  cudaDeviceSynchronize();
  clock_t t2 = clock();
  //std::cout << "initCommsKernel " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
  t1 = clock();
  //cudaMemcpy(status.node, graph.node, sizeof(int) * graph.nodesSize, cudaMemcpyDeviceToDevice);
  t2 = clock();
  //std::cout << "memcpy " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
  //int *temp = new int[status.keys];
  t1 = clock();
  // Insert node ids as keys with their current hash_idx (identity, set by
  // initCommsKernel) as values.
  cudppHashInsert(status.hash_table_handle, status.node,
    status.hash_idx, status.keys);
  cudaDeviceSynchronize();
  t2 = clock();
  cudppHashRetrieve(status.hash_table_handle, status.node,
    status.hash_idx, status.keys);
  cudaDeviceSynchronize();
  //std::cout << "hashinsert " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
  t1 = clock();
  // Overwrite hash_idx with each node's first-neighbour source id...
  setHashKey << <blocks, threads >> > (status, graph.nodesSize);
  cudaDeviceSynchronize();
  t2 = clock();
  //cudaMemcpy(temp, status.hash_idx, sizeof(int) * graph.nodesSize, cudaMemcpyDeviceToHost);
  //std::cout << "sethashkey " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
  t1 = clock();
  // ...then translate those ids into dense indices; result lands in comSize.
  cudppHashRetrieve(status.hash_table_handle, status.hash_idx,
    status.comSize, status.keys);
  cudaDeviceSynchronize();
  t2 = clock();
  //std::cout << "hash retrieve " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
  t1 = clock();
  initValuesKernel << <blocks, threads >> > (status, graph.nodesSize);
  cudaDeviceSynchronize();
  t2 = clock();
  static int go = 0;
  //if (go == 0)
  //{
  // float *nr = new float[graph.nodesSize];
  // cudaMemcpy(nr, status.degrees, sizeof(float) * graph.nodesSize, cudaMemcpyDeviceToHost);
  // std::ofstream out("init_degrees.txt");
  // for (int i = 0; i < graph.nodesSize; ++i)
  // out << nr[i] << std::endl;
  // out.close();
  // ++go;
  //}
  //std::cout << "initValuesKernel " << (double)(t2 - t1) / CLOCKS_PER_SEC << std::endl;
}
6483ddd7dcaaab99d84a926e1553d8f533c5ac0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#include "samples/common/inc/helper_math.h"
#include "core/gpu/displacement_op_cuda_kernel.h"
#define GpuErrchk(ans) { GpuAssert((ans), __FILE__, __LINE__); }
// Reports a HIP runtime error with its source location; exits the process
// when abort is true (the default). Used through the GpuErrchk macro above.
inline void GpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
  if (code == hipSuccess)
    return;
  fprintf(stderr,"GPUassert (error code %d): %s %s %d\n", code, hipGetErrorString(code), file, line);
  // A missing/old driver usually means no usable GPU; point at the fallback.
  if (code == hipErrorInsufficientDriver) {
    printf("This probably means that no CUDA-compatible GPU has been detected. Consider setting the use_opencl flag to \"true\" in the bmd.toml file to use OpenCL instead.\n");
  }
  if (abort) exit(code);
}
// Euclidean length of a double3.
inline __host__ __device__ double norm(double3 &v) {
return sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
}
// In-place scalar scaling of a double3.
inline __host__ __device__ void operator*=(double3 &a, double b){
a.x *= b;
a.y *= b;
a.z *= b;
}
// Scale a double3 by a scalar.
// Fix: the scalar parameter was float, silently truncating the double values
// callers actually pass (e.g. normalize() passes norm(v), collide() passes
// mh and max_displacement[0]). A double parameter still accepts float
// arguments, so existing call sites remain valid.
inline __host__ __device__ double3 operator*(double3 a, double b) {
  return make_double3(a.x * b, a.y * b, a.z * b);
}
// Component-wise division of a double3 by a scalar (same double-precision
// fix as operator* above).
inline __host__ __device__ double3 operator/(double3 a, double b)
{
  return make_double3(a.x / b, a.y / b, a.z / b);
}
// In-place component-wise addition of two double3 values.
inline __host__ __device__ void operator+=(double3 &a, double3 b) {
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
// Unit-length copy of v. No zero-length guard: callers must ensure
// norm(v) != 0 (collide() only normalizes after a norm threshold check).
inline __host__ __device__ double3 normalize(double3 v) {
return v / norm(v);
}
// Squared Euclidean distance between objects idx and nidx, whose coordinates
// are stored interleaved as [x0, y0, z0, x1, y1, z1, ...].
__device__ double squared_euclidian_distance(double* positions, uint32_t idx, uint32_t nidx) {
  double d2 = 0.0;
  for (int c = 0; c < 3; ++c) {
    const double diff = positions[3 * idx + c] - positions[3 * nidx + c];
    d2 += diff * diff;
  }
  return d2;
}
// Maps a world-space position to integer box coordinates in the uniform grid.
// grid_dimensions[0..2] hold the grid minimum along x/y/z.
// NOTE(review): integer division of (floor(pos) - grid_min) by box_length
// assumes positions lie inside the grid (non-negative numerator) -- confirm.
__device__ int3 get_box_coordinates(double3 pos, int32_t* grid_dimensions, uint32_t box_length) {
int3 box_coords;
box_coords.x = (floor(pos.x) - grid_dimensions[0]) / box_length;
box_coords.y = (floor(pos.y) - grid_dimensions[1]) / box_length;
box_coords.z = (floor(pos.z) - grid_dimensions[2]) / box_length;
return box_coords;
}
// Decodes a linear box index into (x, y, z) box coordinates; inverse of
// get_box_id_2 (x varies fastest, z slowest).
__device__ int3 get_box_coordinates_2(uint32_t box_idx, uint32_t* num_boxes_axis_) {
int3 box_coord;
box_coord.z = box_idx / (num_boxes_axis_[0]*num_boxes_axis_[1]);
uint32_t remainder = box_idx % (num_boxes_axis_[0]*num_boxes_axis_[1]);
box_coord.y = remainder / num_boxes_axis_[0];
box_coord.x = remainder % num_boxes_axis_[0];
return box_coord;
}
// Linearizes 3D box coordinates: id = z * nx * ny + y * nx + x.
__device__ uint32_t get_box_id_2(int3 bc, uint32_t* num_boxes_axis) {
return bc.z * num_boxes_axis[0]*num_boxes_axis[1] + bc.y * num_boxes_axis[0] + bc.x;
}
// Convenience wrapper: position -> box coordinates -> linear box id.
__device__ uint32_t get_box_id(double3 pos, uint32_t* num_boxes_axis, int32_t* grid_dimensions, uint32_t box_length) {
int3 box_coords = get_box_coordinates(pos, grid_dimensions, box_length);
return get_box_id_2(box_coords, num_boxes_axis);
}
// Accumulates into *result the pairwise force exerted on object idx by object
// nidx, based on the overlap of their virtually enlarged radii. No overlap
// (delta < 0) contributes nothing; (near-)coincident centers get an
// arbitrary fixed push of (42, 42, 42) so they separate deterministically.
__device__ void compute_force(double* positions, double* diameters, uint32_t idx, uint32_t nidx, double3* result) {
double r1 = 0.5 * diameters[idx];
double r2 = 0.5 * diameters[nidx];
// We take virtual bigger radii to have a distant interaction, to get a desired density.
double additional_radius = 10.0 * 0.15;
r1 += additional_radius;
r2 += additional_radius;
double comp1 = positions[3*idx + 0] - positions[3*nidx + 0];
double comp2 = positions[3*idx + 1] - positions[3*nidx + 1];
double comp3 = positions[3*idx + 2] - positions[3*nidx + 2];
double center_distance = sqrt(comp1 * comp1 + comp2 * comp2 + comp3 * comp3);
// the overlap distance (how much one penetrates in the other)
double delta = r1 + r2 - center_distance;
if (delta < 0) {
return;
}
// to avoid a division by 0 if the centers are (almost) at the same location
if (center_distance < 0.00000001) {
result->x += 42.0;
result->y += 42.0;
result->z += 42.0;
return;
}
// printf("Colliding cell [%d] and [%d]\n", idx, nidx);
// printf("Delta for neighbor [%d] = %f\n", nidx, delta);
// the force itself
// r is the reduced radius; f combines a linear repulsion term (k * delta)
// with a sqrt-overlap attraction term (gamma * sqrt(r * delta)).
double r = (r1 * r2) / (r1 + r2);
double gamma = 1; // attraction coeff
double k = 2; // repulsion coeff
double f = k * delta - gamma * sqrt(r * delta);
// Scale the center-to-center vector to length f.
double module = f / center_distance;
result->x += module * comp1;
result->y += module * comp2;
result->z += module * comp3;
// printf("%f, %f, %f\n", module * comp1, module * comp2, module * comp3);
// printf("Force between cell (%u) [%f, %f, %f] & cell (%u) [%f, %f, %f] = %f, %f, %f\n", idx, positions[3*idx + 0], positions[3*idx + 1], positions[3*idx + 2], nidx, positions[3*nidx + 0], positions[3*nidx + 1], positions[3*nidx + 2], module * comp1, module * comp2, module * comp3);
}
// Accumulates into *result the collision forces on object idx from every
// object stored in one grid box. The box's objects form a linked list:
// start is the head index, length the entry count, successors[i] the next
// object after i. Only neighbours within squared_radius[0] (a squared
// distance) contribute; idx itself is skipped.
__device__ void default_force(double* positions,
double* diameters,
uint32_t idx, uint32_t start, uint16_t length,
uint32_t* successors,
double* squared_radius,
double3* result) {
uint32_t nidx = start;
for (uint16_t nb = 0; nb < length; nb++) {
if (nidx != idx) {
if (squared_euclidian_distance(positions, idx, nidx) < squared_radius[0]) {
compute_force(positions, diameters, idx, nidx, result);
}
}
// traverse linked-list
nidx = successors[nidx];
}
}
// One thread per simulation object. Computes the object's displacement for
// this timestep: the tractor-force contribution plus, when the accumulated
// collision force exceeds the object's adherence, a collision displacement
// clamped to max_displacement[0]. Collision forces are gathered from the
// 3x3x3 Moore neighbourhood of the object's grid box; a box participates
// only if timestamps[bidx] == current_timestamp[0] (populated this step).
// NOTE(review): box_coords + (x, y, z) is not clamped to the grid bounds, so
// objects in boundary boxes index neighbour boxes outside the grid unless
// the grid is built with a halo layer -- confirm against the grid builder.
__global__ void collide(
double* positions,
double* diameters,
double* tractor_force,
double* adherence,
uint32_t* box_id,
double* mass,
double* timestep,
double* max_displacement,
double* squared_radius,
uint32_t* num_objects,
uint32_t* starts,
uint16_t* lengths,
uint64_t* timestamps,
uint64_t* current_timestamp,
uint32_t* successors,
uint32_t* box_length,
uint32_t* num_boxes_axis,
int32_t* grid_dimensions,
double* result) {
uint32_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
// printf("[Kernel] box_id[tidx] = %d\n", box_id[tidx]);
// printf("[Kernel] num_objects = %d\n", num_objects[0]);
// printf("[Kernel] blockDim.x = %d\n", blockDim.x);
// printf("[Kernel] positions = {");
// for (uint32_t i = 0; i < 3*num_objects[0]; i++) {
// printf("%p, ", &(positions[i]));
// }
// printf("}\n");
if (tidx < num_objects[0]) {
// Baseline displacement from the tractor force.
result[3*tidx + 0] = timestep[0] * tractor_force[3*tidx + 0];
result[3*tidx + 1] = timestep[0] * tractor_force[3*tidx + 1];
result[3*tidx + 2] = timestep[0] * tractor_force[3*tidx + 2];
double3 collision_force = make_double3(0, 0, 0);
double3 movement_at_next_step = make_double3(0, 0, 0);
// Moore neighborhood
int3 box_coords = get_box_coordinates_2(box_id[tidx], num_boxes_axis);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
uint32_t bidx = get_box_id_2(box_coords + make_int3(x, y, z), num_boxes_axis);
if (timestamps[bidx] == current_timestamp[0] && lengths[bidx] != 0) {
default_force(positions, diameters, tidx, starts[bidx], lengths[bidx], successors, squared_radius, &collision_force);
}
}
}
}
// Mass needs to non-zero!
double mh = timestep[0] / mass[tidx];
// printf("mh = %f\n", mh);
// The collision displacement replaces the tractor displacement only when
// the force magnitude exceeds this object's adherence threshold.
if (norm(collision_force) > adherence[tidx]) {
movement_at_next_step += collision_force * mh;
// printf("collision_force = (%f, %f, %f)\n", collision_force.x, collision_force.y, collision_force.z);
// printf("cell_movement (1) = (%f, %f, %f)\n", result[3*tidx + 0], result[3*tidx + 1], result[3*tidx + 2]);
// Clamp the displacement magnitude to max_displacement per step.
if (norm(collision_force) * mh > max_displacement[0]) {
movement_at_next_step = normalize(movement_at_next_step);
movement_at_next_step *= max_displacement[0];
}
result[3*tidx + 0] = movement_at_next_step.x;
result[3*tidx + 1] = movement_at_next_step.y;
result[3*tidx + 2] = movement_at_next_step.z;
// printf("cell_movement (2) = (%f, %f, %f)\n", result[3*tidx + 0], result[3*tidx + 1], result[3*tidx + 2]);
}
}
}
// Allocates every device buffer used by LaunchDisplacementKernel, sized for
// num_objects simulation objects and num_boxes uniform-grid boxes.
// All allocations are error-checked; buffers are re-sized later via
// ResizeCellBuffers / ResizeGridBuffers when counts change.
bdm::DisplacementOpCudaKernel::DisplacementOpCudaKernel(uint32_t num_objects, uint32_t num_boxes) {
// printf("[DisplacementOpCudaKernel] num_objects = %u | num_boxes = %u\n", num_objects, num_boxes);
GpuErrchk(hipMalloc(&d_positions_, 3 * num_objects * sizeof(double)));
GpuErrchk(hipMalloc(&d_diameters_, num_objects * sizeof(double)));
GpuErrchk(hipMalloc(&d_tractor_force_, 3 * num_objects * sizeof(double)));
GpuErrchk(hipMalloc(&d_adherence_, num_objects * sizeof(double)));
GpuErrchk(hipMalloc(&d_box_id_, num_objects * sizeof(uint32_t)));
GpuErrchk(hipMalloc(&d_mass_, num_objects * sizeof(double)));
GpuErrchk(hipMalloc(&d_timestep_, sizeof(double)));
GpuErrchk(hipMalloc(&d_max_displacement_, sizeof(double)));
GpuErrchk(hipMalloc(&d_squared_radius_, sizeof(double)));
GpuErrchk(hipMalloc(&d_num_objects_, sizeof(uint32_t)));
GpuErrchk(hipMalloc(&d_starts_, num_boxes * sizeof(uint32_t)));
GpuErrchk(hipMalloc(&d_lengths_, num_boxes * sizeof(uint16_t)));
GpuErrchk(hipMalloc(&d_timestamps_, num_boxes * sizeof(uint64_t)));
GpuErrchk(hipMalloc(&d_current_timestamp_, sizeof(uint64_t)));
GpuErrchk(hipMalloc(&d_successors_, num_objects * sizeof(uint32_t)));
GpuErrchk(hipMalloc(&d_box_length_, sizeof(uint32_t)));
GpuErrchk(hipMalloc(&d_num_boxes_axis_, 3 * sizeof(uint32_t)));
GpuErrchk(hipMalloc(&d_grid_dimensions_, 3 * sizeof(int32_t)));
GpuErrchk(hipMalloc(&d_cell_movements_, 3 * num_objects * sizeof(double)));
}
// Copies all inputs to the pre-allocated device buffers, launches the collide
// kernel with an occupancy-derived block size, and copies the resulting cell
// movements back into cell_movements (host).
// Preconditions: the device buffers were sized for at least num_objects[0]
// objects and num_boxes grid boxes (constructor / Resize*Buffers).
// Fixes: the kernel launch is now followed by hipGetLastError() so launch
// errors surface immediately; the final sync and result copy-back are
// error-checked; grid_dimensions is copied with sizeof(int32_t) to match its
// declared type (same byte count as before, clearer intent).
void bdm::DisplacementOpCudaKernel::LaunchDisplacementKernel(const double* positions,
    const double* diameters, const double* tractor_force, const double* adherence,
    const uint32_t* box_id, const double* mass, const double* timestep, const double* max_displacement,
    const double* squared_radius, const uint32_t* num_objects, uint32_t* starts,
    uint16_t* lengths, uint64_t* timestamps, uint64_t* current_timestamp, uint32_t* successors, uint32_t* box_length,
    uint32_t* num_boxes_axis, int32_t* grid_dimensions,
    double* cell_movements) {
  uint32_t num_boxes = num_boxes_axis[0] * num_boxes_axis[1] * num_boxes_axis[2];
  GpuErrchk(hipMemcpy(d_positions_, positions, 3 * num_objects[0] * sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_diameters_, diameters, num_objects[0] * sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_tractor_force_, tractor_force, 3 * num_objects[0] * sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_adherence_, adherence, num_objects[0] * sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_box_id_, box_id, num_objects[0] * sizeof(uint32_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_mass_, mass, num_objects[0] * sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_timestep_, timestep, sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_max_displacement_, max_displacement, sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_squared_radius_, squared_radius, sizeof(double), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_num_objects_, num_objects, sizeof(uint32_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_starts_, starts, num_boxes * sizeof(uint32_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_lengths_, lengths, num_boxes * sizeof(uint16_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_timestamps_, timestamps, num_boxes * sizeof(uint64_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_current_timestamp_, current_timestamp, sizeof(uint64_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_successors_, successors, num_objects[0] * sizeof(uint32_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_box_length_, box_length, sizeof(uint32_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_num_boxes_axis_, num_boxes_axis, 3 * sizeof(uint32_t), hipMemcpyHostToDevice));
  GpuErrchk(hipMemcpy(d_grid_dimensions_, grid_dimensions, 3 * sizeof(int32_t), hipMemcpyHostToDevice));
  int blockSize = 128;
  int minGridSize;
  int gridSize;
  // Get a near-optimal occupancy with the following thread organization
  hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, collide, 0, num_objects[0]);
  gridSize = (num_objects[0] + blockSize - 1) / blockSize;
  hipLaunchKernelGGL(( collide), dim3(gridSize), dim3(blockSize), 0, 0, d_positions_, d_diameters_, d_tractor_force_,
      d_adherence_, d_box_id_, d_mass_, d_timestep_, d_max_displacement_,
      d_squared_radius_, d_num_objects_, d_starts_, d_lengths_, d_timestamps_,
      d_current_timestamp_, d_successors_, d_box_length_, d_num_boxes_axis_,
      d_grid_dimensions_, d_cell_movements_);
  // Surface launch-configuration errors right away.
  GpuErrchk(hipGetLastError());
  // We need to wait for the kernel to finish before reading back the result.
  GpuErrchk(hipDeviceSynchronize());
  GpuErrchk(hipMemcpy(cell_movements, d_cell_movements_, 3 * num_objects[0] * sizeof(double), hipMemcpyDeviceToHost));
}
// Frees and re-allocates all per-cell device buffers for num_cells objects.
// Fix: the frees and allocations are now wrapped in GpuErrchk, consistent
// with the constructor -- a silent allocation failure here previously only
// surfaced as an illegal-address error at the next kernel launch.
void bdm::DisplacementOpCudaKernel::ResizeCellBuffers(uint32_t num_cells) {
  GpuErrchk(hipFree(d_positions_));
  GpuErrchk(hipFree(d_diameters_));
  GpuErrchk(hipFree(d_tractor_force_));
  GpuErrchk(hipFree(d_adherence_));
  GpuErrchk(hipFree(d_box_id_));
  GpuErrchk(hipFree(d_mass_));
  GpuErrchk(hipFree(d_successors_));
  GpuErrchk(hipFree(d_cell_movements_));
  GpuErrchk(hipMalloc(&d_positions_, 3 * num_cells * sizeof(double)));
  GpuErrchk(hipMalloc(&d_diameters_, num_cells * sizeof(double)));
  GpuErrchk(hipMalloc(&d_tractor_force_, 3 * num_cells * sizeof(double)));
  GpuErrchk(hipMalloc(&d_adherence_, num_cells * sizeof(double)));
  GpuErrchk(hipMalloc(&d_box_id_, num_cells * sizeof(uint32_t)));
  GpuErrchk(hipMalloc(&d_mass_, num_cells * sizeof(double)));
  GpuErrchk(hipMalloc(&d_successors_, num_cells * sizeof(uint32_t)));
  GpuErrchk(hipMalloc(&d_cell_movements_, 3 * num_cells * sizeof(double)));
}
// Frees and re-allocates the per-box device buffers for num_boxes grid boxes.
// Fix: calls are now wrapped in GpuErrchk, consistent with the constructor.
void bdm::DisplacementOpCudaKernel::ResizeGridBuffers(uint32_t num_boxes) {
  GpuErrchk(hipFree(d_starts_));
  GpuErrchk(hipFree(d_lengths_));
  GpuErrchk(hipFree(d_timestamps_));
  GpuErrchk(hipMalloc(&d_starts_, num_boxes * sizeof(uint32_t)));
  GpuErrchk(hipMalloc(&d_lengths_, num_boxes * sizeof(uint16_t)));
  GpuErrchk(hipMalloc(&d_timestamps_, num_boxes * sizeof(uint64_t)));
}
// Releases every device buffer allocated in the constructor / Resize* calls.
bdm::DisplacementOpCudaKernel::~DisplacementOpCudaKernel() {
  hipFree(d_positions_);
  hipFree(d_diameters_);
  hipFree(d_tractor_force_);
  hipFree(d_adherence_);
  hipFree(d_box_id_);
  hipFree(d_mass_);
  hipFree(d_timestep_);
  hipFree(d_max_displacement_);
  hipFree(d_squared_radius_);
  hipFree(d_num_objects_);
  hipFree(d_starts_);
  hipFree(d_lengths_);
  hipFree(d_timestamps_);
  hipFree(d_current_timestamp_);
  hipFree(d_successors_);
  // Fix: d_box_length_ is allocated in the constructor but was never freed
  // here (device memory leak).
  hipFree(d_box_length_);
  hipFree(d_num_boxes_axis_);
  hipFree(d_grid_dimensions_);
  hipFree(d_cell_movements_);
}
| 6483ddd7dcaaab99d84a926e1553d8f533c5ac0c.cu | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#include "samples/common/inc/helper_math.h"
#include "core/gpu/displacement_op_cuda_kernel.h"
#define GpuErrchk(ans) { GpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA runtime error with its source location; exits the process
// when abort is true (the default). Used through the GpuErrchk macro above.
inline void GpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert (error code %d): %s %s %d\n", code, cudaGetErrorString(code), file, line);
// A missing/old driver usually means no usable GPU; point at the fallback.
if (code == cudaErrorInsufficientDriver) {
printf("This probably means that no CUDA-compatible GPU has been detected. Consider setting the use_opencl flag to \"true\" in the bmd.toml file to use OpenCL instead.\n");
}
if (abort) exit(code);
}
}
// Euclidean length of a double3.
inline __host__ __device__ double norm(double3 &v) {
return sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
}
// In-place scalar scaling of a double3.
inline __host__ __device__ void operator*=(double3 &a, double b){
a.x *= b;
a.y *= b;
a.z *= b;
}
// Scale a double3 by a scalar.
// Fix: the scalar parameter was float, silently truncating the double values
// callers actually pass (e.g. normalize() passes norm(v), collide() passes
// mh and max_displacement[0]). A double parameter still accepts float
// arguments, so existing call sites remain valid.
inline __host__ __device__ double3 operator*(double3 a, double b) {
  return make_double3(a.x * b, a.y * b, a.z * b);
}
// Component-wise division of a double3 by a scalar (same double-precision
// fix as operator* above).
inline __host__ __device__ double3 operator/(double3 a, double b)
{
  return make_double3(a.x / b, a.y / b, a.z / b);
}
// In-place component-wise addition of two double3 values.
inline __host__ __device__ void operator+=(double3 &a, double3 b) {
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
// Unit-length copy of v. No zero-length guard: callers must ensure
// norm(v) != 0 (collide() only normalizes after a norm threshold check).
inline __host__ __device__ double3 normalize(double3 v) {
return v / norm(v);
}
// Squared Euclidean distance between objects idx and nidx, whose coordinates
// are stored interleaved as [x0, y0, z0, x1, y1, z1, ...].
__device__ double squared_euclidian_distance(double* positions, uint32_t idx, uint32_t nidx) {
  double d2 = 0.0;
  for (int c = 0; c < 3; ++c) {
    const double diff = positions[3 * idx + c] - positions[3 * nidx + c];
    d2 += diff * diff;
  }
  return d2;
}
// Maps a world-space position to integer box coordinates in the uniform grid.
// grid_dimensions[0..2] hold the grid minimum along x/y/z.
// NOTE(review): integer division of (floor(pos) - grid_min) by box_length
// assumes positions lie inside the grid (non-negative numerator) -- confirm.
__device__ int3 get_box_coordinates(double3 pos, int32_t* grid_dimensions, uint32_t box_length) {
int3 box_coords;
box_coords.x = (floor(pos.x) - grid_dimensions[0]) / box_length;
box_coords.y = (floor(pos.y) - grid_dimensions[1]) / box_length;
box_coords.z = (floor(pos.z) - grid_dimensions[2]) / box_length;
return box_coords;
}
// Decodes a linear box index into (x, y, z) box coordinates; inverse of
// get_box_id_2 (x varies fastest, z slowest).
__device__ int3 get_box_coordinates_2(uint32_t box_idx, uint32_t* num_boxes_axis_) {
int3 box_coord;
box_coord.z = box_idx / (num_boxes_axis_[0]*num_boxes_axis_[1]);
uint32_t remainder = box_idx % (num_boxes_axis_[0]*num_boxes_axis_[1]);
box_coord.y = remainder / num_boxes_axis_[0];
box_coord.x = remainder % num_boxes_axis_[0];
return box_coord;
}
// Linearizes 3D box coordinates: id = z * nx * ny + y * nx + x.
__device__ uint32_t get_box_id_2(int3 bc, uint32_t* num_boxes_axis) {
return bc.z * num_boxes_axis[0]*num_boxes_axis[1] + bc.y * num_boxes_axis[0] + bc.x;
}
// Convenience wrapper: position -> box coordinates -> linear box id.
__device__ uint32_t get_box_id(double3 pos, uint32_t* num_boxes_axis, int32_t* grid_dimensions, uint32_t box_length) {
int3 box_coords = get_box_coordinates(pos, grid_dimensions, box_length);
return get_box_id_2(box_coords, num_boxes_axis);
}
// Accumulates into *result the pairwise force exerted on object idx by object
// nidx, based on the overlap of their virtually enlarged radii. No overlap
// (delta < 0) contributes nothing; (near-)coincident centers get an
// arbitrary fixed push of (42, 42, 42) so they separate deterministically.
__device__ void compute_force(double* positions, double* diameters, uint32_t idx, uint32_t nidx, double3* result) {
double r1 = 0.5 * diameters[idx];
double r2 = 0.5 * diameters[nidx];
// We take virtual bigger radii to have a distant interaction, to get a desired density.
double additional_radius = 10.0 * 0.15;
r1 += additional_radius;
r2 += additional_radius;
double comp1 = positions[3*idx + 0] - positions[3*nidx + 0];
double comp2 = positions[3*idx + 1] - positions[3*nidx + 1];
double comp3 = positions[3*idx + 2] - positions[3*nidx + 2];
double center_distance = sqrt(comp1 * comp1 + comp2 * comp2 + comp3 * comp3);
// the overlap distance (how much one penetrates in the other)
double delta = r1 + r2 - center_distance;
if (delta < 0) {
return;
}
// to avoid a division by 0 if the centers are (almost) at the same location
if (center_distance < 0.00000001) {
result->x += 42.0;
result->y += 42.0;
result->z += 42.0;
return;
}
// printf("Colliding cell [%d] and [%d]\n", idx, nidx);
// printf("Delta for neighbor [%d] = %f\n", nidx, delta);
// the force itself
// r is the reduced radius; f combines a linear repulsion term (k * delta)
// with a sqrt-overlap attraction term (gamma * sqrt(r * delta)).
double r = (r1 * r2) / (r1 + r2);
double gamma = 1; // attraction coeff
double k = 2; // repulsion coeff
double f = k * delta - gamma * sqrt(r * delta);
// Scale the center-to-center vector to length f.
double module = f / center_distance;
result->x += module * comp1;
result->y += module * comp2;
result->z += module * comp3;
// printf("%f, %f, %f\n", module * comp1, module * comp2, module * comp3);
// printf("Force between cell (%u) [%f, %f, %f] & cell (%u) [%f, %f, %f] = %f, %f, %f\n", idx, positions[3*idx + 0], positions[3*idx + 1], positions[3*idx + 2], nidx, positions[3*nidx + 0], positions[3*nidx + 1], positions[3*nidx + 2], module * comp1, module * comp2, module * comp3);
}
// Accumulates into *result the collision forces on object idx from every
// object stored in one grid box. The box's objects form a linked list:
// start is the head index, length the entry count, successors[i] the next
// object after i. Only neighbours within squared_radius[0] (a squared
// distance) contribute; idx itself is skipped.
__device__ void default_force(double* positions,
double* diameters,
uint32_t idx, uint32_t start, uint16_t length,
uint32_t* successors,
double* squared_radius,
double3* result) {
uint32_t nidx = start;
for (uint16_t nb = 0; nb < length; nb++) {
if (nidx != idx) {
if (squared_euclidian_distance(positions, idx, nidx) < squared_radius[0]) {
compute_force(positions, diameters, idx, nidx, result);
}
}
// traverse linked-list
nidx = successors[nidx];
}
}
// One thread per simulation object. Computes the object's displacement for
// this timestep: the tractor-force contribution plus, when the accumulated
// collision force exceeds the object's adherence, a collision displacement
// clamped to max_displacement[0]. Collision forces are gathered from the
// 3x3x3 Moore neighbourhood of the object's grid box; a box participates
// only if timestamps[bidx] == current_timestamp[0] (populated this step).
// NOTE(review): box_coords + (x, y, z) is not clamped to the grid bounds, so
// objects in boundary boxes index neighbour boxes outside the grid unless
// the grid is built with a halo layer -- confirm against the grid builder.
__global__ void collide(
double* positions,
double* diameters,
double* tractor_force,
double* adherence,
uint32_t* box_id,
double* mass,
double* timestep,
double* max_displacement,
double* squared_radius,
uint32_t* num_objects,
uint32_t* starts,
uint16_t* lengths,
uint64_t* timestamps,
uint64_t* current_timestamp,
uint32_t* successors,
uint32_t* box_length,
uint32_t* num_boxes_axis,
int32_t* grid_dimensions,
double* result) {
uint32_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
// printf("[Kernel] box_id[tidx] = %d\n", box_id[tidx]);
// printf("[Kernel] num_objects = %d\n", num_objects[0]);
// printf("[Kernel] blockDim.x = %d\n", blockDim.x);
// printf("[Kernel] positions = {");
// for (uint32_t i = 0; i < 3*num_objects[0]; i++) {
// printf("%p, ", &(positions[i]));
// }
// printf("}\n");
if (tidx < num_objects[0]) {
// Baseline displacement from the tractor force.
result[3*tidx + 0] = timestep[0] * tractor_force[3*tidx + 0];
result[3*tidx + 1] = timestep[0] * tractor_force[3*tidx + 1];
result[3*tidx + 2] = timestep[0] * tractor_force[3*tidx + 2];
double3 collision_force = make_double3(0, 0, 0);
double3 movement_at_next_step = make_double3(0, 0, 0);
// Moore neighborhood
int3 box_coords = get_box_coordinates_2(box_id[tidx], num_boxes_axis);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
uint32_t bidx = get_box_id_2(box_coords + make_int3(x, y, z), num_boxes_axis);
if (timestamps[bidx] == current_timestamp[0] && lengths[bidx] != 0) {
default_force(positions, diameters, tidx, starts[bidx], lengths[bidx], successors, squared_radius, &collision_force);
}
}
}
}
// Mass needs to non-zero!
double mh = timestep[0] / mass[tidx];
// printf("mh = %f\n", mh);
// The collision displacement replaces the tractor displacement only when
// the force magnitude exceeds this object's adherence threshold.
if (norm(collision_force) > adherence[tidx]) {
movement_at_next_step += collision_force * mh;
// printf("collision_force = (%f, %f, %f)\n", collision_force.x, collision_force.y, collision_force.z);
// printf("cell_movement (1) = (%f, %f, %f)\n", result[3*tidx + 0], result[3*tidx + 1], result[3*tidx + 2]);
// Clamp the displacement magnitude to max_displacement per step.
if (norm(collision_force) * mh > max_displacement[0]) {
movement_at_next_step = normalize(movement_at_next_step);
movement_at_next_step *= max_displacement[0];
}
result[3*tidx + 0] = movement_at_next_step.x;
result[3*tidx + 1] = movement_at_next_step.y;
result[3*tidx + 2] = movement_at_next_step.z;
// printf("cell_movement (2) = (%f, %f, %f)\n", result[3*tidx + 0], result[3*tidx + 1], result[3*tidx + 2]);
}
}
}
// Allocates every device buffer used by LaunchDisplacementKernel, sized for
// num_objects simulation objects and num_boxes uniform-grid boxes.
// All allocations are error-checked; buffers are re-sized later via
// ResizeCellBuffers / ResizeGridBuffers when counts change.
bdm::DisplacementOpCudaKernel::DisplacementOpCudaKernel(uint32_t num_objects, uint32_t num_boxes) {
// printf("[DisplacementOpCudaKernel] num_objects = %u | num_boxes = %u\n", num_objects, num_boxes);
GpuErrchk(cudaMalloc(&d_positions_, 3 * num_objects * sizeof(double)));
GpuErrchk(cudaMalloc(&d_diameters_, num_objects * sizeof(double)));
GpuErrchk(cudaMalloc(&d_tractor_force_, 3 * num_objects * sizeof(double)));
GpuErrchk(cudaMalloc(&d_adherence_, num_objects * sizeof(double)));
GpuErrchk(cudaMalloc(&d_box_id_, num_objects * sizeof(uint32_t)));
GpuErrchk(cudaMalloc(&d_mass_, num_objects * sizeof(double)));
GpuErrchk(cudaMalloc(&d_timestep_, sizeof(double)));
GpuErrchk(cudaMalloc(&d_max_displacement_, sizeof(double)));
GpuErrchk(cudaMalloc(&d_squared_radius_, sizeof(double)));
GpuErrchk(cudaMalloc(&d_num_objects_, sizeof(uint32_t)));
GpuErrchk(cudaMalloc(&d_starts_, num_boxes * sizeof(uint32_t)));
GpuErrchk(cudaMalloc(&d_lengths_, num_boxes * sizeof(uint16_t)));
GpuErrchk(cudaMalloc(&d_timestamps_, num_boxes * sizeof(uint64_t)));
GpuErrchk(cudaMalloc(&d_current_timestamp_, sizeof(uint64_t)));
GpuErrchk(cudaMalloc(&d_successors_, num_objects * sizeof(uint32_t)));
GpuErrchk(cudaMalloc(&d_box_length_, sizeof(uint32_t)));
GpuErrchk(cudaMalloc(&d_num_boxes_axis_, 3 * sizeof(uint32_t)));
GpuErrchk(cudaMalloc(&d_grid_dimensions_, 3 * sizeof(int32_t)));
GpuErrchk(cudaMalloc(&d_cell_movements_, 3 * num_objects * sizeof(double)));
}
/// Copies the host-side simulation state to the device, launches the
/// `collide` kernel with an occupancy-derived launch configuration, and
/// copies the computed per-cell displacements back to the host.
///
/// Preconditions: all d_* member buffers must have been allocated for at
/// least num_objects[0] cells and num_boxes grid boxes (see the constructor
/// and ResizeCellBuffers / ResizeGridBuffers).
///
/// @param positions       host array, 3 * num_objects[0] doubles (xyz per cell)
/// @param num_objects     host pointer to the current cell count
/// @param cell_movements  host output array, 3 * num_objects[0] doubles
void bdm::DisplacementOpCudaKernel::LaunchDisplacementKernel(const double* positions,
    const double* diameters, const double* tractor_force, const double* adherence,
    const uint32_t* box_id, const double* mass, const double* timestep, const double* max_displacement,
    const double* squared_radius, const uint32_t* num_objects, uint32_t* starts,
    uint16_t* lengths, uint64_t* timestamps, uint64_t* current_timestamp, uint32_t* successors, uint32_t* box_length,
    uint32_t* num_boxes_axis, int32_t* grid_dimensions,
    double* cell_movements) {
  uint32_t num_boxes = num_boxes_axis[0] * num_boxes_axis[1] * num_boxes_axis[2];

  // Stage every input on the device. Sizes must match the allocations made in
  // the constructor / Resize*Buffers.
  GpuErrchk(cudaMemcpy(d_positions_, positions, 3 * num_objects[0] * sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_diameters_, diameters, num_objects[0] * sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_tractor_force_, tractor_force, 3 * num_objects[0] * sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_adherence_, adherence, num_objects[0] * sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_box_id_, box_id, num_objects[0] * sizeof(uint32_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_mass_, mass, num_objects[0] * sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_timestep_, timestep, sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_max_displacement_, max_displacement, sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_squared_radius_, squared_radius, sizeof(double), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_num_objects_, num_objects, sizeof(uint32_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_starts_, starts, num_boxes * sizeof(uint32_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_lengths_, lengths, num_boxes * sizeof(uint16_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_timestamps_, timestamps, num_boxes * sizeof(uint64_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_current_timestamp_, current_timestamp, sizeof(uint64_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_successors_, successors, num_objects[0] * sizeof(uint32_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_box_length_, box_length, sizeof(uint32_t), cudaMemcpyHostToDevice));
  GpuErrchk(cudaMemcpy(d_num_boxes_axis_, num_boxes_axis, 3 * sizeof(uint32_t), cudaMemcpyHostToDevice));
  // Fixed: the destination buffer holds int32_t, so size with sizeof(int32_t)
  // (same byte count as uint32_t, but the previous mismatch was a latent bug).
  GpuErrchk(cudaMemcpy(d_grid_dimensions_, grid_dimensions, 3 * sizeof(int32_t), cudaMemcpyHostToDevice));

  int blockSize = 128;
  int minGridSize;
  int gridSize;
  // Get a near-optimal occupancy with the following thread organization
  cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, collide, 0, num_objects[0]);
  gridSize = (num_objects[0] + blockSize - 1) / blockSize;

  collide<<<gridSize, blockSize>>>(d_positions_, d_diameters_, d_tractor_force_,
      d_adherence_, d_box_id_, d_mass_, d_timestep_, d_max_displacement_,
      d_squared_radius_, d_num_objects_, d_starts_, d_lengths_, d_timestamps_,
      d_current_timestamp_, d_successors_, d_box_length_, d_num_boxes_axis_,
      d_grid_dimensions_, d_cell_movements_);
  // Surface launch-configuration errors immediately (launches do not return
  // a status themselves).
  GpuErrchk(cudaPeekAtLastError());
  // We need to wait for the kernel to finish before reading back the result;
  // this also surfaces asynchronous execution errors.
  GpuErrchk(cudaDeviceSynchronize());
  GpuErrchk(cudaMemcpy(cell_movements, d_cell_movements_, 3 * num_objects[0] * sizeof(double), cudaMemcpyDeviceToHost));
}
/// Re-allocates all per-cell device buffers for a new cell count.
/// Must be called whenever the number of simulation objects changes so the
/// cudaMemcpy calls in LaunchDisplacementKernel stay within bounds.
///
/// @param num_cells new number of cells to size the buffers for
void bdm::DisplacementOpCudaKernel::ResizeCellBuffers(uint32_t num_cells) {
  cudaFree(d_positions_);
  cudaFree(d_diameters_);
  cudaFree(d_tractor_force_);
  cudaFree(d_adherence_);
  cudaFree(d_box_id_);
  cudaFree(d_mass_);
  cudaFree(d_successors_);
  cudaFree(d_cell_movements_);
  // Check the re-allocations (the original code silently ignored these return
  // codes, so an out-of-memory error would only surface later as a memcpy
  // failure) — consistent with the GpuErrchk-wrapped allocations at init.
  GpuErrchk(cudaMalloc(&d_positions_, 3 * num_cells * sizeof(double)));
  GpuErrchk(cudaMalloc(&d_diameters_, num_cells * sizeof(double)));
  GpuErrchk(cudaMalloc(&d_tractor_force_, 3 * num_cells * sizeof(double)));
  GpuErrchk(cudaMalloc(&d_adherence_, num_cells * sizeof(double)));
  GpuErrchk(cudaMalloc(&d_box_id_, num_cells * sizeof(uint32_t)));
  GpuErrchk(cudaMalloc(&d_mass_, num_cells * sizeof(double)));
  GpuErrchk(cudaMalloc(&d_successors_, num_cells * sizeof(uint32_t)));
  GpuErrchk(cudaMalloc(&d_cell_movements_, 3 * num_cells * sizeof(double)));
}
/// Re-allocates the per-box grid buffers for a new box count.
/// Must be called whenever the uniform-grid dimensions change so the
/// cudaMemcpy calls in LaunchDisplacementKernel stay within bounds.
///
/// @param num_boxes new number of grid boxes to size the buffers for
void bdm::DisplacementOpCudaKernel::ResizeGridBuffers(uint32_t num_boxes) {
  cudaFree(d_starts_);
  cudaFree(d_lengths_);
  cudaFree(d_timestamps_);
  // Check the re-allocations (previously ignored), matching the
  // GpuErrchk-wrapped allocations performed at initialization.
  GpuErrchk(cudaMalloc(&d_starts_, num_boxes * sizeof(uint32_t)));
  GpuErrchk(cudaMalloc(&d_lengths_, num_boxes * sizeof(uint16_t)));
  GpuErrchk(cudaMalloc(&d_timestamps_, num_boxes * sizeof(uint64_t)));
}
// Releases every device-side buffer owned by this kernel wrapper.
// cudaFree return codes are deliberately ignored: a failed free in a
// destructor is not recoverable, and cudaFree(nullptr) is a harmless no-op.
bdm::DisplacementOpCudaKernel::~DisplacementOpCudaKernel() {
cudaFree(d_positions_);
cudaFree(d_diameters_);
cudaFree(d_tractor_force_);
cudaFree(d_adherence_);
cudaFree(d_box_id_);
cudaFree(d_mass_);
cudaFree(d_timestep_);
cudaFree(d_max_displacement_);
cudaFree(d_squared_radius_);
cudaFree(d_num_objects_);
cudaFree(d_starts_);
cudaFree(d_lengths_);
cudaFree(d_timestamps_);
cudaFree(d_current_timestamp_);
cudaFree(d_successors_);
cudaFree(d_num_boxes_axis_);
cudaFree(d_grid_dimensions_);
cudaFree(d_cell_movements_);
}
|
e73dc955783dc57eb301f4e88070e793bc9cf133.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************||********************************************
Genetic algorithm optimizer *
genA.cu *
Runs iterations of a genetic algorithm to optimize molecular mechanics dihedral parameters *
@author James Maier, Kellon Belfon, Chuan Tian *
@lab Carlos Simmerling lab, Stony Brook University *
@version 3.0 2019 Aug *
********************************************||*******************************************/
/*****************************************************************************************
* ---------------LOAD LIBRARIES------------- *
*****************************************************************************************/
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <math.h>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/generate.h>
#include <thrust/device_ptr.h>
#include <list>
#include <map>
#include "load.cpp"
#include "parse.cpp"
using namespace std;
/******** Number of threads for a given block, 256 block threads (index 0 to 255) *******/
const int BLOCK_SIZE=256;
// NOTE(review): HANDLE_ERROR evaluates its argument and discards the returned
// status, so API failures wrapped by it are silently ignored — consider
// checking the result code. Left unchanged to preserve current behavior.
#define HANDLE_ERROR(x) x;
/*****************************************************************************************
* Defining the six pivotal functions for the genetic algorithm *
* (1) mateIt, (2) mutateIt, (3) scoreIt, (4) calcAreas, (5) moveEm, (6) getSumAreas *
* note: getSumAreas uses two other functions sumEm and sumEmIndex *
******************************************************************************************
******************************************************************************************
* | function1: mateIt | *
* *
* @purpose creates offspring from a population, generating crossovers according to pCross*
* @param Vs a global array of all the parent and child genomes (Amplitude parameters) *
* @param ptrs array of pointers from logical indices to actual indices into Vs for *
* each individual *
* @param areas the probabilities for choosing each individual for mating *
* @param sumArea pointer to the sum of all the individual areas *
* @param rands array of random numbers for crossover *
* @param pCross probability that crossover occurs *
* @param pSize number of individuals in the population (possible amplitudes solutions) *
* @param genomeSize number of genes in a genome (number of dihedral * periodicity) *
*****************************************************************************************/
// One thread per offspring pair: consumes 3 random numbers per pair
// (two for roulette-wheel parent selection, one for the crossover decision).
__global__ void mateIt(float *Vs, int *ptrs, const float *areas, const float *sumArea,
const float *rands, const float pCross, const int pSize, const int genomeSize)
{
/* figure out index for threads blockId.x is the index for blocks,
blockDIM.x is the elements per blocks (# of threads in a block)
threadIdx is the index for threads */
int i=blockIdx.x * blockDim.x + threadIdx.x;
/* random numbers for crossover */
int randi=i*3;
/* multiply i by 2, as we will have 2 parents and 2 offspring using a left bitwise
(<<) by 1*/
i<<=1;
/* if we're in the population (sometimes warps may go past, don't waste threads) */
if (i<pSize) {
int parent[2];
int j;
/* figure out parents */
parent[0]=parent[1]=-1;
/* find parent where cumulative (cum) area (A) is less than random target (tgt) area
selection of parents depends on cumulative probability being less than the
random probabilities (random numbers).
The random probabilities (tgtA) is random numbers multiply by sum of all the
individual probabilities*/
float cumA=0.0f, tgtA=rands[randi++]* *sumArea; //tgtA random number from 0 to the sumArea
while(cumA<=tgtA){
++parent[0];
cumA+=areas[ptrs[parent[0]]/genomeSize]; // areas (probabilities) is based on mWo option
/* rands[randi-1] is the index back to zero since it is the first set of parents */
}
#if DEBUG>2
printf("rands[%d] ; %f ; %f=%f * %f\n",randi, cumA, tgtA, rands[randi-1], *sumArea);
printf("first parent\n");
#endif
/* Subtract the 1st parent's area from the total so parent 2 is drawn
   from the remaining probability mass */
cumA=0.0f; tgtA=rands[randi++]* (*sumArea-areas[ptrs[parent[0]]/genomeSize]);
while (cumA<=tgtA){
++parent[1];
if (parent[1]==parent[0]) //Ensure you don't pick the same parents
++parent[1];
cumA+=areas[ptrs[parent[1]]/genomeSize];
}
#if DEBUG>2
printf("Make offspring %d from %d and %d (%f=%f*(%f-%f)) %d\n", i, parent[0],
parent[1], tgtA, rands[randi-1], *sumArea, areas[ptrs[parent[0]]/genomeSize], randi);
#endif
/* add offset of pSize to i because it is an offspring (next population) */
i+=pSize;
/* use ptrs to get indices into Vs */
int i0=ptrs[i], i1=ptrs[i+1];
parent[0]=ptrs[parent[0]];
parent[1]=ptrs[parent[1]];
/* set j to index for the next set of Vs */
j=i0+genomeSize;
/* put parent[0], parent[1], and i1 relative to i0, so we can just add i0 for index */
parent[0]-=i0;
parent[1]-=i0;
i1-=i0;
/* start with crossover pt at the end (no crossover) */
int crossPt=j;
/* check if we need to do crossover,
only do crossover if random number is less than pCross
(the same random number, rescaled, also picks the crossover gene) */
if(rands[randi]<pCross){
crossPt=i0+1+(int)(rands[randi]/pCross*(float)(genomeSize-1));
}
/* before the crossover point: child 1 copies parent 0, child 2 copies parent 1 */
while(i0<crossPt){
/* load next bit from parent and increment i */
Vs[i0]=Vs[parent[0]+i0];
Vs[i1+i0]=Vs[parent[1]+i0];
++i0;
}
/* after the crossover point: the parents swap roles */
while(i0<j){
Vs[i0]=Vs[parent[1]+i0];
Vs[i1+i0]=Vs[parent[0]+i0];
++i0;
} //end of while loop
} // end of if i<pSize loop
}
/*****************************************************************************************
| function 2: mutateIt |
* @brief introduces mutations to the genomes in Vs, according to probability pMut,
with a max perturbation of max
*
* @param Vs a global array of all the parent and child genomes
* @param ptrs array of pointers from logical indices to actual indices into Vs for
each individual
@param rands array of random numbers
* @param pSize number of individuals in the population
* @param pMut probability that a mutation occurs, evaluated for each gene
* @param max maximum perturbation to an allele
* @param genomeSize number of genes in a genome
*******************************************************************************************/
/**
 * Applies random point mutations to the population's genomes.
 * Each gene mutates independently with probability pMut; when it does, a
 * perturbation uniformly distributed in [-max, max) is added to the allele.
 *
 * One thread handles one individual (genomeSize genes, genomeSize randoms).
 *
 * @param Vs         global array of all parent and child genomes
 * @param ptrs       logical-to-actual index map into Vs for each individual
 * @param rands      uniform [0,1) random numbers, genomeSize per individual
 * @param pSize      number of individuals in the population
 * @param pMut       per-gene mutation probability
 * @param max        maximum perturbation magnitude of an allele
 * @param genomeSize number of genes in a genome
 */
__global__ void mutateIt(float *Vs, int *ptrs, const float *rands, const int pSize, const float pMut, const float max, const int genomeSize)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= pSize) return;

  // each individual consumes a contiguous run of genomeSize random numbers
  int r = tid * genomeSize;
  // rands[r] < pMut maps onto [0, 2*max) via this scale; subtracting max
  // below centers the perturbation on zero: [-max, max)
  const float scale = 2.0f * max / pMut;

  const int begin = ptrs[tid];        // first gene of this chromosome in Vs
  const int end   = begin + genomeSize;  // one past the last gene
  for (int g = begin; g < end; ++g, ++r) {
    if (rands[r] < pMut) {
      Vs[g] += rands[r] * scale - max;
    }
  }
}
/************************************************************************************************
| function 3: scoreIt |
* @brief calculates a score indicating the closeness of fit for each individual/chromosome
(set of parameters) against the training set
* @param scores score for each conformation, calculated here, output array
* @param areas weighting for each conformation, no longer need
* @param Vs a global array of all the parent and child genomes (amplitudes)
* @param ptrs array of pointers from logical indices to actual indices into Vs for each individual
* @param tset training set
* @param tgts targets for training
* @param wts weights of each point in the training set
* @param breaks breaks in training set, where different data should not be compared across breaks
* @param nConf number of conformations in training set
* @param pSize number of individuals in the population
* @param genomeSize number of genes in a genome
* @param xx space to store energy differences for each conformation with test parameters
************************************************************************************************/
// One thread per chromosome: accumulates, over all datasets, the sum of
// pairwise absolute errors in relative energies, weighted per dataset by wts.
__global__ void scoreIt(float *scores, float *areas, const float *Vs, const int *ptrs, const int *ptrsV, const int *ptrsT, const int *ptrsD, const int *allFginDs, const int *nVperFg, const float *tset, const float *tgts, const float *wts, const int *breaks, const int nConf, const int pSize, const int trainingSize, const int genomeSize, const int nFg, const int *nCosperFg, float *xx )
{
// i represent a chromosome , a set of amplitude parameters, this function will be done for each i (chromosome) at the same time
int i=blockIdx.x * blockDim.x + threadIdx.x;
if(i<pSize){
float *x=xx+i*nConf; // for the error of each conformation
// get reference to score, S is the AAE
float *S=scores+i;
// set score to 0
*S=0.0f;
// accumulate little s (AAE) for each set
float s;
int t;
int i0;
/* start at break 0 */
int b=0;
/* loop over conformations c */
int c=0;
int d=0; // index into dataset
int fg,tg,beg,end;
int tindx;
int pt = ptrs[i]; // set the pointer index into the first element of Vs array
while(c<nConf){
//s is the sum of REE
s=0.0f;
/* loop only over in conformations within a dataset */
while(c<breaks[b+1]){
/* start with delta E (tgts) for a given conformation (c) within a break; see load.cpp
conf (c) goes through until it reach a break. the loop will set delta E */
// get first index in genome
i0=pt;
#if DEBUG>2
printf("i0: %d ", i0);
#endif
// get dE for that conformation
x[c]=tgts[c];
// Get the number of dihedral in the dataset
// loop throught the dihedrals of a given conformation
#if DEBUG>2
printf("ptrsD ??: ptrsD[d] = %d, ptrsD[d+1] = %d, d = %d\n", ptrsD[d],ptrsD[d+1],d);
#endif
tindx=0; //index into the ptrsT array 0 to number of dihedral columns in a given dataset
for (int dih=ptrsD[d];dih<ptrsD[d+1];dih++,tindx++){
//Get the fitting group for that dihedral
fg=allFginDs[dih];
#if DEBUG>2
printf("Fitting group = %d for dih index %d\n", allFginDs[dih], dih);
#endif
//get the index into Vs and tset
beg=i0+ptrsV[fg];
end=beg+nVperFg[fg];
tg=ptrsT[(c*trainingSize)+tindx]; //index into prtsT
t=(c*trainingSize)+tg;
#if DEBUG>2
printf("beg = %d, end = %d, tg = %d, tindx = %d t = %d \n", beg,end,tg,tindx,t);
#endif
//loop through the number of cosines
for (int i=beg;i<end;i++,t++) {
/* subtract contributions from each parameter for conformation c for each conformation
e.g deltaE - cos (dihedral * periodicity) * parameter generated from chromosomes
Therefore, it is delta E - sum of cosines for each dihedral */
x[c]-=Vs[i] * tset[t]; // Vs* tset is cos(n * dih)
#if DEBUG>2
printf("scoreIt: i = %d, c = %d, dih = %d, beg = %d, end = %d, t = %d, x[c] = %f, Vs[i] = %f, tset[t] = %f \n",i,c,dih,beg,end,t,x[c],Vs[i],tset[t]);
#endif
}
}
/* add differences in this error from all other errors */
#if DEBUG>2
printf("outside loopscore for x[c] = %f\n", x[c]);
#endif
for(int c2=breaks[b];c2<c;c2++){
#if DEBUG>2
printf("In loop score for x[c] = %f\n", x[c]);
printf("%d - %d\n",c,c2); //print the pairs index
#endif
// calculate the absolute error for each pairs
float err=x[c]-x[c2];
// sum the absolute of the errors (err) - -err = + err ; +err = +err
//s+=(err<0.0f?-err:err); //ternary operator, condition is err < 0.0; if true err is negative, if false error is positive
// NOTE(review): relies on CUDA's float overload of abs(); fabsf(err)
// would be more explicit — confirm before changing.
s+=abs(err);
}
/* next conformation */
++c;
}
/* add little error to big error S, weighted by number of pairs, wt is 2 / nconf*(nconf-1) */
*S+=s*wts[b];
/* go to next breakpoint (data set) */
++b;
++d;
}
} //end if in Psize
}
/**************************************************************************************************
* | function 4: calcAreas | *
* *
* calculates the areas (the probability) each individual has of mating *
*___________________________________Parameters____________________________________________________*
* @param scores scores for each individual (set of parameters) *
* @param areas fitness for each individual, in terms of probability of mating *
* @param ptrs array of pointers from logical indices to actual indices into Vs for each individual*
* @param pSize number of individuals in the population *
* @param genomeSize number of genes in a genome *
**************************************************************************************************/
/**
 * Converts each individual's score into a mating probability ("area").
 * The weighting scheme is selected by weight_flag:
 *   1 -> exp(-score / best_score)   heavy weighting relative to scores[0]
 *   2 -> 1 / (1 + score)            gentle reciprocal weighting
 *   3 -> exp(-score / kT)           Boltzmann weighting, tunable temperature
 *
 * @param scores      score of each individual
 * @param areas       output mating probabilities, indexed by genome slot
 * @param ptrs        logical-to-actual index map (scaled by genomeSize)
 * @param pSize       population size
 * @param genomeSize  genes per genome
 * @param weight_flag selects the weighting scheme (1, 2, or 3)
 * @param temperature temperature in Kelvin, used only when weight_flag == 3
 */
__global__ void calcAreas(float *scores, float *areas, const int *ptrs, const int pSize, const int genomeSize, const int weight_flag, float temperature) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float b_k = 0.001987204;      // Boltzmann constant, kcal/mol/K
  float kt = b_k * temperature; // kT in kcal/mol
  if (tid < pSize) {
    const int slot = ptrs[tid] / genomeSize;
    switch (weight_flag) {
      case 1: // heavy weighting, normalized by the best (first) score
        areas[slot] = __expf(-scores[tid] / scores[0]);
        break;
      case 2: // reciprocal weighting
        areas[slot] = 1 / (1 + scores[tid]);
        break;
      case 3: // Boltzmann weighting so probabilities can be tuned via T
        areas[slot] = __expf(-scores[tid] / kt);
        break;
    }
  }
}
/*****************************************************************************************
* | function 5: moveEm |
*
* @brief simple helper function for copying data from oldF, oldI to neWF, newI
*
* @param newF pointer to new float array
* @param newI pointer to new int array
* @param oldF pointer to old float array
* @param oldI pointer to old int array
* @param N number of floats/ints to copy
*****************************************************************************************/
/**
 * Copies N floats and N ints from the old arrays into the new ones,
 * one element per thread.
 *
 * @param newF destination float array
 * @param newI destination int array
 * @param oldF source float array
 * @param oldI source int array
 * @param N    number of elements to copy from each array
 */
__global__ void moveEm(float * newF, int *newI, float *oldF, int *oldI, int N) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= N) return;  // excess threads in the last block do nothing
  newF[idx] = oldF[idx];
  newI[idx] = oldI[idx];
}
/******************************| function 5 ends |***************************************/
/*****************************************************************************************
| sumEm and sumEmIndex : helper function for getSumAreas |
* @brief performs a sum of each successive pair of N numbers in source and stores the sums
in sums. intended to be run multiple times to sum over a whole array. if N is odd,
the last sum index will be N/2-1 and contain the sum of the last 3 numbers
*
* @param sums where to store the sums
* @param source where to get the numbers to sum together
* @param N the dimension of source
*
* @return ********************************************************/
/**
 * Pairwise-reduction step: thread i writes source[2i] + source[2i+1] into
 * sums[i]. When N is odd, the final sum (at index N/2 - 1) absorbs the
 * trailing third element so no value is dropped. Threads whose pair index
 * falls past the array write nothing.
 *
 * @param sums   output array of partial sums (length >= N/2)
 * @param source input values to reduce
 * @param N      number of elements in source
 */
__global__ void sumEm(float *sums, float *source, int N){
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int j = i << 1;  // first element of this thread's pair
  if (j + 3 < N) {
    sums[i] = source[j] + source[j + 1];
  } else if (j + 3 == N) {
    // odd N: fold the leftover element into the last sum
    sums[i] = source[j] + source[j + 1] + source[j + 2];
  } else if (j + 2 == N) {
    sums[i] = source[j] + source[j + 1];
  }
}
/*
* @brief performs a sum of pairs of N numbers in source, using locations indicated
by pointers. pointers has indices multiplied by genomeSize. intended to be
run multiple times to sum over a whole array. if N is odd, the last sum index
will be N/2-1 and contain the sum of the last 3 numbers
*
* @param sums where to store the sums
* @param source an array where to get the numbers to sum together
* @param N the dimension of source
* @param ptrs the indices to use when gathering pairs for summation
* @param genomeSize the number by which the indices in ptrs are scaled
*
* @return
*/
// First reduction pass over per-individual areas: like sumEm, but each
// element is gathered indirectly through ptrs (whose entries are scaled by
// genomeSize, hence the division). When N is odd the last sum absorbs the
// trailing third element.
__global__ void sumEmIndex(float *sums, float *source, int N, const int *ptrs, const int genomeSize){
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=(i<<1); // j = i*2 (multiplication using a left bitwise shift)
if(j+3<N)sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize];
else if(j+3==N) sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize]+source[ptrs[j+2]/genomeSize];
else if(j+2==N) sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize];
#if DEBUG>1
if(j+2<=N)printf(" %d:%f",i,sums[i]);
#endif
}
/*******************************| end of helper function |*******************************/
/*****************************************************************************************
* | function 6: getSumAreas | *
* ---------uses sumEmIndex and sumEM-------- *
* *
* @brief get sum of all areas *
* @param areas_d pointer to areas on device *
* @param ptrs_d pointer to indices for each individual in population *
* @param pSize population size *
* @param temp_d pointer to temporary array on device *
* @param genomeSize number of alleles in genome *
*****************************************************************************************/
// Reduces the per-individual areas to a single total on the device.
// First pass gathers through ptrs_d (sumEmIndex); subsequent passes
// (sumEm) ping-pong between the two halves of temp_d, toggling `offset`
// between 0 and pSize/2 via XOR, halving `dim` each iteration. Returns a
// DEVICE pointer to the final sum (inside temp_d) — do not free it and do
// not dereference it on the host.
float *getSumAreas(float *areas_d, int *ptrs_d, int pSize, float *temp_d, const int & genomeSize){
int dim=pSize; //Set dim to pSize
int offset=0;
// return an array of sums (temp_d), sum up the probabilities in areas_d array
hipLaunchKernelGGL(( sumEmIndex) , dim3(((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, temp_d, areas_d, dim, ptrs_d, genomeSize);
pSize >>= 1;
while((dim>>=1)>1){ // while pSize/2 is greater than 1: Keep dividing (1/2 psize) by 2
offset^=pSize; //bitwise XOR offset is 1/2 pSize then 0, then 1/2 pSize, then 0...
// doing this switch the source to be (temp+pSize/2) then the source changes to (temp_d+0), then back and forth
hipLaunchKernelGGL(( sumEm) , dim3(((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, temp_d+offset, temp_d+(offset^pSize), dim);
}
return temp_d+offset;
}
/*
/////////////////////////////////////////////////////// `
////////////////////////////////// `
///////////////////// | |
///////////// ~ ~ ~ ~ ~ ~ ~
//////// | |
///// ____| |____
/// | |
// ___| J.M |___
/ | K.B |
/ PROGRAM BEGINS HERE | C.T |
*****************************************************************************************/
/*****************************************************************************************
argc is a variable with the number of arguments passed to GenA
argv is a vector of strings representing the arguments that GenA takes
input file: parameter-fitting data using the following format:
_____________________________________________________________________
|-<dihedral> <AMBER atom type for dihedral 1> -Fg_0 periodicities |
|-<dihedral> <AMBER atom type for dihedral 2> -Fg_1 periodicities |
|<name of data set> <weights <dihedral 1> <dihedral 2> ndih> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| ... |
|/ |
|<name of data set> <weights <dihedral 1> <dihedral 2> ndih> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| ... |
|/ |
|_____________________________________________________________________|
<dihedral> is the name of dihedral e.g phi, psi, chi1, chi2, chi3, etc
ndih> is the number of dihedral in the dataset
<AMBER atom type for dihedral 1> e.g chi1 is N -CX-2C-2C for Met, get from frcmod file
<name of data set> is any name, e.g Metalpha, Metbeta, Metcharge
<dihedral 1 value> this is the dihedral value (deg) of the optimized QM structures
e.g 105.62
<E_QM> the QM energy of conformation i with restraint dihedral
<E_MM> the MM energy of conformation i with with zeroed dihedral parameters in the
frcmod
... repeat for all conformations within a break
/ (refer to as break (brk))
a break separates conformations that belong to different databases
e.g alpha backbone, beta backbone, charge amino acids
GOODLUCK!!!
[ O O ]
[ b ' ]
[ ----- ]
contact: kellonbelfon@gmail.com with RAGTAG title for help
*****************************************************************************************/
int main(int argc, char *argv[]){
/* start the timer */
auto t1=std::chrono::high_resolution_clock::now();
/*specify the string name of the savefile, scorefile, loadfile etc */
std::string saveFile, loadFile, scoreFile, logFile, frcmodFile, inputFile, fitFile;
/* genetic algorithm parameters initiated */
int pSize, nGen, rseed, peng, ncp, nCos, nChrom, nDih, nFg, mWo;
float pMut, max, pCross, keep, tempe;
/* getting the filenames from the commands -r, -c, -s, -o, -f -y -a */
for (int i=1;i<argc;i++){
if(i+1<argc){
if(argv[i][0]=='-'&&argv[i][1]=='r')saveFile=argv[++i]; //file that save amplitudes parameter (Vs)
else if(argv[i][0]=='-'&&argv[i][1]=='c')loadFile=argv[++i]; //file with Vs for restart or from other forcefields
else if(argv[i][0]=='-'&&argv[i][1]=='s')scoreFile=argv[++i]; // file that save the scores
else if(argv[i][0]=='-'&&argv[i][1]=='f')frcmodFile=argv[++i]; //file that save frcmod file
else if(argv[i][0]=='-'&&argv[i][1]=='o')logFile=argv[++i]; //file that save outputs
else if(argv[i][0]=='-'&&argv[i][1]=='i')inputFile=argv[++i]; // input file with dihedral info
else if(argv[i][0]=='-'&&argv[i][1]=='y')fitFile=argv[++i]; // file with and idea of how your target energy change
}
}
/* open the output file which is the log file */
std::ofstream logfile;
logfile.open (logFile.c_str(), ios::out);
/* open the score file to store scores */
std::ofstream scorefile;
scorefile.open (scoreFile.c_str(), ios::out);
scorefile << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores" << std::setw(14) << "areas\n";
/* Now load genA parameters, from the parmfile -p */
for (int i=1;i<argc;i++){
if(i+1<argc){
if(argv[i][0]=='-'&&argv[i][1]=='p'){
ConfigFile cfg(argv[++i]); //file that has the genetic algorithm parameters
// check if keys exixt and set a message to the user that we are using the default
if (!(cfg.keyExists("pSize"))) std::cout << "pSize was not specified, using default of 2000\n";
if (!(cfg.keyExists("nGen"))) std::cout << "nGen was not specified, using default of 1000\n";
if (!(cfg.keyExists("pMut"))) std::cout << "pMut was not specified, using default of 0.01\n";
if (!(cfg.keyExists("max"))) std::cout << "max was not specified, using default of 0.5\n";
if (!(cfg.keyExists("pCross"))) std::cout << "pCross was not specified, using default of 0.8\n";
if (!(cfg.keyExists("peng"))) std::cout << "peng was not specified, using default of 10\n";
if (!(cfg.keyExists("ncp"))) std::cout << "ncp was not specified, using default of 2\n";
if (!(cfg.keyExists("keep"))) std::cout << "keep was not specified, using default of 0.2\n";
if (!(cfg.keyExists("nDih"))) std::cout << "nDih was not specified, using default of 1\n";
if (!(cfg.keyExists("nFg"))) std::cout << "nFg was not specified, using default of 1\n";
if (!(cfg.keyExists("mWo"))) std::cout << "mWo was not specified, using default of 1\n";
// Retreive the value of keys
pSize = cfg.getValueOfKey<int>("pSize", 2000);
logfile << "Population Size (pSize): " << pSize << "\n\n";
nGen = cfg.getValueOfKey<int>("nGen", 1000);
logfile << "Number of Generations (nGen): " << nGen << "\n\n";
pMut = cfg.getValueOfKey<float>("pMut", 0.01);
logfile << "Probability of Mutations (pMut): " << pMut << "\n\n";
max = cfg.getValueOfKey<float>("max", 0.5);
logfile << "Maximal permissible mutation (max): " << max << "\n\n";
pCross = cfg.getValueOfKey<float>("pCross", 0.8);
logfile << "Probability of crossover (pCross): " << pCross << "\n\n";
rseed = cfg.getValueOfKey<int>("rseed", 314245);
logfile << "Random seed (rseed): " << rseed << "\n\n";
peng = cfg.getValueOfKey<int>("peng", 10);
logfile << "Print scores every " << peng << "generations (peng)\n\n";
ncp = cfg.getValueOfKey<int>("ncp", 2);
logfile << "Print scores of only " << ncp << " chromosomes every peng \n\n";
nCos = cfg.getValueOfKey<int>("nCos", 4);
logfile << "Periodicity (nCos): " << nCos << "\n\n";
keep = cfg.getValueOfKey<float>("keep", 0.2);
logfile << "We will use " << keep << " for the elitist regime\n\n";
nDih = cfg.getValueOfKey<int>("nDih", 1);
logfile << "Number of dihedral(s) (nDih): " << nDih << "\n\n";
nFg = cfg.getValueOfKey<int>("nFg", 1);
logfile << "Number of Fitting groups (nFg): " << nFg << "\n\n";
mWo = cfg.getValueOfKey<int>("mWo", 1);
logfile << "Mating weight option flag " << mWo << "\n\n";
// it the mating weight option is 3 then read temperature
if (mWo==3) {
tempe = cfg.getValueOfKey<int>("tempe", 298.0);
logfile << "Temperature (K) " << tempe << "\n\n";
}
if(!loadFile.empty()) {
nChrom = cfg.getValueOfKey<int>("nChrom", 100);
logfile << "Number of chromosome reported is : " << nChrom << "\n\n";
}
}
}
}
/* initializing GPU (_d) and CPU arrays */
hipError_t error;
size_t nRands;
hiprandGenerator_t gen;
float *Vs, *Vs_d, *rands, *rands_d, *tset, *tset_d, *tgts, *tgts_d, *wts, *wts_d, *xx_d;
float *scores, *scores_d, *areas, *areas_d, *EMM0;
int genomeSize, trainingSize, g, totdih, *ptrs_d, *ptrs, N, nConf=0, nDataset=0, *breaks, *breaks_d, nBreaks;
int *ptrsT, *ptrsV, *ptrsD, *ptrsT_d, *ptrsV_d, *ptrsD_d, *allFginDs, *allFginDs_d;
int *nCosperFg, *nCosperFg_d, *nVperFg, *nVperFg_d, *nDihperDs, *nDihperDs_d, *DihFgindx;
int save=pSize*keep; //save is number of chromosome we will keep as elitist
/***************************| load data from load.cpp |***********************************
* check load.cpp for this section *
* map is a way to create a dictionary, correction map is an array with key *
*****************************************************************************************/
/* initiating container with key and values name correctionMap */
std::map<std::string,DihCorrection> correctionMap;
/* input file open, with dihedral info */
std::ifstream inputfile;
inputfile.open (inputFile.c_str(), std::ios::in);
/* load in arrays generated from load.cpp, check it out for further comments */
load(inputfile, &tset, &ptrsV, &ptrsT, &ptrsD, &allFginDs, &nDihperDs, &tgts, &wts, &nConf, &nDataset, &breaks, &nBreaks, &trainingSize, &genomeSize,
correctionMap, &nVperFg, &nCosperFg, nCos, nFg, nDih, &totdih, &DihFgindx, &EMM0);
logfile << "Input file loaded ('_')" << "\n\n";
/****************************************************************************************/
/*************************| memory allocation |*******************************************
* Declare and allocate host and device memory, copy data arrays from CPU host
(breaks,tset,
* tgts,wts) to device GPU (breaks_d, etc)
*****************************************************************************************/
#if DEBUG && 0
for(int i=0;i<nConf;i++){
for(int j=0;j<trainingSize;j++)
std::cerr << ' ' << tset[i*trainingSize+j];
std::cerr << std::endl;
}
std::cerr << tgts[0] << ' ' << tgts[1] << ' ' << tgts[2] << ' ' << tgts[3] << std::endl;
std::cerr << "first hipMalloc, " << nBreaks << " breaks" << std::endl;
#endif
// memory allocation onf the GPU
hipMalloc(&nCosperFg_d, nFg*sizeof(int));
hipMalloc(&ptrsV_d, nFg*sizeof(int));
hipMalloc(&ptrsT_d, nConf*trainingSize*sizeof(int));
hipMalloc(&ptrsD_d, (nDataset+1)*sizeof(int));
hipMalloc(&nDihperDs_d, (nDataset+1)*sizeof(int));
hipMalloc(&allFginDs_d, totdih*sizeof(int));
hipMalloc(&nVperFg_d, nFg*sizeof(int));
// Some cuda copies, here TODO: Copy all at the same time, to reduce time
hipMemcpy(ptrsV_d, ptrsV, nFg*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(ptrsT_d, ptrsT, nConf*trainingSize*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(ptrsD_d, ptrsD, (nDataset+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(nDihperDs_d, nDihperDs, (nDataset+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(allFginDs_d, allFginDs, totdih*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(nVperFg_d, nVperFg, nFg*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(nCosperFg_d, nCosperFg, nFg*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **)&breaks_d, nBreaks*sizeof(int));
hipMalloc((void **)&tgts_d, (nBreaks-1+nConf*(1+trainingSize))*sizeof(float));
wts_d=tgts_d+nConf;
tset_d=wts_d+nBreaks-1;
#if DEBUG
std::cerr << "COPY" << std::endl;
#endif
/* Copying over the arrays from the CPU to GPU
nbreaks is the # of dataset + 1. e.g if you are doing alpha and beta backbone set then nbreaks=3
genomesize is the # of fitting dihedral * periodicity, e.g 3 set of dihedral * 4 periodicity = 12
nconf is the # of conformations you are fitting
tgts is (E_QMi-E_MMi) + (E_MMref-E_QMref) for each conformation, which = nconf, see load.cpp
tset is the cos(dih*periodicity) for 4 periodicity for a dihedral for each conformation
so 20 conf will give tgts of 20 (nconf) * 12 (# of dih * periodicity) = 120
*/
hipMemcpy(breaks_d, breaks, nBreaks*sizeof(breaks[0]), hipMemcpyHostToDevice);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error:(Memcpy breaks) %s\n", hipGetErrorString(error));}
hipMemcpy(tset_d, tset, nConf*trainingSize*sizeof(float), hipMemcpyHostToDevice);
printf("trainingSize is %d after cuda copy\n", trainingSize);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error:(Memcpy tset) %s\n", hipGetErrorString(error));}
hipMemcpy(tgts_d, tgts, nConf*sizeof(float), hipMemcpyHostToDevice);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: (Memcpy tgts) %s\n", hipGetErrorString(error));}
hipMemcpy(wts_d, wts, (nBreaks-1)*sizeof(*wts), hipMemcpyHostToDevice);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: (Memcpy wts) %s\n", hipGetErrorString(error));}
/**********************| initiate GPU blocks and # of random variable |***************************
* we need randoms, new pop 3xcrossover, genomeSizexmut *
* genome size is the number of genes which is all the parameters, *
* e.g for 4 periodicity and three dihedral fitting, then genomesize will be 4 * 3 = 12 *
* nRands is number of randoms we need for each set of parameters *
* e.g if psize (population size) is 10, then number of random number we will need is *
* (3+(# of periodicity x # of dihedral)) * psize *
* so for 4 periodicity and 3 dihedral fitting (chi1 chi2 chi3), then nRands = 3+12 * 10 = 150 *
*________________________________________________________________________________________________*
* nBlocks is dependent on the population size, it is use to figure out how many GPU blocks *
* we need to initialize the arrays for calculations. Each block has 256 threads. *
* one thread represent one individual (chromosome with soln parameters) from the population *
* e.g population size of 2000 will require (2000+256-1)/256 = 8.81 => 8 blocks *
* *
*************************************************************************************************/
nRands=(3+genomeSize)*pSize;
int nBlocks=(pSize+BLOCK_SIZE-1)/BLOCK_SIZE;
/*******************************| initializing host and device variables|************************
* N (bitwise operation below) is the pSize (1st input) multiply by 2; *
* initiating the chromosomes which have the solns *
************************************************************************************************/
rands=(float *)malloc(nRands*sizeof(float));
N=(pSize<<1);
HANDLE_ERROR(hipMalloc((void **)&Vs_d, (N*(genomeSize+4)+pSize*nConf+nRands)*sizeof(float)));
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: (Malloc Vs_d) %s\n", hipGetErrorString(error));}
rands_d=Vs_d+N*genomeSize;
scores_d=rands_d+nRands;
areas_d=scores_d+(N<<1);
xx_d=areas_d+(N<<1);
scores=(float *)malloc(sizeof(*scores)*N);
float *scores_ds[2];
scores_ds[0]=scores_d;
scores_ds[1]=scores_d+N;
printf("GENOMESIZE: %d \n", genomeSize);
// allocate memory to host Vs (amplitudes or barrier height for the cosine function)
Vs=(float *)malloc(N*genomeSize*sizeof(float));
areas=(float *)malloc(N*sizeof(float));
/* allocate the memory space to hold array of pointers (prts) of size N (2*pSize)
these pointers point to the individuals (chromosome) in the population */
ptrs=(int *)malloc(sizeof(int)*N);
ptrs[0]=0;
for(g=1;g<N;g++)ptrs[g]=ptrs[g-1]+genomeSize;
HANDLE_ERROR(hipMalloc((void **)&ptrs_d, N*2*sizeof(int)));
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: (Malloc ptrs_d) %s\n", hipGetErrorString(error));}
int *ptrs_ds[2];
ptrs_ds[0]=ptrs_d;
ptrs_ds[1]=ptrs_d+N;
hipMemcpy(ptrs_d, ptrs, sizeof(int)*N, hipMemcpyHostToDevice);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: (Memcpy ptrs_d) %s\n", hipGetErrorString(error));}
int curList=0;
/* thrust is a c++ template library for CUDA similar to STL it have two containers:
thrust::host_vector<type> and thrust::device_vector<type>
The containers make common operations such as hipMalloc, hipFree, hipMemcpy, more concise
e.g thrust::host_vector<int> vec_h(2) will allocate host vector with 2 elements
thrust::device_vectore<int> vec_d = vec_h will copy host vector to device
This will allow you to directly manipulate device values from the host
so vec_d[0] = 5; can be done from host and once you output vector memory is
automatically released
it have a few algorithms, we use thrust::sort(), */
thrust::device_ptr<int> dPtrs(ptrs_d), dPtrs_save(ptrs_d+save);
thrust::device_ptr<float> dScores(scores_d), dVs(Vs_d);
thrust::device_ptr<float> dScores_save(scores_d+save),
dScores_pSize(scores_d+pSize),
dScores_N(scores_d+N);
/**************************| Create a random generator |********************************************
*hiprandSetPseudoRandomGeneratorSeed takes two parameters (1) the generator (gen) & (2) seed value *
* seed value # is used to initialize the generator and control the set of random numbers; *
* same seed will give the same set of random numbers from the pseudorandom generator               *
* rseed is the random number specified from the 6th input) *
*__________________________________________________________________________________________________*
* hiprandGenerateNormal take 5 parameters: *
* (1) generator - Generator to use *
* (2) outputPtr - Pointer to device memory to store CUDA-generated results, *
or Pointer to host memory to store CPU-generated results                             *
* (3) num - Number of floats to generate *
* (4) mean - Mean of normal distribution *
* (5) stddev - Standard deviation of normal distribution *
* Results are 32-bit floating point values with mean and standard deviation. *
***************************************************************************************************/
// create the generator name gen
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
// initiate the generator with the random seed (rseed) for natural distribution of random numbers
hiprandSetPseudoRandomGeneratorSeed(gen, rseed);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (seed)\n", hipGetErrorString(error));}
// Vs_d is the amplitudes which is random numbers but can be overwritten with preloaded Vs
hiprandGenerateNormal(gen, Vs_d, N*genomeSize, 0, 1);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (normal)\n", hipGetErrorString(error));}
#if DEBUG
hipMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, hipMemcpyDeviceToHost);
/// print the three Vs from the first two chromosomes.
std::cout << "random Vs, created on GPU" << std::endl;
for(int i=0;i<1;i++){
std::cout << Vs[ptrs[i]] << " " << Vs[ptrs[i]+1] << " " << Vs[ptrs[i]+2] << std::endl;
}
#endif
/***** if we have a load file copy Vs (amplitude parameters) from the loaded file and populate Vs ***********/
if(!loadFile.empty()) {
std::ifstream loadfile;
loadfile.open (loadFile.c_str(), std::ios::in);
// copy the random Vs to add previous chromosome of nChrom
hipMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, hipMemcpyDeviceToHost);
if (loadfile.is_open()) {
for (int i=0;i<nChrom;i++) {
for (int j=0;j<genomeSize;j++) {
loadfile >> Vs[ptrs[i]+j];
}
}
}
// print the two Vs from the first two chromosomes, to ensure your Vs were loaded.
logfile << "Here is your loaded Vs(amplitudes) for first three chromosomes: \n\n" << std::endl;
for(int i=0;i<3;i++){
for(int j=0;j<genomeSize;j++){
logfile << Vs[ptrs[i]+j] << " ";
}
logfile << "\n";
}
// copy loaded Vs to the GPU and overwrite random Vs. If user only create two chromosomes or
// previous Vs then the rest of the chromosome will be random
hipMemcpy(Vs_d, Vs, N*genomeSize*sizeof(*Vs), hipMemcpyHostToDevice);// copy to GPU
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: (loadingVs) %s\n", hipGetErrorString(error));}
}
#if DEBUG
// check to see if Vs was transfer to gpu successful
/// print the three Vs from the first two chromosomes.
std::cout << "loaded Vs" << std::endl;
for(int i=0;i<1;i++){
std::cout << Vs[ptrs[i]] << " " << Vs[ptrs[i]+1] << " " << Vs[ptrs[i]+2] << std::endl;
}
hipMemcpy(Vs_d, Vs, pSize*genomeSize*sizeof(*Vs), hipMemcpyHostToDevice);// copy to GPU
hipMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, hipMemcpyDeviceToHost); // copy back to CPU
/// print the three Vs from the first two chromosomes.
std::cout << "After transfer of loaded Vs to GPU" << std::endl;
for(int i=0;i<1;i++){
std::cout << Vs[ptrs[i]] << " " << Vs[ptrs[i]+1] << " " << Vs[ptrs[i]+2] << std::endl;
}
#endif
/***************************| score of the first set of chromosomes |*******************************
* Here we score the two arrays of parents with solution parameters in the initial population *
***************************************** *******************************************************/
// launch first kernel to score the initial set of chromosomes (Vs_d) and output scores in scores_ds
hipLaunchKernelGGL(( scoreIt) , dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList], areas_d, Vs_d, ptrs_ds[curList],
ptrsV_d, ptrsT_d, ptrsD_d, allFginDs_d, nVperFg_d, tset_d, tgts_d, wts_d, breaks_d,
nConf, pSize, trainingSize, genomeSize, nFg, nCosperFg_d, xx_d);
// score of chromosomes outside of psize since we initiated 2 times psize
hipLaunchKernelGGL(( scoreIt) , dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize,
ptrsV_d, ptrsT_d, ptrsD_d, allFginDs_d, nVperFg_d, tset_d, tgts_d, wts_d, breaks_d,
nConf, pSize, trainingSize, genomeSize, nFg, nCosperFg_d, xx_d);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (1stscore)\n", hipGetErrorString(error));}
// print the initial scores based on ncp as Initial, doing this before sorting so as to score the loaded Vs parameters
hipMemcpy(scores, scores_ds[curList], sizeof(*scores)*ncp, hipMemcpyDeviceToHost);
for(int m=0;m<ncp;m++){
scorefile << std::setw(6) << "Initial" << std::setw(14) << m << std::setw(18) << scores[m]/nDataset << "\n";
}
/* sort the scores from each chromosome of the initial population */
thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+N), thrust::device_pointer_cast(ptrs_ds[curList]));
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (1stsort)\n", hipGetErrorString(error));}
// print the initial scores based on ncp as -1, doing this after sorting so we can see how good the best ones are
hipMemcpy(scores, scores_ds[curList], sizeof(*scores)*ncp, hipMemcpyDeviceToHost);
for(int m=0;m<ncp;m++){
scorefile << std::setw(6) << "Init_after_sort" << std::setw(14) << m << std::setw(18) << scores[m]/nDataset << "\n";
}
#if DEBUG>2
hipMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, hipMemcpyDeviceToHost);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n (memcpy scores)", hipGetErrorString(error));}
hipMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, hipMemcpyDeviceToHost);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));}
hipMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, hipMemcpyDeviceToHost);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));}
/* i is each chromosome, scores[i] is scores, Vs[ptrs[i]] is the amplitude parameters;
Vs[ptrs[i]]+n specifies the next n amplitude. e.g chromosome i have genomesize amplitude parms
e.g Vs[ptrs[i]]+1 is the amplitude term when the periodicity is 3 for the 1st dihedral being
fitted, and Vs[ptrs[i]]+4, the amplitude term when the periodicity is 4 for the 2nd dihedral */
for(int i=0;i<N;i++){
std::cerr << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n";
}
#endif
/****************************| Let us begin the iterations through generations |********************
Genetic algorithm iterations through the number of generations (nGen: 2nd input)
****************************************************************************************************/
/* for loop for the generation */
for(g=0;g<nGen;g++){
// create an array of random numbers (rands_d) used for mutations and crossover where the number of random #s is nRands
hiprandGenerateUniform(gen, rands_d, nRands);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: (GenerateUniform)%s\n", hipGetErrorString(error));}
// Step2: calculate the probabilities (areas) each individual (chromosome) has of mating
hipLaunchKernelGGL(( calcAreas) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList], areas_d, ptrs_d, pSize, genomeSize, mWo, tempe);
// Step3: mate the individuals (chromosomes,Parent[0],[1]) selected for the next generation
hipLaunchKernelGGL(( mateIt) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, Vs_d, ptrs_ds[curList], areas_d,
getSumAreas(areas_d, ptrs_ds[curList], pSize, areas_d+N, genomeSize),
rands_d, pCross, pSize, genomeSize);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (mate)\n", hipGetErrorString(error));}
// Step4: mutate individuals generated after mating
hipLaunchKernelGGL(( mutateIt) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, Vs_d, ptrs_ds[curList]+pSize, rands_d+pSize*3, pSize, pMut, max, genomeSize);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (mutate)\n", hipGetErrorString(error));}
// Step5: Score the individuals to select for the next generation
hipLaunchKernelGGL(( scoreIt) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize,
ptrsV_d, ptrsT_d, ptrsD_d, allFginDs_d, nVperFg_d, tset_d, tgts_d, wts_d, breaks_d,
nConf, pSize, trainingSize, genomeSize, nFg, nCosperFg_d, xx_d);
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (score)\n", hipGetErrorString(error));}
// Step6: Sort the scored chromosomes (individuals) & select for mating for next generation
// curList^1 change curList to 1
// move the scores and pointers to the chromosome for the elitist parents)
hipLaunchKernelGGL(( moveEm) , dim3((save+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList^1], ptrs_ds[curList^1], scores_ds[curList], ptrs_ds[curList], save);
// curList^1 change curList to 0
// move the scores and pointers to the chromosome for the offsprings
hipLaunchKernelGGL(( moveEm) , dim3((pSize+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList^1]+save, ptrs_ds[curList^1]+save, scores_ds[curList]+pSize, ptrs_ds[curList]+pSize, pSize);//nOffspring);
// curList^1 change curList to 1
// move the scores and pointers to the chromosome of the left over parent
hipLaunchKernelGGL(( moveEm) , dim3((pSize-save+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList^1]+save+pSize, ptrs_ds[curList^1]+save+pSize, scores_ds[curList]+save, ptrs_ds[curList]+save, pSize-save);
// curList back to 0
curList^=1;
/* first sort only the offspring */
#if DEBUG>1
std::cerr << "Selection sort (" << N << " items, less " << save << ")" << std::endl;
#endif
thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]+save), thrust::device_pointer_cast(scores_ds[curList]+pSize+save), thrust::device_pointer_cast(ptrs_ds[curList]+save));
/* second sort is to sort the elitist parent and the offsprings (psize-save) that fall into pSize */
#if DEBUG>1
std::cerr << "Rank sort" << std::endl;
#endif
thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+pSize), thrust::device_pointer_cast(ptrs_ds[curList]));
/****************************************************************************************************
* Here you can print the score of chromosomes (total is 2 x population size) to score file (-s) *
****************************************************************************************************/
//peng --> print every n generation
//ncp --> number of chromosomes to print
//if generation is divisible by peng
if(g%peng==0) {
//scorefile << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores\n";
hipMemcpy(scores, scores_ds[curList], sizeof(*scores)*ncp, hipMemcpyDeviceToHost); //copy over ncp scores
hipMemcpy(areas, areas_d, sizeof(*areas)*ncp, hipMemcpyDeviceToHost); //copy over ncp areas
// divide score by the number of datasets to print the average of the datasets since score is sum of each dataset score
for(int m=0;m<ncp;m++){
scorefile << std::setw(6) << g << std::setw(14) << m << std::setw(18) << scores[m]/nDataset << std::setw(18) << areas[m] << "\n";
}
}
/* END GENETIC ALGORITHM */
}
scorefile.close();
/****************************************************************************************************
* TERMINATION, LAST RESULTS < SCORES AND PARAMETERS FOR EACH INDIVIDUAL
****************************************************************************************************/
/***************************************************************************************************/
/* copy over the results from GPU to the CPU to save the scores and parameters */
hipMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, hipMemcpyDeviceToHost);
hipMemcpy(ptrs, ptrs_ds[curList], sizeof(int)*N, hipMemcpyDeviceToHost);
hipMemcpy(scores, scores_ds[curList], sizeof(float)*N, hipMemcpyDeviceToHost);
hipMemcpy(tgts, tgts_d, sizeof(float)*nConf, hipMemcpyDeviceToHost);
hipMemcpy(tset, tset_d, nConf*trainingSize*sizeof(float), hipMemcpyDeviceToHost);
/****************************************************************************************************/
// Here we will move the parameters back into the dihedral space
// TODO: Write these out as functions in separate files
// kept the parameters sorted so first Vs in ptrs[i] is best in Vs = Vs_dih[0]
float *Vs_dih;
Vs_dih=(float *)malloc(N*trainingSize*sizeof(float));
int fg,begv,endv;
// For a given chromosome (set of Vs)
for(int i=0;i<N;i++){
int pt=ptrs[i]; // set the pointer index into the first element of Vs array
int kN=i*trainingSize;
// for a given dihedral define in the input file
int k=0;
for (int dih=0;dih<nDih;dih++){
// get the fitting group it belongs to
fg=DihFgindx[dih]; //get the fittting group of that dihedal
//std::cout << "fg = " << fg << " for dih = " << dih << std::endl;
// get the pointers into the Vs for that fg
begv=pt+ptrsV[fg];
endv=begv+nVperFg[fg];
for (int v=begv;v<endv;v++){
Vs_dih[kN+k]=Vs[v];
//printf("begv = %d, endv = %d, v = %d, kN+k = %d, Vs[v] = %f\n", begv,endv,v,kN+k,Vs[v]);
k++;
}
}
}
/****************************************************************************************************/
// Here I am writing out the initial dE and the final dE, see load.cpp for description
/* file that stores initial dE */
std::ofstream fitfile;
fitfile.open (fitFile.c_str(), ios::out);
int i0, t;
int b=0;
int d=0; // index into dataset
int c=0; // conformation index
float DS_score[nDataset]; //hold the dataset scores
float x[nConf]; // for the error of each conformation
float *S=scores+0;
// set score to 0
*S=0.0f;
// accumulate little s for each set
float s;
int tg,beg,end;
int tindx;
int pt = ptrs[0]; //only want the best, is it sorted as yet??? set the pointer index into the first element of Vs array
fitfile << "DATASET "<< d << ":" << "\n";
while(c<nConf){
//s is the sum of REE
s=0.0f;
/* loop only over in conformations within a dataset */
while(c<breaks[b+1]){
float parm=0;
/* start with delta E (tgts) for a given conformation (c) within a break; see load.cpp
conf (c) goes through until it reach a break. the loop will set delta E */
// get first index in genome
i0=pt;
//printf("i0: %d ", i0);
// get dE for that conformation
x[c]=tgts[c];
// Get the number of dihedral in the dataset
// loop throught the dihedrals of a given conformation
//printf("ptrsD ??: ptrsD[d] = %d, ptrsD[d+1] = %d, d = %d\n", ptrsD[d],ptrsD[d+1],d);
tindx=0; //index into the ptrsT array 0 to number of dihedral columns in a given dataset
float holdc = x[c];
fitfile << "dE for Conf "<< c << ": " << holdc;
for (int dih=ptrsD[d];dih<ptrsD[d+1];dih++,tindx++){
//Get the fitting group for that dihedral
fg=allFginDs[dih];
//printf("Fitting group = %d for dih index %d\n", allFginDs[dih], dih);
//get the index into Vs and tset
beg=i0+ptrsV[fg];
end=beg+nVperFg[fg];
tg=ptrsT[(c*trainingSize)+tindx]; //index into prtsT
t=(c*trainingSize)+tg;
//printf("beg = %d, end = %d, tg = %d, tindx = %d t = %d \n", beg,end,tg,tindx,t);
//loop through the number of cosines
for (int i=beg;i<end;i++,t++) {
/* subtract contributions from each parameter for conformation c for each conformation
e.g deltaE - cos (dihedral * periodicity) * parameter generated from chromosomes
Therefore, it is delta E - sum of cosines for each dihedral */
x[c]-=Vs[i] * tset[t]; // Vs* tset is cos(n * dih)
parm += Vs[i] * (1+tset[t]);
//#if DEBUG>2
//printf("scoreIt: i = %d, c = %d, dih = %d, beg = %d, end = %d, t = %d, x[c] = %f, Vs[i] = %f, tset[t] = %f \n",i,c,dih,beg,end,t,x[c],Vs[i],tset[t]);
//#endif
}
}
fitfile << "; Parameters Energy for Conf "<< c << ": " << parm << "\n";
fitfile << "; MM0 Energy for Conf "<< c << ": " << EMM0[c] << "\n";
fitfile << "; MM Energy for Conf "<< c << ": " << (EMM0[c] + parm) << "\n";
/* add differences in this error from all other errors */
//printf("outside loopscore for x[c] = %f\n", x[c]);
for(int c2=breaks[b];c2<c;c2++){
#if DEBUG>2
printf("In loop score for x[c] = %f\n", x[c]);
printf("%d - %d\n",c,c2); //print the pairs index
#endif
// calculate the absolute error for each pairs
float err=x[c]-x[c2];
// sum the absolute of the errors (err) - -err = + err ; +err = +err
//s+=(err<0.0f?-err:err); //ternary operator, condition is err < 0.0; if true err is negative, if false error is positive
s+=abs(err);
fitfile << "REE for Conf " << c << " and " << c2 << ": " << abs(err) << "\n";
}
//printf("score for c %d = %f\n", c,s);
/* next conformation */
++c;
}
/* add little error to big error S, weighted by number of pairs, wt is 2 / nconf*(nconf-1) */
*S+=s*wts[b];
DS_score[d] = s*wts[b];
/* go to next breakpoint (data set) */
++b;
++d;
}
fitfile << "Scores per Datasets:" << "\n";
for(int d=0;d<nDataset;d++){
fitfile << std::setw(6) << d << std::setw(18) << DS_score[d] << "\n\n";
}
fitfile.close();
/****************************************************************************************************/
/* saving all of the scores, with dihedral parameters to the logfile */
logfile << "\n";
logfile << "Printing all of the final dihedral parameters, check your -f file for the best one \n\n";
logfile << "The first one is the best score, best parameters\n\n";
/* loop through the population */
for(int i=0;i<pSize;i++){
// these are the final scores for each individual in the population, print in the output file
// divide score by the number of datasets to print the average of the datasets since score is sum of each dataset score
logfile << std::fixed << "chromosome: " << ptrs[i]/genomeSize << std::endl;
logfile << std::fixed << "Average Score: " << scores[i]/nDataset << std::endl;
for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){
// second.setGenome(Vs+ptrs[i]) is the dihedral parameters for each individual in the population
//print in the output file
//logfile << it->second.setGenome(Vs+ptrs[i]);
logfile << it->second.setGenome(Vs_dih+(i*trainingSize));
}
}
/****************************************************************************************************/
/* Save a frcmod file to use in Amber */
if(!frcmodFile.empty()){
std::ofstream frcmodfile;
frcmodfile.open (frcmodFile.c_str(), ios::out);
frcmodfile << "frcmod from GenA.cu \n";
frcmodfile << "DIHE\n";
int holdFG[nFg] = {-1};
// loop through all dihedral DihCorrection map (this is the dihedrals names/atomtypes in the input file)
for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){
// loop through fitting groups to check if this fitting group is already printed
for(int f=0;f<nFg;f++){
// if it is not already printed
if (holdFG[f] != f) {
if (it->second.fitgrpindx == f) {
//frcmodfile << it->first << "\n"; // dihedral name
frcmodfile << it->second.setGenome(Vs_dih+0); //the best parameters
holdFG[f]=f;
}
}
}
}
frcmodfile.close();
}
/****************************************************************************************************/
/* Save the amplitudes to a restart file */
if(!saveFile.empty()){
std::ofstream savefile;
savefile.open (saveFile.c_str(), ios::out);
// Write restart in parameter space
for(int i=0;i<N;i++){
for(int j=0;j<genomeSize;j++){
//savefile << std::setw(9) << ptrs[i]+j << " ";
savefile << std::setw(9) << Vs[ptrs[i]+j] << " ";
}
savefile <<"\n";
}
// write to file in dihedral space
savefile <<"\n\n\n\n\n";
for(int i=0;i<N;i++){
int kN=i*trainingSize;
for(int j=0;j<trainingSize;j++){
savefile << std::setw(9) << Vs_dih[kN+j] << " ";
}
savefile << "\n";
}
savefile.close();
}
/****************************************************************************************************/
//END timing and report time in log file
auto t2=std::chrono::high_resolution_clock::now();
logfile <<"\n\n";
logfile << "RAGTAG took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
<< " milli seconds to obtain your parameters" << "\n";
logfile.close(); //close log file
/*****************| Free up Memory |*******************************************************/
free(ptrs);
hiprandDestroyGenerator(gen);
//hipFree(xx_d);
hipFree(Vs_d);
hipFree(ptrs_d);
hipFree(breaks_d);
hipFree(tgts_d);
free(Vs);
free(scores);
//hipFree(rands_d);
free(rands);
return 0;
}
| e73dc955783dc57eb301f4e88070e793bc9cf133.cu | /*******************************************||********************************************
Genetic algorithm optimizer *
genA.cu *
Runs iterations of genetic algoirthm to optimize molecular mechanics dihedral parameters *
@author James Maier, Kellon Belfon, Chuan Tian *
@lab Carlos Simmerling lab, Stony Brook University *
@version 3.0 2019 Aug *
********************************************||*******************************************/
/*****************************************************************************************
* ---------------LOAD LIBRARIES------------- *
*****************************************************************************************/
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <math.h>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/generate.h>
#include <thrust/device_ptr.h>
#include <list>
#include <map>
#include "load.cpp"
#include "parse.cpp"
using namespace std;
/******** Number of threads for a given block, 256 block threads (index 0 to 255) *******/
const int BLOCK_SIZE=256;
/* Check the result of a CUDA runtime call; on failure, print the error string with its
 * source location and abort. The previous definition expanded to the bare call and
 * silently discarded the cudaError_t, hiding allocation and launch failures at every
 * HANDLE_ERROR call site. Wrapped in do{}while(0) so it behaves as a single statement. */
#define HANDLE_ERROR(x)                                                       \
  do {                                                                        \
    cudaError_t handleErr_ = (x);                                             \
    if (handleErr_ != cudaSuccess) {                                          \
      fprintf(stderr, "CUDA error: %s at %s:%d\n",                            \
              cudaGetErrorString(handleErr_), __FILE__, __LINE__);            \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)
/*****************************************************************************************
* Defining the six pivotal functions for the genetic algorithm *
* (1) mateIt, (2) mutateIt, (3) scoreIt, (4) calcAreas, (5) moveEm, (6) getSumAreas *
* note: getSumAreas uses two other functions sumEm and sumEmIndex *
******************************************************************************************
******************************************************************************************
* | function1: mateIt | *
* *
* @purpose creates offspring from a population, generating crossovers according to pCross*
* @param Vs a global array of all the parent and child genomes (Amplitude parameters) *
* @param ptrs array of pointers from logical indices to actual indices into Vs for *
* each individual *
* @param areas the probabilities for choosing each individual for mating *
* @param sumArea pointer to the sum of all the individual areas *
* @param rands array of random numbers for crossover *
* @param pCross probability that crossover occurs *
* @param pSize number of individuals in the population (possible amplitudes solutions) *
* @param genomeSize number of genes in a genome (number of dihedral * periodicity) *
*****************************************************************************************/
__global__ void mateIt(float *Vs, int *ptrs, const float *areas, const float *sumArea,
    const float *rands, const float pCross, const int pSize, const int genomeSize)
{
  /* One thread produces one pair of offspring: it picks two distinct parents by
     roulette-wheel (area-proportional) selection, then copies their genomes into the
     offspring slots with an optional single-point crossover.
     Each thread consumes 3 random numbers from rands:
       rands[randi]   -> parent 0 selection
       rands[randi+1] -> parent 1 selection
       rands[randi+2] -> crossover decision (and, rescaled, the cut point) */
  int i=blockIdx.x * blockDim.x + threadIdx.x;
  /* base index of this thread's 3 random numbers */
  int randi=i*3;
  /* each thread owns 2 offspring slots, so scale the logical index by 2 */
  i<<=1;
  /* guard: the last warp may run past the population; don't waste those threads */
  if (i<pSize) {
    int parent[2];
    int j;
    /* parents start at -1 so the first loop iteration advances to index 0 */
    parent[0]=parent[1]=-1;
    /* roulette-wheel pick of parent 0: walk the population accumulating each
       individual's area (selection probability) until the running sum passes the
       random target, which is uniform on [0, *sumArea) */
    float cumA=0.0f, tgtA=rands[randi++]* *sumArea; //tgtA random number from 0 to the sumArea 
    while(cumA<=tgtA){
      ++parent[0];
      cumA+=areas[ptrs[parent[0]]/genomeSize]; // areas (probabilities) is based on mWo option  
  /* note: rands[randi-1] is the draw that was consumed for this selection */
    }
#if DEBUG>2
    printf("rands[%d] ; %f ; %f=%f * %f\n",randi, cumA, tgtA, rands[randi-1], *sumArea);
    printf("first parent\n");
#endif
    /* pick parent 1 the same way, but over the remaining area: parent 0's area is
       subtracted from the total here and its slot is skipped inside the loop */
    cumA=0.0f; tgtA=rands[randi++]* (*sumArea-areas[ptrs[parent[0]]/genomeSize]);
    while (cumA<=tgtA){
      ++parent[1];
      if (parent[1]==parent[0]) //Ensure you don't pick the same parents 
        ++parent[1];
      cumA+=areas[ptrs[parent[1]]/genomeSize];
    }
#if DEBUG>2
    printf("Make offspring %d from %d and %d (%f=%f*(%f-%f)) %d\n", i, parent[0],
 parent[1], tgtA, rands[randi-1], *sumArea, areas[ptrs[parent[0]]/genomeSize], randi);
#endif
    /* offspring slots live pSize entries past the parents in ptrs */
    i+=pSize;
    /* resolve logical indices to actual offsets into Vs */
    int i0=ptrs[i], i1=ptrs[i+1];
    parent[0]=ptrs[parent[0]];
    parent[1]=ptrs[parent[1]];
    /* j marks one past the end of offspring 0's genome */
    j=i0+genomeSize;
    /* rebase parent[0], parent[1] and i1 relative to i0 so the copy loops below can
       address all four genomes by adding i0 */
    parent[0]-=i0;
    parent[1]-=i0;
    i1-=i0;
    /* default crossover point at the end of the genome == no crossover */
    int crossPt=j;
    /* crossover happens with probability pCross; the same draw, rescaled by pCross,
       selects the cut point in [1, genomeSize-1] */
    if(rands[randi]<pCross){
      crossPt=i0+1+(int)(rands[randi]/pCross*(float)(genomeSize-1));
    }
    /* before the cut: offspring 0 copies parent 0, offspring 1 copies parent 1 */
    while(i0<crossPt){
      Vs[i0]=Vs[parent[0]+i0];
      Vs[i1+i0]=Vs[parent[1]+i0];
      ++i0;
    }
    /* after the cut: the parents are swapped between the two offspring */
    while(i0<j){
      Vs[i0]=Vs[parent[1]+i0];
      Vs[i1+i0]=Vs[parent[0]+i0];
      ++i0;
    } //end of while loop
  } // end of if i<pSize loop
}
/*****************************************************************************************
| function 2: mutateIt |
* @brief introduces mutations to the genomes in Vs, according to probability pMut,
with a max perturbation of max
*
* @param Vs a global array of all the parent and child genomes
* @param ptrs array of pointers from logical indices to actual indices into Vs for
each individual
@param rands array of random numbers
* @param pSize number of individuals in the population
* @param pMut probability that a mutation occurs, evaluated for each gene
* @param max maximum perturbation to an allele
* @param genomeSize number of genes in a genome
*******************************************************************************************/
__global__ void mutateIt(float *Vs, int *ptrs, const float *rands, const int pSize, const float pMut, const float max, const int genomeSize)
{
  /* one thread mutates one individual's whole genome */
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= pSize) return;
  /* each individual consumes genomeSize random numbers from rands */
  int r = tid * genomeSize;
  /* A gene mutates only when rands[r] < pMut, so rands[r]*scale spans
     [0, 2*max); subtracting max yields a perturbation in [-max, max). */
  const float scale = 2.0f * max / pMut;
  /* bounds of this individual's chromosome within Vs */
  const int begin = ptrs[tid];
  const int end = begin + genomeSize;
  for (int g = begin; g < end; ++g, ++r) {
    if (rands[r] < pMut) {
      /* perturb the amplitude by a value derived from max, pMut and the draw */
      Vs[g] += rands[r] * scale - max;
    }
  }
}
/************************************************************************************************
| function 3: scoreIt |
* @brief calculates a score indicating the closeness of fit for each individual/chromosome
(set of parameters) against the training set
* @param scores score for each conformation, calculated here, output array
 * @param areas weighting for each conformation, no longer needed
* @param Vs a global array of all the parent and child genomes (amplitudes)
* @param ptrs array of pointers from logical indices to actual indices into Vs for each individual
* @param tset training set
* @param tgts targets for training
* @param wts weights of each point in the training set
* @param breaks breaks in training set, where different data should not be compared across breaks
* @param nConf number of conformations in training set
* @param pSize number of individuals in the population
* @param genomeSize number of genes in a genome
* @param xx space to store energy differences for each conformation with test parameters
************************************************************************************************/
__global__ void scoreIt(float *scores, float *areas, const float *Vs, const int *ptrs, const int *ptrsV, const int *ptrsT, const int *ptrsD, const int *allFginDs, const int *nVperFg, const float *tset, const float *tgts, const float *wts, const int *breaks, const int nConf, const int pSize, const int trainingSize, const int genomeSize, const int nFg, const int *nCosperFg, float *xx )
{
// One thread scores one chromosome (one candidate set of amplitude parameters).
// NOTE(review): areas, nFg and nCosperFg are never read in this kernel body.
int i=blockIdx.x * blockDim.x + threadIdx.x;
if(i<pSize){
float *x=xx+i*nConf; // per-thread scratch: energy error of each conformation for this chromosome
// S accumulates the weighted average absolute error (AAE) for chromosome i
float *S=scores+i;
// start the score at zero
*S=0.0f;
// s accumulates the pairwise absolute error within one dataset (one break)
float s;
int t;  // index into tset for the current dihedral/conformation
int i0; // first index of this chromosome's genome in Vs
/* start at break 0 */
int b=0;
/* c walks over all conformations */
int c=0;
int d=0; // index into dataset (one dataset per break)
int fg,tg,beg,end;
int tindx;
int pt = ptrs[i]; // pointer to the first element of chromosome i in the Vs array
while(c<nConf){
// s is the running sum of pairwise relative-energy errors for this dataset
s=0.0f;
/* loop only over conformations within the current dataset */
while(c<breaks[b+1]){
/* start with delta E (tgts) for a given conformation (c) within a break; see load.cpp
conf (c) goes through until it reaches a break; the loop will set delta E */
// get first index in genome
i0=pt;
#if DEBUG>2
printf("i0: %d ", i0);
#endif
// start from the target energy difference (dE) for this conformation
x[c]=tgts[c];
// Loop over the dihedrals of this conformation; ptrsD gives the
// [begin, end) dihedral range for dataset d.
#if DEBUG>2
printf("ptrsD ??: ptrsD[d] = %d, ptrsD[d+1] = %d, d = %d\n", ptrsD[d],ptrsD[d+1],d);
#endif
tindx=0; //index into the ptrsT array: 0 .. number of dihedral columns in this dataset
for (int dih=ptrsD[d];dih<ptrsD[d+1];dih++,tindx++){
//Get the fitting group for that dihedral
fg=allFginDs[dih];
#if DEBUG>2
printf("Fitting group = %d for dih index %d\n", allFginDs[dih], dih);
#endif
// get the [beg, end) index range into Vs for this fitting group's terms,
// and the starting column t into tset for this conformation
beg=i0+ptrsV[fg];
end=beg+nVperFg[fg];
tg=ptrsT[(c*trainingSize)+tindx]; //index into ptrsT
t=(c*trainingSize)+tg;
#if DEBUG>2
printf("beg = %d, end = %d, tg = %d, tindx = %d t = %d \n", beg,end,tg,tindx,t);
#endif
// loop over the cosine terms of this fitting group
// NOTE(review): this inner loop variable i shadows the thread index i above.
for (int i=beg;i<end;i++,t++) {
/* subtract the contribution of each parameter for conformation c:
   x[c] = dE - sum_over_terms( V * cos(n * dihedral) ),
   where V comes from the chromosome and cos(n*dih) is precomputed in tset */
x[c]-=Vs[i] * tset[t]; // Vs * tset is V * cos(n * dih)
#if DEBUG>2
printf("scoreIt: i = %d, c = %d, dih = %d, beg = %d, end = %d, t = %d, x[c] = %f, Vs[i] = %f, tset[t] = %f \n",i,c,dih,beg,end,t,x[c],Vs[i],tset[t]);
#endif
}
}
/* compare this conformation's error against every earlier conformation
   in the same dataset (pairwise relative-energy errors) */
#if DEBUG>2
printf("outside loopscore for x[c] = %f\n", x[c]);
#endif
for(int c2=breaks[b];c2<c;c2++){
#if DEBUG>2
printf("In loop score for x[c] = %f\n", x[c]);
printf("%d - %d\n",c,c2); //print the pairs index
#endif
// absolute error for this pair of conformations
float err=x[c]-x[c2];
// accumulate |err|; -err when negative equals +err, so this is the absolute value
//s+=(err<0.0f?-err:err); //ternary operator, condition is err < 0.0; if true err is negative, if false error is positive
// NOTE(review): fabsf(err) would be the explicit single-precision form;
// abs here relies on CUDA's C++ math overloads.
s+=abs(err);
}
/* next conformation */
++c;
}
/* fold the dataset error into the total score, weighted by the number of
   pairs: wts[b] is 2 / (nconf*(nconf-1)) for that dataset */
*S+=s*wts[b];
/* advance to the next breakpoint (next dataset) */
++b;
++d;
}
} //end if in Psize
}
/**************************************************************************************************
* | function 4: calcAreas | *
* *
* calculates the areas (the probability) each individual has of mating *
*___________________________________Parameters____________________________________________________*
* @param scores scores for each individual (set of parameters) *
* @param areas fitness for each individual, in terms of probability of mating *
* @param ptrs array of pointers from logical indices to actual indices into Vs for each individual*
* @param pSize number of individuals in the population *
* @param genomeSize number of genes in a genome *
**************************************************************************************************/
__global__ void calcAreas(float *scores, float *areas, const int *ptrs, const int pSize, const int genomeSize, const int weight_flag, float temperature) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const float b_k = 0.001987204; // Boltzmann constant, kcal/mol/K
  const float kt = b_k * temperature;
  if (i >= pSize) return;
  /* areas is indexed by the logical slot of individual i: ptrs[i]/genomeSize */
  const int slot = ptrs[i] / genomeSize;
  switch (weight_flag) {
  case 1:
    /* heavy weighting relative to the best (first) score */
    areas[slot] = __expf(-scores[i] / scores[0]);
    break;
  case 2:
    /* weight as 1/(1+s_i) */
    areas[slot] = 1 / (1 + scores[i]);
    break;
  case 3:
    /* Boltzmann weighting with kT so probabilities are tunable by temperature */
    areas[slot] = __expf(-scores[i] / kt);
    break;
  /* NOTE(review): any other flag value leaves areas[slot] unmodified,
     matching the original if/else-if chain. */
  }
}
/*****************************************************************************************
* | function 5: moveEm |
*
* @brief simple helper function for copying data from oldF, oldI to neWF, newI
*
* @param newF pointer to new float array
* @param newI pointer to new int array
* @param oldF pointer to old float array
* @param oldI pointer to old int array
* @param N number of floats/ints to copy
*****************************************************************************************/
__global__ void moveEm(float * newF, int *newI, float *oldF, int *oldI, int N) {
  /* element-wise copy of a float array and an int array in one pass */
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= N) return;
  newF[idx] = oldF[idx];
  newI[idx] = oldI[idx];
}
/******************************| function 5 ends |***************************************/
/*****************************************************************************************
| sumEm and sumEmIndex : helper function for getSumAreas |
* @brief performs a sum of each successive pair of N numbers in source and stores the sums
in sums. intended to be run multiple times to sum over a whole array. if N is odd,
the last sum index will be N/2-1 and contain the sum of the last 3 numbers
*
* @param sums where to store the sums
* @param source where to get the numbers to sum together
* @param N the dimension of source
*
* @return ********************************************************/
__global__ void sumEm(float *sums, float *source, int N){
  /* each thread sums one adjacent pair of source into sums[i] */
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int j = i * 2;
  if (j + 2 <= N) {
    float acc = source[j] + source[j + 1];
    /* odd N: the final thread also folds in the leftover element */
    if (j + 3 == N) acc += source[j + 2];
    sums[i] = acc;
  }
}
/*
* @brief performs a sum of pairs of N numbers in source, using locations indicated
by pointers. pointers has indices multiplied by genomeSize. intended to be
run multiple times to sum over a whole array. if N is odd, the last sum index
will be N/2-1 and contain the sum of the last 3 numbers
*
* @param sums where to store the sums
* @param source an array where to get the numbers to sum together
* @param N the dimension of source
* @param ptrs the indices to use when gathering pairs for summation
* @param genomeSize the number by which the indices in ptrs are scaled
*
* @return
*/
__global__ void sumEmIndex(float *sums, float *source, int N, const int *ptrs, const int genomeSize){
  /* each thread sums one pair of source values, gathered through ptrs
     (indices in ptrs are scaled by genomeSize) */
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int j = i * 2;
  if (j + 2 <= N) {
    float acc = source[ptrs[j]/genomeSize] + source[ptrs[j+1]/genomeSize];
    /* odd N: the final thread also folds in the leftover element */
    if (j + 3 == N) acc += source[ptrs[j+2]/genomeSize];
    sums[i] = acc;
  }
#if DEBUG>1
if(j+2<=N)printf(" %d:%f",i,sums[i]);
#endif
}
/*******************************| end of helper function |*******************************/
/*****************************************************************************************
* | function 6: getSumAreas | *
* ---------uses sumEmIndex and sumEM-------- *
* *
* @brief get sum of all areas *
* @param areas_d pointer to areas on device *
* @param ptrs_d pointer to indices for each individual in population *
* @param pSize population size *
* @param temp_d pointer to temporary array on device *
* @param genomeSize number of alleles in genome *
*****************************************************************************************/
float *getSumAreas(float *areas_d, int *ptrs_d, int pSize, float *temp_d, const int & genomeSize){
int dim=pSize; // number of values still to be reduced at the current level
int offset=0;  // which half of temp_d currently holds the live partial sums
// first level: gather areas through ptrs_d and write ~pSize/2 pair sums into temp_d
sumEmIndex <<<((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (temp_d, areas_d, dim, ptrs_d, genomeSize);
pSize >>= 1; // half-size used as the ping-pong offset between the two buffer halves
while((dim>>=1)>1){ // keep halving until a single sum remains
offset^=pSize; // XOR toggles offset between pSize/2 and 0 each level (ping-pong)
// each level reads from the other half (offset^pSize) and writes into temp_d+offset
sumEm <<<((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (temp_d+offset, temp_d+(offset^pSize), dim);
}
// device pointer to the single final sum; NOTE(review): no synchronization here,
// so callers presumably rely on a later sync/copy before reading it — confirm.
return temp_d+offset;
}
/*
/////////////////////////////////////////////////////// `
////////////////////////////////// `
///////////////////// | |
///////////// ~ ~ ~ ~ ~ ~ ~
//////// | |
///// ____| |____
/// | |
// ___| J.M |___
/ | K.B |
/ PROGRAM BEGINS HERE | C.T |
*****************************************************************************************/
/*****************************************************************************************
 argc is a variable holding the number of arguments passed to GenA
 argv is a vector of strings representing the arguments that GenA takes
input file: parametersfitting data using the following format:
_____________________________________________________________________
|-<dihedral> <AMBER atom type for dihedral 1> -Fg_0 periodicities |
|-<dihedral> <AMBER atom type for dihedral 2> -Fg_1 periodicities |
|<name of data set> <weights <dihedral 1> <dihedral 2> ndih> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| ... |
|/ |
|<name of data set> <weights <dihedral 1> <dihedral 2> ndih> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> |
| ... |
|/ |
|_____________________________________________________________________|
<dihedral> is the name of dihedral e.g phi, psi, chi1, chi2, chi3, etc
ndih> is the number of dihedral in the dataset
<AMBER atom type for dihedral 1> e.g chi1 is N -CX-2C-2C for Met, get from frcmod file
<name of data set> is any name, e.g Metalpha, Metbeta, Metcharge
<dihedral 1 value> this is the dihedral value (deg) of the optimized QM structures
e.g 105.62
<E_QM> the QM energy of conformation i with restraint dihedral
<E_MM> the MM energy of conformation i with with zeroed dihedral parameters in the
frcmod
... repeat for all conformations within a break
/ (refer to as break (brk))
 a break separates conformations that belong to different databases
e.g alpha backbone, beta backbone, charge amino acids
GOODLUCK!!!
[ O O ]
[ b ' ]
[ ----- ]
contact: kellonbelfon@gmail.com with RAGTAG title for help
*****************************************************************************************/
int main(int argc, char *argv[]){
/* start the timer */
auto t1=std::chrono::high_resolution_clock::now();
/*specify the string name of the savefile, scorefile, loadfile etc */
std::string saveFile, loadFile, scoreFile, logFile, frcmodFile, inputFile, fitFile;
/* genetic algorithm parameters initiated */
int pSize, nGen, rseed, peng, ncp, nCos, nChrom, nDih, nFg, mWo;
float pMut, max, pCross, keep, tempe;
/* getting the filenames from the commands -r, -c, -s, -o, -f -y -a */
for (int i=1;i<argc;i++){
if(i+1<argc){
if(argv[i][0]=='-'&&argv[i][1]=='r')saveFile=argv[++i]; //file that save amplitudes parameter (Vs)
else if(argv[i][0]=='-'&&argv[i][1]=='c')loadFile=argv[++i]; //file with Vs for restart or from other forcefields
else if(argv[i][0]=='-'&&argv[i][1]=='s')scoreFile=argv[++i]; // file that save the scores
else if(argv[i][0]=='-'&&argv[i][1]=='f')frcmodFile=argv[++i]; //file that save frcmod file
else if(argv[i][0]=='-'&&argv[i][1]=='o')logFile=argv[++i]; //file that save outputs
else if(argv[i][0]=='-'&&argv[i][1]=='i')inputFile=argv[++i]; // input file with dihedral info
else if(argv[i][0]=='-'&&argv[i][1]=='y')fitFile=argv[++i]; // file with and idea of how your target energy change
}
}
/* open the output file which is the log file */
std::ofstream logfile;
logfile.open (logFile.c_str(), ios::out);
/* open the score file to store scores */
std::ofstream scorefile;
scorefile.open (scoreFile.c_str(), ios::out);
scorefile << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores" << std::setw(14) << "areas\n";
/* Now load genA parameters, from the parmfile -p */
for (int i=1;i<argc;i++){
if(i+1<argc){
if(argv[i][0]=='-'&&argv[i][1]=='p'){
ConfigFile cfg(argv[++i]); //file that has the genetic algorithm parameters
// check if keys exixt and set a message to the user that we are using the default
if (!(cfg.keyExists("pSize"))) std::cout << "pSize was not specified, using default of 2000\n";
if (!(cfg.keyExists("nGen"))) std::cout << "nGen was not specified, using default of 1000\n";
if (!(cfg.keyExists("pMut"))) std::cout << "pMut was not specified, using default of 0.01\n";
if (!(cfg.keyExists("max"))) std::cout << "max was not specified, using default of 0.5\n";
if (!(cfg.keyExists("pCross"))) std::cout << "pCross was not specified, using default of 0.8\n";
if (!(cfg.keyExists("peng"))) std::cout << "peng was not specified, using default of 10\n";
if (!(cfg.keyExists("ncp"))) std::cout << "ncp was not specified, using default of 2\n";
if (!(cfg.keyExists("keep"))) std::cout << "keep was not specified, using default of 0.2\n";
if (!(cfg.keyExists("nDih"))) std::cout << "nDih was not specified, using default of 1\n";
if (!(cfg.keyExists("nFg"))) std::cout << "nFg was not specified, using default of 1\n";
if (!(cfg.keyExists("mWo"))) std::cout << "mWo was not specified, using default of 1\n";
// Retreive the value of keys
pSize = cfg.getValueOfKey<int>("pSize", 2000);
logfile << "Population Size (pSize): " << pSize << "\n\n";
nGen = cfg.getValueOfKey<int>("nGen", 1000);
logfile << "Number of Generations (nGen): " << nGen << "\n\n";
pMut = cfg.getValueOfKey<float>("pMut", 0.01);
logfile << "Probability of Mutations (pMut): " << pMut << "\n\n";
max = cfg.getValueOfKey<float>("max", 0.5);
logfile << "Maximal permissible mutation (max): " << max << "\n\n";
pCross = cfg.getValueOfKey<float>("pCross", 0.8);
logfile << "Probability of crossover (pCross): " << pCross << "\n\n";
rseed = cfg.getValueOfKey<int>("rseed", 314245);
logfile << "Random seed (rseed): " << rseed << "\n\n";
peng = cfg.getValueOfKey<int>("peng", 10);
logfile << "Print scores every " << peng << "generations (peng)\n\n";
ncp = cfg.getValueOfKey<int>("ncp", 2);
logfile << "Print scores of only " << ncp << " chromosomes every peng \n\n";
nCos = cfg.getValueOfKey<int>("nCos", 4);
logfile << "Periodicity (nCos): " << nCos << "\n\n";
keep = cfg.getValueOfKey<float>("keep", 0.2);
logfile << "We will use " << keep << " for the elitist regime\n\n";
nDih = cfg.getValueOfKey<int>("nDih", 1);
logfile << "Number of dihedral(s) (nDih): " << nDih << "\n\n";
nFg = cfg.getValueOfKey<int>("nFg", 1);
logfile << "Number of Fitting groups (nFg): " << nFg << "\n\n";
mWo = cfg.getValueOfKey<int>("mWo", 1);
logfile << "Mating weight option flag " << mWo << "\n\n";
// it the mating weight option is 3 then read temperature
if (mWo==3) {
tempe = cfg.getValueOfKey<int>("tempe", 298.0);
logfile << "Temperature (K) " << tempe << "\n\n";
}
if(!loadFile.empty()) {
nChrom = cfg.getValueOfKey<int>("nChrom", 100);
logfile << "Number of chromosome reported is : " << nChrom << "\n\n";
}
}
}
}
/* initializing GPU (_d) and CPU arrays */
cudaError_t error;
size_t nRands;
curandGenerator_t gen;
float *Vs, *Vs_d, *rands, *rands_d, *tset, *tset_d, *tgts, *tgts_d, *wts, *wts_d, *xx_d;
float *scores, *scores_d, *areas, *areas_d, *EMM0;
int genomeSize, trainingSize, g, totdih, *ptrs_d, *ptrs, N, nConf=0, nDataset=0, *breaks, *breaks_d, nBreaks;
int *ptrsT, *ptrsV, *ptrsD, *ptrsT_d, *ptrsV_d, *ptrsD_d, *allFginDs, *allFginDs_d;
int *nCosperFg, *nCosperFg_d, *nVperFg, *nVperFg_d, *nDihperDs, *nDihperDs_d, *DihFgindx;
int save=pSize*keep; //save is number of chromosome we will keep as elitist
/***************************| load data from load.cpp |***********************************
* check load.cpp for this section *
* map is a way to create a dictionary, correction map is an array with key *
*****************************************************************************************/
/* initiating container with key and values name correctionMap */
std::map<std::string,DihCorrection> correctionMap;
/* input file open, with dihedral info */
std::ifstream inputfile;
inputfile.open (inputFile.c_str(), std::ios::in);
/* load in arrays generated from load.cpp, check it out for further comments */
load(inputfile, &tset, &ptrsV, &ptrsT, &ptrsD, &allFginDs, &nDihperDs, &tgts, &wts, &nConf, &nDataset, &breaks, &nBreaks, &trainingSize, &genomeSize,
correctionMap, &nVperFg, &nCosperFg, nCos, nFg, nDih, &totdih, &DihFgindx, &EMM0);
logfile << "Input file loaded ('_')" << "\n\n";
/****************************************************************************************/
/*************************| memory allocation |*******************************************
* Declare and allocate host and device memory, copy data arrays from CPU host
(breaks,tset,
* tgts,wts) to device GPU (breaks_d, etc)
*****************************************************************************************/
#if DEBUG && 0
for(int i=0;i<nConf;i++){
for(int j=0;j<trainingSize;j++)
std::cerr << ' ' << tset[i*trainingSize+j];
std::cerr << std::endl;
}
std::cerr << tgts[0] << ' ' << tgts[1] << ' ' << tgts[2] << ' ' << tgts[3] << std::endl;
std::cerr << "first cudaMalloc, " << nBreaks << " breaks" << std::endl;
#endif
// memory allocation onf the GPU
cudaMalloc(&nCosperFg_d, nFg*sizeof(int));
cudaMalloc(&ptrsV_d, nFg*sizeof(int));
cudaMalloc(&ptrsT_d, nConf*trainingSize*sizeof(int));
cudaMalloc(&ptrsD_d, (nDataset+1)*sizeof(int));
cudaMalloc(&nDihperDs_d, (nDataset+1)*sizeof(int));
cudaMalloc(&allFginDs_d, totdih*sizeof(int));
cudaMalloc(&nVperFg_d, nFg*sizeof(int));
// Some cuda copies, here TODO: Copy all at the same time, to reduce time
cudaMemcpy(ptrsV_d, ptrsV, nFg*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(ptrsT_d, ptrsT, nConf*trainingSize*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(ptrsD_d, ptrsD, (nDataset+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(nDihperDs_d, nDihperDs, (nDataset+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(allFginDs_d, allFginDs, totdih*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(nVperFg_d, nVperFg, nFg*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(nCosperFg_d, nCosperFg, nFg*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&breaks_d, nBreaks*sizeof(int));
cudaMalloc((void **)&tgts_d, (nBreaks-1+nConf*(1+trainingSize))*sizeof(float));
wts_d=tgts_d+nConf;
tset_d=wts_d+nBreaks-1;
#if DEBUG
std::cerr << "COPY" << std::endl;
#endif
/* Copying over the arrays from the CPU to GPU
nbreaks is the # of dataset + 1. e.g if you are doing alpha and beta backbone set then nbreaks=3
genomesize is the # of fitting dihedral * periodicity, e.g 3 set of dihedral * 4 periodicity = 12
nconf is the # of conformations you are fitting
tgts is (E_QMi-E_MMi) + (E_MMref-E_QMref) for each conformation, which = nconf, see load.cpp
tset is the cos(dih*periodicity) for 4 periodicity for a dihedral for each conformation
so 20 conf will give tgts of 20 (nconf) * 12 (# of dih * periodicity) = 120
*/
cudaMemcpy(breaks_d, breaks, nBreaks*sizeof(breaks[0]), cudaMemcpyHostToDevice);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error:(Memcpy breaks) %s\n", cudaGetErrorString(error));}
cudaMemcpy(tset_d, tset, nConf*trainingSize*sizeof(float), cudaMemcpyHostToDevice);
printf("trainingSize is %d after cuda copy\n", trainingSize);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error:(Memcpy tset) %s\n", cudaGetErrorString(error));}
cudaMemcpy(tgts_d, tgts, nConf*sizeof(float), cudaMemcpyHostToDevice);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: (Memcpy tgts) %s\n", cudaGetErrorString(error));}
cudaMemcpy(wts_d, wts, (nBreaks-1)*sizeof(*wts), cudaMemcpyHostToDevice);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: (Memcpy wts) %s\n", cudaGetErrorString(error));}
/**********************| initiate GPU blocks and # of random variable |***************************
* we need randoms, new pop 3xcrossover, genomeSizexmut *
* genome size is the number of genes which is all the parameters, *
* e.g for 4 periodicity and three dihedral fitting, then genomesize will be 4 * 3 = 12 *
* nRands is number of randoms we need for each set of parameters *
* e.g if psize (population size) is 10, then number of random number we will need is *
* (3+(# of periodicity x # of dihedral)) * psize *
* so for 4 periodicity and 3 dihedral fitting (chi1 chi2 chi3), then nRands = 3+12 * 10 = 150 *
*________________________________________________________________________________________________*
* nBlocks is dependent on the population size, it is use to figure out how many GPU blocks *
* we need to initialize the arrays for calculations. Each block has 256 threads. *
* one thread represent one individual (chromosome with soln parameters) from the population *
* e.g population size of 2000 will require (2000+256-1)/256 = 8.81 => 8 blocks *
* *
*************************************************************************************************/
nRands=(3+genomeSize)*pSize;
int nBlocks=(pSize+BLOCK_SIZE-1)/BLOCK_SIZE;
/*******************************| initializing host and device variables|************************
* N (bitwise operation below) is the pSize (1st input) multiply by 2; *
* initiating the chromosomes which have the solns *
************************************************************************************************/
rands=(float *)malloc(nRands*sizeof(float));
N=(pSize<<1);
HANDLE_ERROR(cudaMalloc((void **)&Vs_d, (N*(genomeSize+4)+pSize*nConf+nRands)*sizeof(float)));
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: (Malloc Vs_d) %s\n", cudaGetErrorString(error));}
rands_d=Vs_d+N*genomeSize;
scores_d=rands_d+nRands;
areas_d=scores_d+(N<<1);
xx_d=areas_d+(N<<1);
scores=(float *)malloc(sizeof(*scores)*N);
float *scores_ds[2];
scores_ds[0]=scores_d;
scores_ds[1]=scores_d+N;
printf("GENOMESIZE: %d \n", genomeSize);
// allocate memory to host Vs (amplitudes or barrier height for the cosine function)
Vs=(float *)malloc(N*genomeSize*sizeof(float));
areas=(float *)malloc(N*sizeof(float));
/* allocate the memory space to hold array of pointers (prts) of size N (2*pSize)
these pointers point to the individuals (chromosome) in the population */
ptrs=(int *)malloc(sizeof(int)*N);
ptrs[0]=0;
for(g=1;g<N;g++)ptrs[g]=ptrs[g-1]+genomeSize;
HANDLE_ERROR(cudaMalloc((void **)&ptrs_d, N*2*sizeof(int)));
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: (Malloc ptrs_d) %s\n", cudaGetErrorString(error));}
int *ptrs_ds[2];
ptrs_ds[0]=ptrs_d;
ptrs_ds[1]=ptrs_d+N;
cudaMemcpy(ptrs_d, ptrs, sizeof(int)*N, cudaMemcpyHostToDevice);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: (Memcpy ptrs_d) %s\n", cudaGetErrorString(error));}
int curList=0;
/* thrust is a c++ template library for CUDA similar to STL it have two containers:
thrust::host_vector<type> and thrust::device_vector<type>
The containers make common operations such as cudaMalloc, cudaFree, cudaMemcpy, more concise
e.g thrust::host_vector<int> vec_h(2) will allocate host vector with 2 elements
thrust::device_vectore<int> vec_d = vec_h will copy host vector to device
This will allow you to directly manipulate device values from the host
so vec_d[0] = 5; can be done from host and once you output vector memory is
automatically released
it have a few algorithms, we use thrust::sort(), */
thrust::device_ptr<int> dPtrs(ptrs_d), dPtrs_save(ptrs_d+save);
thrust::device_ptr<float> dScores(scores_d), dVs(Vs_d);
thrust::device_ptr<float> dScores_save(scores_d+save),
dScores_pSize(scores_d+pSize),
dScores_N(scores_d+N);
/**************************| Create a random generator |********************************************
*curandSetPseudoRandomGeneratorSeed takes two parameters (1) the generator (gen) & (2) seed value *
* seed value # is used to initialize the generator and control the set of random numbers; *
* same seed will the give same set of random numbers of the psuedorandom generator *
* rseed is the random number specified from the 6th input) *
*__________________________________________________________________________________________________*
* curandGenerateNormal take 5 parameters: *
* (1) generator - Generator to use *
* (2) outputPtr - Pointer to device memory to store CUDA-generated results, *
or Pointer to host memory to store CPU-generated resluts *
* (3) num - Number of floats to generate *
* (4) mean - Mean of normal distribution *
* (5) stddev - Standard deviation of normal distribution *
* Results are 32-bit floating point values with mean and standard deviation. *
***************************************************************************************************/
// create the generator name gen
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// initiate the generator with the random seed (rseed) for natural distribution of random numbers
curandSetPseudoRandomGeneratorSeed(gen, rseed);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (seed)\n", cudaGetErrorString(error));}
// Vs_d is the amplitudes which is random numbers but can be overwritten with preloaded Vs
curandGenerateNormal(gen, Vs_d, N*genomeSize, 0, 1);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (normal)\n", cudaGetErrorString(error));}
#if DEBUG
cudaMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, cudaMemcpyDeviceToHost);
/// print the three Vs from the first two chromosomes.
std::cout << "random Vs, created on GPU" << std::endl;
for(int i=0;i<1;i++){
std::cout << Vs[ptrs[i]] << " " << Vs[ptrs[i]+1] << " " << Vs[ptrs[i]+2] << std::endl;
}
#endif
/***** if we have a load file copy Vs (amplitude parameters) from the loaded file and populate Vs ***********/
if(!loadFile.empty()) {
std::ifstream loadfile;
loadfile.open (loadFile.c_str(), std::ios::in);
// copy the random Vs to add previous chromosome of nChrom
cudaMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, cudaMemcpyDeviceToHost);
if (loadfile.is_open()) {
for (int i=0;i<nChrom;i++) {
for (int j=0;j<genomeSize;j++) {
loadfile >> Vs[ptrs[i]+j];
}
}
}
// print the two Vs from the first two chromosomes, to ensure your Vs were loaded.
logfile << "Here is your loaded Vs(amplitudes) for first three chromosomes: \n\n" << std::endl;
for(int i=0;i<3;i++){
for(int j=0;j<genomeSize;j++){
logfile << Vs[ptrs[i]+j] << " ";
}
logfile << "\n";
}
// copy loaded Vs to the GPU and overwrite random Vs. If user only create two chromosomes or
// previous Vs then the rest of the chromosome will be random
cudaMemcpy(Vs_d, Vs, N*genomeSize*sizeof(*Vs), cudaMemcpyHostToDevice);// copy to GPU
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: (loadingVs) %s\n", cudaGetErrorString(error));}
}
#if DEBUG
// check to see if Vs was transfer to gpu successful
/// print the three Vs from the first two chromosomes.
std::cout << "loaded Vs" << std::endl;
for(int i=0;i<1;i++){
std::cout << Vs[ptrs[i]] << " " << Vs[ptrs[i]+1] << " " << Vs[ptrs[i]+2] << std::endl;
}
cudaMemcpy(Vs_d, Vs, pSize*genomeSize*sizeof(*Vs), cudaMemcpyHostToDevice);// copy to GPU
cudaMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, cudaMemcpyDeviceToHost); // copy back to CPU
/// print the three Vs from the first two chromosomes.
std::cout << "After transfer of loaded Vs to GPU" << std::endl;
for(int i=0;i<1;i++){
std::cout << Vs[ptrs[i]] << " " << Vs[ptrs[i]+1] << " " << Vs[ptrs[i]+2] << std::endl;
}
#endif
/***************************| score of the first set of chromosomes |*******************************
* Here we score the two arrays of parents with solution parameters in the initial population *
***************************************** *******************************************************/
// lauch first kernel to score the initial set of chromsomes (Vs_d) and output scores in scores_ds
scoreIt <<<(N+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList], areas_d, Vs_d, ptrs_ds[curList],
ptrsV_d, ptrsT_d, ptrsD_d, allFginDs_d, nVperFg_d, tset_d, tgts_d, wts_d, breaks_d,
nConf, pSize, trainingSize, genomeSize, nFg, nCosperFg_d, xx_d);
// score of chromosomes outside of psize since we initiated 2 times psize
scoreIt <<<(N+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize,
ptrsV_d, ptrsT_d, ptrsD_d, allFginDs_d, nVperFg_d, tset_d, tgts_d, wts_d, breaks_d,
nConf, pSize, trainingSize, genomeSize, nFg, nCosperFg_d, xx_d);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (1stscore)\n", cudaGetErrorString(error));}
// print the initial scores based on ncp as Initial, doing this before sorting so as to score the loaded Vs parameters
cudaMemcpy(scores, scores_ds[curList], sizeof(*scores)*ncp, cudaMemcpyDeviceToHost);
for(int m=0;m<ncp;m++){
scorefile << std::setw(6) << "Initial" << std::setw(14) << m << std::setw(18) << scores[m]/nDataset << "\n";
}
/* sort the scores from each chromosome of the initial population */
thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+N), thrust::device_pointer_cast(ptrs_ds[curList]));
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (1stsort)\n", cudaGetErrorString(error));}
// print the initial scores based on ncp as -1, doing this after sorting so we can see how good the best ones are
cudaMemcpy(scores, scores_ds[curList], sizeof(*scores)*ncp, cudaMemcpyDeviceToHost);
for(int m=0;m<ncp;m++){
scorefile << std::setw(6) << "Init_after_sort" << std::setw(14) << m << std::setw(18) << scores[m]/nDataset << "\n";
}
#if DEBUG>2
cudaMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, cudaMemcpyDeviceToHost);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n (memcpy scores)", cudaGetErrorString(error));}
cudaMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, cudaMemcpyDeviceToHost);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));}
cudaMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, cudaMemcpyDeviceToHost);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));}
/* i is each chromosome, scores[i] is scores, Vs[ptrs[i]] is the amplitude parameters;
Vs[ptrs[i]]+n specifies the next n amplitude. e.g chromosome i have genomesize amplitude parms
e.g Vs[ptrs[i]]+1 is the amplitude term when the periodicity is 3 for the 1st dihedral being
fitted, and Vs[ptrs[i]]+4, the amplitude term when the periodicity is 4 for the 2nd dihedral */
for(int i=0;i<N;i++){
std::cerr << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n";
}
#endif
/****************************| Let us begin the iterations through generations |********************
Genetic algorithm iterations through the number of generations (nGen: 2nd input)
****************************************************************************************************/
/* for loop for the generation */
for(g=0;g<nGen;g++){
// create an array of random numbers (rands_d) used for mutations and crossover where the number of random #s is nRands
curandGenerateUniform(gen, rands_d, nRands);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: (GenerateUniform)%s\n", cudaGetErrorString(error));}
// Step2: calculate the probabilities (areas) each individual (chromosome) has of mating
calcAreas <<<nBlocks, BLOCK_SIZE>>> (scores_ds[curList], areas_d, ptrs_d, pSize, genomeSize, mWo, tempe);
// Step3: mate the individuals (chromosomes,Parent[0],[1]) selected for the next generation
mateIt <<<nBlocks, BLOCK_SIZE>>> (Vs_d, ptrs_ds[curList], areas_d,
getSumAreas(areas_d, ptrs_ds[curList], pSize, areas_d+N, genomeSize),
rands_d, pCross, pSize, genomeSize);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (mate)\n", cudaGetErrorString(error));}
// Step4: mutate individuals generated after mating
mutateIt <<<nBlocks, BLOCK_SIZE>>> (Vs_d, ptrs_ds[curList]+pSize, rands_d+pSize*3, pSize, pMut, max, genomeSize);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (mutate)\n", cudaGetErrorString(error));}
// Step5: Score the individuals to select for the next generation
scoreIt <<<nBlocks, BLOCK_SIZE>>> (scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize,
ptrsV_d, ptrsT_d, ptrsD_d, allFginDs_d, nVperFg_d, tset_d, tgts_d, wts_d, breaks_d,
nConf, pSize, trainingSize, genomeSize, nFg, nCosperFg_d, xx_d);
if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (score)\n", cudaGetErrorString(error));}
// Step6: Sort the scored chromosomes (individuals) & select for mating for next generation
// curList^1 change curList to 1
// move the scores and pointers to the chromosome for the elitist parents)
moveEm <<<(save+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList^1], ptrs_ds[curList^1], scores_ds[curList], ptrs_ds[curList], save);
// curList^1 change curList to 0
// move the scores and pointers to the chromosome for the offsprings
moveEm <<<(pSize+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList^1]+save, ptrs_ds[curList^1]+save, scores_ds[curList]+pSize, ptrs_ds[curList]+pSize, pSize);//nOffspring);
// curList^1 change curList to 1
// move the scores and pointers to the chromosome of the left over parent
moveEm <<<(pSize-save+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList^1]+save+pSize, ptrs_ds[curList^1]+save+pSize, scores_ds[curList]+save, ptrs_ds[curList]+save, pSize-save);
// curList back to 0
curList^=1;
/* first sort only the offspring */
#if DEBUG>1
std::cerr << "Selection sort (" << N << " items, less " << save << ")" << std::endl;
#endif
thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]+save), thrust::device_pointer_cast(scores_ds[curList]+pSize+save), thrust::device_pointer_cast(ptrs_ds[curList]+save));
/* second sort is to sort the elitist parent and the offsprings (psize-save) that fall into pSize */
#if DEBUG>1
std::cerr << "Rank sort" << std::endl;
#endif
thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+pSize), thrust::device_pointer_cast(ptrs_ds[curList]));
/****************************************************************************************************
* Here you can print the score of chromosomes (total is 2 x population size) to score file (-s) *
****************************************************************************************************/
//peng --> print every n generation
//ncp --> number of chromosomes to print
//if generation is divisible by peng
if(g%peng==0) {
//scorefile << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores\n";
cudaMemcpy(scores, scores_ds[curList], sizeof(*scores)*ncp, cudaMemcpyDeviceToHost); //copy over ncp scores
cudaMemcpy(areas, areas_d, sizeof(*areas)*ncp, cudaMemcpyDeviceToHost); //copy over ncp areas
// divide score by the number of datasets to print the average of the datasets since score is sum of each dataset score
for(int m=0;m<ncp;m++){
scorefile << std::setw(6) << g << std::setw(14) << m << std::setw(18) << scores[m]/nDataset << std::setw(18) << areas[m] << "\n";
}
}
/* END GENETIC ALGORITHM */
}
scorefile.close();
/****************************************************************************************************
* TERMINATION, LAST RESULTS < SCORES AND PARAMETERS FOR EACH INDIVIDUAL
****************************************************************************************************/
/***************************************************************************************************/
/* copy over the results from GPU to the CPU to save the scores and parameters */
cudaMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, cudaMemcpyDeviceToHost);
cudaMemcpy(ptrs, ptrs_ds[curList], sizeof(int)*N, cudaMemcpyDeviceToHost);
cudaMemcpy(scores, scores_ds[curList], sizeof(float)*N, cudaMemcpyDeviceToHost);
cudaMemcpy(tgts, tgts_d, sizeof(float)*nConf, cudaMemcpyDeviceToHost);
cudaMemcpy(tset, tset_d, nConf*trainingSize*sizeof(float), cudaMemcpyDeviceToHost);
/****************************************************************************************************/
// Here we will move the parameters back into the dihedral space
// TODO: Write these out as functions in separate files
// kept the parameters sorted so first Vs in ptrs[i] is best in Vs = Vs_dih[0]
float *Vs_dih;
Vs_dih=(float *)malloc(N*trainingSize*sizeof(float));
int fg,begv,endv;
// For a given chromosome (set of Vs)
for(int i=0;i<N;i++){
int pt=ptrs[i]; // set the pointer index into the first element of Vs array
int kN=i*trainingSize;
// for a given dihedral define in the input file
int k=0;
for (int dih=0;dih<nDih;dih++){
// get the fitting group it belongs to
fg=DihFgindx[dih]; //get the fittting group of that dihedal
//std::cout << "fg = " << fg << " for dih = " << dih << std::endl;
// get the pointers into the Vs for that fg
begv=pt+ptrsV[fg];
endv=begv+nVperFg[fg];
for (int v=begv;v<endv;v++){
Vs_dih[kN+k]=Vs[v];
//printf("begv = %d, endv = %d, v = %d, kN+k = %d, Vs[v] = %f\n", begv,endv,v,kN+k,Vs[v]);
k++;
}
}
}
/****************************************************************************************************/
// Here I am writing out the initial dE and the final dE, see load.cpp for description
/* file that stores initial dE */
std::ofstream fitfile;
fitfile.open (fitFile.c_str(), ios::out);
int i0, t;
int b=0;
int d=0; // index into dataset
int c=0; // conformation index
float DS_score[nDataset]; //hold the dataset scores
float x[nConf]; // for the error of each conformation
float *S=scores+0;
// set score to 0
*S=0.0f;
// accumulate little s for each set
float s;
int tg,beg,end;
int tindx;
int pt = ptrs[0]; //only want the best, is it sorted as yet??? set the pointer index into the first element of Vs array
fitfile << "DATASET "<< d << ":" << "\n";
while(c<nConf){
//s is the sum of REE
s=0.0f;
/* loop only over in conformations within a dataset */
while(c<breaks[b+1]){
float parm=0;
/* start with delta E (tgts) for a given conformation (c) within a break; see load.cpp
conf (c) goes through until it reach a break. the loop will set delta E */
// get first index in genome
i0=pt;
//printf("i0: %d ", i0);
// get dE for that conformation
x[c]=tgts[c];
// Get the number of dihedral in the dataset
// loop through the dihedrals of a given conformation
//printf("ptrsD ??: ptrsD[d] = %d, ptrsD[d+1] = %d, d = %d\n", ptrsD[d],ptrsD[d+1],d);
tindx=0; //index into the ptrsT array 0 to number of dihedral columns in a given dataset
float holdc = x[c];
fitfile << "dE for Conf "<< c << ": " << holdc;
for (int dih=ptrsD[d];dih<ptrsD[d+1];dih++,tindx++){
//Get the fitting group for that dihedral
fg=allFginDs[dih];
//printf("Fitting group = %d for dih index %d\n", allFginDs[dih], dih);
//get the index into Vs and tset
beg=i0+ptrsV[fg];
end=beg+nVperFg[fg];
tg=ptrsT[(c*trainingSize)+tindx]; //index into prtsT
t=(c*trainingSize)+tg;
//printf("beg = %d, end = %d, tg = %d, tindx = %d t = %d \n", beg,end,tg,tindx,t);
//loop through the number of cosines
for (int i=beg;i<end;i++,t++) {
/* subtract contributions from each parameter for conformation c for each conformation
e.g deltaE - cos (dihedral * periodicity) * parameter generated from chromosomes
Therefore, it is delta E - sum of cosines for each dihedral */
x[c]-=Vs[i] * tset[t]; // Vs* tset is cos(n * dih)
parm += Vs[i] * (1+tset[t]);
//#if DEBUG>2
//printf("scoreIt: i = %d, c = %d, dih = %d, beg = %d, end = %d, t = %d, x[c] = %f, Vs[i] = %f, tset[t] = %f \n",i,c,dih,beg,end,t,x[c],Vs[i],tset[t]);
//#endif
}
}
fitfile << "; Parameters Energy for Conf "<< c << ": " << parm << "\n";
fitfile << "; MM0 Energy for Conf "<< c << ": " << EMM0[c] << "\n";
fitfile << "; MM Energy for Conf "<< c << ": " << (EMM0[c] + parm) << "\n";
/* add differences in this error from all other errors */
//printf("outside loopscore for x[c] = %f\n", x[c]);
for(int c2=breaks[b];c2<c;c2++){
#if DEBUG>2
printf("In loop score for x[c] = %f\n", x[c]);
printf("%d - %d\n",c,c2); //print the pairs index
#endif
// calculate the absolute error for each pairs
float err=x[c]-x[c2];
// sum the absolute of the errors (err) - -err = + err ; +err = +err
//s+=(err<0.0f?-err:err); //ternary operator, condition is err < 0.0; if true err is negative, if false error is positive
s+=abs(err);
fitfile << "REE for Conf " << c << " and " << c2 << ": " << abs(err) << "\n";
}
//printf("score for c %d = %f\n", c,s);
/* next conformation */
++c;
}
/* add little error to big error S, weighted by number of pairs, wt is 2 / nconf*(nconf-1) */
*S+=s*wts[b];
DS_score[d] = s*wts[b];
/* go to next breakpoint (data set) */
++b;
++d;
}
fitfile << "Scores per Datasets:" << "\n";
for(int d=0;d<nDataset;d++){
fitfile << std::setw(6) << d << std::setw(18) << DS_score[d] << "\n\n";
}
fitfile.close();
/****************************************************************************************************/
/* saving all of the scores, with dihedral parameters to the logfile */
logfile << "\n";
logfile << "Printing all of the final dihedral parameters, check your -f file for the best one \n\n";
logfile << "The first one is the best score, best parameters\n\n";
/* loop through the population */
for(int i=0;i<pSize;i++){
// these are the final scores for each individual in the population, print in the output file
// divide score by the number of datasets to print the average of the datasets since score is sum of each dataset score
logfile << std::fixed << "chromosome: " << ptrs[i]/genomeSize << std::endl;
logfile << std::fixed << "Average Score: " << scores[i]/nDataset << std::endl;
for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){
// second.setGenome(Vs+ptrs[i]) is the dihedral parameters for each individual in the population
//print in the output file
//logfile << it->second.setGenome(Vs+ptrs[i]);
logfile << it->second.setGenome(Vs_dih+(i*trainingSize));
}
}
/****************************************************************************************************/
/* Save a frcmod file to use in Amber */
if(!frcmodFile.empty()){
std::ofstream frcmodfile;
frcmodfile.open (frcmodFile.c_str(), ios::out);
frcmodfile << "frcmod from GenA.cu \n";
frcmodfile << "DIHE\n";
int holdFG[nFg] = {-1};
// loop through all dihedral DihCorrection map (this is the dihedrals names/atomtypes in the input file)
for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){
// loop through fitting groups to check if this fitting group is already printed
for(int f=0;f<nFg;f++){
// if it is not already printed
if (holdFG[f] != f) {
if (it->second.fitgrpindx == f) {
//frcmodfile << it->first << "\n"; // dihedral name
frcmodfile << it->second.setGenome(Vs_dih+0); //the best parameters
holdFG[f]=f;
}
}
}
}
frcmodfile.close();
}
/****************************************************************************************************/
/* Save the amplitudes to a restart file */
if(!saveFile.empty()){
std::ofstream savefile;
savefile.open (saveFile.c_str(), ios::out);
// Write restart in parameter space
for(int i=0;i<N;i++){
for(int j=0;j<genomeSize;j++){
//savefile << std::setw(9) << ptrs[i]+j << " ";
savefile << std::setw(9) << Vs[ptrs[i]+j] << " ";
}
savefile <<"\n";
}
// write to file in dihedral space
savefile <<"\n\n\n\n\n";
for(int i=0;i<N;i++){
int kN=i*trainingSize;
for(int j=0;j<trainingSize;j++){
savefile << std::setw(9) << Vs_dih[kN+j] << " ";
}
savefile << "\n";
}
savefile.close();
}
/****************************************************************************************************/
//END timing and report time in log file
auto t2=std::chrono::high_resolution_clock::now();
logfile <<"\n\n";
logfile << "RAGTAG took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
<< " milli seconds to obtain your parameters" << "\n";
logfile.close(); //close log file
/*****************| Free up Memory |*******************************************************/
free(ptrs);
curandDestroyGenerator(gen);
//cudaFree(xx_d);
cudaFree(Vs_d);
cudaFree(ptrs_d);
cudaFree(breaks_d);
cudaFree(tgts_d);
free(Vs);
free(scores);
//cudaFree(rands_d);
free(rands);
return 0;
}
|
afa93d049127fce1794d0d2c0b50f2950ba8ae65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <smoothedMG/aggregators/misHelpers.h>
//#include "thrust/detail/device_ptr.inl"
// Computes the out-degree of every node from CSR-style offsets:
// output[i] = adjIndexes[i + 1] - adjIndexes[i].
__global__ void findAdjacencySizesKernel(int size, int *adjIndexes, int *output)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;
   output[tid] = adjIndexes[tid + 1] - adjIndexes[tid];
}
// One aggregation sweep: every still-unaggregated node (aggregated[idx] == 0)
// inspects its neighbors' aggregate labels in partIn and joins the aggregate
// it shares the most edges with. Results are written to partOut (so the sweep
// reads a consistent snapshot), and aggregated[idx] is set once placed.
// A node with no aggregated neighbor keeps partOut[idx] == -1.
__global__ void allocateNodesKernel(int size, int *adjIndexes, int *adjacency, int *partIn, int *partOut, int *aggregated)
{
   int idx = blockIdx.x * blockDim.x + threadIdx.x;
   if(idx < size)
   {
      if(aggregated[idx] == 0)
      {
         int start = adjIndexes[idx];
         int end = adjIndexes[idx + 1];

         // Tally of up to 10 distinct neighboring aggregates and the number
         // of edges connecting this node to each.
         int candidates[10];
         int candidateCounts[10];
         for(int i = 0; i < 10; i++)
         {
            candidates[i] = -1;
            candidateCounts[i] = 0;
         }

         // Going through neighbors to aggregate:
         for(int i = start; i < end; i++)
         {
            int candidate = partIn[ adjacency[i] ];   // -1: neighbor not yet aggregated
            if(candidate != -1)
            {
               // Insert into the first free slot, or bump the count if already seen.
               for(int j = 0; j < 10 && candidate != -1; j++)
               {
                  if(candidates[j] == -1)
                  {
                     candidates[j] = candidate;
                     candidateCounts[j] = 1;
                     // Fixed: mark the candidate handled after inserting it.
                     // Previously the insert branch left `candidate` set, so the
                     // same value was copied into every remaining empty slot and
                     // any later distinct aggregate could never be recorded
                     // (findDesirabilityKernel already uses this corrected form).
                     candidate = -1;
                  }
                  else if(candidates[j] == candidate)
                  {
                     candidateCounts[j] += 1;
                     candidate = -1;
                  }
               }
            }
         }

         // Finding the most adjacent aggregate and adding node to it:
         int addTo = candidates[0];
         int count = candidateCounts[0];
         for(int i = 1; i < 10; i++)
         {
            if(candidateCounts[i] > count)
            {
               count = candidateCounts[i];
               addTo = candidates[i];
            }
         }
         partOut[idx] = addTo;
         if(addTo != -1)
         {
            aggregated[idx] = 1;
         }
      }
   }
}
// For a sorted label array, records where each label's run ends:
// partIndices[v + 1] = index just past the last occurrence of v.
// The final element is treated as a run boundary via a -1 sentinel.
__global__ void findPartIndicesKernel(int size, int *array, int *partIndices)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   int here = array[tid];
   int next = (tid == size - 1) ? -1 : array[tid + 1];
   if (here != next)
      partIndices[here + 1] = tid + 1;
}
// Variant of findPartIndicesKernel that skips index 0 and stores the
// boundary position itself (not position + 1):
// partIndices[v + 1] = index of the last occurrence of value v.
// NOTE(review): array[tid + 1] is read unguarded when tid == size - 1,
// so the caller must guarantee array holds at least size + 1 entries —
// confirm against the launch site.
__global__ void findPartIndicesNegStartKernel(int size, int *array, int *partIndices)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x + 1;
   if (tid >= size)
      return;

   int here = array[tid];
   int next = array[tid + 1];
   if (here != next)
      partIndices[here + 1] = tid;
}
// Writes the identity permutation: array[i] = i for every i in [0, size).
__global__ void fillWithIndexKernel(int size, int *array)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid < size)
      array[tid] = tid;
}
// Inverts a permutation: if `original` maps position i to id original[i],
// then `inverse` maps that id back to i.
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid < size)
      inverse[original[tid]] = tid;
}
// Copies every node's adjacency run to its permuted position while
// relabeling each neighbor id with that neighbor's fine-aggregate label.
// ipermutation[newIdx] gives the original node id for slot newIdx.
__global__ void permuteInitialAdjacencyKernel(int size, int *adjIndexesIn, int *adjacencyIn, int *permutedAdjIndexesIn, int *permutedAdjacencyIn, int *ipermutation, int *fineAggregate)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   int oldNode = ipermutation[tid];
   int srcBegin = adjIndexesIn[oldNode];
   int count = adjIndexesIn[oldNode + 1] - srcBegin;
   int dstBegin = permutedAdjIndexesIn[tid];

   // Translate each neighbor into its aggregate id as it is copied.
   for (int i = 0; i < count; i++)
      permutedAdjacencyIn[dstBegin + i] = fineAggregate[adjacencyIn[srcBegin + i]];
}
// For each coarse aggregate `idx`, condenses the aggregate-labeled adjacency
// of its member nodes in place: sorts the run (descending), compacts out
// duplicates and self-references, and writes the distinct-neighbor count to
// adjIndexesOut[idx]. After this kernel the first `neighborCount` entries of
// each run in permutedAdjacencyIn hold the distinct neighboring aggregate ids.
// NOTE(review): assumes every aggregate's run is non-empty (Begin < End) —
// permutedAdjacencyIn[Begin] is read unconditionally; confirm at call sites.
// NOTE(review): the per-run sort is O(run^2); acceptable only while runs are small.
__global__ void getInducedGraphNeighborCountsKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *permutedAdjIndexes, int *permutedAdjacencyIn)
{
   int idx = blockIdx.x * blockDim.x + threadIdx.x;
   if(idx < size)
   {
      // Run of adjacency entries belonging to the nodes of aggregate idx.
      int Begin = permutedAdjIndexes[ aggregateIdx[idx] ];
      int End = permutedAdjIndexes[ aggregateIdx[idx + 1] ];

      // Sort each section of the adjacency (in place, largest first so
      // duplicates become adjacent):
      for(int i = Begin; i < End - 1; i++)
      {
         for(int ii = i + 1; ii < End; ii++)
         {
            if(permutedAdjacencyIn[i] < permutedAdjacencyIn[ii])
            {
               int temp = permutedAdjacencyIn[i];
               permutedAdjacencyIn[i] = permutedAdjacencyIn[ii];
               permutedAdjacencyIn[ii] = temp;
            }
         }
      }

      // Scan through the sorted adjacency to get the condensed adjacency:
      // keep one copy of each distinct label, skipping references to the
      // aggregate itself (label == idx).
      int neighborCount = 1;
      if(permutedAdjacencyIn[Begin] == idx)
         neighborCount = 0;
      for(int i = Begin + 1; i < End; i++)
      {
         if(permutedAdjacencyIn[i] != permutedAdjacencyIn[i - 1] && permutedAdjacencyIn[i] != idx)
         {
            permutedAdjacencyIn[neighborCount + Begin] = permutedAdjacencyIn[i];
            neighborCount++;
         }
      }

      // Store the size
      adjIndexesOut[idx] = neighborCount;
   }
}
// Gathers the condensed (deduplicated) adjacency of each aggregate from the
// permuted scratch array into the final packed adjacencyOut array.
__global__ void fillCondensedAdjacencyKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *adjacencyOut, int *permutedAdjIndexesIn, int *permutedAdjacencyIn)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   int srcBegin = permutedAdjIndexesIn[aggregateIdx[tid]];
   int dstBegin = adjIndexesOut[tid];
   int count = adjIndexesOut[tid + 1] - dstBegin;

   // Copy this aggregate's condensed neighbor run into place.
   for (int i = 0; i < count; i++)
      adjacencyOut[dstBegin + i] = permutedAdjacencyIn[srcBegin + i];
}
// Composes labels: each entry gets the coarse-aggregate label of its
// fine-aggregate id, i.e. partitionLabel = coarseAggregate ∘ fineAggregateSort.
__global__ void fillPartitionLabelKernel(int size, int *coarseAggregate, int *fineAggregateSort, int *partitionLabel)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid < size)
      partitionLabel[tid] = coarseAggregate[fineAggregateSort[tid]];
}
// For a sorted aggregate-label array, records where each label's run begins:
// aggregateRemapIndex[label] = first index at which that label appears.
__global__ void getAggregateStartIndicesKernel(int size, int *fineAggregateSort, int *aggregateRemapIndex)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   // A run starts at position 0 or wherever the sorted label changes.
   bool runStart = (tid == 0) || (fineAggregateSort[tid] != fineAggregateSort[tid - 1]);
   if (runStart)
      aggregateRemapIndex[fineAggregateSort[tid]] = tid;
}
// Rewrites each aggregate label through the remap table:
// fineAggregateSort[i] becomes aggregateRemapId[fineAggregateSort[i]].
__global__ void remapAggregateIdxKernel(int size, int *fineAggregateSort, int *aggregateRemapId)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid < size)
      fineAggregateSort[tid] = aggregateRemapId[fineAggregateSort[tid]];
}
// Translates a node-level adjacency into block (aggregate) terms:
// for every edge, intra-block entries are blanked with -1, while
// inter-block entries record the source block (adjacencyBlockLabel)
// and the neighbor's block (blockMappedAdjacency).
__global__ void mapAdjacencyToBlockKernel(int size, int *adjIndexes, int *adjacency, int *adjacencyBlockLabel, int *blockMappedAdjacency, int *fineAggregate)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   int myBlock = fineAggregate[tid];
   int first = adjIndexes[tid];
   int last = adjIndexes[tid + 1];

   for (int i = first; i < last; i++)
   {
      int neighborBlock = fineAggregate[adjacency[i]];
      if (neighborBlock == myBlock)
      {
         // Edge stays inside the block: mark both outputs invalid.
         adjacencyBlockLabel[i] = -1;
         blockMappedAdjacency[i] = -1;
      }
      else
      {
         adjacencyBlockLabel[i] = myBlock;
         blockMappedAdjacency[i] = neighborBlock;
      }
   }
}
// Applies a removal stencil to the partition labels: nodes whose part is
// flagged (removeStencil == 1) become unaggregated (-1); all other nodes are
// renumbered downward by the count of removed parts below theirs.
// NOTE(review): assumes partition[idx] is a valid part index (>= 0) for every
// node — confirm this kernel only runs after all nodes are aggregated.
__global__ void removeRuntyPartsKernel(int size, int *partition, int *removeStencil, int *subtractions)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   int part = partition[tid];
   partition[tid] = (removeStencil[part] == 1) ? -1 : part - subtractions[part];
}
// Given part labels (sorted by part) and a running prefix sum of node
// weights, records each part's accumulated total: the entry at the end of
// a part's run holds that part's cumulative weight.
__global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   // The last index overall, or any index where the label changes, closes a run.
   bool runEnd = (tid == size - 1) || (part[tid] != part[tid + 1]);
   if (runEnd)
      accumulatedSize[part[tid]] = weights[tid];
}
// Converts accumulated (prefix-summed) part sizes back into individual
// part sizes: sizes[i] = accumulatedSize[i] - accumulatedSize[i - 1],
// with sizes[0] = accumulatedSize[0].
// Fixed: the idx == 0 branch previously wrote sizes[0] without checking
// idx < size, which is out of bounds when the kernel is launched with
// size == 0 (a launch always runs at least one thread).
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes)
{
   int idx = blockIdx.x * blockDim.x + threadIdx.x;
   if(idx >= size)
      return;

   if(idx == 0)
      sizes[idx] = accumulatedSize[0];
   else
      sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
// For each node, evaluates whether moving it into a neighboring partition
// would bring both its own and the destination partition closer to
// optimalSize. Outputs per node: the best destination (swap_to, -1 if no
// beneficial move), the source part (swap_from), the node index
// (swap_index), and the score of the best move (desirability).
__global__ void findDesirabilityKernel(int size, int optimalSize, int *adjIndexes, int *adjacency, int *partition, int *partSizes, int *nodeWeights, int *swap_to, int *swap_from, int *swap_index, float *desirability)
{
   int idx = blockIdx.x * blockDim.x + threadIdx.x;
   if(idx < size)
   {
      int currentPart = partition[idx];
      int currentPartSize = partSizes[currentPart];
      int nodeSize = nodeWeights[idx];
      int selfAdjacency = 0;
      int addTo = -1;
      float bestDesirability = 0;

      // The currentWeightFactor is higher the farther the part size is from optimal.
      float currentWeightFactor = (float)abs(currentPartSize - optimalSize) / optimalSize;
      // How much closer to optimal this node's part becomes if the node leaves.
      float selfImprovement = (abs(currentPartSize - optimalSize) - abs((currentPartSize - nodeSize) - optimalSize)) * currentWeightFactor;

      if(selfImprovement > 0)
      {
         int start = adjIndexes[idx];
         int end = adjIndexes[idx + 1];

         // Tally of up to 10 distinct neighboring partitions and the number
         // of edges connecting this node to each.
         int candidates[10];
         int candidateCounts[10];
         for(int i = 0; i < 10; i++)
         {
            candidates[i] = -1;
            candidateCounts[i] = 0;
         }

         // Going through the neighbors:
         for(int i = start; i < end; i++)
         {
            int candidate = partition[ adjacency[i] ];
            if(candidate == currentPart)
               selfAdjacency++;
            else
               for(int j = 0; j < 10; j++)
               {
                  if(candidate != -1 && candidates[j] == -1)
                  {
                     candidates[j] = candidate;
                     candidateCounts[j] = 1;
                     candidate = -1;
                  }
                  else if(candidates[j] == candidate)
                  {
                     candidateCounts[j] += 1;
                     candidate = -1;
                  }
               }
         }

         // Finding the best possible swap among all recorded candidates.
         // Fixed: this loop previously started at i = 1 and therefore never
         // considered candidates[0] — the first neighboring partition found
         // could never be chosen as a swap destination.
         for(int i = 0; i < 10; i++)
         {
            if(candidates[i] != -1)
            {
               int neighborPart = candidates[i];
               int neighborPartSize = partSizes[neighborPart];
               float neighborWeightFactor = (float)abs(neighborPartSize - optimalSize) / optimalSize;
               float neighborImprovement = ((float)(abs(neighborPartSize - optimalSize) - abs((neighborPartSize + nodeSize) - optimalSize))) * neighborWeightFactor;

               // Combining with self improvement to get net
               neighborImprovement += selfImprovement;
               // Multiplying by adjacency factor.
               // NOTE(review): selfAdjacency can be 0 (no neighbor in the
               // node's own part), which makes this +inf — presumably an
               // intentional "always swap" signal; confirm intent.
               neighborImprovement *= (float)candidateCounts[i] / selfAdjacency;

               if(neighborImprovement > bestDesirability)
               {
                  addTo = neighborPart;
                  bestDesirability = neighborImprovement;
               }
            }
         }
      }
      swap_from[idx] = currentPart;
      swap_index[idx] = idx;
      swap_to[idx] = addTo;
      desirability[idx] = bestDesirability;
   }
}
// Executes the moves proposed by findDesirabilityKernel. Entries are grouped
// by swap_from; only the last entry of each run performs its swap, and only
// when its desirability exceeds the 0.1 threshold. Partition sizes are kept
// consistent with atomic updates.
__global__ void makeSwapsKernel(int size, int *partition, int *partSizes, int *nodeWeights, int *swap_to, int *swap_from, int *swap_index, float *desirability)
{
   int tid = blockDim.x * blockIdx.x + threadIdx.x;
   if (tid >= size)
      return;

   // Last element overall, or last element of this swap_from run.
   bool lastOfRun = (tid == size - 1) || (swap_from[tid] != swap_from[tid + 1]);
   if (!lastOfRun || !(desirability[tid] > .1))
      return;

   int dest = swap_to[tid];
   int src = swap_from[tid];
   int node = swap_index[tid];
   int weight = nodeWeights[node];

   partition[node] = dest;
   atomicAdd(&partSizes[dest], weight);
   atomicAdd(&partSizes[src], -weight);
}
// Computes a distance-`depth` maximal independent set of the graph given in
// CSR form (adjIndexes = row offsets, adjacency = column indices). The
// result is written into misStencil (1 = node is in the set, 0 = not).
// Fixed: removed the unused mtxValues_d device vector, which allocated
// adjacency.size() integers on every call without ever being read (it only
// fed a commented-out code path).
void misHelpers::getMIS(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &misStencil, int depth)
{
   int vSize = adjIndexes.size() - 1;

   // Build a CSR matrix view of the graph for CUSP.
   cusp::csr_matrix<int, int, cusp::device_memory> graphy(vSize, vSize, adjacency.size());
   graphy.column_indices = adjacency;
   graphy.row_offsets = adjIndexes;

   cusp::graph::maximal_independent_set(graphy, misStencil, depth);

   // Release the temporary matrix storage explicitly.
   graphy.resize(0, 0, 0);
}
// Builds CSR adjacency (offset array + flattened neighbor list) for a
// triangle mesh and uploads both arrays to the device.
void misHelpers::getAdjacency(TriMesh *meshPtr, IdxVector_d &adjIndexes, IdxVector_d &adjacency)
{
   int vSize = meshPtr->vertices.size();
   meshPtr->need_neighbors();

   // Total number of neighbor entries across all vertices.
   int adjacencySize = 0;
   for (int i = 0; i < vSize; i++)
      adjacencySize += meshPtr->neighbors[i].size();

   // Host-side staging buffers.
   IdxVector_h adjIndexes_h(vSize + 1);
   IdxVector_h adjacency_h(adjacencySize);

   // Fill offsets and the flattened neighbor list.
   adjIndexes_h[0] = 0;
   int nextOffset = 0;
   for (int i = 0; i < vSize; i++)
   {
      for (int j = 0; j < meshPtr->neighbors[i].size(); j++)
         adjacency_h[nextOffset + j] = meshPtr->neighbors[i][j];
      nextOffset += meshPtr->neighbors[i].size();
      adjIndexes_h[i + 1] = nextOffset;
   }

   // Copy to the device vectors.
   adjIndexes = adjIndexes_h;
   adjacency = adjacency_h;
}
// Builds CSR adjacency (offset array + flattened neighbor list) for a
// tetrahedral mesh and uploads both arrays to the device.
// (Mirror of the TriMesh overload.)
void misHelpers::getAdjacency(TetMesh *meshPtr, IdxVector_d &adjIndexes, IdxVector_d &adjacency)
{
   int vSize = meshPtr->vertices.size();
   meshPtr->need_neighbors();

   // Total number of neighbor entries across all vertices.
   int adjacencySize = 0;
   for (int i = 0; i < vSize; i++)
      adjacencySize += meshPtr->neighbors[i].size();

   // Host-side staging buffers.
   IdxVector_h adjIndexes_h(vSize + 1);
   IdxVector_h adjacency_h(adjacencySize);

   // Fill offsets and the flattened neighbor list.
   adjIndexes_h[0] = 0;
   int nextOffset = 0;
   for (int i = 0; i < vSize; i++)
   {
      for (int j = 0; j < meshPtr->neighbors[i].size(); j++)
         adjacency_h[nextOffset + j] = meshPtr->neighbors[i][j];
      nextOffset += meshPtr->neighbors[i].size();
      adjIndexes_h[i + 1] = nextOffset;
   }

   // Copy to the device vectors.
   adjIndexes = adjIndexes_h;
   adjacency = adjacency_h;
}
// Aggregates a graph around a randomized maximal independent set: MIS roots
// are numbered consecutively, then unaggregated nodes are repeatedly attached
// to the neighboring aggregate they touch most (allocateNodesKernel) until
// every node is placed and no aggregate is smaller than minSize.
// On return, partIn holds each node's aggregate id.
void misHelpers::aggregateGraph(int minSize, int depth, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partIn, bool verbose)
{
   int size = adjIndexes.size() - 1;

   // Get an MIS for the graph:
   // getMIS(adjIndexes, adjacency, partIn, depth);
   randomizedMIS(adjIndexes, adjacency, partIn, depth);
   IdxVector_d aggregated = partIn;   // 1 for MIS roots, 0 otherwise
   IdxVector_d partOut;

   // Prefix sum to number aggregate roots:
   thrust::inclusive_scan(partIn.begin(), partIn.end(), partIn.begin());
   if( verbose ) std::cout << "Finished aggregateGraph inclusive_scan." << std::endl;
   int misCount = partIn.back();
   // DataRecorder::Add("Fine MIS Count", misCount);

   // Transform non root nodes to -1
   thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), partIn.begin(), ifLabelOne());
   partOut = partIn;
   if( verbose ) std::cout << "Finished aggregateGraph thrust::transform." << std::endl;

   // Preparing to call aggregate kernel:
   int *partIn_d;      // Pointer to partIn vector
   int *partOut_d;     // Pointer to partOut vector
   int *adjIndexes_d;  // Pointer to adjacency indexes
   int *adjacency_d;   // Pointer to adjacency
   int *aggregated_d;  // Pointer to aggregated
   bool complete = false; // Indicates whether all nodes are aggregated
   partIn_d = thrust::raw_pointer_cast(&partIn[0]);
   partOut_d = thrust::raw_pointer_cast(&partOut[0]);
   adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]);
   adjacency_d = thrust::raw_pointer_cast(&adjacency[0]);
   aggregated_d = thrust::raw_pointer_cast(&aggregated[0]);

   // Figuring out block sizes for kernel call:
   int blockSize = 256;
   int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);

   int loopCounter = 0;
   if( verbose )
      std::cout << "Starting aggregateGraph loop." << std::endl;
   while(!complete)
   {
      // Allocating nodes
      hipLaunchKernelGGL(( allocateNodesKernel) , dim3(nBlocks), dim3(blockSize) , 0, 0, size, adjIndexes_d, adjacency_d, partIn_d, partOut_d, aggregated_d);

      // Copying partOut to partIn
      // NOTE(review): the raw pointers above are reused after this
      // device-to-device assignment; correctness relies on thrust keeping
      // the same allocation for equal-sized vector assignment — confirm.
      partIn = partOut;

      // Checking if done
      int unallocatedNodes = thrust::count(aggregated.begin(), aggregated.end(), 0);
      if(unallocatedNodes == 0)
      {
         // Trying to remove parts below minSize
         complete = removeRuntyParts(minSize, partIn);

         // If stuff was removed get the aggregated labeling again
         if(!complete)
         {
            thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), findAggregated());
            partOut = partIn;
         }
      }

      if( verbose )
      {
         // Progress printing is throttled as the loop count grows.
         bool doPrint = false;
         if( loopCounter < 10 ) {
            doPrint = true;
         } else if( loopCounter < 50 ) {
            if( loopCounter % 5 == 0 )
               doPrint = true;
         } else if( loopCounter < 250 ) {
            if( loopCounter % 10 == 0 )
               doPrint = true;
         } else {
            if( loopCounter % 100 == 0 )
               doPrint = true;
         }
         if( doPrint )
            std::cout << "Finished loop " << loopCounter << " in aggregateGraph loop with " << unallocatedNodes << " unallocated nodes." << std::endl;
         loopCounter++;
      }
   }
}
// Weighted variant of aggregateGraph used for coarse-level aggregation:
// nodes carry weights, runty (undersized) parts are removed at most once
// (skipped entirely when the MIS is very small), and after all nodes are
// placed, oversized parts are broken up via restrictPartitionSize so no
// part's weighted size exceeds maxSize. On return, partIn holds each
// node's aggregate id.
void misHelpers::aggregateWeightedGraph(int maxSize, int fullSize, int depth, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partIn, IdxVector_d &nodeWeights, bool verbose)
{
   int size = adjIndexes.size() - 1;

   // Get an MIS for the graph:
   // getMIS(adjIndexes, adjacency, partIn, depth);
   randomizedMIS(adjIndexes, adjacency, partIn, depth);
   IdxVector_d aggregated = partIn;   // 1 for MIS roots, 0 otherwise
   IdxVector_d partOut;

   // Prefix sum to number aggregate roots:
   thrust::inclusive_scan(partIn.begin(), partIn.end(), partIn.begin());
   int misCount = partIn.back();
   // DataRecorder::Add("Coarse MIS Count", misCount);

   // Transform non root nodes to -1
   thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), partIn.begin(), ifLabelOne());
   partOut = partIn;

   // Preparing to call aggregate kernel:
   int *partIn_d;      // Pointer to partIn vector
   int *partOut_d;     // Pointer to partOut vector
   int *adjIndexes_d;  // Pointer to adjacency indexes
   int *adjacency_d;   // Pointer to adjacency
   int *aggregated_d;  // Pointer to aggregated
   bool complete = false; // Indicates whether all nodes are aggregated
   partIn_d = thrust::raw_pointer_cast(&partIn[0]);
   partOut_d = thrust::raw_pointer_cast(&partOut[0]);
   adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]);
   adjacency_d = thrust::raw_pointer_cast(&adjacency[0]);
   aggregated_d = thrust::raw_pointer_cast(&aggregated[0]);

   // Figuring out block sizes for kernel call:
   int blockSize = 256;
   int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);

   bool firstTime = true;
   int counter = 0;
   while(!complete)
   {
      counter++;

      // Allocating nodes
      allocateNodesKernel << < nBlocks, blockSize >> > (size, adjIndexes_d, adjacency_d, partIn_d, partOut_d, aggregated_d);

      // Copying partOut to partIn
      // NOTE(review): the raw pointers above are reused after this
      // device-to-device assignment; correctness relies on thrust keeping
      // the same allocation for equal-sized vector assignment — confirm.
      partIn = partOut;

      // Checking if done
      int unallocatedNodes = thrust::count(aggregated.begin(), aggregated.end(), 0);
      if (verbose)
         printf("unallocatedNodes = %d\n", unallocatedNodes);
      if(unallocatedNodes == 0)
      {
         // Removing small partitions:
         // (done at most once; skipped when the MIS is tiny so few parts exist)
         if(!firstTime || misCount < 10)
         {
            // Making sure there are no oversized partitions
            restrictPartitionSize(maxSize, fullSize, adjIndexes, adjacency, partIn, nodeWeights);
            complete = true;
         }
         else
         {
            firstTime = false;
            removeRuntyPartitions(fullSize, partIn, nodeWeights, verbose);
            thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), findAggregated());
            partOut = partIn;
         }
      }
   }
   hipDeviceSynchronize();
}
// Repeatedly moves boundary nodes out of oversized partitions until every
// partition's weighted size is at most maxSize. Each round scores candidate
// moves (findDesirabilityKernel), sorts them so the best move per source
// partition is last in its run, then applies them (makeSwapsKernel).
//   maxSize  - weighted size cap per partition
//   fullSize - total weighted graph size (used for the average)
//   verbose  - print the largest partition size each round
void misHelpers::restrictPartitionSize(int maxSize, int fullSize, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partition, IdxVector_d &nodeWeights, bool verbose)
{
int size = partition.size();
IntVector_d partSizes, swap_to(size), swap_from(size), swap_index(size);
FloatVector_d desirability(size);
// Finding the weighted sizes of each partition
getWeightedPartSizes(partition, nodeWeights, partSizes);
// Finding the average size:
int averageSize = fullSize / partSizes.size();
// Finding largest part size:
int largestPart = thrust::reduce(partSizes.begin(), partSizes.end(), (int)0, thrust::maximum<int>());
// NOTE(review): no iteration cap — assumes swaps always eventually shrink the
// largest part below maxSize; confirm this terminates on adversarial graphs.
while(largestPart > maxSize)
{
if (verbose)
printf("largestPart = %d\n", largestPart);
// Calculating the desirability of the nodes:
int *adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]);
int *adjacency_d = thrust::raw_pointer_cast(&adjacency[0]);
int *partition_d = thrust::raw_pointer_cast(&partition[0]);
int *partSizes_d = thrust::raw_pointer_cast(&partSizes[0]);
int *swap_to_d = thrust::raw_pointer_cast(&swap_to[0]);
int *swap_from_d = thrust::raw_pointer_cast(&swap_from[0]);
int *nodeWeights_d = thrust::raw_pointer_cast(&nodeWeights[0]);
int *swap_index_d = thrust::raw_pointer_cast(&swap_index[0]);
float *desirability_d = thrust::raw_pointer_cast(&desirability[0]);
int blockSize = 256;
int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
findDesirabilityKernel << < nBlocks, blockSize >> >(size, averageSize, adjIndexes_d, adjacency_d, partition_d, partSizes_d, nodeWeights_d, swap_to_d, swap_from_d, swap_index_d, desirability_d);
// Sort the results with (swap_from, desirability) as the key, so the most
// desirable swap for each source partition lands at the end of its run:
thrust::sort_by_key(thrust::make_zip_iterator(
thrust::make_tuple(swap_from.begin(), desirability.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(swap_from.end(), desirability.end())),
thrust::make_zip_iterator(
thrust::make_tuple(swap_to.begin(), swap_index.begin())));
// Perform good swaps (one per source partition, updating partSizes atomically)
makeSwapsKernel << < nBlocks, blockSize >> >(size, partition_d, partSizes_d, nodeWeights_d, swap_to_d, swap_from_d, swap_index_d, desirability_d);
// Repeat until no overlarge aggregates are found
largestPart = thrust::reduce(partSizes.begin(), partSizes.end(), (int)0, thrust::maximum<int>());
}
}
// Computes, for every node, the length of its adjacency run:
// sizes[i] = adjIndexes[i + 1] - adjIndexes[i].
void misHelpers::getSizes(IdxVector_d &adjIndexes, IdxVector_d &sizes)
{
    const int nodeCount = adjIndexes.size() - 1;
    sizes.resize(nodeCount, 0);

    // Raw device pointers for the launch:
    int *indexesPtr = thrust::raw_pointer_cast(&adjIndexes[0]);
    int *sizesPtr = thrust::raw_pointer_cast(&sizes[0]);

    // One thread per node, 256 threads per block (ceil-div grid):
    const int threads = 256;
    const int blocks = (nodeCount + threads - 1) / threads;
    findAdjacencySizesKernel << < blocks, threads >> > (nodeCount, indexesPtr, sizesPtr);
}
// Removes every part whose node count is below minSize and densely renumbers
// the survivors; nodes of removed parts are relabeled -1 so they can be
// re-aggregated later.
// Returns true when nothing needed removing (partition already valid),
// false when parts were removed and the caller must re-aggregate.
bool misHelpers::removeRuntyParts(int minSize, IdxVector_d &partition)
{
// Getting the part sizes:
IdxVector_d partSizes;
getPartSizes(partition, partSizes);
// Converting part sizes to a removeStencil (1 = part is below minSize)
thrust::device_vector<int> removeStencil(partSizes.size());
thrust::transform(partSizes.begin(), partSizes.end(), removeStencil.begin(), labelLessThan(minSize));
// Checking if anything will be removed:
int removed = thrust::count(removeStencil.begin(), removeStencil.end(), 1);
// DataRecorder::Add("Runty parts Removed", removed);
// If nothing to remove, just return.
if(removed == 0)
return true;
// Getting a vector with how much to subtract from non-removed aggregates
// (prefix sum of removals = label shift for each surviving part)
thrust::device_vector<int> subtractions(partSizes.size());
thrust::inclusive_scan(removeStencil.begin(), removeStencil.end(), subtractions.begin());
// Figuring out block sizes for kernel call:
int size = partition.size();
int blockSize = 256;
int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
// Getting pointers for the call:
int *partition_d = thrust::raw_pointer_cast(&partition[0]);
int *removeStencil_d = thrust::raw_pointer_cast(&removeStencil[0]);
int *subtractions_d = thrust::raw_pointer_cast(&subtractions[0]);
// Calling kernel to relabel nodes (removed parts -> -1, kept parts shifted down):
hipLaunchKernelGGL(( removeRuntyPartsKernel) , dim3(nBlocks), dim3(blockSize) , 0, 0, size, partition_d, removeStencil_d, subtractions_d);
return false;
}
// Removes every partition whose weighted size is below 70% of the average
// partition weight and densely renumbers the survivors; nodes of removed
// partitions are relabeled -1 so a later aggregation pass can reassign them.
//   fullSize    - total weighted size of the graph (used for the average)
//   partition   - in/out per-node partition labels
//   nodeWeights - per-node weights
//   verbose     - print the computed average partition size
// Always returns false (callers re-aggregate unconditionally afterwards).
bool misHelpers::removeRuntyPartitions(int fullSize, IdxVector_d &partition, IdxVector_d &nodeWeights, bool verbose)
{
// Getting the part sizes:
IntVector_d partSizes;
getWeightedPartSizes(partition, nodeWeights, partSizes);
// Figuring out the appropriate removal size:
double averageSize = (double)fullSize / partSizes.size();
if (verbose)
printf("Partition average size is %f\n", averageSize);
int threshold = (int)(averageSize * .7);
// Converting part sizes to a removeStencil (1 = part is below threshold)
thrust::device_vector<int> removeStencil(partSizes.size());
thrust::transform(partSizes.begin(), partSizes.end(), removeStencil.begin(), labelLessThan(threshold));
// Prefix sum of removals gives each surviving part its label shift:
thrust::device_vector<int> subtractions(partSizes.size());
thrust::inclusive_scan(removeStencil.begin(), removeStencil.end(), subtractions.begin());
// Figuring out block sizes for kernel call:
int size = partition.size();
int blockSize = 256;
int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
// Getting pointers for the call:
int *partition_d = thrust::raw_pointer_cast(&partition[0]);
int *removeStencil_d = thrust::raw_pointer_cast(&removeStencil[0]);
int *subtractions_d = thrust::raw_pointer_cast(&subtractions[0]);
// Relabel nodes: removed parts -> -1, kept parts -> shifted label.
// (A previously-computed, unused thrust::count of removals was dropped; it
// cost a full device reduction per call and its result was never read.)
removeRuntyPartsKernel << < nBlocks, blockSize >> > (size, partition_d, removeStencil_d, subtractions_d);
return false;
}
// Computes the number of nodes in every part of `partition`; partSizes[p]
// receives the node count of part p.
void misHelpers::getPartSizes(IdxVector_d &partition, IdxVector_d &partSizes)
{
    // Sort a scratch copy so equal part labels are contiguous:
    IdxVector_d sorted = partition;
    thrust::sort(sorted.begin(), sorted.end());
    const int maxPart = sorted[sorted.size() - 1];

    // One slot per part plus a leading zero and a trailing end offset:
    IdxVector_d partIndices(maxPart + 2, 0);

    const int count = partition.size();
    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    int *sorted_d = thrust::raw_pointer_cast(&sorted[0]);
    int *indices_d = thrust::raw_pointer_cast(&partIndices[0]);

    // Mark the end offset of each part's run in the sorted labels:
    findPartIndicesKernel << < blocks, threads >> > (count, sorted_d, indices_d);

    // Adjacent differences of the offsets are the part sizes:
    getSizes(partIndices, partSizes);
}
// Computes the node count of every part and also returns the per-part run
// offsets (partIndices) derived from the sorted labels.
void misHelpers::getPartSizes(IdxVector_d &partition, IdxVector_d &partSizes, IdxVector_d &partIndices)
{
    // Sort a scratch copy so equal part labels are contiguous:
    IdxVector_d sorted = partition;
    thrust::sort(sorted.begin(), sorted.end());
    const int maxPart = sorted[sorted.size() - 1];

    // NOTE(review): resize only zero-fills newly created slots; if partIndices
    // arrives non-empty, pre-existing entries survive — confirm callers pass it
    // empty or fully overwritten.
    partIndices.resize(maxPart + 2, 0);

    const int count = partition.size();
    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    int *sorted_d = thrust::raw_pointer_cast(&sorted[0]);
    int *indices_d = thrust::raw_pointer_cast(&partIndices[0]);

    // Mark the end offset of each part's run in the sorted labels:
    findPartIndicesKernel << < blocks, threads >> > (count, sorted_d, indices_d);

    // Adjacent differences of the offsets are the part sizes:
    getSizes(partIndices, partSizes);
}
// Builds run offsets for an already-sorted partition labeling:
// partIndices[p] .. partIndices[p + 1] bounds part p's run.
void misHelpers::getPartIndices(IdxVector_d& sortedPartition, IdxVector_d& partIndices)
{
    // One slot per part plus sentinels, cleared explicitly:
    const int maxPart = sortedPartition[sortedPartition.size() - 1];
    partIndices.resize(maxPart + 2);
    thrust::fill(partIndices.begin(), partIndices.end(), 0);

    const int count = sortedPartition.size();
    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    int *sorted_d = thrust::raw_pointer_cast(&sortedPartition[0]);
    int *indices_d = thrust::raw_pointer_cast(&partIndices[0]);

    // Mark the end offset of each run:
    findPartIndicesKernel << < blocks, threads >> > (count, sorted_d, indices_d);

    // Final sentinel is one-past-the-end of the last run:
    partIndices[partIndices.size() - 1] = count;
}
// Builds run offsets for a sorted labeling that may begin with -1 entries
// (intra-part edges blanked by mapAdjacencyToBlock); offsets mark the LAST
// index of each run rather than one-past-the-end (hence "NegStart").
void misHelpers::getPartIndicesNegStart(IdxVector_d& sortedPartition, IdxVector_d& partIndices)
{
// Sizing the array:
int maxPart = sortedPartition[sortedPartition.size() - 1];
// NOTE(review): resize only zero-fills newly created slots; stale entries
// survive if partIndices arrives non-empty — confirm callers pass it empty.
partIndices.resize(maxPart + 2, 0);
// Figuring out block sizes for kernel call:
int size = sortedPartition.size();
int blockSize = 256;
int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
// Getting pointers
int *sortedPartition_d = thrust::raw_pointer_cast(&sortedPartition[0]);
int *partIndices_d = thrust::raw_pointer_cast(&partIndices[0]);
// Calling kernel to find indices for each part:
findPartIndicesNegStartKernel << < nBlocks, blockSize >> > (size, sortedPartition_d, partIndices_d);
// Trailing sentinel points at the last element of the last run:
partIndices[partIndices.size() - 1] = size - 1;
}
// Writes the identity permutation into `tofill`: tofill[i] = i.
void misHelpers::fillWithIndex(IdxVector_d &tofill)
{
    const int count = tofill.size();
    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    int *data_d = thrust::raw_pointer_cast(&tofill[0]);
    fillWithIndexKernel << < blocks, threads >> > (count, data_d);
}
// Computes the inverse of a permutation: inverse[original[i]] = i.
// Slots never written (if `original` is not a full permutation) stay -1.
void misHelpers::getInversePermutation(IdxVector_d &original, IdxVector_d &inverse)
{
    const int count = original.size();
    inverse.resize(count, -1);

    int *orig_d = thrust::raw_pointer_cast(&original[0]);
    int *inv_d = thrust::raw_pointer_cast(&inverse[0]);

    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    getInversePermutationKernel << < blocks, threads >> > (count, orig_d, inv_d);
}
// Host wrapper: copies each node's adjacency run into its permuted position
// while translating neighbor ids into their fine-aggregate labels.
void misHelpers::permuteInitialAdjacency(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutedAdjIndexesIn, IdxVector_d &permutedAdjacencyIn, IdxVector_d &ipermutation, IdxVector_d &fineAggregate)
{
    const int nodeCount = adjIndexesIn.size() - 1;

    // Raw device pointers for the launch:
    int *idxIn = thrust::raw_pointer_cast(&adjIndexesIn[0]);
    int *adjIn = thrust::raw_pointer_cast(&adjacencyIn[0]);
    int *permIdx = thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]);
    int *permAdj = thrust::raw_pointer_cast(&permutedAdjacencyIn[0]);
    int *iperm = thrust::raw_pointer_cast(&ipermutation[0]);
    int *fineAgg = thrust::raw_pointer_cast(&fineAggregate[0]);

    // One thread per node:
    const int threads = 256;
    const int blocks = (nodeCount + threads - 1) / threads;
    permuteInitialAdjacencyKernel << < blocks, threads >> > (nodeCount, idxIn, adjIn, permIdx, permAdj, iperm, fineAgg);
}
// Host wrapper: for every aggregate, condenses its (permuted) adjacency in
// place and records the resulting distinct-neighbor count in adjIndexesOut.
void misHelpers::getInducedGraphNeighborCounts(IdxVector_d &aggregateIdx,
IdxVector_d &adjIndexesOut,
IdxVector_d &permutedAdjIndexesIn,
IdxVector_d &permutedAdjacencyIn) {
    const int aggCount = aggregateIdx.size() - 1;

    // Raw device pointers for the launch:
    int *aggIdx = thrust::raw_pointer_cast(&aggregateIdx[0]);
    int *idxOut = thrust::raw_pointer_cast(&adjIndexesOut[0]);
    int *permIdx = thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]);
    int *permAdj = thrust::raw_pointer_cast(&permutedAdjacencyIn[0]);

    // One thread per aggregate:
    const int threads = 256;
    const int blocks = (aggCount + threads - 1) / threads;
    getInducedGraphNeighborCountsKernel << < blocks, threads >> > (aggCount, aggIdx, idxOut, permIdx, permAdj);
}
// Host wrapper: copies each aggregate's condensed neighbor run from the
// permuted adjacency into the compact output adjacency.
void misHelpers::fillCondensedAdjacency(IdxVector_d& aggregateIdx, IdxVector_d& adjIndexesOut, IdxVector_d& adjacencyOut, IdxVector_d& permutedAdjIndexesIn, IdxVector_d& permutedAdjacencyIn)
{
    const int aggCount = adjIndexesOut.size() - 1;

    // Raw device pointers for the launch:
    int *aggIdx = thrust::raw_pointer_cast(&aggregateIdx[0]);
    int *idxOut = thrust::raw_pointer_cast(&adjIndexesOut[0]);
    int *adjOut = thrust::raw_pointer_cast(&adjacencyOut[0]);
    int *permIdx = thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]);
    int *permAdj = thrust::raw_pointer_cast(&permutedAdjacencyIn[0]);

    // One thread per aggregate:
    const int threads = 256;
    const int blocks = (aggCount + threads - 1) / threads;
    fillCondensedAdjacencyKernel << < blocks, threads >> > (aggCount, aggIdx, idxOut, adjOut, permIdx, permAdj);
}
// Host wrapper: partitionLabel[i] = coarseAggregate[fineAggregateSort[i]],
// i.e. propagates coarse labels onto fine-sorted nodes.
void misHelpers::fillPartitionLabel(IdxVector_d& coarseAggregate, IdxVector_d& fineAggregateSort, IdxVector_d& partitionLabel)
{
    const int count = partitionLabel.size();

    int *coarse_d = thrust::raw_pointer_cast(&coarseAggregate[0]);
    int *fineSort_d = thrust::raw_pointer_cast(&fineAggregateSort[0]);
    int *label_d = thrust::raw_pointer_cast(&partitionLabel[0]);

    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    fillPartitionLabelKernel << < blocks, threads >> > (count, coarse_d, fineSort_d, label_d);
}
// Host wrapper: records, for each aggregate label, the position where its run
// begins in the sorted fine-aggregate array.
void misHelpers::getAggregateStartIndices(IdxVector_d& fineAggregateSort, IdxVector_d& aggregateRemapIndex)
{
    const int count = fineAggregateSort.size();

    int *fineSort_d = thrust::raw_pointer_cast(&fineAggregateSort[0]);
    int *remapIdx_d = thrust::raw_pointer_cast(&aggregateRemapIndex[0]);

    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    getAggregateStartIndicesKernel << < blocks, threads >> > (count, fineSort_d, remapIdx_d);
}
// Host wrapper: rewrites each entry of fineAggregateSort through the
// aggregateRemapId lookup table, in place.
void misHelpers::remapAggregateIdx(IdxVector_d& fineAggregateSort, IdxVector_d& aggregateRemapId)
{
    const int count = fineAggregateSort.size();

    int *fineSort_d = thrust::raw_pointer_cast(&fineAggregateSort[0]);
    int *remapId_d = thrust::raw_pointer_cast(&aggregateRemapId[0]);

    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    remapAggregateIdxKernel << < blocks, threads >> > (count, fineSort_d, remapId_d);
}
// Host wrapper: labels every edge with its source node's aggregate and maps
// the neighbor id to the neighbor's aggregate; intra-aggregate edges are
// blanked to -1 in both outputs.
void misHelpers::mapAdjacencyToBlock(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &adjacencyBlockLabel, IdxVector_d &blockMappedAdjacency, IdxVector_d &fineAggregate)
{
    const int nodeCount = adjIndexes.size() - 1;

    // Raw device pointers for the launch:
    int *idx_d = thrust::raw_pointer_cast(&adjIndexes[0]);
    int *adj_d = thrust::raw_pointer_cast(&adjacency[0]);
    int *blockLabel_d = thrust::raw_pointer_cast(&adjacencyBlockLabel[0]);
    int *blockAdj_d = thrust::raw_pointer_cast(&blockMappedAdjacency[0]);
    int *fineAgg_d = thrust::raw_pointer_cast(&fineAggregate[0]);

    // One thread per node:
    const int threads = 256;
    const int blocks = (nodeCount + threads - 1) / threads;
    mapAdjacencyToBlockKernel << < blocks, threads >> > (nodeCount, idx_d, adj_d, blockLabel_d, blockAdj_d, fineAgg_d);
}
// Builds the induced (coarse) graph: nodes are the parts of partitionLabel,
// with an edge between two parts whenever any fine edge crosses them.
// Intra-part edges are blanked to (-1,-1), duplicates removed, and the result
// repacked into CSR form (adjIndexesOut / adjacencyOut).
void misHelpers::getInducedGraph(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut)
{
// Declaring temporary vectors:
IdxVector_d adjacencyBlockLabel, blockMappedAdjacency;
adjacencyBlockLabel.resize(adjacencyIn.size(), 0);
blockMappedAdjacency.resize(adjacencyIn.size(), 0);
// Get the blocklabeled adjacency (intra-part edges become -1/-1):
misHelpers::mapAdjacencyToBlock(adjIndexesIn, adjacencyIn, adjacencyBlockLabel, blockMappedAdjacency, partitionLabel);
// Zip up the block label and block mapped vectors and sort:
thrust::sort(thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end())));
// Remove Duplicates and resize:
int newSize = thrust::unique(
thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end()))) -
thrust::make_zip_iterator(thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin()));
adjacencyBlockLabel.resize(newSize);
blockMappedAdjacency.resize(newSize);
misHelpers::getPartIndicesNegStart(adjacencyBlockLabel, adjIndexesOut);
// NOTE(review): the first element is dropped here, which assumes the sorted,
// uniqued list starts with exactly one (-1,-1) pair from the blanked
// intra-part edges — confirm a graph with no intra-part edge cannot occur.
adjacencyOut.resize(blockMappedAdjacency.size() - 1);
thrust::copy(blockMappedAdjacency.begin() + 1, blockMappedAdjacency.end(), adjacencyOut.begin());
}
// Computes the total node weight of every part: partSizes[p] = sum of
// nodeWeights over nodes with partition label p. Works by sorting weights by
// part label, prefix-summing them, capturing the running total at the last
// node of each part, then differencing adjacent totals.
void misHelpers::getWeightedPartSizes(IdxVector_d &partition, IdxVector_d &nodeWeights, IntVector_d &partSizes)
{
// Make copies to mess with
IntVector_d part(partition.begin(), partition.end());
IntVector_d weights(nodeWeights.begin(), nodeWeights.end());
// Sorting temp vectors together
thrust::sort_by_key(part.begin(), part.end(), weights.begin());
// Getting prefix sum of values
thrust::inclusive_scan(weights.begin(), weights.end(), weights.begin());
// Another temp vector for accumulated size at last nodes
IntVector_d accumulatedSize(part[part.size() - 1] + 1);
// Preparing to call kernel to fill accumulated size vector
int size = part.size();
int *part_d = thrust::raw_pointer_cast(&part[0]);
int *weights_d = thrust::raw_pointer_cast(&weights[0]);
int *accumulatedSize_d = thrust::raw_pointer_cast(&accumulatedSize[0]);
// Figuring out block sizes for kernel call:
int blockSize = 256;
int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
// accumulatedSize[p] = running weight total at the last node of part p:
accumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, part_d, weights_d, accumulatedSize_d);
// Calling kernel to get the unaccumulated part sizes (adjacent differences):
size = accumulatedSize.size();
nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
partSizes.resize(size);
int *sizes_d = thrust::raw_pointer_cast(&partSizes[0]);
unaccumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, accumulatedSize_d, sizes_d);
}
// Debug-only check that every part in [0, partCount) forms a single connected
// subgraph. For each part it runs a DFS from a seed node restricted to that
// part and reports any same-part node the traversal could not reach.
// Afterwards it blocks on std::cin for inspection; entering 1 followed by a
// part id dumps that part's adjacency lists.
// NOTE: vectors are taken by value and elements are read one at a time from
// device memory, so this is extremely slow — debugging use only.
void misHelpers::checkPartConnectivity(int partCount, IdxVector_d partition, IdxVector_d adjIndexes, IdxVector_d adjacency, char *message)
{
    std::cout << message << "\n";
    vector<int> nodesToExplore, exploredNodes;
    for(int i = 0; i < partCount; i++)
    {
        nodesToExplore.clear();
        exploredNodes.clear();
        // Find a seed node belonging to part i:
        int rootId = -1;
        for(int j = 0; j < partition.size(); j++)
        {
            if(partition[j] == i)
            {
                rootId = j;
                break;
            }
        }
        // BUGFIX: an empty part used to fall through with rootId == -1 and then
        // index adjIndexes[-1]; report it and move on instead.
        if(rootId == -1)
        {
            printf("Could not find any node in part %d\n", i);
            continue;
        }
        // Seed the frontier with same-part neighbors of the root:
        int start = adjIndexes[rootId], end = adjIndexes[rootId + 1];
        for(int n = start; n < end; n++)
        {
            int neighbor = adjacency[n];
            if(partition[neighbor] == i)
                nodesToExplore.push_back(neighbor);
        }
        exploredNodes.push_back(rootId);
        // Depth-first traversal restricted to part i:
        while(nodesToExplore.size() > 0)
        {
            // Pop the next frontier node:
            int node = nodesToExplore.back();
            nodesToExplore.pop_back();
            // Skip nodes already visited:
            bool exploredAlready = false;
            for(int q = 0; q < exploredNodes.size(); q++)
                if(exploredNodes[q] == node)
                    exploredAlready = true;
            if(!exploredAlready)
            {
                int start = adjIndexes[node], end = adjIndexes[node + 1];
                for(int n = start; n < end; n++)
                {
                    int neighbor = adjacency[n];
                    if(partition[neighbor] == i)
                    {
                        nodesToExplore.push_back(neighbor);
                    }
                }
                exploredNodes.push_back(node);
            }
        }
        // Report any node of part i the traversal never reached:
        for(int j = 0; j < partition.size(); j++)
        {
            if(partition[j] == i)
            {
                bool found = false;
                for(int q = 0; q < exploredNodes.size(); q++)
                    if(exploredNodes[q] == j)
                    {
                        found = true;
                        break;
                    }
                if(!found)
                {
                    printf("Could not reach node %d in part %d from root %d\n", j, i, rootId);
                    printf("\tExplored nodes:");
                    for(int g = 0; g < exploredNodes.size(); g++)
                        printf(" %3d", exploredNodes[g]);
                    printf("\n");
                }
            }
        }
    }
    // Pausing for inspection:
    int dummy = 0;
    std::cin >> dummy;
    if(dummy == 1)
    {
        int partToCheck;
        std::cin >> partToCheck;
        for(int i = 0; i < partition.size(); i++)
        {
            if(partition[i] == partToCheck)
            {
                int start = adjIndexes[i], end = adjIndexes[i + 1];
                printf("Node %d is in partition %d\n\t", i, partToCheck);
                for(int j = start; j < end; j++)
                {
                    int neighbor = adjacency[j];
                    printf(" %4d ", neighbor);
                }
                printf("\n");
            }
        }
    }
}
// Renumbers an induced graph's CSR structure (adjIndexes / adjacency) so node
// ids follow the ordering implied by `partition`: nodes are stably sorted by
// their partition label and the adjacency is rewritten through the inverse of
// that permutation, then re-sorted and re-indexed.
void misHelpers::remapInducedGraph(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partition)
{
IdxVector_d tempCoarseAggregate = partition;
// Scratch copies sized like the adjacency (contents overwritten below):
IdxVector_d aggregateLabel = adjacency;
IdxVector_d permutedAdjacency = adjacency;
IdxVector_d coarsePermutation = partition;
IdxVector_d coarseIPermutation;
// Get the inverse permutation for the re-mapping
misHelpers::fillWithIndex(coarsePermutation);
thrust::stable_sort_by_key(tempCoarseAggregate.begin(), tempCoarseAggregate.end(), coarsePermutation.begin());
misHelpers::getInversePermutation(coarsePermutation, coarseIPermutation);
// Map the adjacency according to the inverse permutation
misHelpers::mapAdjacencyToBlock(adjIndexes, adjacency, aggregateLabel, permutedAdjacency, coarseIPermutation);
thrust::sort_by_key(aggregateLabel.begin(), aggregateLabel.end(), permutedAdjacency.begin());
// Copy from the temp to the real adjacency
thrust::copy(permutedAdjacency.begin(), permutedAdjacency.end(), adjacency.begin());
// Find the adjIndexes for the new adjacency
misHelpers::getPartIndices(aggregateLabel, adjIndexes);
}
| afa93d049127fce1794d0d2c0b50f2950ba8ae65.cu | #include <smoothedMG/aggregators/misHelpers.h>
//#include "thrust/detail/device_ptr.inl"
// One thread per node: output[i] = adjIndexes[i + 1] - adjIndexes[i],
// i.e. the length of node i's adjacency run.
__global__ void findAdjacencySizesKernel(int size, int *adjIndexes, int *output)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= size)
        return;
    output[tid] = adjIndexes[tid + 1] - adjIndexes[tid];
}
// One growth step of aggregation: every still-unaggregated node looks at its
// neighbors' current aggregate labels (partIn), tallies up to 10 distinct
// neighboring aggregates, and joins the one it shares the most edges with.
// Results go to partOut; aggregated[idx] is set to 1 once the node is placed.
// Nodes with no aggregated neighbor stay at -1 for a later pass.
__global__ void allocateNodesKernel(int size, int *adjIndexes, int *adjacency, int *partIn, int *partOut, int *aggregated)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size)
    {
        if(aggregated[idx] == 0)
        {
            int start = adjIndexes[idx];
            int end = adjIndexes[idx + 1];
            // Storage for possible aggregations (up to 10 distinct labels).
            int candidates[10];
            int candidateCounts[10];
            for(int i = 0; i < 10; i++)
            {
                candidates[i] = -1;
                candidateCounts[i] = 0;
            }
            // Going through neighbors to aggregate:
            for(int i = start; i < end; i++)
            {
                int candidate = partIn[ adjacency[i] ];
                if(candidate != -1)
                {
                    for(int j = 0; j < 10 && candidate != -1; j++)
                    {
                        if(candidates[j] == -1)
                        {
                            candidates[j] = candidate;
                            candidateCounts[j] = 1;
                            // BUGFIX: without this reset the first candidate
                            // filled every empty slot, blocking later distinct
                            // aggregates from ever being counted.
                            candidate = -1;
                        }
                        else
                        {
                            if(candidates[j] == candidate)
                            {
                                candidateCounts[j] += 1;
                                candidate = -1;
                            }
                        }
                    }
                }
            }
            // Finding the most adjacent aggregate and adding node to it:
            int addTo = candidates[0];
            int count = candidateCounts[0];
            for(int i = 1; i < 10; i++)
            {
                if(candidateCounts[i] > count)
                {
                    count = candidateCounts[i];
                    addTo = candidates[i];
                }
            }
            partOut[idx] = addTo;
            if(addTo != -1)
            {
                aggregated[idx] = 1;
            }
        }
    }
}
// For a sorted label array, writes one-past-the-end offsets:
// partIndices[label + 1] = index just after label's run ends. The tail is
// guarded with a sentinel so the last element never reads past the array.
__global__ void findPartIndicesKernel(int size, int *array, int *partIndices)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size)
        return;
    int here = array[tid];
    int next = (tid == size - 1) ? -1 : array[tid + 1];
    if (here != next)
        partIndices[here + 1] = tid + 1;
}
// For a sorted label array, writes partIndices[label + 1] = last index of
// label's run (threads start at 1, so runs are detected at their boundary).
__global__ void findPartIndicesNegStartKernel(int size, int *array, int *partIndices)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if(idx < size)
    {
        int value = array[idx];
        // BUGFIX: the last thread used to read array[size], one past the end.
        // Use a -1 sentinel instead; the resulting write for the final run,
        // partIndices[value + 1] = size - 1, matches what the host wrapper
        // writes into the trailing slot anyway.
        int nextValue = (idx != size - 1) ? array[idx + 1] : -1;
        if(value != nextValue)
            partIndices[value + 1] = idx;
    }
}
// Writes the identity map: array[i] = i for every i < size.
__global__ void fillWithIndexKernel(int size, int *array)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid < size)
        array[tid] = tid;
}
// Scatters the inverse permutation: inverse[original[i]] = i.
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid < size)
        inverse[ original[tid] ] = tid;
}
// One thread per permuted node: copies the node's original adjacency run into
// its permuted slot, translating each neighbor id into its fine-aggregate
// label along the way.
__global__ void permuteInitialAdjacencyKernel(int size, int *adjIndexesIn, int *adjacencyIn, int *permutedAdjIndexesIn, int *permutedAdjacencyIn, int *ipermutation, int *fineAggregate)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid < size)
    {
        // Source run in the original ordering:
        int srcBegin = adjIndexesIn[ipermutation[tid]];
        int srcEnd = adjIndexesIn[ipermutation[tid] + 1];
        // Destination run in the permuted ordering:
        int dstBegin = permutedAdjIndexesIn[tid];
        for(int offset = 0; offset < srcEnd - srcBegin; offset++)
        {
            permutedAdjacencyIn[dstBegin + offset] = fineAggregate[ adjacencyIn[srcBegin + offset] ];
        }
    }
}
// One thread per aggregate: sorts that aggregate's slice of the permuted
// adjacency in place (descending, insertion-style O(n^2)), compacts out
// duplicates and self-references, and writes the distinct-neighbor count to
// adjIndexesOut[idx]. The compacted neighbors are left at the front of the
// slice for fillCondensedAdjacencyKernel to copy out.
__global__ void getInducedGraphNeighborCountsKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *permutedAdjIndexes, int *permutedAdjacencyIn)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
int Begin = permutedAdjIndexes[ aggregateIdx[idx] ];
int End = permutedAdjIndexes[ aggregateIdx[idx + 1] ];
// Sort each section of the adjacency (descending order):
for(int i = Begin; i < End - 1; i++)
{
for(int ii = i + 1; ii < End; ii++)
{
if(permutedAdjacencyIn[i] < permutedAdjacencyIn[ii])
{
int temp = permutedAdjacencyIn[i];
permutedAdjacencyIn[i] = permutedAdjacencyIn[ii];
permutedAdjacencyIn[ii] = temp;
}
}
}
// Scan through the sorted adjacency to get the condensed adjacency:
// NOTE(review): assumes Begin < End — an aggregate with an empty slice would
// read the next aggregate's first entry here; confirm slices are non-empty.
int neighborCount = 1;
if(permutedAdjacencyIn[Begin] == idx)
neighborCount = 0;
for(int i = Begin + 1; i < End; i++)
{
// Keep first occurrence of each value, dropping duplicates and self-edges:
if(permutedAdjacencyIn[i] != permutedAdjacencyIn[i - 1] && permutedAdjacencyIn[i] != idx)
{
permutedAdjacencyIn[neighborCount + Begin] = permutedAdjacencyIn[i];
neighborCount++;
}
}
// Store the size
adjIndexesOut[idx] = neighborCount;
}
}
// One thread per aggregate: copies the condensed neighbor run (produced by
// getInducedGraphNeighborCountsKernel at the front of each slice) into the
// compact output adjacency.
__global__ void fillCondensedAdjacencyKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *adjacencyOut, int *permutedAdjIndexesIn, int *permutedAdjacencyIn)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= size)
        return;
    int src = permutedAdjIndexesIn[ aggregateIdx[tid] ];
    int dst = adjIndexesOut[tid];
    int count = adjIndexesOut[tid + 1] - dst;
    for(int i = 0; i < count; i++)
        adjacencyOut[dst + i] = permutedAdjacencyIn[src + i];
}
// Gather: partitionLabel[i] = coarseAggregate[fineAggregateSort[i]].
__global__ void fillPartitionLabelKernel(int size, int *coarseAggregate, int *fineAggregateSort, int *partitionLabel)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid < size)
        partitionLabel[tid] = coarseAggregate[ fineAggregateSort[tid] ];
}
// For a sorted aggregate array, records where each aggregate's run begins:
// aggregateRemapIndex[label] = first index holding that label.
__global__ void getAggregateStartIndicesKernel(int size, int *fineAggregateSort, int *aggregateRemapIndex)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= size)
        return;
    // The first element of each run is where the label changes (or index 0):
    bool firstOfRun = (tid == 0) || (fineAggregateSort[tid] != fineAggregateSort[tid - 1]);
    if(firstOfRun)
        aggregateRemapIndex[fineAggregateSort[tid]] = tid;
}
// In-place relabel: fineAggregateSort[i] = aggregateRemapId[fineAggregateSort[i]].
__global__ void remapAggregateIdxKernel(int size, int *fineAggregateSort, int *aggregateRemapId)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid < size)
        fineAggregateSort[tid] = aggregateRemapId[ fineAggregateSort[tid] ];
}
// One thread per node: for each of its edges, writes the node's aggregate into
// adjacencyBlockLabel and the neighbor's aggregate into blockMappedAdjacency;
// edges inside the same aggregate are blanked to -1 in both arrays so a later
// sort/unique pass can strip them.
__global__ void mapAdjacencyToBlockKernel(int size, int *adjIndexes, int *adjacency, int *adjacencyBlockLabel, int *blockMappedAdjacency, int *fineAggregate)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= size)
        return;
    int myBlock = fineAggregate[tid];
    int begin = adjIndexes[tid];
    int end = adjIndexes[tid + 1];
    for(int i = begin; i < end; i++)
    {
        int neighborBlock = fineAggregate[ adjacency[i] ];
        bool internal = (neighborBlock == myBlock);
        adjacencyBlockLabel[i] = internal ? -1 : myBlock;
        blockMappedAdjacency[i] = internal ? -1 : neighborBlock;
    }
}
// Relabels nodes after runty-part removal: nodes of a removed part (stencil 1)
// become -1; surviving parts shift down by the number of removed parts with a
// smaller label (the inclusive-scan value in `subtractions`).
__global__ void removeRuntyPartsKernel(int size, int *partition, int *removeStencil, int *subtractions)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= size)
        return;
    int part = partition[tid];
    partition[tid] = (removeStencil[part] == 1) ? -1 : part - subtractions[part];
}
// `part` is sorted and `weights` holds the inclusive prefix sum of node
// weights; the last element of each part's run therefore carries that part's
// accumulated total, which is captured here.
__global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= size)
        return;
    // Short-circuit keeps the final thread from reading part[size]:
    if(tid == size - 1 || part[tid] != part[tid + 1])
        accumulatedSize[part[tid]] = weights[tid];
}
// Adjacent differences of the accumulated totals give per-part sizes:
// sizes[0] = accumulatedSize[0]; sizes[p] = accumulatedSize[p] - accumulatedSize[p-1].
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid == 0)
        sizes[0] = accumulatedSize[0];
    else if(tid < size)
        sizes[tid] = accumulatedSize[tid] - accumulatedSize[tid - 1];
}
// For every node, evaluates whether moving it to a neighboring part would
// bring both parts' weighted sizes closer to optimalSize. Emits one candidate
// swap per node: swap_from = current part, swap_to = best destination (-1 if
// none), swap_index = node id, desirability = score (0 if no beneficial move).
__global__ void findDesirabilityKernel(int size, int optimalSize, int *adjIndexes, int *adjacency, int *partition, int *partSizes, int *nodeWeights, int *swap_to, int *swap_from, int *swap_index, float *desirability)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size)
    {
        int currentPart = partition[idx];
        int currentPartSize = partSizes[currentPart];
        int nodeSize = nodeWeights[idx];
        int selfAdjacency = 0;
        int addTo = -1;
        float bestDesirability = 0;
        // Weight factor grows with the part's distance from the optimal size:
        float currentWeightFactor = (float)abs(currentPartSize - optimalSize) / optimalSize;
        // How much closer to optimal the current part gets if this node leaves:
        float selfImprovement = (abs(currentPartSize - optimalSize) - abs((currentPartSize - nodeSize) - optimalSize)) * currentWeightFactor;
        if(selfImprovement > 0)
        {
            int start = adjIndexes[idx];
            int end = adjIndexes[idx + 1];
            // Up to 10 distinct neighboring parts with their edge counts:
            int candidates[10];
            int candidateCounts[10];
            for(int i = 0; i < 10; i++)
            {
                candidates[i] = -1;
                candidateCounts[i] = 0;
            }
            // Tally neighbors by part, counting same-part edges separately:
            for(int i = start; i < end; i++)
            {
                int candidate = partition[ adjacency[i] ];
                if(candidate == currentPart)
                    selfAdjacency++;
                else
                    for(int j = 0; j < 10; j++)
                    {
                        if(candidate != -1 && candidates[j] == -1)
                        {
                            candidates[j] = candidate;
                            candidateCounts[j] = 1;
                            candidate = -1;
                        }
                        else if(candidates[j] == candidate)
                        {
                            candidateCounts[j] += 1;
                            candidate = -1;
                        }
                    }
            }
            // Finding the best possible swap.
            // BUGFIX: this loop previously started at i = 1, silently ignoring
            // the first neighboring part stored in candidates[0].
            for(int i = 0; i < 10; i++)
            {
                if(candidates[i] != -1)
                {
                    int neighborPart = candidates[i];
                    int neighborPartSize = partSizes[neighborPart];
                    float neighborWeightFactor = (float)abs(neighborPartSize - optimalSize) / optimalSize;
                    float neighborImprovement = ((float)(abs(neighborPartSize - optimalSize) - abs((neighborPartSize + nodeSize) - optimalSize))) * neighborWeightFactor;
                    // Combining with self improvement to get net
                    neighborImprovement += selfImprovement;
                    // Scale by connectivity to the neighbor part relative to the
                    // node's own part; guard /0 for nodes with no same-part edges.
                    neighborImprovement *= (float)candidateCounts[i] / (selfAdjacency > 0 ? selfAdjacency : 1);
                    if(neighborImprovement > bestDesirability)
                    {
                        addTo = neighborPart;
                        bestDesirability = neighborImprovement;
                    }
                }
            }
        }
        swap_from[idx] = currentPart;
        swap_index[idx] = idx;
        swap_to[idx] = addTo;
        desirability[idx] = bestDesirability;
    }
}
// Kernel: performs the part swaps proposed by findDesirabilityKernel.
// Assumes the four arrays have been sorted together with
// (swap_from, desirability) as the ascending key, so the LAST entry of each
// swap_from run is the most desirable move out of that part; only that entry
// (and only if its desirability exceeds the 0.1 threshold) executes its swap.
// Part sizes are adjusted with atomics since many parts may gain/lose at once.
__global__ void makeSwapsKernel(int size, int *partition, int *partSizes, int *nodeWeights, int *swap_to, int *swap_from, int *swap_index, float *desirability)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if(idx >= size)
    return;
  // Last element overall, or last element of its swap_from group:
  bool lastOfGroup = (idx == size - 1) || (swap_from[idx] != swap_from[idx + 1]);
  if(lastOfGroup && desirability[idx] > .1)
  {
    int destPart = swap_to[idx];
    int srcPart = swap_from[idx];
    int node = swap_index[idx];
    int weight = nodeWeights[node];
    partition[node] = destPart;
    atomicAdd(&partSizes[destPart], weight);
    atomicAdd(&partSizes[srcPart], -weight);
  }
}
// Computes a (distance-`depth`) maximal independent set of the graph given in
// CSR form by (adjIndexes, adjacency), writing a 0/1 membership stencil into
// misStencil via cusp::graph::maximal_independent_set.
// Fix: removed the `mtxValues_d` device vector -- an O(E) allocation + fill
// left over from a commented-out code path that the CSR construction below
// never reads (MIS only needs the graph structure, not edge values).
void misHelpers::getMIS(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &misStencil, int depth)
{
  int vSize = adjIndexes.size() - 1;
  // Wrap the existing CSR arrays in a cusp matrix for the MIS routine:
  cusp::csr_matrix<int, int, cusp::device_memory> graphy(vSize, vSize, adjacency.size());
  graphy.column_indices = adjacency;
  graphy.row_offsets = adjIndexes;
  cusp::graph::maximal_independent_set(graphy, misStencil, depth);
  // Release the temporary matrix storage immediately:
  graphy.resize(0, 0, 0);
}
// Builds a CSR-style adjacency (offsets + flattened neighbor list) for a
// triangle mesh on the host, then uploads both arrays to the device.
void misHelpers::getAdjacency(TriMesh *meshPtr, IdxVector_d &adjIndexes, IdxVector_d &adjacency)
{
  int numVerts = meshPtr->vertices.size();
  meshPtr->need_neighbors();
  // Total number of adjacency entries across all vertices:
  int totalEntries = 0;
  for(int v = 0; v < numVerts; v++)
    totalEntries += meshPtr->neighbors[v].size();
  // Host-side staging for the offsets and the flattened neighbor list:
  IdxVector_h hostIndexes(numVerts + 1);
  IdxVector_h hostAdjacency(totalEntries);
  hostIndexes[0] = 0;
  int offset = 0;
  for(int v = 0; v < numVerts; v++)
  {
    int degree = meshPtr->neighbors[v].size();
    for(int k = 0; k < degree; k++)
      hostAdjacency[offset + k] = meshPtr->neighbors[v][k];
    offset += degree;
    hostIndexes[v + 1] = offset;
  }
  // Transfer to the device:
  adjIndexes = hostIndexes;
  adjacency = hostAdjacency;
}
// Builds a CSR-style adjacency (offsets + flattened neighbor list) for a
// tetrahedral mesh on the host, then uploads both arrays to the device.
void misHelpers::getAdjacency(TetMesh *meshPtr, IdxVector_d &adjIndexes, IdxVector_d &adjacency)
{
  int numVerts = meshPtr->vertices.size();
  meshPtr->need_neighbors();
  // Total number of adjacency entries across all vertices:
  int totalEntries = 0;
  for(int v = 0; v < numVerts; v++)
    totalEntries += meshPtr->neighbors[v].size();
  // Host-side staging for the offsets and the flattened neighbor list:
  IdxVector_h hostIndexes(numVerts + 1);
  IdxVector_h hostAdjacency(totalEntries);
  hostIndexes[0] = 0;
  int offset = 0;
  for(int v = 0; v < numVerts; v++)
  {
    int degree = meshPtr->neighbors[v].size();
    for(int k = 0; k < degree; k++)
      hostAdjacency[offset + k] = meshPtr->neighbors[v][k];
    offset += degree;
    hostIndexes[v + 1] = offset;
  }
  // Transfer to the device:
  adjIndexes = hostIndexes;
  adjacency = hostAdjacency;
}
// Aggregates the fine graph (adjIndexes, adjacency) into parts, writing the
// per-node part id into partIn.
// Strategy: choose aggregate roots via a randomized maximal independent set of
// distance `depth`, number the roots with a prefix sum, then iteratively
// attach unaggregated nodes to neighboring aggregates (allocateNodesKernel)
// until every node is allocated and removeRuntyParts reports no part below
// minSize.
// NOTE(review): the while loop has no iteration cap; it relies on the kernel
// and removeRuntyParts making progress each pass. Kernel launches are not
// error-checked.
void misHelpers::aggregateGraph(int minSize, int depth, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partIn, bool verbose)
{
int size = adjIndexes.size() - 1;
// Get an MIS for the graph:
// getMIS(adjIndexes, adjacency, partIn, depth);
randomizedMIS(adjIndexes, adjacency, partIn, depth);
// `aggregated` starts as the 0/1 MIS stencil and later doubles as the
// "node has been assigned to a part" flag.
IdxVector_d aggregated = partIn;
IdxVector_d partOut;
// Prefix sum to number aggregate roots:
thrust::inclusive_scan(partIn.begin(), partIn.end(), partIn.begin());
if( verbose ) std::cout << "Finished aggregateGraph inclusive_scan." << std::endl;
int misCount = partIn.back();
// DataRecorder::Add("Fine MIS Count", misCount);
// Transform non root nodes to -1
thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), partIn.begin(), ifLabelOne());
partOut = partIn;
if( verbose ) std::cout << "Finished aggregateGraph thrust::transform." << std::endl;
// Preparing to call aggregate kernel:
int *partIn_d; // Pointer to partIn vector
int *partOut_d; // Pointer to partOut vector
int *adjIndexes_d; // Pointer to adjacency indexes
int *adjacency_d; // Pointer to adjacency
int *aggregated_d; // Pointer to aggregated
bool complete = false; // Indicates whether all nodes are aggregated
partIn_d = thrust::raw_pointer_cast(&partIn[0]);
partOut_d = thrust::raw_pointer_cast(&partOut[0]);
adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]);
adjacency_d = thrust::raw_pointer_cast(&adjacency[0]);
aggregated_d = thrust::raw_pointer_cast(&aggregated[0]);
// Figuring out block sizes for kernel call:
int blockSize = 256;
int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
int loopCounter = 0;
if( verbose )
std::cout << "Starting aggregateGraph loop." << std::endl;
while(!complete)
{
// Allocating nodes
allocateNodesKernel <<< nBlocks, blockSize >>> (size, adjIndexes_d, adjacency_d, partIn_d, partOut_d, aggregated_d);
// Copying partOut to partIn
// NOTE(review): this assignment copies same-sized vectors; the raw device
// pointers cached above are assumed to stay valid (no reallocation) -- confirm.
partIn = partOut;
// Checking if done
int unallocatedNodes = thrust::count(aggregated.begin(), aggregated.end(), 0);
if(unallocatedNodes == 0)
{
// Trying to remove parts below minSize
complete = removeRuntyParts(minSize, partIn);
// If stuff was removed get the aggregated labeling again
if(!complete)
{
thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), findAggregated());
partOut = partIn;
}
}
if( verbose )
{
// Progressively throttle progress output as the loop count grows.
bool doPrint = false;
if( loopCounter < 10 ) {
doPrint = true;
} else if( loopCounter < 50 ) {
if( loopCounter % 5 == 0 )
doPrint = true;
} else if( loopCounter < 250 ) {
if( loopCounter % 10 == 0 )
doPrint = true;
} else {
if( loopCounter % 100 == 0 )
doPrint = true;
}
if( doPrint )
std::cout << "Finished loop " << loopCounter << " in aggregateGraph loop with " << unallocatedNodes << " unallocated nodes." << std::endl;
loopCounter++;
}
}
}
// Aggregates a node-weighted graph into parts whose weighted size stays at or
// below maxSize (fullSize being the total weight of all nodes).
// Like aggregateGraph: seeds aggregates from a randomized MIS, grows them with
// allocateNodesKernel, removes runty partitions once, then enforces the size
// cap with restrictPartitionSize.
// Fix: the trailing cudaThreadSynchronize() was a deprecated CUDA call that
// hipify left behind; replaced with the HIP equivalent hipDeviceSynchronize().
void misHelpers::aggregateWeightedGraph(int maxSize, int fullSize, int depth, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partIn, IdxVector_d &nodeWeights, bool verbose)
{
  int size = adjIndexes.size() - 1;
  // Get an MIS for the graph:
  // getMIS(adjIndexes, adjacency, partIn, depth);
  randomizedMIS(adjIndexes, adjacency, partIn, depth);
  // `aggregated` starts as the 0/1 MIS stencil and later flags allocated nodes.
  IdxVector_d aggregated = partIn;
  IdxVector_d partOut;
  // Prefix sum to number aggregate roots:
  thrust::inclusive_scan(partIn.begin(), partIn.end(), partIn.begin());
  int misCount = partIn.back();
  // DataRecorder::Add("Coarse MIS Count", misCount);
  // Transform non root nodes to -1
  thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), partIn.begin(), ifLabelOne());
  partOut = partIn;
  // Preparing to call aggregate kernel:
  int *partIn_d;         // Pointer to partIn vector
  int *partOut_d;        // Pointer to partOut vector
  int *adjIndexes_d;     // Pointer to adjacency indexes
  int *adjacency_d;      // Pointer to adjacency
  int *aggregated_d;     // Pointer to aggregated
  bool complete = false; // Indicates whether all nodes are aggregated
  partIn_d = thrust::raw_pointer_cast(&partIn[0]);
  partOut_d = thrust::raw_pointer_cast(&partOut[0]);
  adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]);
  adjacency_d = thrust::raw_pointer_cast(&adjacency[0]);
  aggregated_d = thrust::raw_pointer_cast(&aggregated[0]);
  // Figuring out block sizes for kernel call:
  int blockSize = 256;
  int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
  bool firstTime = true;
  int counter = 0;
  while(!complete)
  {
    counter++;
    // Allocating nodes
    allocateNodesKernel <<< nBlocks, blockSize >>> (size, adjIndexes_d, adjacency_d, partIn_d, partOut_d, aggregated_d);
    // Copying partOut to partIn
    partIn = partOut;
    // Checking if done
    int unallocatedNodes = thrust::count(aggregated.begin(), aggregated.end(), 0);
    if (verbose)
      printf("unallocatedNodes = %d\n", unallocatedNodes);
    if(unallocatedNodes == 0)
    {
      // Removing small partitions (only once, and skipped entirely when the
      // MIS is already tiny):
      if(!firstTime || misCount < 10)
      {
        // Making sure there are no oversized partitions
        restrictPartitionSize(maxSize, fullSize, adjIndexes, adjacency, partIn, nodeWeights);
        complete = true;
      }
      else
      {
        firstTime = false;
        removeRuntyPartitions(fullSize, partIn, nodeWeights, verbose);
        thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), findAggregated());
        partOut = partIn;
      }
    }
  }
  hipDeviceSynchronize();
}
// Repeatedly moves nodes out of oversized parts until no part's weighted size
// exceeds maxSize. Each pass scores candidate moves (findDesirabilityKernel),
// sorts them so the best move per source part comes last in its group, then
// applies the winning moves (makeSwapsKernel).
// Fix: the raw device pointers and launch configuration were recomputed on
// every loop iteration even though no vector involved is resized inside the
// loop -- they are loop-invariant and are now hoisted out.
void misHelpers::restrictPartitionSize(int maxSize, int fullSize, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partition, IdxVector_d &nodeWeights, bool verbose)
{
  int size = partition.size();
  IntVector_d partSizes, swap_to(size), swap_from(size), swap_index(size);
  FloatVector_d desirability(size);
  // Finding the weighted sizes of each partition
  getWeightedPartSizes(partition, nodeWeights, partSizes);
  // Finding the average size:
  int averageSize = fullSize / partSizes.size();
  // Loop-invariant raw pointers (the vectors are mutated in place by the
  // kernels but never reallocated):
  int *adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]);
  int *adjacency_d = thrust::raw_pointer_cast(&adjacency[0]);
  int *partition_d = thrust::raw_pointer_cast(&partition[0]);
  int *partSizes_d = thrust::raw_pointer_cast(&partSizes[0]);
  int *swap_to_d = thrust::raw_pointer_cast(&swap_to[0]);
  int *swap_from_d = thrust::raw_pointer_cast(&swap_from[0]);
  int *nodeWeights_d = thrust::raw_pointer_cast(&nodeWeights[0]);
  int *swap_index_d = thrust::raw_pointer_cast(&swap_index[0]);
  float *desirability_d = thrust::raw_pointer_cast(&desirability[0]);
  // Loop-invariant launch configuration:
  int blockSize = 256;
  int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
  // Finding largest part size:
  int largestPart = thrust::reduce(partSizes.begin(), partSizes.end(), (int)0, thrust::maximum<int>());
  while(largestPart > maxSize)
  {
    if (verbose)
      printf("largestPart = %d\n", largestPart);
    // Calculating the desirability of moving each node:
    findDesirabilityKernel <<< nBlocks, blockSize >>> (size, averageSize, adjIndexes_d, adjacency_d, partition_d, partSizes_d, nodeWeights_d, swap_to_d, swap_from_d, swap_index_d, desirability_d);
    // Sort the results with (swap_from, desirability) as the key, so the most
    // desirable move out of each part ends up last in its group:
    thrust::sort_by_key(thrust::make_zip_iterator(
                            thrust::make_tuple(swap_from.begin(), desirability.begin())),
                        thrust::make_zip_iterator(
                            thrust::make_tuple(swap_from.end(), desirability.end())),
                        thrust::make_zip_iterator(
                            thrust::make_tuple(swap_to.begin(), swap_index.begin())));
    // Perform good swaps
    makeSwapsKernel <<< nBlocks, blockSize >>> (size, partition_d, partSizes_d, nodeWeights_d, swap_to_d, swap_from_d, swap_index_d, desirability_d);
    // Repeat until no overlarge aggregates are found
    largestPart = thrust::reduce(partSizes.begin(), partSizes.end(), (int)0, thrust::maximum<int>());
  }
}
// Computes the size of each CSR row, i.e. sizes[i] = adjIndexes[i+1] - adjIndexes[i],
// by launching findAdjacencySizesKernel with one thread per entry.
void misHelpers::getSizes(IdxVector_d &adjIndexes, IdxVector_d &sizes)
{
  int count = adjIndexes.size() - 1;
  sizes.resize(count, 0);
  int *indexesPtr = thrust::raw_pointer_cast(&adjIndexes[0]);
  int *sizesPtr = thrust::raw_pointer_cast(&sizes[0]);
  // Launch configuration (ceil-divide the entry count by the block size):
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  findAdjacencySizesKernel <<< numBlocks, blockSize >>> (count, indexesPtr, sizesPtr);
}
// Deletes parts with fewer than minSize members and renumbers the survivors so
// part ids stay contiguous. Returns true when nothing needed removal (the
// partition is final); false when parts were removed and their nodes must be
// re-aggregated by the caller.
bool misHelpers::removeRuntyParts(int minSize, IdxVector_d &partition)
{
  // Size of every part:
  IdxVector_d partSizes;
  getPartSizes(partition, partSizes);
  // 0/1 stencil marking the parts to delete:
  thrust::device_vector<int> removeStencil(partSizes.size());
  thrust::transform(partSizes.begin(), partSizes.end(), removeStencil.begin(), labelLessThan(minSize));
  int removedCount = thrust::count(removeStencil.begin(), removeStencil.end(), 1);
  // DataRecorder::Add("Runty parts Removed", removedCount);
  if(removedCount == 0)
    return true; // nothing runty: partition already valid
  // Renumbering offsets: how many removed parts precede each part id.
  thrust::device_vector<int> subtractions(partSizes.size());
  thrust::inclusive_scan(removeStencil.begin(), removeStencil.end(), subtractions.begin());
  // Launch configuration:
  int count = partition.size();
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  // Relabel every node (removed parts become -1 / unaggregated):
  removeRuntyPartsKernel <<< numBlocks, blockSize >>> (count,
      thrust::raw_pointer_cast(&partition[0]),
      thrust::raw_pointer_cast(&removeStencil[0]),
      thrust::raw_pointer_cast(&subtractions[0]));
  return false;
}
// Deletes partitions whose weighted size is below 70% of the average weighted
// size, renumbering survivors so ids stay contiguous. Always returns false;
// callers re-aggregate the orphaned nodes afterwards.
bool misHelpers::removeRuntyPartitions(int fullSize, IdxVector_d &partition, IdxVector_d &nodeWeights, bool verbose)
{
  // Weighted size of every part:
  IntVector_d partSizes;
  getWeightedPartSizes(partition, nodeWeights, partSizes);
  // Removal threshold: 70% of the average weighted part size.
  double averageSize = (double)fullSize / partSizes.size();
  if (verbose)
    printf("Partition average size is %f\n", averageSize);
  int threshold = (int)(averageSize * .7);
  // 0/1 stencil marking the parts to delete:
  thrust::device_vector<int> removeStencil(partSizes.size());
  thrust::transform(partSizes.begin(), partSizes.end(), removeStencil.begin(), labelLessThan(threshold));
  int removed = thrust::count(removeStencil.begin(), removeStencil.end(), 1);
  // Renumbering offsets: how many removed parts precede each part id.
  thrust::device_vector<int> subtractions(partSizes.size());
  thrust::inclusive_scan(removeStencil.begin(), removeStencil.end(), subtractions.begin());
  // Launch configuration:
  int count = partition.size();
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  // Relabel every node:
  removeRuntyPartsKernel <<< numBlocks, blockSize >>> (count,
      thrust::raw_pointer_cast(&partition[0]),
      thrust::raw_pointer_cast(&removeStencil[0]),
      thrust::raw_pointer_cast(&subtractions[0]));
  return false;
}
// Computes the number of nodes in each part of `partition`, returning the
// counts (indexed by part id) in partSizes.
void misHelpers::getPartSizes(IdxVector_d &partition, IdxVector_d &partSizes)
{
  // Work on a sorted copy so nodes of the same part become contiguous:
  IdxVector_d sorted = partition;
  thrust::sort(sorted.begin(), sorted.end());
  int largestPart = sorted[sorted.size() - 1];
  // One start-index slot per part id, plus a trailing sentinel slot:
  IdxVector_d partIndices(largestPart + 2, 0);
  // Launch configuration:
  int count = partition.size();
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  int *sorted_d = thrust::raw_pointer_cast(&sorted[0]);
  int *indices_d = thrust::raw_pointer_cast(&partIndices[0]);
  // Locate where each part begins in the sorted copy:
  findPartIndicesKernel <<< numBlocks, blockSize >>> (count, sorted_d, indices_d);
  // Part sizes are the differences between consecutive start indices:
  getSizes(partIndices, partSizes);
}
// Same as the two-argument overload, but also returns the per-part start
// indices (into a sorted copy of `partition`) via partIndices.
void misHelpers::getPartSizes(IdxVector_d &partition, IdxVector_d &partSizes, IdxVector_d &partIndices)
{
  // Work on a sorted copy so nodes of the same part become contiguous:
  IdxVector_d sorted = partition;
  thrust::sort(sorted.begin(), sorted.end());
  int largestPart = sorted[sorted.size() - 1];
  // One start-index slot per part id, plus a trailing sentinel slot:
  partIndices.resize(largestPart + 2, 0);
  // Launch configuration:
  int count = partition.size();
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  int *sorted_d = thrust::raw_pointer_cast(&sorted[0]);
  int *indices_d = thrust::raw_pointer_cast(&partIndices[0]);
  // Locate where each part begins in the sorted copy:
  findPartIndicesKernel <<< numBlocks, blockSize >>> (count, sorted_d, indices_d);
  // Part sizes are the differences between consecutive start indices:
  getSizes(partIndices, partSizes);
}
// Builds `partIndices` so that the nodes of part p occupy the range
// [partIndices[p], partIndices[p+1]) within `sortedPartition` (which must
// already be sorted ascending by part id).
void misHelpers::getPartIndices(IdxVector_d& sortedPartition, IdxVector_d& partIndices)
{
  // One slot per part id plus a trailing sentinel, zero-initialized:
  int largestPart = sortedPartition[sortedPartition.size() - 1];
  partIndices.resize(largestPart + 2);
  thrust::fill(partIndices.begin(), partIndices.end(), 0);
  // Launch configuration:
  int count = sortedPartition.size();
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  int *sorted_d = thrust::raw_pointer_cast(&sortedPartition[0]);
  int *indices_d = thrust::raw_pointer_cast(&partIndices[0]);
  // Record the first occurrence of each part id:
  findPartIndicesKernel <<< numBlocks, blockSize >>> (count, sorted_d, indices_d);
  // Sentinel: one past the last node.
  partIndices[partIndices.size() - 1] = count;
}
// Variant of getPartIndices using the "negative start" convention of
// findPartIndicesNegStartKernel: indices are offset such that the trailing
// sentinel is size - 1 rather than size.
void misHelpers::getPartIndicesNegStart(IdxVector_d& sortedPartition, IdxVector_d& partIndices)
{
  // One slot per part id plus a trailing sentinel, zero-initialized:
  int largestPart = sortedPartition[sortedPartition.size() - 1];
  partIndices.resize(largestPart + 2, 0);
  // Launch configuration:
  int count = sortedPartition.size();
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  int *sorted_d = thrust::raw_pointer_cast(&sortedPartition[0]);
  int *indices_d = thrust::raw_pointer_cast(&partIndices[0]);
  findPartIndicesNegStartKernel <<< numBlocks, blockSize >>> (count, sorted_d, indices_d);
  // Sentinel under the neg-start convention:
  partIndices[partIndices.size() - 1] = count - 1;
}
// Overwrites `tofill` with the identity permutation 0, 1, 2, ... on the device.
void misHelpers::fillWithIndex(IdxVector_d &tofill)
{
  int count = tofill.size();
  int *data_d = thrust::raw_pointer_cast(&tofill[0]);
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  fillWithIndexKernel <<< numBlocks, blockSize >>> (count, data_d);
}
// Computes the inverse of the permutation in `original`, i.e. a vector such
// that inverse[original[i]] == i. Entries are pre-filled with -1.
void misHelpers::getInversePermutation(IdxVector_d &original, IdxVector_d &inverse)
{
  int count = original.size();
  inverse.resize(count, -1);
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  getInversePermutationKernel <<< numBlocks, blockSize >>> (count,
      thrust::raw_pointer_cast(&original[0]),
      thrust::raw_pointer_cast(&inverse[0]));
}
// Host wrapper for permuteInitialAdjacencyKernel: one thread per node of the
// input CSR structure. Writes the permuted offsets/adjacency into the two
// `permuted*` output vectors (which must be pre-sized by the caller).
void misHelpers::permuteInitialAdjacency(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutedAdjIndexesIn, IdxVector_d &permutedAdjacencyIn, IdxVector_d &ipermutation, IdxVector_d &fineAggregate)
{
  int count = adjIndexesIn.size() - 1;
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  permuteInitialAdjacencyKernel <<< numBlocks, blockSize >>> (count,
      thrust::raw_pointer_cast(&adjIndexesIn[0]),
      thrust::raw_pointer_cast(&adjacencyIn[0]),
      thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]),
      thrust::raw_pointer_cast(&permutedAdjacencyIn[0]),
      thrust::raw_pointer_cast(&ipermutation[0]),
      thrust::raw_pointer_cast(&fineAggregate[0]));
}
// Host wrapper for getInducedGraphNeighborCountsKernel: one thread per coarse
// aggregate (aggregateIdx has one offset per aggregate plus a sentinel).
void misHelpers::getInducedGraphNeighborCounts(IdxVector_d &aggregateIdx,
    IdxVector_d &adjIndexesOut,
    IdxVector_d &permutedAdjIndexesIn,
    IdxVector_d &permutedAdjacencyIn) {
  int count = aggregateIdx.size() - 1;
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  getInducedGraphNeighborCountsKernel <<< numBlocks, blockSize >>> (count,
      thrust::raw_pointer_cast(&aggregateIdx[0]),
      thrust::raw_pointer_cast(&adjIndexesOut[0]),
      thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]),
      thrust::raw_pointer_cast(&permutedAdjacencyIn[0]));
}
// Host wrapper for fillCondensedAdjacencyKernel: one thread per coarse node
// (adjIndexesOut has one offset per coarse node plus a sentinel). Fills
// adjacencyOut, the condensed (induced) adjacency list.
void misHelpers::fillCondensedAdjacency(IdxVector_d& aggregateIdx, IdxVector_d& adjIndexesOut, IdxVector_d& adjacencyOut, IdxVector_d& permutedAdjIndexesIn, IdxVector_d& permutedAdjacencyIn)
{
  int count = adjIndexesOut.size() - 1;
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  fillCondensedAdjacencyKernel <<< numBlocks, blockSize >>> (count,
      thrust::raw_pointer_cast(&aggregateIdx[0]),
      thrust::raw_pointer_cast(&adjIndexesOut[0]),
      thrust::raw_pointer_cast(&adjacencyOut[0]),
      thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]),
      thrust::raw_pointer_cast(&permutedAdjacencyIn[0]));
}
// Host wrapper for fillPartitionLabelKernel: one thread per entry of
// partitionLabel (which must be pre-sized by the caller).
void misHelpers::fillPartitionLabel(IdxVector_d& coarseAggregate, IdxVector_d& fineAggregateSort, IdxVector_d& partitionLabel)
{
  int count = partitionLabel.size();
  int *coarse_d = thrust::raw_pointer_cast(&coarseAggregate[0]);
  int *fineSorted_d = thrust::raw_pointer_cast(&fineAggregateSort[0]);
  int *labels_d = thrust::raw_pointer_cast(&partitionLabel[0]);
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  fillPartitionLabelKernel <<< numBlocks, blockSize >>> (count, coarse_d, fineSorted_d, labels_d);
}
// Host wrapper for getAggregateStartIndicesKernel: one thread per entry of the
// sorted fine-aggregate labeling.
void misHelpers::getAggregateStartIndices(IdxVector_d& fineAggregateSort, IdxVector_d& aggregateRemapIndex)
{
  int count = fineAggregateSort.size();
  int *fineSorted_d = thrust::raw_pointer_cast(&fineAggregateSort[0]);
  int *remapIndex_d = thrust::raw_pointer_cast(&aggregateRemapIndex[0]);
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  getAggregateStartIndicesKernel <<< numBlocks, blockSize >>> (count, fineSorted_d, remapIndex_d);
}
// Host wrapper for remapAggregateIdxKernel: one thread per entry of the sorted
// fine-aggregate labeling, relabeling it in place via aggregateRemapId.
void misHelpers::remapAggregateIdx(IdxVector_d& fineAggregateSort, IdxVector_d& aggregateRemapId)
{
  int count = fineAggregateSort.size();
  int *fineSorted_d = thrust::raw_pointer_cast(&fineAggregateSort[0]);
  int *remapId_d = thrust::raw_pointer_cast(&aggregateRemapId[0]);
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  remapAggregateIdxKernel <<< numBlocks, blockSize >>> (count, fineSorted_d, remapId_d);
}
// Host wrapper for mapAdjacencyToBlockKernel: one thread per node of the fine
// CSR structure. The two output vectors must be pre-sized to adjacency.size().
void misHelpers::mapAdjacencyToBlock(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &adjacencyBlockLabel, IdxVector_d &blockMappedAdjacency, IdxVector_d &fineAggregate)
{
  int count = adjIndexes.size() - 1;
  // Launch configuration:
  int blockSize = 256;
  int numBlocks = (count + blockSize - 1) / blockSize;
  mapAdjacencyToBlockKernel <<< numBlocks, blockSize >>> (count,
      thrust::raw_pointer_cast(&adjIndexes[0]),
      thrust::raw_pointer_cast(&adjacency[0]),
      thrust::raw_pointer_cast(&adjacencyBlockLabel[0]),
      thrust::raw_pointer_cast(&blockMappedAdjacency[0]),
      thrust::raw_pointer_cast(&fineAggregate[0]));
}
// Builds the induced (coarse) graph defined by partitionLabel: each distinct
// label becomes a coarse node, and a coarse edge (a, b) exists when any fine
// edge connects a node labeled a to one labeled b. Result is returned in CSR
// form through adjIndexesOut / adjacencyOut.
void misHelpers::getInducedGraph(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut)
{
// Declaring temporary vectors:
IdxVector_d adjacencyBlockLabel, blockMappedAdjacency;
adjacencyBlockLabel.resize(adjacencyIn.size(), 0);
blockMappedAdjacency.resize(adjacencyIn.size(), 0);
// Get the blocklabeled adjacency:
misHelpers::mapAdjacencyToBlock(adjIndexesIn, adjacencyIn, adjacencyBlockLabel, blockMappedAdjacency, partitionLabel);
// Zip up the block label and block mapped vectors and sort:
thrust::sort(thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end())));
// Remove Duplicates and resize:
int newSize = thrust::unique(
thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end()))) -
thrust::make_zip_iterator(thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin()));
adjacencyBlockLabel.resize(newSize);
blockMappedAdjacency.resize(newSize);
// Offsets use the neg-start convention of getPartIndicesNegStart.
misHelpers::getPartIndicesNegStart(adjacencyBlockLabel, adjIndexesOut);
// NOTE(review): the first entry of blockMappedAdjacency is intentionally
// dropped here to match the neg-start offset convention above -- confirm this
// pairing if either helper's convention changes.
adjacencyOut.resize(blockMappedAdjacency.size() - 1);
thrust::copy(blockMappedAdjacency.begin() + 1, blockMappedAdjacency.end(), adjacencyOut.begin());
}
// Computes the total node weight of every part in `partition`, returning the
// per-part totals (indexed by part id) in partSizes.
// Method: sort (part, weight) pairs by part id, take an inclusive prefix sum
// of the weights, record the accumulated total at the last node of each part
// (accumulatedPartSizesKernel), then difference consecutive totals to recover
// the individual part sizes (unaccumulatedPartSizesKernel).
void misHelpers::getWeightedPartSizes(IdxVector_d &partition, IdxVector_d &nodeWeights, IntVector_d &partSizes)
{
// Make copies to mess with
IntVector_d part(partition.begin(), partition.end());
IntVector_d weights(nodeWeights.begin(), nodeWeights.end());
// Sorting temp vectors together
thrust::sort_by_key(part.begin(), part.end(), weights.begin());
// Getting prefix sum of values
thrust::inclusive_scan(weights.begin(), weights.end(), weights.begin());
// Another temp vector for accumulated size at last nodes
// (one slot per part id; part ids are assumed 0..maxPart).
IntVector_d accumulatedSize(part[part.size() - 1] + 1);
// Preparing to call kernel to fill accumulated size vector
int size = part.size();
int *part_d = thrust::raw_pointer_cast(&part[0]);
int *weights_d = thrust::raw_pointer_cast(&weights[0]);
int *accumulatedSize_d = thrust::raw_pointer_cast(&accumulatedSize[0]);
// Figuring out block sizes for kernel call:
int blockSize = 256;
int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
accumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, part_d, weights_d, accumulatedSize_d);
// Calling kernel to get the unaccumulated part sizes:
size = accumulatedSize.size();
nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
partSizes.resize(size);
int *sizes_d = thrust::raw_pointer_cast(&partSizes[0]);
unaccumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, accumulatedSize_d, sizes_d);
}
// Debug-only helper: for each part id 0..partCount-1, flood-fills from the
// first node found in the part and reports any nodes of that part that cannot
// be reached through same-part edges (i.e. the part is disconnected). Blocks
// on std::cin at the end so output can be inspected; not for production use.
// NOTE(review): every element access on the device vectors here is a separate
// device->host transfer, so this routine is extremely slow by design.
void misHelpers::checkPartConnectivity(int partCount, IdxVector_d partition, IdxVector_d adjIndexes, IdxVector_d adjacency, char *message)
{
// Debugging check on part connectivity:
std::cout << message << "\n";
vector<int> nodesToExplore, exploredNodes;
for(int i = 0; i < partCount; i++)
{
nodesToExplore.clear();
exploredNodes.clear();
// Find a node in the part
int rootId = -1;
for(int j = 0; j < partition.size(); j++)
{
if(partition[j] == i)
{
rootId = j;
break;
}
}
// Explore out from the part
// NOTE(review): if part i is empty, rootId stays -1 and the indexing below
// is out of bounds -- assumes every part id < partCount is non-empty.
int start = adjIndexes[rootId], end = adjIndexes[rootId + 1];
for(int n = start; n < end; n++)
{
int neighbor = adjacency[n];
if(partition[neighbor] == i)
nodesToExplore.push_back(neighbor);
}
exploredNodes.push_back(rootId);
// Iterating through everything:
while(nodesToExplore.size() > 0)
{
// Popping off the last node to explore and checking if it's done
int node = nodesToExplore.back();
nodesToExplore.pop_back();
// Checking if the node has been explored (linear scan of explored list):
bool exploredAlready = false;
for(int q = 0; q < exploredNodes.size(); q++)
if(exploredNodes[q] == node)
exploredAlready = true;
if(!exploredAlready)
{
int start = adjIndexes[node], end = adjIndexes[node + 1];
for(int n = start; n < end; n++)
{
int neighbor = adjacency[n];
if(partition[neighbor] == i)
{
nodesToExplore.push_back(neighbor);
//printf("\tAdded %d a neighbor of %d to explore list for part %d", neighbor, node, i);
}
}
exploredNodes.push_back(node);
//printf("\tAdded %d to explored for part %d\n", node, i);
}
}
// Now checking to see if there were any unreachable nodes.
for(int j = 0; j < partition.size(); j++)
{
if(partition[j] == i)
{
bool found = false;
for(int q = 0; q < exploredNodes.size(); q++)
if(exploredNodes[q] == j)
{
found = true;
break;
}
if(!found)
{
printf("Could not reach node %d in part %d from root %d\n", j, i, rootId);
printf("\tExplored nodes:");
for(int g = 0; g < exploredNodes.size(); g++)
printf(" %3d", exploredNodes[g]);
printf("\n");
}
}
}
}
// Pausing: entering 1 triggers an interactive dump of one part's adjacency.
int dummy = 0;
std::cin >> dummy;
if(dummy == 1)
{
int partToCheck;
std::cin >> partToCheck;
for(int i = 0; i < partition.size(); i++)
{
if(partition[i] == partToCheck)
{
int start = adjIndexes[i], end = adjIndexes[i + 1];
printf("Node %d is in partition %d\n\t", i, partToCheck);
for(int j = start; j < end; j++)
{
int neighbor = adjacency[j];
printf(" %4d ", neighbor);
}
printf("\n");
}
}
}
}
// Re-labels and re-orders the induced-graph CSR structure in place so that
// node ids follow the ordering induced by `partition` (nodes grouped by their
// coarse aggregate). Both adjIndexes and adjacency are rewritten.
void misHelpers::remapInducedGraph(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partition)
{
IdxVector_d tempCoarseAggregate = partition;
// aggregateLabel / permutedAdjacency are scratch copies sized like adjacency;
// their initial contents are overwritten by mapAdjacencyToBlock below.
IdxVector_d aggregateLabel = adjacency;
IdxVector_d permutedAdjacency = adjacency;
IdxVector_d coarsePermutation = partition;
IdxVector_d coarseIPermutation;
// Get the inverse permutation for the re-mapping
// (stable sort keeps the original relative order of equal-label nodes).
misHelpers::fillWithIndex(coarsePermutation);
thrust::stable_sort_by_key(tempCoarseAggregate.begin(), tempCoarseAggregate.end(), coarsePermutation.begin());
misHelpers::getInversePermutation(coarsePermutation, coarseIPermutation);
// Map the adjacency according to the inverse permutation
misHelpers::mapAdjacencyToBlock(adjIndexes, adjacency, aggregateLabel, permutedAdjacency, coarseIPermutation);
thrust::sort_by_key(aggregateLabel.begin(), aggregateLabel.end(), permutedAdjacency.begin());
// Copy from the temp to the real adjacency
thrust::copy(permutedAdjacency.begin(), permutedAdjacency.end(), adjacency.begin());
// Find the adjIndexes for the new adjacency
misHelpers::getPartIndices(aggregateLabel, adjIndexes);
}
|
2a385210a9fa36450b0f5d9fd8bb11d421a262f0.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef GPU
#include "edsdca/memory/memsync.h"
namespace edsdca {
namespace memory {
/**
 * Copies the matrix @p X (a vector of equal-length row vectors) into the
 * shared GPU matrix buffer, (re)allocating the buffer when the required size
 * changes.
 *
 * @param X data to be transferred to GPU memory; all rows must have the same
 *          length as X.front()
 *
 * @return device pointer to the row-major copy of @p X
 */
double *MemSync::PushToGpuMatrix(const std::vector<Eigen::VectorXd> &X) {
  int rows = X.size();
  int cols = X.front().size();
  double *cx = (double*)malloc(sizeof(double) * rows * cols);
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < cols; ++j) {
      // Row-major layout: the row stride is the row length (cols).
      // Fix: previously indexed as i * rows + j, which scrambles (and can
      // overflow into adjacent rows of) any non-square matrix.
      cx[i * cols + j] = X[i](j);
    }
  }
  // (Re)allocate the shared device buffer if it is missing or mis-sized.
  if (!memory_is_allocated_ || rows * cols != MemSync::matrix_size_) {
    SetMatrixMemoryAllocationSize(rows * cols);
    AllocateGlobalSharedMem();
  }
  hipMemcpy(MemSync::dX_, cx, sizeof(double) * rows * cols,
            hipMemcpyHostToDevice);
  free(cx);
  return dX_;
}
/**
 * Transfers the vector @p x into the shared GPU x-buffer, (re)allocating
 * device memory first when the buffer is missing or sized differently.
 *
 * @param x data to be transferred to the GPU
 *
 * @return device pointer to the copied data
 */
double* MemSync::PushToGpuX(const Eigen::VectorXd& x) {
  int n = x.size();
  // Stage the Eigen vector into a plain host array:
  double *staging = (double*)malloc(sizeof(double) * n);
  for (int i = 0; i < n; ++i) {
    staging[i] = x(i);
  }
  // (Re)allocate the shared device buffers if needed before copying.
  if (!memory_is_allocated_ || n != MemSync::d_) {
    SetMemoryAllocationSize(n);
    AllocateGlobalSharedMem();
  }
  hipMemcpy(MemSync::dx_, staging, sizeof(double) * n, hipMemcpyHostToDevice);
  free(staging);
  return dx_;
}
/**
 * Copies the label vector @p x into the shared device buffer dy_,
 * (re)allocating the global device buffers when the dimension changes.
 *
 * @param x vector to transfer to the GPU
 *
 * @return device pointer to the copied data (dy_)
 */
double* MemSync::PushToGpuY(const Eigen::VectorXd& x) {
  const int n = x.size();
  // Stage the Eigen vector in a plain host buffer first.
  double* staging = (double*)malloc(sizeof(double) * n);
  for (int idx = 0; idx < n; ++idx) {
    staging[idx] = x(idx);
  }
  // Make sure device buffers exist and match the current dimension.
  if (!memory_is_allocated_ || n != MemSync::d_) {
    SetMemoryAllocationSize(n);
    AllocateGlobalSharedMem();
  }
  hipMemcpy(MemSync::dy_, staging, sizeof(double) * n, hipMemcpyHostToDevice);
  free(staging);
  return dy_;
}
/**
 * Pulls a vector of doubles from the GPU to the host.
 *
 * @param d_v device pointer to the data
 * @param size number of doubles to copy
 *
 * @return an Eigen vector holding a host-side copy of the data
 */
Eigen::VectorXd MemSync::PullFromGpu(double* d_v, long size) {
  double* v = (double*)malloc(sizeof(double) * size);
  hipMemcpy(v, d_v, sizeof(double) * size, hipMemcpyDeviceToHost);
  Eigen::VectorXd eig_v(size);
  for (long i = 0; i < size; ++i) {
    eig_v(i) = v[i];
  }
  // BUG FIX: the staging buffer was previously leaked on every call.
  free(v);
  return eig_v;
}
/**
 * Pulls the stored feature matrix from the GPU into an Eigen matrix.
 *
 * @return a (matrix_size_ / d_) x d_ copy of the row-major device matrix dX_
 */
Eigen::MatrixXd MemSync::PullMatrixFromGpu() {
  int cols = d_;
  int rows = int(matrix_size_ / d_);
  double *tmpX = (double*)malloc(sizeof(double) * matrix_size_);
  hipMemcpy(tmpX, dX_, sizeof(double) * matrix_size_,
            hipMemcpyDeviceToHost);
  Eigen::MatrixXd result(rows, cols);
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < cols; ++j) {
      // BUG FIX: element (i, j) of a row-major matrix lives at i * cols + j;
      // the previous `tmpX[i * rows + i]` ignored j and used the wrong stride.
      result(i, j) = tmpX[i * cols + j];
    }
  }
  free(tmpX);
  return result;
}
// Fetches the d_-element result vector res_ from the device into an
// Eigen vector on the host.
Eigen::VectorXd MemSync::PullResFromGpu() {
  double* staging = (double*)malloc(sizeof(double) * d_);
  hipMemcpy(staging, res_, sizeof(double) * d_, hipMemcpyDeviceToHost);
  Eigen::VectorXd out(d_);
  for (long k = 0; k < d_; ++k) {
    out(k) = staging[k];
  }
  free(staging);
  return out;
}
/**
 * Allocates a buffer of `size` doubles on the GPU.
 *
 * @param size number of doubles to allocate
 *
 * @return a device pointer to the allocated memory
 */
double* MemSync::AllocateMemOnGpu(const long size) {
  // BUG FIX: the previous host-side malloc into d_v was immediately
  // overwritten by hipMalloc, leaking the host buffer on every call.
  double* d_v = NULL;
  hipMalloc((void**)&d_v, sizeof(double) * size);
  return d_v;
}
// Copies a single scalar from device pointer d_x straight into a stack
// variable and returns it (no heap staging needed for one value).
double MemSync::PullValFromGpu(double* d_x) {
  double value = 0.0;
  hipMemcpy(&value, d_x, sizeof(double), hipMemcpyDeviceToHost);
  return value;
}
// (Re)allocates the four global device buffers sized by the current d_ and
// matrix_size_, freeing any previous allocations first.
// NOTE(review): hipMalloc/hipFree return codes are not checked.
void MemSync::AllocateGlobalSharedMem() {
if (memory_is_allocated_) {
// Release stale buffers before resizing.
hipFree(dx_);
hipFree(dy_);
hipFree(res_);
hipFree(dX_);
}
hipMalloc((double**)&dx_, d_ * sizeof(double));
hipMalloc((double**)&dy_, d_ * sizeof(double));
hipMalloc((double**)&res_, d_ * sizeof(double));
hipMalloc((double**)&dX_, matrix_size_ * sizeof(double));
memory_is_allocated_ = true;
}
} // memory
} // edsdca
#endif // GPU
| 2a385210a9fa36450b0f5d9fd8bb11d421a262f0.cu | #ifdef GPU
#include "edsdca/memory/memsync.h"
namespace edsdca {
namespace memory {
/**
* Puts data from matrix @p X onto GPU memory
*
* @param X data to be transfered to GPU memory
*/
double *MemSync::PushToGpuMatrix(const std::vector<Eigen::VectorXd> &X) {
  int rows = X.size();
  int cols = X.front().size();
  // Flatten X row-major into a contiguous host staging buffer.
  double *cx = (double*)malloc(sizeof(double) * rows * cols);
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < cols; ++j) {
      // BUG FIX: the row stride is `cols`, not `rows`; the previous
      // `i * rows + j` scrambled and overran the buffer when rows != cols.
      cx[i * cols + j] = X[i](j);
    }
  }
  // (Re)allocate the shared device buffers when the footprint changes.
  if (!memory_is_allocated_ || rows * cols != MemSync::matrix_size_) {
    SetMatrixMemoryAllocationSize(rows * cols);
    AllocateGlobalSharedMem();
  }
  cudaMemcpy(MemSync::dX_, cx, sizeof(double) * rows * cols,
             cudaMemcpyHostToDevice);
  free(cx);
  return dX_;
}
/**
 * Copies the feature vector @p x into the shared device buffer dx_,
 * (re)allocating the global device buffers when the dimension changes.
 *
 * @param x vector to transfer to the GPU
 *
 * @return device pointer to the copied data (dx_)
 */
double* MemSync::PushToGpuX(const Eigen::VectorXd& x) {
  const int n = x.size();
  // Stage the Eigen vector in a plain host buffer first.
  double* staging = (double*)malloc(sizeof(double) * n);
  for (int idx = 0; idx < n; ++idx) {
    staging[idx] = x(idx);
  }
  // Make sure device buffers exist and match the current dimension.
  if (!memory_is_allocated_ || n != MemSync::d_) {
    SetMemoryAllocationSize(n);
    AllocateGlobalSharedMem();
  }
  cudaMemcpy(MemSync::dx_, staging, sizeof(double) * n, cudaMemcpyHostToDevice);
  free(staging);
  return dx_;
}
/**
 * Copies the label vector @p x into the shared device buffer dy_,
 * (re)allocating the global device buffers when the dimension changes.
 *
 * @param x vector to transfer to the GPU
 *
 * @return device pointer to the copied data (dy_)
 */
double* MemSync::PushToGpuY(const Eigen::VectorXd& x) {
  const int n = x.size();
  // Stage the Eigen vector in a plain host buffer first.
  double* staging = (double*)malloc(sizeof(double) * n);
  for (int idx = 0; idx < n; ++idx) {
    staging[idx] = x(idx);
  }
  // Make sure device buffers exist and match the current dimension.
  if (!memory_is_allocated_ || n != MemSync::d_) {
    SetMemoryAllocationSize(n);
    AllocateGlobalSharedMem();
  }
  cudaMemcpy(MemSync::dy_, staging, sizeof(double) * n, cudaMemcpyHostToDevice);
  free(staging);
  return dy_;
}
/**
 * Pulls a vector of doubles from the GPU to the host.
 *
 * @param d_v device pointer to the data
 * @param size number of doubles to copy
 *
 * @return an Eigen vector holding a host-side copy of the data
 */
Eigen::VectorXd MemSync::PullFromGpu(double* d_v, long size) {
  double* v = (double*)malloc(sizeof(double) * size);
  cudaMemcpy(v, d_v, sizeof(double) * size, cudaMemcpyDeviceToHost);
  Eigen::VectorXd eig_v(size);
  for (long i = 0; i < size; ++i) {
    eig_v(i) = v[i];
  }
  // BUG FIX: the staging buffer was previously leaked on every call.
  free(v);
  return eig_v;
}
/**
 * Pulls the stored feature matrix from the GPU into an Eigen matrix.
 *
 * @return a (matrix_size_ / d_) x d_ copy of the row-major device matrix dX_
 */
Eigen::MatrixXd MemSync::PullMatrixFromGpu() {
  int cols = d_;
  int rows = int(matrix_size_ / d_);
  double *tmpX = (double*)malloc(sizeof(double) * matrix_size_);
  cudaMemcpy(tmpX, dX_, sizeof(double) * matrix_size_,
             cudaMemcpyDeviceToHost);
  Eigen::MatrixXd result(rows, cols);
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < cols; ++j) {
      // BUG FIX: element (i, j) of a row-major matrix lives at i * cols + j;
      // the previous `tmpX[i * rows + i]` ignored j and used the wrong stride.
      result(i, j) = tmpX[i * cols + j];
    }
  }
  free(tmpX);
  return result;
}
// Fetches the d_-element result vector res_ from the device into an
// Eigen vector on the host.
Eigen::VectorXd MemSync::PullResFromGpu() {
  double* staging = (double*)malloc(sizeof(double) * d_);
  cudaMemcpy(staging, res_, sizeof(double) * d_, cudaMemcpyDeviceToHost);
  Eigen::VectorXd out(d_);
  for (long k = 0; k < d_; ++k) {
    out(k) = staging[k];
  }
  free(staging);
  return out;
}
/**
 * Allocates a buffer of `size` doubles on the GPU.
 *
 * @param size number of doubles to allocate
 *
 * @return a device pointer to the allocated memory
 */
double* MemSync::AllocateMemOnGpu(const long size) {
  // BUG FIX: the previous host-side malloc into d_v was immediately
  // overwritten by cudaMalloc, leaking the host buffer on every call.
  double* d_v = NULL;
  cudaMalloc((void**)&d_v, sizeof(double) * size);
  return d_v;
}
// Copies a single scalar from device pointer d_x straight into a stack
// variable and returns it (no heap staging needed for one value).
double MemSync::PullValFromGpu(double* d_x) {
  double value = 0.0;
  cudaMemcpy(&value, d_x, sizeof(double), cudaMemcpyDeviceToHost);
  return value;
}
// (Re)allocates the four global device buffers sized by the current d_ and
// matrix_size_, freeing any previous allocations first.
// NOTE(review): cudaMalloc/cudaFree return codes are not checked.
void MemSync::AllocateGlobalSharedMem() {
if (memory_is_allocated_) {
// Release stale buffers before resizing.
cudaFree(dx_);
cudaFree(dy_);
cudaFree(res_);
cudaFree(dX_);
}
cudaMalloc((double**)&dx_, d_ * sizeof(double));
cudaMalloc((double**)&dy_, d_ * sizeof(double));
cudaMalloc((double**)&res_, d_ * sizeof(double));
cudaMalloc((double**)&dX_, matrix_size_ * sizeof(double));
memory_is_allocated_ = true;
}
} // memory
} // edsdca
#endif // GPU
|
e381d7cc8a99d4178de9475491c472f9140d121b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
// Naive matrix multiply: each thread computes one element C[row][col] as the
// dot product of row `row` of A and column `col` of B (H x W row-major int
// matrices with H == W). Expected launch: 2-D blocks/grid covering the
// W x H output; out-of-range threads are masked by the bounds check.
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
// Inner (shared) dimension is W; valid here because H == W.
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
// CPU reference implementation of C = A * B for H x W row-major int matrices.
void multMat(int *h_a, int *h_b, int *h_c){
for(int r = 0; r < H; r++){
for(int c = 0; c < W; c++){
int acc = 0;
for(int k = 0; k < W; k++){
acc += h_a[r * W + k] * h_b[k * W + c];
}
h_c[r * W + c] = acc;
}
}
}
// Returns true when both H x W result matrices are element-wise identical.
bool compareTo(int *h_c,int *h_result){
const int total = H * W;
for(int idx = 0; idx < total; idx++){
if(h_c[idx] != h_result[idx]){
return false;
}
}
return true;
}
// Prints the H x W matrix, one row per line, values separated by spaces.
void printMatrix(int *result){
for(int r = 0; r < H; r++){
for(int c = 0; c < W; c++){
cout<<result[r * W + c]<<" ";
}
cout<<endl;
}
}
// Benchmark: 1000x1000 integer matrix multiply on CPU vs GPU, then verify
// that both results agree.
// NOTE(review): CUDA API and kernel-launch return codes are not checked.
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
// Kept as float so the grid-size computation below performs real division.
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
// Allocate memory on the host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
// Initialize the matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
// CPU reference multiply into h_c (it multiplies matrices, despite the
// original comment describing a vector addition).
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
// Allocate memory on the device
hipMalloc(&d_a, sizeof(int)*H*W);
hipMalloc(&d_b, sizeof(int)*H*W);
hipMalloc(&d_c, sizeof(int)*H*W);
// Copy input data from host to device
hipMemcpy(d_a, h_a, H*W* sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, H*W* sizeof(int), hipMemcpyHostToDevice);
// One 32x32 thread block per output tile; enough blocks to cover W x H.
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
// NOTE: this timing window includes the result copy back to the host.
hipLaunchKernelGGL(( multMatCUDA), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_a, d_b, d_c);
hipMemcpy(h_result, d_c, H*W*sizeof(int), hipMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} | e381d7cc8a99d4178de9475491c472f9140d121b.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
// Naive matrix multiply: each thread computes one element C[row][col] as the
// dot product of row `row` of A and column `col` of B (H x W row-major int
// matrices with H == W). Expected launch: 2-D blocks/grid covering the
// W x H output; out-of-range threads are masked by the bounds check.
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
// Inner (shared) dimension is W; valid here because H == W.
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
// CPU reference implementation of C = A * B for H x W row-major int matrices.
void multMat(int *h_a, int *h_b, int *h_c){
for(int r = 0; r < H; r++){
for(int c = 0; c < W; c++){
int acc = 0;
for(int k = 0; k < W; k++){
acc += h_a[r * W + k] * h_b[k * W + c];
}
h_c[r * W + c] = acc;
}
}
}
// Returns true when both H x W result matrices are element-wise identical.
bool compareTo(int *h_c,int *h_result){
const int total = H * W;
for(int idx = 0; idx < total; idx++){
if(h_c[idx] != h_result[idx]){
return false;
}
}
return true;
}
// Prints the H x W matrix, one row per line, values separated by spaces.
void printMatrix(int *result){
for(int r = 0; r < H; r++){
for(int c = 0; c < W; c++){
cout<<result[r * W + c]<<" ";
}
cout<<endl;
}
}
// Benchmark: 1000x1000 integer matrix multiply on CPU vs GPU, then verify
// that both results agree.
// NOTE(review): CUDA API and kernel-launch return codes are not checked.
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
// Kept as float so the grid-size computation below performs real division.
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
// Allocate memory on the host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
// Initialize the matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
// CPU reference multiply into h_c (it multiplies matrices, despite the
// original comment describing a vector addition).
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
// Allocate memory on the device
cudaMalloc(&d_a, sizeof(int)*H*W);
cudaMalloc(&d_b, sizeof(int)*H*W);
cudaMalloc(&d_c, sizeof(int)*H*W);
// Copy input data from host to device
cudaMemcpy(d_a, h_a, H*W* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, H*W* sizeof(int), cudaMemcpyHostToDevice);
// One 32x32 thread block per output tile; enough blocks to cover W x H.
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
// NOTE: this timing window includes the result copy back to the host.
multMatCUDA<<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
cudaMemcpy(h_result, d_c, H*W*sizeof(int), cudaMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} |
e0499d4a6c92331662dd9d4d7021a2289f3a7254.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/cuda.hpp>
#include <vector>
#define N 512
#define N2 256
#define Db 16
#define Rb 8
#define Dnum 249 //N2-Rb
using namespace cv;
using namespace std;
//Run on terminal:
// nvcc FE512YUVClassify.cu -o FE512 `pkg-config --cflags --libs opencv` --expt-relaxed-constexpr
// nvprof ./FE512 ../Dataset/Image512/
// Reads a raw 8-bit single-channel frame of width*height bytes into a
// CV_8UC1 Mat. Returns an empty Mat when the file cannot be opened.
Mat readRawfile(const char* filename,int width,int height){
Mat outputimage;
//read the raw file
FILE *fp = NULL;
char *imagedata = NULL;
int IMAGE_WIDTH = width;
int IMAGE_HEIGHT = height;
int framesize = IMAGE_WIDTH * IMAGE_HEIGHT;
//Open raw Bayer image.
fp = fopen(filename, "rb");
if(!fp){
cout << "read file failure";
return outputimage;
}
//Memory allocation for bayer image data buffer.
imagedata = (char*) malloc (sizeof(char) * framesize);
//Read image data and store in buffer.
// NOTE(review): the fread return value is not checked; a short read leaves
// the tail of the buffer uninitialized.
fread(imagedata, sizeof(char), framesize, fp);
//Create Opencv mat structure for image dimension. For 8 bit bayer, type should be CV_8UC1.
outputimage.create(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1);
memcpy(outputimage.data, imagedata, framesize);
free(imagedata);
fclose(fp);
return outputimage;
}
// Selects the first GPU with compute capability >= 1.x and makes it the
// current device. Returns false when no usable device exists.
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count == 0) {
cout << "There is no device."<< endl;
return false;
}
int i;
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count){
cout << "There is no device supporting CUDA 1.x." << endl;
return false;
}
hipSetDevice(i);
return true;
}
// Copies an R x R sub-block of `h` (row stride H) into `a` (row stride R),
// applying one of the 8 isometries of the square selected by k:
// k = 0..3 rotate counterclockwise by k*90 degrees; k = 4..7 reflect
// w.r.t. the y-axis and then rotate (see the comment before case 4).
__device__ void permutation(const int *h,int *a,int H,int R,int k){
//x,y is the position in the Mat h.
//R is the size of array a.
int i1,j1;
switch(k){
case 0:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1) = *(h+i1*H+j1);
break;
case 1:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-j1)*H+i1);
break;
case 2:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-i1)*H+R-1-j1);
break;
case 3:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+j1*H+R-1-i1);
break;
/* Reflect w.r.t. y-axis, then rotate
counterclockwise 90, 180, 270 degree(s)
*/
case 4:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+i1*H+R-1-j1);
break;
case 5:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-j1)*H+R-1-i1);
break;
case 6:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-i1)*H+j1);
break;
case 7:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(j1)*H+i1);
break;
} /* end switch */
}
// Classifies an Rb x Rb block by the brightness ordering of its four
// quadrant sums. For each of the 8 isometries k, it sums the quadrants
// (a1..a4) and records 10*class + k when they satisfy one of three
// canonical orderings; the last matching (k, class) pair wins.
// NOTE(review): `kout` stays uninitialized if no iteration matches any of
// the three orderings; presumably the 8 transforms always realize one of
// the canonical orderings — confirm.
__device__ int Classify(int* inputBlock,int inputSize){
const int BlockSize = Rb;
int permu[BlockSize][BlockSize];
int a1,a2,a3,a4; //Four subblock of the classify block
int i,j,k;
int kout;
//Permutation
for(k=0;k<8;k++){
permutation(inputBlock,&permu[0][0],inputSize,BlockSize,k);
//Calculate a1,a2,a3,a4 (quadrant sums: a1=NW, a2=NE, a3=SW, a4=SE)
a1=0;
a2=0;
a3=0;
a4=0;
for(i=0;i<BlockSize;i++){
for(j=0;j<BlockSize;j++){
if(i < BlockSize/2 && j< BlockSize/2){
a1 += permu[i][j];
}else if(i< BlockSize/2 && j >= BlockSize/2){
a2 += permu[i][j];
}else if(i >= BlockSize/2 && j < BlockSize/2){
a3 += permu[i][j];
}else{
a4 += permu[i][j];
}
}
}
//Classify by means of a1,a2,a3,a4
if(a1>=a2 && a2>=a3 && a3>=a4){
kout = 10+k;
}
if(a1>=a2 && a2>=a4 && a4 >=a3){
kout = 20+k;
}
if(a1>=a4 && a4>=a2 && a2>=a3){
kout = 30+k;
}
}//k
return kout;
}
// Classifies every domain block of the downsampled image: block x handles
// row-offset x, thread y handles column-offset y. Rb rows of the image are
// staged into shared memory before classification.
// NOTE(review): with blockDim.x <= Dnum (as launched from main) the else
// branch never executes, so shared columns beyond blockDim.x-1 stay
// unwritten even though Classify reads up to column y+Rb-1 — confirm.
__global__ void DomainBlockClassify(cuda::PtrStepSz<uchar> downImage,cuda::PtrStepSz<uchar> Result){
int x= blockIdx.x;
int y= threadIdx.x;
__shared__ int tmpDown[Rb][N2];
if(y<Dnum){
for(int i=0;i<Rb;i++){
tmpDown[i][y] = downImage(x+i,y);
}
}else{
for(int i=0;i<Rb;i++){
for(int j=0;j<Rb;j++){
tmpDown[i][y+j] = downImage(x+i,y+j);
}
}
}
// All staging writes must land before any thread reads a neighbor's column.
__syncthreads();
if(x<Dnum && y<Dnum){
Result(x,y) = Classify(&tmpDown[0][y],N2);
}
}
// Fits the domain block D (sourceD) onto the range block R (sourceR):
// computes offset m (mean of R), scale s = sum((D-meanD)*R)/sum((D-meanD)^2),
// quantizes s into a 5-bit index ks covering [-1, 2.1) in steps of 0.1, and
// accumulates the squared residual of the quantized mapping.
// Outputs: *desS = quantized scale index, *desM = offset, *desErr = error.
__device__ void calSM(int *sourceR,int* sourceD,float* desS,float* desM,float* desErr){
int Ud = 0;
int m = 0;
int i,j,ks;
float s;
float sup = 0.0;
float sdown = 0.0;
int tmpR,tmpD;
float tmperr;
float err = 0.005;
//Calculate s,m,k
// Integer means of the domain (Ud) and range (m) blocks.
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
Ud += *(sourceD+i*Rb+j);
m += *(sourceR+i*Rb+j);
}
}
Ud = Ud/(Rb*Rb);
m = m/(Rb*Rb);
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpR = *(sourceR+i*Rb+j);
tmpD = *(sourceD+i*Rb+j);
sup += (tmpD-Ud)*(tmpR);
sdown += (tmpD-Ud)*(tmpD-Ud);
}
}
// Guard against a (near-)constant domain block, then quantize s.
s= ( fabs(sdown)<0.01? 0.0 : sup/sdown);
ks=(s<-1? 0: s>=2.1?31:(short int)(10.5+s*10));
s=0.1*ks-1;
// Residual error of the quantized affine mapping s*(D-Ud)+m against R.
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpR = *(sourceR+i*Rb+j);
tmpD = *(sourceD+i*Rb+j);
tmperr = s*(tmpD-Ud)+ m - tmpR;
err += (tmperr*tmperr);
}
}
*desS = (float)ks;
*desM = (float)m;
*desErr = err;
}
// Combines the range-block permutation index Rk with the domain-block index
// Dk and returns the index of the single isometry relating them. Indices
// 0-3 are rotations, 4-7 reflect-then-rotate (see permutation()).
// NOTE(review): returns float only to match the caller's float output array.
__device__ float calK(int Rk,int Dk){
if(Rk==Dk){
return 0;
}else if(Rk < 4 && Dk < 4){
// Both pure rotations: result is the rotation difference (mod 4).
if(Rk<Dk){
if(Dk-Rk ==1){
return 1;
}else if(Dk-Rk ==2){
return 2;
}else{
return 3;
}
}else{
if(Rk-Dk==1){
return 3;
}else if(Rk-Dk==2){
return 2;
}else{
return 1;
}
}
}else if(Rk >= 4 && Dk >= 4){
// Both reflections: their composition is again a rotation (0-3).
if(Rk<Dk){
if(Dk-Rk ==1){
return 3;
}else if(Dk-Rk ==2){
return 2;
}else{
return 1;
}
}else{
if(Rk-Dk==1){
return 1;
}else if(Rk-Dk==2){
return 2;
}else{
return 3;
}
}
}else if(Rk < 4 && Dk >= 4){
// Rotation vs reflection: the relating isometry is a reflection (4-7).
if(Dk-Rk==4){
return 4;
}else if(Dk-Rk==5 || Dk-Rk == 1){
return 5;
}else if(Dk-Rk==6 || Dk-Rk == 2){
return 6;
}else{
return 7;
}
}else{
if(Rk-Dk==4){
return 4;
}else if(Rk-Dk==5 || Rk-Dk == 1){
return 5;
}else if(Rk-Dk==6 || Rk-Dk == 2){
return 6;
}else{
return 7;
}
}
}
// For the Rb x Rb range block at (Rx, Ry): each thread y evaluates the
// domain block at (blockIdx.x, y); a shared-memory pairwise reduction keeps
// the candidate with the smallest fit error, and thread 0 writes the winning
// (y, k, s, m, err) tuple to Output[blockIdx.x*5 .. +4].
// Expected launch: one block per domain row, one thread per domain column.
__global__ static void RangeParallel(cuda::PtrStep<uchar> image,cuda::PtrStep<uchar> downImage,cuda::PtrStep<uchar> klass,float *Output,int Rx,int Ry){
__shared__ float tmpOutput[5][Dnum];
__shared__ int tmpDown[Rb][N2];
int i,j;
int tmpR[Rb][Rb];
int perR[Rb][Rb];
int perD[Rb][Rb];
float s,m,err;
float* ds = &s;
float* dm = &m;
float* derr = &err;
int Dclass,Rclass;
int Dk,Rk;
int mask=1;
int offset=1;
int x = blockIdx.x;
int y = threadIdx.x;
//Set shared mem
// Stage Rb rows of the downsampled image for this block's domain row.
if(y<Dnum){
for(i=0;i<Rb;i++){
tmpDown[i][y] = downImage(x+i,y);
}
}else{
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpDown[i][y+j] = downImage(x+i,y+j);
}
}
}
__syncthreads();
//Set Range block
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpR[i][j] = image(Rx+i,Ry+j);
}
}
Dclass = klass(x,y)/10;
Rclass = Classify(&tmpR[0][0],Rb);
Dk = klass(x,y)%10;
// Sentinel error so candidates whose class does not match lose below.
tmpOutput[4][y] = 6553500;
if(Dclass == Rclass/10){
// Bring both blocks into canonical orientation, then fit D onto R.
Rk = Rclass%10;
permutation(&tmpR[0][0],&perR[0][0],Rb,Rb,Rk);
permutation(&tmpDown[0][y],&perD[0][0],N2,Rb,Dk);
calSM(&perR[0][0],&perD[0][0],ds,dm,derr);
tmpOutput[0][y] = y;
tmpOutput[1][y] = calK(Rk,Dk);
tmpOutput[2][y] = *ds;
tmpOutput[3][y] = *dm;
tmpOutput[4][y] = *derr;
}
__syncthreads();
// Pairwise min-reduction across threads, keyed on the error channel.
while(offset < blockDim.x){
if((y & mask) == 0 && (y+offset) < blockDim.x){
if(tmpOutput[4][y+offset] < tmpOutput[4][y]){
tmpOutput[0][y] = tmpOutput[0][y+offset];
tmpOutput[1][y] = tmpOutput[1][y+offset];
tmpOutput[2][y] = tmpOutput[2][y+offset];
tmpOutput[3][y] = tmpOutput[3][y+offset];
tmpOutput[4][y] = tmpOutput[4][y+offset];
}
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
if(y==0){
Output[x*5]= tmpOutput[0][y];
Output[x*5+1]= tmpOutput[1][y];
Output[x*5+2]= tmpOutput[2][y];
Output[x*5+3]= tmpOutput[3][y];
Output[x*5+4]= tmpOutput[4][y];
}
}
// Fractal image encoder: loads the image given on the command line, converts
// it to YCrCb, and per channel classifies domain blocks on the GPU, then for
// every range block searches for the best-matching domain block and appends
// a 4-byte code (x, y, m, (k<<5)+s) to "512Outcode".
// NOTE(review): argv[1] and the CUDA calls are not error-checked.
int main(int argc, char** argv){
if(!InitCUDA()) return 0;
printf("CUDA initialized.\n");
clock_t start, end, totaltime;
size_t free_mem,total_mem;
Mat oimage,image,downimage,tmpimage;
vector<Mat> rgbchannels(3);
float *output;
cuda::GpuMat Gpuimage,Gpudownimage;
cuda::GpuMat Gpuclass(Dnum,Dnum,CV_8UC3);
float *GpuOutput;
hipMalloc((void**)&GpuOutput,sizeof(float)*5*Dnum);
output = (float*)malloc(sizeof(float)*5*Dnum);
oimage = imread(argv[1],1);
cvtColor(oimage,image,CV_BGR2YCrCb);
split(image,rgbchannels);
//Open the file for store encoding data
fstream outfile;
outfile.open("512Outcode",ios::out);
if(!outfile){
cout << "Open out file fail!!" << endl;
return 0;
}
start = clock();
//Encoding
int i,j,ll;
int x,y,k,m,s;
int RGB;
int BlockNum,ThreadNum,ImageSize;
float Emin;
Emin=6553600;
// Channel 0 (luma) is encoded at full resolution; the chroma channels use
// the half-resolution pyramid levels.
for(RGB=0;RGB<3;RGB++){
resize(rgbchannels[RGB],downimage,Size(image.cols/2,image.rows/2),0,0,INTER_LINEAR);
resize(downimage,tmpimage,Size(downimage.cols/2,downimage.rows/2),0,0,INTER_LINEAR);
if(RGB==0){
Gpuimage.upload(rgbchannels[RGB]);
Gpudownimage.upload(downimage);
BlockNum = Dnum;
ThreadNum = Dnum;
ImageSize = N;
}else{
Gpuimage.upload(downimage);
Gpudownimage.upload(tmpimage);
BlockNum = 121;
ThreadNum = 121;
ImageSize = N/2;
}
//Classify the domain block into 3 class
hipLaunchKernelGGL(( DomainBlockClassify), dim3(BlockNum),dim3(ThreadNum), 0, 0, Gpudownimage,Gpuclass);
//For each Range, calculate s,m value
for(i=0;i<ImageSize;i+=Rb){
for(j=0;j<ImageSize;j+=Rb){
hipLaunchKernelGGL(( RangeParallel), dim3(BlockNum),dim3(ThreadNum), 0, 0, Gpuimage,Gpudownimage,Gpuclass,GpuOutput,i,j);
hipMemcpy2D(output,sizeof(float)*5,GpuOutput,sizeof(float)*5,sizeof(float)*5,BlockNum,hipMemcpyDeviceToHost);
// Scan all per-row winners for the global best match of this range block.
for(ll=0;ll<BlockNum;ll++){
if(output[ll*5+4] <= Emin){
Emin = output[ll*5+4];
x = ll;
y = output[ll*5];
k= output[ll*5+1];
s= output[ll*5+2];
m= output[ll*5+3];
}
}
Emin = 6553600;
// Pack the code: k occupies the top 3 bits, s the low 5 bits of one byte.
outfile << (char)x << (char)y << (char)m << (char)((k<<5)+s);
}
}
}
//Release the memory
outfile.close();
Gpuimage.release();
Gpudownimage.release();
Gpuclass.release();
hipFree(GpuOutput);
free(output);
end = clock();
//Print time and the remain memory
hipError_t cudaErr;
totaltime = end-start;
double sec = (double) totaltime / CLOCKS_PER_SEC;
cout <<"Time:" << sec << endl;
cudaErr = hipMemGetInfo(&free_mem, &total_mem);
if(cudaErr != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(cudaErr), __FILE__, __LINE__);
}
cout << "free:" << free_mem << endl;
cout << "total:" << total_mem << endl;
return 0;
} | e0499d4a6c92331662dd9d4d7021a2289f3a7254.cu | #include <iostream>
#include <fstream>
#include <cuda_runtime.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/cuda.hpp>
#include <vector>
#define N 512
#define N2 256
#define Db 16
#define Rb 8
#define Dnum 249 //N2-Rb
using namespace cv;
using namespace std;
//Run on terminal:
// nvcc FE512YUVClassify.cu -o FE512 `pkg-config --cflags --libs opencv` --expt-relaxed-constexpr
// nvprof ./FE512 ../Dataset/Image512/
// Reads a raw 8-bit single-channel frame of width*height bytes into a
// CV_8UC1 Mat. Returns an empty Mat when the file cannot be opened.
Mat readRawfile(const char* filename,int width,int height){
Mat outputimage;
//read the raw file
FILE *fp = NULL;
char *imagedata = NULL;
int IMAGE_WIDTH = width;
int IMAGE_HEIGHT = height;
int framesize = IMAGE_WIDTH * IMAGE_HEIGHT;
//Open raw Bayer image.
fp = fopen(filename, "rb");
if(!fp){
cout << "read file failure";
return outputimage;
}
//Memory allocation for bayer image data buffer.
imagedata = (char*) malloc (sizeof(char) * framesize);
//Read image data and store in buffer.
// NOTE(review): the fread return value is not checked; a short read leaves
// the tail of the buffer uninitialized.
fread(imagedata, sizeof(char), framesize, fp);
//Create Opencv mat structure for image dimension. For 8 bit bayer, type should be CV_8UC1.
outputimage.create(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1);
memcpy(outputimage.data, imagedata, framesize);
free(imagedata);
fclose(fp);
return outputimage;
}
// Selects the first GPU with compute capability >= 1.x and makes it the
// current device. Returns false when no usable device exists.
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count == 0) {
cout << "There is no device."<< endl;
return false;
}
int i;
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count){
cout << "There is no device supporting CUDA 1.x." << endl;
return false;
}
cudaSetDevice(i);
return true;
}
// Copies an R x R sub-block of `h` (row stride H) into `a` (row stride R),
// applying one of the 8 isometries of the square selected by k:
// k = 0..3 rotate counterclockwise by k*90 degrees; k = 4..7 reflect
// w.r.t. the y-axis and then rotate (see the comment before case 4).
__device__ void permutation(const int *h,int *a,int H,int R,int k){
//x,y is the position in the Mat h.
//R is the size of array a.
int i1,j1;
switch(k){
case 0:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1) = *(h+i1*H+j1);
break;
case 1:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-j1)*H+i1);
break;
case 2:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-i1)*H+R-1-j1);
break;
case 3:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+j1*H+R-1-i1);
break;
/* Reflect w.r.t. y-axis, then rotate
counterclockwise 90, 180, 270 degree(s)
*/
case 4:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+i1*H+R-1-j1);
break;
case 5:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-j1)*H+R-1-i1);
break;
case 6:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(R-1-i1)*H+j1);
break;
case 7:
for (i1=0; i1<R; i1++)
for (j1=0; j1<R; j1++)
*(a+i1*R+j1)= *(h+(j1)*H+i1);
break;
} /* end switch */
}
// Classifies an Rb x Rb block by the brightness ordering of its four
// quadrant sums. For each of the 8 isometries k, it sums the quadrants
// (a1..a4) and records 10*class + k when they satisfy one of three
// canonical orderings; the last matching (k, class) pair wins.
// NOTE(review): `kout` stays uninitialized if no iteration matches any of
// the three orderings; presumably the 8 transforms always realize one of
// the canonical orderings — confirm.
__device__ int Classify(int* inputBlock,int inputSize){
const int BlockSize = Rb;
int permu[BlockSize][BlockSize];
int a1,a2,a3,a4; //Four subblock of the classify block
int i,j,k;
int kout;
//Permutation
for(k=0;k<8;k++){
permutation(inputBlock,&permu[0][0],inputSize,BlockSize,k);
//Calculate a1,a2,a3,a4 (quadrant sums: a1=NW, a2=NE, a3=SW, a4=SE)
a1=0;
a2=0;
a3=0;
a4=0;
for(i=0;i<BlockSize;i++){
for(j=0;j<BlockSize;j++){
if(i < BlockSize/2 && j< BlockSize/2){
a1 += permu[i][j];
}else if(i< BlockSize/2 && j >= BlockSize/2){
a2 += permu[i][j];
}else if(i >= BlockSize/2 && j < BlockSize/2){
a3 += permu[i][j];
}else{
a4 += permu[i][j];
}
}
}
//Classify by means of a1,a2,a3,a4
if(a1>=a2 && a2>=a3 && a3>=a4){
kout = 10+k;
}
if(a1>=a2 && a2>=a4 && a4 >=a3){
kout = 20+k;
}
if(a1>=a4 && a4>=a2 && a2>=a3){
kout = 30+k;
}
}//k
return kout;
}
// Classifies every domain block of the downsampled image: block x handles
// row-offset x, thread y handles column-offset y. Rb rows of the image are
// staged into shared memory before classification.
// NOTE(review): with blockDim.x <= Dnum (as launched from main) the else
// branch never executes, so shared columns beyond blockDim.x-1 stay
// unwritten even though Classify reads up to column y+Rb-1 — confirm.
__global__ void DomainBlockClassify(cuda::PtrStepSz<uchar> downImage,cuda::PtrStepSz<uchar> Result){
int x= blockIdx.x;
int y= threadIdx.x;
__shared__ int tmpDown[Rb][N2];
if(y<Dnum){
for(int i=0;i<Rb;i++){
tmpDown[i][y] = downImage(x+i,y);
}
}else{
for(int i=0;i<Rb;i++){
for(int j=0;j<Rb;j++){
tmpDown[i][y+j] = downImage(x+i,y+j);
}
}
}
// All staging writes must land before any thread reads a neighbor's column.
__syncthreads();
if(x<Dnum && y<Dnum){
Result(x,y) = Classify(&tmpDown[0][y],N2);
}
}
// Fits the domain block D (sourceD) onto the range block R (sourceR):
// computes offset m (mean of R), scale s = sum((D-meanD)*R)/sum((D-meanD)^2),
// quantizes s into a 5-bit index ks covering [-1, 2.1) in steps of 0.1, and
// accumulates the squared residual of the quantized mapping.
// Outputs: *desS = quantized scale index, *desM = offset, *desErr = error.
__device__ void calSM(int *sourceR,int* sourceD,float* desS,float* desM,float* desErr){
int Ud = 0;
int m = 0;
int i,j,ks;
float s;
float sup = 0.0;
float sdown = 0.0;
int tmpR,tmpD;
float tmperr;
float err = 0.005;
//Calculate s,m,k
// Integer means of the domain (Ud) and range (m) blocks.
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
Ud += *(sourceD+i*Rb+j);
m += *(sourceR+i*Rb+j);
}
}
Ud = Ud/(Rb*Rb);
m = m/(Rb*Rb);
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpR = *(sourceR+i*Rb+j);
tmpD = *(sourceD+i*Rb+j);
sup += (tmpD-Ud)*(tmpR);
sdown += (tmpD-Ud)*(tmpD-Ud);
}
}
// Guard against a (near-)constant domain block, then quantize s.
s= ( fabs(sdown)<0.01? 0.0 : sup/sdown);
ks=(s<-1? 0: s>=2.1?31:(short int)(10.5+s*10));
s=0.1*ks-1;
// Residual error of the quantized affine mapping s*(D-Ud)+m against R.
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpR = *(sourceR+i*Rb+j);
tmpD = *(sourceD+i*Rb+j);
tmperr = s*(tmpD-Ud)+ m - tmpR;
err += (tmperr*tmperr);
}
}
*desS = (float)ks;
*desM = (float)m;
*desErr = err;
}
// Combines the range-block permutation index Rk with the domain-block index
// Dk and returns the index of the single isometry relating them. Indices
// 0-3 are rotations, 4-7 reflect-then-rotate (see permutation()).
// NOTE(review): returns float only to match the caller's float output array.
__device__ float calK(int Rk,int Dk){
if(Rk==Dk){
return 0;
}else if(Rk < 4 && Dk < 4){
// Both pure rotations: result is the rotation difference (mod 4).
if(Rk<Dk){
if(Dk-Rk ==1){
return 1;
}else if(Dk-Rk ==2){
return 2;
}else{
return 3;
}
}else{
if(Rk-Dk==1){
return 3;
}else if(Rk-Dk==2){
return 2;
}else{
return 1;
}
}
}else if(Rk >= 4 && Dk >= 4){
// Both reflections: their composition is again a rotation (0-3).
if(Rk<Dk){
if(Dk-Rk ==1){
return 3;
}else if(Dk-Rk ==2){
return 2;
}else{
return 1;
}
}else{
if(Rk-Dk==1){
return 1;
}else if(Rk-Dk==2){
return 2;
}else{
return 3;
}
}
}else if(Rk < 4 && Dk >= 4){
// Rotation vs reflection: the relating isometry is a reflection (4-7).
if(Dk-Rk==4){
return 4;
}else if(Dk-Rk==5 || Dk-Rk == 1){
return 5;
}else if(Dk-Rk==6 || Dk-Rk == 2){
return 6;
}else{
return 7;
}
}else{
if(Rk-Dk==4){
return 4;
}else if(Rk-Dk==5 || Rk-Dk == 1){
return 5;
}else if(Rk-Dk==6 || Rk-Dk == 2){
return 6;
}else{
return 7;
}
}
}
// For the Rb x Rb range block at (Rx, Ry): each thread y evaluates the
// domain block at (blockIdx.x, y); a shared-memory pairwise reduction keeps
// the candidate with the smallest fit error, and thread 0 writes the winning
// (y, k, s, m, err) tuple to Output[blockIdx.x*5 .. +4].
// Expected launch: one block per domain row, one thread per domain column.
__global__ static void RangeParallel(cuda::PtrStep<uchar> image,cuda::PtrStep<uchar> downImage,cuda::PtrStep<uchar> klass,float *Output,int Rx,int Ry){
__shared__ float tmpOutput[5][Dnum];
__shared__ int tmpDown[Rb][N2];
int i,j;
int tmpR[Rb][Rb];
int perR[Rb][Rb];
int perD[Rb][Rb];
float s,m,err;
float* ds = &s;
float* dm = &m;
float* derr = &err;
int Dclass,Rclass;
int Dk,Rk;
int mask=1;
int offset=1;
int x = blockIdx.x;
int y = threadIdx.x;
//Set shared mem
// Stage Rb rows of the downsampled image for this block's domain row.
if(y<Dnum){
for(i=0;i<Rb;i++){
tmpDown[i][y] = downImage(x+i,y);
}
}else{
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpDown[i][y+j] = downImage(x+i,y+j);
}
}
}
__syncthreads();
//Set Range block
for(i=0;i<Rb;i++){
for(j=0;j<Rb;j++){
tmpR[i][j] = image(Rx+i,Ry+j);
}
}
Dclass = klass(x,y)/10;
Rclass = Classify(&tmpR[0][0],Rb);
Dk = klass(x,y)%10;
// Sentinel error so candidates whose class does not match lose below.
tmpOutput[4][y] = 6553500;
if(Dclass == Rclass/10){
// Bring both blocks into canonical orientation, then fit D onto R.
Rk = Rclass%10;
permutation(&tmpR[0][0],&perR[0][0],Rb,Rb,Rk);
permutation(&tmpDown[0][y],&perD[0][0],N2,Rb,Dk);
calSM(&perR[0][0],&perD[0][0],ds,dm,derr);
tmpOutput[0][y] = y;
tmpOutput[1][y] = calK(Rk,Dk);
tmpOutput[2][y] = *ds;
tmpOutput[3][y] = *dm;
tmpOutput[4][y] = *derr;
}
__syncthreads();
// Pairwise min-reduction across threads, keyed on the error channel.
while(offset < blockDim.x){
if((y & mask) == 0 && (y+offset) < blockDim.x){
if(tmpOutput[4][y+offset] < tmpOutput[4][y]){
tmpOutput[0][y] = tmpOutput[0][y+offset];
tmpOutput[1][y] = tmpOutput[1][y+offset];
tmpOutput[2][y] = tmpOutput[2][y+offset];
tmpOutput[3][y] = tmpOutput[3][y+offset];
tmpOutput[4][y] = tmpOutput[4][y+offset];
}
}
offset += offset;
mask = offset + mask;
__syncthreads();
}
if(y==0){
Output[x*5]= tmpOutput[0][y];
Output[x*5+1]= tmpOutput[1][y];
Output[x*5+2]= tmpOutput[2][y];
Output[x*5+3]= tmpOutput[3][y];
Output[x*5+4]= tmpOutput[4][y];
}
}
// Fractal image encoder: loads the image given on the command line, converts
// it to YCrCb, and per channel classifies domain blocks on the GPU, then for
// every range block searches for the best-matching domain block and appends
// a 4-byte code (x, y, m, (k<<5)+s) to "512Outcode".
// NOTE(review): argv[1] and the CUDA calls are not error-checked.
int main(int argc, char** argv){
if(!InitCUDA()) return 0;
printf("CUDA initialized.\n");
clock_t start, end, totaltime;
size_t free_mem,total_mem;
Mat oimage,image,downimage,tmpimage;
vector<Mat> rgbchannels(3);
float *output;
cuda::GpuMat Gpuimage,Gpudownimage;
cuda::GpuMat Gpuclass(Dnum,Dnum,CV_8UC3);
float *GpuOutput;
cudaMalloc((void**)&GpuOutput,sizeof(float)*5*Dnum);
output = (float*)malloc(sizeof(float)*5*Dnum);
oimage = imread(argv[1],1);
cvtColor(oimage,image,CV_BGR2YCrCb);
split(image,rgbchannels);
//Open the file for store encoding data
fstream outfile;
outfile.open("512Outcode",ios::out);
if(!outfile){
cout << "Open out file fail!!" << endl;
return 0;
}
start = clock();
//Encoding
int i,j,ll;
int x,y,k,m,s;
int RGB;
int BlockNum,ThreadNum,ImageSize;
float Emin;
Emin=6553600;
// Channel 0 (luma) is encoded at full resolution; the chroma channels use
// the half-resolution pyramid levels.
for(RGB=0;RGB<3;RGB++){
resize(rgbchannels[RGB],downimage,Size(image.cols/2,image.rows/2),0,0,INTER_LINEAR);
resize(downimage,tmpimage,Size(downimage.cols/2,downimage.rows/2),0,0,INTER_LINEAR);
if(RGB==0){
Gpuimage.upload(rgbchannels[RGB]);
Gpudownimage.upload(downimage);
BlockNum = Dnum;
ThreadNum = Dnum;
ImageSize = N;
}else{
Gpuimage.upload(downimage);
Gpudownimage.upload(tmpimage);
BlockNum = 121;
ThreadNum = 121;
ImageSize = N/2;
}
//Classify the domain block into 3 class
DomainBlockClassify<<<BlockNum,ThreadNum>>>(Gpudownimage,Gpuclass);
//For each Range, calculate s,m value
for(i=0;i<ImageSize;i+=Rb){
for(j=0;j<ImageSize;j+=Rb){
RangeParallel<<<BlockNum,ThreadNum>>>(Gpuimage,Gpudownimage,Gpuclass,GpuOutput,i,j);
cudaMemcpy2D(output,sizeof(float)*5,GpuOutput,sizeof(float)*5,sizeof(float)*5,BlockNum,cudaMemcpyDeviceToHost);
// Scan all per-row winners for the global best match of this range block.
for(ll=0;ll<BlockNum;ll++){
if(output[ll*5+4] <= Emin){
Emin = output[ll*5+4];
x = ll;
y = output[ll*5];
k= output[ll*5+1];
s= output[ll*5+2];
m= output[ll*5+3];
}
}
Emin = 6553600;
// Pack the code: k occupies the top 3 bits, s the low 5 bits of one byte.
outfile << (char)x << (char)y << (char)m << (char)((k<<5)+s);
}
}
}
//Release the memory
outfile.close();
Gpuimage.release();
Gpudownimage.release();
Gpuclass.release();
cudaFree(GpuOutput);
free(output);
end = clock();
//Print time and the remain memory
cudaError_t cudaErr;
totaltime = end-start;
double sec = (double) totaltime / CLOCKS_PER_SEC;
cout <<"Time:" << sec << endl;
cudaErr = cudaMemGetInfo(&free_mem, &total_mem);
if(cudaErr != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(cudaErr), __FILE__, __LINE__);
}
cout << "free:" << free_mem << endl;
cout << "total:" << total_mem << endl;
return 0;
} |
9be82c181997d0066b69917a4b457604bd447422.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/arithmetic_ops.h"
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/op_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/routines/math.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
// CUDA (HIP-translated) implementations of the elementwise binary arithmetic
// ops: Add, Subtract, Multiply, FloorDivide and Divide, each in an
// array-array variant and an array-scalar ("AS") variant.  Every op follows
// the same pattern: a per-element __device__ functor is dispatched through
// Elementwise<> after both inputs have been cast to the output dtype, and the
// op class is registered with CHAINERX_CUDA_REGISTER_OP.

// Functor: out = x1 + x2, evaluated per element (index unused).
template <typename T>
struct AddImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); }
};
// Array + array.  Inputs are cast to out.dtype() before launch; runs for all
// dtypes (VisitDtype).
class CudaAddOp : public AddOp {
public:
// TODO(sonots): support stream
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(AddImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(AddOp, CudaAddOp);
// Functor: out = x1 + x2 where x2 is a scalar captured by value in the functor.
template <typename T>
struct AddASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); }
CudaType x2;
};
// Array + scalar.
class CudaAddASOp : public AddASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(AddASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(AddASOp, CudaAddASOp);
// Functor: out = x1 - x2.
template <typename T>
struct SubtractImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }
};
// Array - array.  Note: numeric dtypes only (VisitNumericDtype), unlike Add.
class CudaSubtractOp : public SubtractOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(SubtractImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(SubtractOp, CudaSubtractOp);
// Functor: out = x1 - x2 with scalar x2.
template <typename T>
struct SubtractASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }
CudaType x2;
};
// Array - scalar.  Numeric dtypes only.
class CudaSubtractASOp : public SubtractASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(SubtractASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(SubtractASOp, CudaSubtractASOp);
// Functor: out = x1 * x2.
template <typename T>
struct MultiplyImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); }
};
// TODO(sonots): support stream
// Array * array.  All dtypes.
class CudaMultiplyOp : public MultiplyOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(MultiplyImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(MultiplyOp, CudaMultiplyOp);
// Functor: out = x1 * x2 with scalar x2.
template <typename T>
struct MultiplyASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); }
CudaType x2;
};
// Array * scalar.
class CudaMultiplyASOp : public MultiplyASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(MultiplyASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(MultiplyASOp, CudaMultiplyASOp);
// CUDA does not have std::div.
// Floor-division helpers: integer overloads correct truncating division so the
// quotient rounds toward negative infinity; the unsigned overload needs no
// correction; float/double/half subtract the remainder before dividing and
// adjust when the remainder and divisor have opposite signs.
__device__ int8_t FloorDivide(int8_t x, int8_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ int16_t FloorDivide(int16_t x, int16_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ int32_t FloorDivide(int32_t x, int32_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ int64_t FloorDivide(int64_t x, int64_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ uint8_t FloorDivide(uint8_t x, uint8_t y) { return x / y; }
__device__ float FloorDivide(float x, float y) {
float rem = ::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
__device__ double FloorDivide(double x, double y) {
double rem = ::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
// Half floor-division is computed in float precision.
__device__ cuda::Float16 FloorDivide(cuda::Float16 x, cuda::Float16 y) {
return cuda::Float16{FloorDivide(static_cast<float>(x), static_cast<float>(y))};
}
// Functor: out = floor(x1 / x2) via the overloads above.
template <typename T>
struct FloorDivideImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
};
// Array floor-divide array.  Numeric dtypes only.
class CudaFloorDivideOp : public FloorDivideOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(FloorDivideImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(FloorDivideOp, CudaFloorDivideOp);
// Functor: out = floor(x1 / x2) with scalar x2.
template <typename T>
struct FloorDivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
CudaType x2;
};
// Array floor-divide scalar.  Numeric dtypes only.
class CudaFloorDivideASOp : public FloorDivideASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(FloorDivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(FloorDivideASOp, CudaFloorDivideASOp);
// Functor: out = x1 / x2 (true division semantics of ArithmeticOps::Divide).
template <typename T>
struct DivideImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
};
// Array / array.  All dtypes.
class CudaDivideOp : public DivideOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(DivideImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(DivideOp, CudaDivideOp);
// Functor: out = x1 / x2 with scalar x2.
template <typename T>
struct DivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
CudaType x2;
};
// Array / scalar.
class CudaDivideASOp : public DivideASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(DivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(DivideASOp, CudaDivideASOp);
}  // namespace
}  // namespace cuda
}  // namespace chainerx
| 9be82c181997d0066b69917a4b457604bd447422.cu | #include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/arithmetic_ops.h"
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/op_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/routines/math.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
// CUDA implementations of the elementwise binary arithmetic ops: Add,
// Subtract, Multiply, FloorDivide and Divide, each in an array-array variant
// and an array-scalar ("AS") variant.  Every op follows the same pattern: a
// per-element __device__ functor is dispatched through Elementwise<> after
// both inputs have been cast to the output dtype, and the op class is
// registered with CHAINERX_CUDA_REGISTER_OP.

// Functor: out = x1 + x2, evaluated per element (index unused).
template <typename T>
struct AddImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); }
};
// Array + array.  Inputs are cast to out.dtype() before launch; runs for all
// dtypes (VisitDtype).
class CudaAddOp : public AddOp {
public:
// TODO(sonots): support stream
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(AddImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(AddOp, CudaAddOp);
// Functor: out = x1 + x2 where x2 is a scalar captured by value in the functor.
template <typename T>
struct AddASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); }
CudaType x2;
};
// Array + scalar.
class CudaAddASOp : public AddASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(AddASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(AddASOp, CudaAddASOp);
// Functor: out = x1 - x2.
template <typename T>
struct SubtractImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }
};
// Array - array.  Note: numeric dtypes only (VisitNumericDtype), unlike Add.
class CudaSubtractOp : public SubtractOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(SubtractImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(SubtractOp, CudaSubtractOp);
// Functor: out = x1 - x2 with scalar x2.
template <typename T>
struct SubtractASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }
CudaType x2;
};
// Array - scalar.  Numeric dtypes only.
class CudaSubtractASOp : public SubtractASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(SubtractASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(SubtractASOp, CudaSubtractASOp);
// Functor: out = x1 * x2.
template <typename T>
struct MultiplyImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); }
};
// TODO(sonots): support stream
// Array * array.  All dtypes.
class CudaMultiplyOp : public MultiplyOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(MultiplyImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(MultiplyOp, CudaMultiplyOp);
// Functor: out = x1 * x2 with scalar x2.
template <typename T>
struct MultiplyASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); }
CudaType x2;
};
// Array * scalar.
class CudaMultiplyASOp : public MultiplyASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(MultiplyASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(MultiplyASOp, CudaMultiplyASOp);
// CUDA does not have std::div.
// Floor-division helpers: integer overloads correct truncating division so the
// quotient rounds toward negative infinity; the unsigned overload needs no
// correction; float/double/half subtract the remainder before dividing and
// adjust when the remainder and divisor have opposite signs.
__device__ int8_t FloorDivide(int8_t x, int8_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ int16_t FloorDivide(int16_t x, int16_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ int32_t FloorDivide(int32_t x, int32_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ int64_t FloorDivide(int64_t x, int64_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); }
__device__ uint8_t FloorDivide(uint8_t x, uint8_t y) { return x / y; }
__device__ float FloorDivide(float x, float y) {
float rem = std::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
__device__ double FloorDivide(double x, double y) {
double rem = std::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
// Half floor-division is computed in float precision.
__device__ cuda::Float16 FloorDivide(cuda::Float16 x, cuda::Float16 y) {
return cuda::Float16{FloorDivide(static_cast<float>(x), static_cast<float>(y))};
}
// Functor: out = floor(x1 / x2) via the overloads above.
template <typename T>
struct FloorDivideImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
};
// Array floor-divide array.  Numeric dtypes only.
class CudaFloorDivideOp : public FloorDivideOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(FloorDivideImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(FloorDivideOp, CudaFloorDivideOp);
// Functor: out = floor(x1 / x2) with scalar x2.
template <typename T>
struct FloorDivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
CudaType x2;
};
// Array floor-divide scalar.  Numeric dtypes only.
class CudaFloorDivideASOp : public FloorDivideASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(FloorDivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(FloorDivideASOp, CudaFloorDivideASOp);
// Functor: out = x1 / x2 (true division semantics of ArithmeticOps::Divide).
template <typename T>
struct DivideImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
};
// Array / array.  All dtypes.
class CudaDivideOp : public DivideOp {
public:
void Call(const Array& x1, const Array& x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(DivideImpl<T>{}, x1_cast, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(DivideOp, CudaDivideOp);
// Functor: out = x1 / x2 with scalar x2.
template <typename T>
struct DivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
CudaType x2;
};
// Array / scalar.
class CudaDivideASOp : public DivideASOp {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(DivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_OP(DivideASOp, CudaDivideASOp);
}  // namespace
}  // namespace cuda
}  // namespace chainerx
|
5012f6c0fd8ee3f60608799674af3d89f2d68ffb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/native/hip/PersistentSoftmax.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/hip/sdp_utils.h>
#ifdef USE_FLASH_ATTENTION
#include <ATen/native/transformers/hip/flash_attn/fmha_api.h>
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h>
#endif
namespace at {
namespace native {
namespace {
#define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \
{ \
if (VALUE_HEAD_DIM <= 64) { \
constexpr bool kIs64x64 = true; \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kIs64x64 = false; \
if (VALUE_HEAD_DIM <= 128) { \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kSingleValueIteration = false; \
FN(); \
} \
} \
}
#define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \
{ \
hipDeviceProp_t* properties = \
at::cuda::getDeviceProperties(QUERY.device().index()); \
const int computeCapability = properties->major * 10 + properties->minor; \
DISPATCH_BLOCKSIZE( \
VALUE.size(-1), ([&]() { \
static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \
static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \
DISPATCH_TYPES( \
QUERY, ([&]() { \
DISPATCH_ARCHTAG( \
computeCapability, ([&]() { \
using AlignedAK = AttentionKernel< \
scalar_t, \
ArchTag, \
true, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
/* Run a more efficient kernel (with `isAligned=True`) \
if memory is correctly aligned*/ \
bool isAligned = \
(QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \
KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \
VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \
/* TODO: Should we warn or log somewhere when we use a \
less efficient kernel due to wrong alignment? */ \
DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \
using Kernel = AttentionKernel< \
scalar_t, \
ArchTag, \
kIsAligned, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
FUNC(); \
})) \
})) \
})); \
})); \
}
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
// Fused bias-add and query rescale for a packed QKV projection.
// Splits qkv[B, T, 3*D] + qkv_bias[3*D] into q_k_v[3, B, NH, T, DH], adding
// the bias to each of q/k/v and multiplying q by inv_sqrt_dim_per_head.
// NOTE(review): the indexing below (t = blockIdx.x % T, b = blockIdx.x / T,
// threads striding over D) implies gridDim.x == B * T with blockDim.x threads
// covering the feature dimension; the "warp per DH" comment below looks stale
// — confirm against the host-side launch configuration.
// When assume_aligned is true, the vectorized path requires
// D % TRANSFORM_BIAS_RESCALE_VEC == 0 and DH % TRANSFORM_BIAS_RESCALE_VEC == 0
// (see the in-body comments on the vector loads/stores).
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
if (assume_aligned) {
// Vectorized path: each thread handles VEC consecutive features at once.
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
// Accumulate in accscalar_t to avoid precision loss for half inputs;
// only q is scaled by 1/sqrt(dim_per_head).
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
// Fused bias-add and query rescale for a *nested* (packed, variable sequence
// length) QKV buffer, writing into a dense, padded q_k_v[3, B, NH, T, DH].
// - qkv is a 1-D NestedTensor buffer; offsets[b] gives batch b's start within
//   it and input_sizes[b] (via sizes_i[0]) the number of valid elements.
// - Positions beyond a sequence's valid length are zero-filled (padding).
// - q is additionally scaled by inv_sqrt_dim_per_head; accumulation happens
//   in accscalar_t to limit precision loss for half inputs.
// Indexing implies gridDim.x == B * T with blockDim.x threads striding the
// feature dimension D; when assume_aligned is true, D % VEC == 0 and
// DH % VEC == 0 are required for the vectorized loads/stores.
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
// Start of batch b's data within the packed buffer.
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Offsets relative to the start of batch b; bounds checks compare these
// against sizes_i[0] (the batch-local valid-element count).
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
// Fast path: the whole vector is valid data.
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
// Straddling path: the vector crosses the end of valid data; handle
// element 0 then check each remaining lane individually.
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
// BUG FIX: the bounds check must use the batch-relative offset
// (first_item_offset + ii), not the absolute buffer offset
// `loop_offset`, which includes offset_for_batch and therefore
// wrongly classified valid tail elements of every batch b > 0 as
// padding (zeroing them). The load itself correctly uses the
// absolute `loop_offset`.
if (first_item_offset + ii < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
// Entirely past the valid data: write zero padding.
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Scalar path: one element per thread iteration, per-element bounds check.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
// Collapse columns 0 and 1 of a nested-tensor sizes matrix into their
// elementwise product, returned as a contiguous tensor.
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
const auto first_col = at::native::narrow_symint(sizes, 1, 0, 1);
const auto second_col = at::native::narrow_symint(sizes, 1, 1, 1);
auto collapsed = first_col * second_col;
return collapsed.contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
//
// Input:  qkv      — [B, T, 3 * D] dense tensor, or a NestedTensor with that
//                    logical shape; qkv_bias — [3 * D].
// Output: q, k, v  — three [B, num_head, T, dim_per_head] tensors, produced
//                    as splits of one contiguous [3, B, NH, T, DH] buffer.
// Dispatches to a vectorized kernel when alignment permits, and to a
// padding-aware kernel when the input is a NestedTensor.
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
    const Tensor& qkv,
    const Tensor& qkv_bias,
    const int64_t num_head) {
  // B: batch size; for nested input, read it from the nested size tensor.
  auto B = qkv.is_nested()
      ? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0)
      : qkv.size(0);
  // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
  // this too
  auto T = qkv.is_nested()
      ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
      : qkv.size(1);
  if (qkv.is_nested()) {
    // Don't mess with non-nested case for now since it's not set up to fiddle
    // with mask size.
    // Round T up to next multiple of 8 so as to be able to utilize Tensor
    // cores. Otherwise, sometimes with padding, *no* row will have the maximum
    // sequence length and so we'll have a non-divisible-by-8 dimension even if
    // the model author chose a multiple of 8.
    T = T + (8 - (T % 8)) % 8;
  }
  auto _3D = qkv_bias.size(0);
  auto D = _3D / 3;
  TORCH_CHECK(D % num_head == 0);
  const auto dim_per_head = D / num_head;
  // Single allocation for all three outputs; split at the end.
  auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned)                                        \
  hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \
      , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),  \
          qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(),         \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),       \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned)                         \
  hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel<   \
      scalar_t,                                                         \
      accscalar_t,                                                      \
      assume_aligned>)                                                  \
      , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),  \
          nt_qkv_buffer                                                 \
              .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),     \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
          offsets_ptr,                                                  \
          sizes_ptr,                                                    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),    \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      qkv.scalar_type(),
      "transform_bias_rescale_qkv",
      [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        // One block per (b, t); threads cover D (in VEC-wide chunks when
        // the vectorized path is taken).
        auto threads = ::max(
            std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
        auto blocks = B * T;
        // Vectorized path requires both the head dim and the bias pointer
        // to be aligned to the vector width.
        const bool aligned =
            ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
            ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
              TRANSFORM_BIAS_RESCALE_VEC) == 0);
        if (aligned) {
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
              D % TRANSFORM_BIAS_RESCALE_VEC == 0,
              "D = num_heads * dim_per_head, so we should have dim_per_head % "
              "TRANSFORM_BIAS_RESCALE_VEC == 0 => "
              "D % TRANSFORM_BIAS_RESCALE_VEC == 0");
        }
        if (qkv.is_nested()) {
          // Nested input: build a single device-side int32 metadata tensor
          // laid out as [offsets (numel+1) ..., sizes (numel)].
          auto* nt_qkv = get_nested_tensor_impl(qkv);
          const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
          auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor());
          auto offsets =
              NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
          at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
              .copy_(sizes.reshape({-1}))
          auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
          const auto offsets_ptr = metadata.data_ptr<int>();
          const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
          const auto input_dim = sizes.sizes()[1];
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
          // The nested path additionally requires the qkv buffer itself to
          // be aligned before taking the vectorized kernel.
          if (aligned &&
              ((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
                TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
            CALL_ADD_PADDING_KERNEL(true);
          } else {
            CALL_ADD_PADDING_KERNEL(false);
          }
        } else if (aligned) {
          CALL_KERNEL(true);
        } else {
          CALL_KERNEL(false);
        }
        C10_HIP_KERNEL_LAUNCH_CHECK();
      });
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
  auto q_k_v_s =
      at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
  return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
// Fused multi-head attention forward for dense or nested inputs.
// query/key/value: [B, T, D] (or NestedTensor equivalents).
// Returns (output [B, T, D], attention weights — empty unless need_weights).
// Fix: the `qkv_bias` rank check message said "expected 2-D" while the check
// itself requires a 1-D bias; the message now matches the check.
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const int64_t embed_dim,
    const int64_t num_head,
    const Tensor& qkv_weight,
    const Tensor& qkv_bias,
    const Tensor& proj_weight,
    const Tensor& proj_bias,
    const c10::optional<Tensor>& mask,
    bool need_weights,
    bool average_attn_weights,
    const c10::optional<int64_t> mask_type) {
  // query shape: [B, T, D]
  // qkv_weight shape: [3 * D, D]
  TORCH_CHECK(
      !mask || !query.is_nested(),
      "NestedTensor with mask is not supported yet");
  const auto D = embed_dim;
  TORCH_CHECK(
      query.dim() == 3,
      "expected 3-D `query`, got ",
      query.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || query.sizes()[2] == embed_dim,
      "passed-in embed_dim ",
      embed_dim,
      " didn't match last dim of query ",
      query.sizes()[2]);
  TORCH_CHECK(
      key.dim() == 3,
      "expected 3-D `key`, got ",
      key.dim(),
      "-D tensor");
  TORCH_CHECK(
      value.dim() == 3,
      "expected 3-D `value`, got ",
      value.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || key.is_nested() || value.is_nested() ||
          (query.sizes() == key.sizes() && key.sizes() == value.sizes()),
      "expected `query`/`key`/`value` shapes to match");
  TORCH_CHECK(
      qkv_weight.dim() == 2,
      "expected 2-D `qkv_weight`, got ",
      qkv_weight.dim(),
      "-D tensor");
  TORCH_CHECK(
      D * 3 == qkv_weight.sizes()[0],
      "expected `qkv_weight` first dim to be 3x embed_dim");
  TORCH_CHECK(
      D == qkv_weight.sizes()[1],
      "expected `qkv_weight` second dim to be embed_Dim");
  // Bug fix: the check requires a 1-D bias; the old message claimed "2-D".
  TORCH_CHECK(
      qkv_bias.dim() == 1,
      "expected 1-D `qkv_bias`, got ",
      qkv_bias.dim(),
      "-D tensor");
  TORCH_CHECK(
      qkv_bias.sizes()[0] == 3 * D,
      "expected `qkv_bias` first dim and first dim of query to be equal");
  TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
  const auto B = query.is_nested()
      ? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0)
      : query.sizes()[0];
  auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
  const auto dim_per_head = D / num_head;
  // Fast path: self-attention (q, k, v are the same tensor) with a head dim
  // that is a multiple of 8 may go straight to a fused SDP kernel.
  if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 ) {
    // We have not done linear projection yet but the input for SDP
    // Is expected to be 4 dimensional. We "cheaply" create view tensors
    // That will then be used for checking hot path conditions with select_sd_backend
    auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, need_weights, false};
    auto backend = select_sdp_backend(kernel_params);
    if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) {
      // Project, split into q/k/v chunks, and reshape each to
      // [B, num_head, T, dim_per_head] for the fused kernel.
      auto x = at::linear(query, qkv_weight, qkv_bias);
      auto chunks = x.chunk(3, -1);
      auto x_size_0 = x.size(0);
      chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      auto y = at::_scaled_dot_product_attention(
          chunks[0], chunks[1], chunks[2], mask, 0.0, need_weights, false);
      auto past_sdp =
          std::get<0>(y).transpose(1, 2).reshape({x_size_0, -1, embed_dim});
      return std::make_tuple(
          at::linear(past_sdp, proj_weight, proj_bias), Tensor());
    }
    // Returned math or error lets not use it
  }
  // shape: [B, T, 3 x D]
  auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
  if (!qkv.is_nested() && qkv.numel() == 0) {
    // Degenerate empty input: return an empty result of matching kind.
    if (query.is_nested()) {
      return std::make_tuple(Tensor(), Tensor());
    }
    return std::make_tuple(at::empty_like(query), Tensor());
  }
#ifndef NDEBUG
  if (!query.is_nested() || !qkv.is_nested()) {
    if (query.is_nested()) {
      T = qkv.size(1);
    }
    debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
  }
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  if (!qkv.is_nested()) {
    std::cerr << "qkv: " << qkv << std::endl;
  }
#endif
  // shape: 3 x [B, num_head, T, dim_per_head]
  auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
  qkv = Tensor(); // Not used any more, allow free
  auto& q = std::get<0>(q_k_v);
  const auto& k = std::get<1>(q_k_v);
  const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "q: " << q << std::endl;
  std::cerr << "k: " << k << std::endl;
  std::cerr << "v: " << v << std::endl;
#endif
  // shape: [B, num_head, T, T]
  auto qkt = bmm_nt(q, k);
  // q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
  debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, T]
  // TODO: long-term, have a kernel that works with
  // NestedTensor directly if there is no mask passed
  qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, dim_per_head]
  // reuse storage for q; we're done with it
  auto attn_ctx = bmm_nn(q, qkt, v);
  // qkv is not dead; we just reused storage for q!
  if (!need_weights) {
    qkt = Tensor();
  }
#ifndef NDEBUG
  debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
  // shape: [B, T, D]
  // Fuse transform_0213 inside
  auto proj = transform0213_gemm_nt_bias(
      attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
  if (need_weights && average_attn_weights) {
    // weights are not needed for full transformer, so don't worry too
    // much about performance -- we implement this just to make use
    // cases that don't disable need_weights still get some speedup.
    qkt = qkt.sum(1);
    qkt /= num_head;
  }
  return std::make_tuple(std::move(proj), std::move(qkt));
}
std::tuple<Tensor, Tensor> flash_attention_helper_dense_unpacked(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool need_atten_weights,
bool is_causal) {
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t head_dim = query.size(3);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor cumulative_sequence_length_q = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_q,
max_seqlen_batch_q,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
Tensor cumulative_sequence_length_k = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_k,
max_seqlen_batch_k,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor attention =
at::_flash_scaled_dot_product_attention(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal);
// Reshape output to convert nnz to batch_size and seq_len
attention =
attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
return std::tuple<Tensor, Tensor>(attention, Tensor());
}
std::tuple<Tensor, Tensor> mem_eff_helper(
const Tensor& query,
const Tensor& key,
const Tensor& value){
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor attention = std::get<0>(at::_efficient_attention_forward(
q_t,
k_t,
v_t,
c10::nullopt,
c10::nullopt,
c10::nullopt,
false,
false)).transpose(1,2);
return std::make_tuple(attention, Tensor());
}
std::tuple<Tensor, Tensor> _scaled_dot_product_attention_forward_cuda(
    const Tensor& query_, const Tensor& key, const Tensor& value,
    const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal) {
  // Pick the fastest viable backend for these inputs and dispatch to it.
  sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
  const auto backend = select_sdp_backend(kernel_params);
  if (backend == sdp::SDPBackend::flash_attention) {
    return flash_attention_helper_dense_unpacked(
        query_, key, value, dropout_p, need_attn_weights, is_causal);
  }
  if (backend == sdp::SDPBackend::efficient_attention) {
    return mem_eff_helper(query_, key, value);
  }
  if (backend == sdp::SDPBackend::math) {
    return at::_scaled_dot_product_attention_math(
        query_, key, value, attn_mask_, dropout_p, need_attn_weights, is_causal);
  }
  TORCH_CHECK(false, "No viable backend for scaled_dot_product_attention was found.");
  return std::make_tuple(Tensor(), Tensor());
}
// Thin wrapper over the FlashAttention forward kernel (fmha::mha_fwd).
// Inputs are packed as [Nnz, num_heads, head_dim] with cumulative
// sequence-length tensors describing batch boundaries (see the dense
// unpacking helper above). Returns the attention output only.
Tensor flash_scaled_dot_product_attention(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const Tensor& cumulative_sequence_length_q,
    const Tensor& cumulative_sequence_length_k,
    const int64_t max_seqlen_batch_q,
    const int64_t max_seqlen_batch_k,
    double dropout_p,
    bool is_causal) {
#if defined(USE_FLASH_ATTENTION)
  // Conventional softmax scaling: 1 / sqrt(head_dim).
  auto softmax_scale = ::pow(query.size(-1), -0.5);
  std::vector<Tensor> output = fmha::mha_fwd(
      query,
      key,
      value,
      cumulative_sequence_length_q,
      cumulative_sequence_length_k,
      max_seqlen_batch_q,
      max_seqlen_batch_k,
      dropout_p,
      softmax_scale,
      false,
      is_causal,
      false,
      c10::nullopt);
  // output[0] is the attention result; the meaning of the trailing bool
  // arguments and extra outputs depends on fmha::mha_fwd — NOTE(review):
  // confirm against its signature if changing them.
  return output[0];
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return Tensor();
}
// Memory-efficient attention forward (xformers-derived kernel).
// Supports dense batched mode and a packed variable-length ("1MHK") mode
// selected by passing cu_seqlens_q/cu_seqlens_k (then batch dim must be 1).
// Returns (attention output [b, seqlen, num_heads, Kv], logsumexp).
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
    const at::Tensor& query, // [b, seqlen, num_heads, K]
    const at::Tensor& key, // [b, seqlen, num_heads, K]
    const at::Tensor& value, // [b, seqlen, num_heads, Kv]
    // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
    // position of the first query token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_q,
    // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
    // position of the first key token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_k,
    // (Mode 1MHK only) Maximum sequence length across batches
    const c10::optional<int64_t> max_seqlen_q_,
    bool compute_logsumexp,
    bool causal) {
#if defined(USE_FLASH_ATTENTION)
  // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
  // machine that is >= 5.0. In practice, this is not a problem but since
  // this would avoid runtime architecture checks, we should look into it
  TORCH_CHECK(query.dim() == 4);
  TORCH_CHECK(key.dim() == 4);
  TORCH_CHECK(value.dim() == 4);
  // Batch sizes
  TORCH_CHECK(query.size(0) == key.size(0));
  TORCH_CHECK(query.size(0) == value.size(0));
  // Sequence length
  TORCH_CHECK(key.size(1) == value.size(1));
  // Num heads
  TORCH_CHECK(query.size(2) == key.size(2));
  TORCH_CHECK(query.size(2) == value.size(2));
  // Embedding per head
  TORCH_CHECK(query.size(3) == key.size(3));
  int64_t max_seqlen_q = 0, max_seqlen_k=0;
  TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
  if (cu_seqlens_q.has_value()) {
    // Variable-length mode: sequences are packed along dim 1 with batch
    // size 1; per-sequence boundaries come from the int32 cu_seqlens.
    TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
    TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
    TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
    TORCH_CHECK(max_seqlen_q_.has_value());
    max_seqlen_q = *max_seqlen_q_;
    max_seqlen_k = 0; // Will be set inside the kernel
  } else {
    max_seqlen_q = query.size(1);
    max_seqlen_k = key.size(1);
  }
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  int64_t B = query.size(0);
  int64_t M = query.size(1);
  int64_t N = key.size(1);
  int64_t num_heads = query.size(-2);
  int64_t K = query.size(-1);
  int64_t Kv = value.size(-1);
  at::Tensor res;
  at::Tensor logsumexp;
  // Invoked once per concrete kernel specialization by DISPATCH_KERNEL;
  // allocates outputs, fills the kernel Params struct, and launches.
  auto launchKernel = [&](auto _k, int computeCapability) {
    using Kernel = decltype(_k);
    using scalar_t = typename Kernel::scalar_t;
    (void)_k;
    res = at::empty(
        {B, M, num_heads, Kv},
        query.options().dtype(
            TypeTraits<typename Kernel::output_t>::atScalarType()));
    // NOTE: Should be aligned (by padding) in case M is
    // not a good number for loading during backward
    constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
    logsumexp = at::empty(
        {B,
         num_heads,
         compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
        query.options().dtype(at::ScalarType::Float));
    typename Kernel::Params p;
    p.query_ptr = (scalar_t*)query.data_ptr();
    p.key_ptr = (scalar_t*)key.data_ptr();
    p.value_ptr = (scalar_t*)value.data_ptr();
    p.logsumexp_ptr = compute_logsumexp
        ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
        : nullptr;
    at::Tensor output_accum;
    if (Kernel::kNeedsOutputAccumulatorBuffer) {
      // Some specializations accumulate in a higher-precision scratch
      // buffer before writing the final output.
      output_accum = at::empty(
          {B, M, num_heads, Kv},
          query.options().dtype(
              TypeTraits<typename Kernel::output_accum_t>::atScalarType()));
      p.output_accum_ptr =
          (typename Kernel::output_accum_t*)output_accum.data_ptr();
    } else {
      p.output_accum_ptr = nullptr;
    }
    p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
    if (cu_seqlens_q.has_value()) {
      p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
      p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
    }
#define ASSIGN_CHECK_OVERFLOW(A, B)                                        \
  {                                                                        \
    A = B;                                                                 \
    TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
  }
    p.num_heads = num_heads;
    p.head_dim = query.size(3);
    p.head_dim_value = value.size(3);
    p.num_queries = max_seqlen_q;
    p.num_keys = max_seqlen_k;
    p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
    p.causal = causal;
    // Strides are stored in narrower fields inside Params; check each fits.
    ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
    constexpr auto kernel_fn = attention_kernel_batched<Kernel>;
    size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
    if (smem_bytes > 0xc000) {
      // Large dynamic shared memory requires an explicit opt-in.
      TORCH_INTERNAL_ASSERT(
          computeCapability >= 70,
          "This kernel requires too much shared memory on this machine!");
      AT_CUDA_CHECK(hipFuncSetAttribute(
          kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes));
    }
    Kernel::check_supported(p);
    hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p);
  };
  // Dispatch to the right kernel
  DISPATCH_KERNEL(query, key, value, ([&]() {
                    launchKernel(Kernel{}, computeCapability);
                  }));
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(res, logsumexp);
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor{}, Tensor{});
}
// Stub: the real Triton implementation is registered from Python, so
// reaching this C++ fallback is always an error.
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
  TORCH_CHECK(false, "This operator should be overridden in python before use");
  return at::Tensor();
}
} // namespace native
} // namespace at
| 5012f6c0fd8ee3f60608799674af3d89f2d68ffb.cu | #include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/cuda/sdp_utils.h>
#ifdef USE_FLASH_ATTENTION
#include <ATen/native/transformers/cuda/flash_attn/fmha_api.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h>
#endif
namespace at {
namespace native {
namespace {
// Selects compile-time tile flags from the value head dimension and invokes
// FN with `kIs64x64` and `kSingleValueIteration` in scope:
//   <=64  -> 64x64 tiles, single value iteration
//   <=128 -> smaller tiles, still single value iteration
//   >128  -> iterate over the value dimension
#define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN)                            \
  {                                                                       \
    if (VALUE_HEAD_DIM <= 64) {                                           \
      constexpr bool kIs64x64 = true;                                     \
      constexpr bool kSingleValueIteration = true;                        \
      FN();                                                               \
    } else {                                                              \
      constexpr bool kIs64x64 = false;                                    \
      if (VALUE_HEAD_DIM <= 128) {                                        \
        constexpr bool kSingleValueIteration = true;                      \
        FN();                                                             \
      } else {                                                            \
        constexpr bool kSingleValueIteration = false;                     \
        FN();                                                             \
      }                                                                   \
    }                                                                     \
  }
// Resolves the concrete AttentionKernel<...> instantiation for the given
// query/key/value: reads the device compute capability, picks tile sizes via
// DISPATCH_BLOCKSIZE, dispatches on dtype and arch, and chooses the aligned
// kernel variant when Q/K/V strides satisfy its alignment requirements.
// The selected type is visible to FUNC as `Kernel`, and the device's
// capability as `computeCapability`.
#define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC)                              \
  {                                                                           \
    cudaDeviceProp* properties =                                              \
        at::cuda::getDeviceProperties(QUERY.device().index());                \
    const int computeCapability = properties->major * 10 + properties->minor; \
    DISPATCH_BLOCKSIZE(                                                       \
        VALUE.size(-1), ([&]() {                                              \
          static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32;     \
          static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128;       \
          DISPATCH_TYPES(                                                     \
              QUERY, ([&]() {                                                 \
                DISPATCH_ARCHTAG(                                             \
                    computeCapability, ([&]() {                               \
                      using AlignedAK = AttentionKernel<                      \
                          scalar_t,                                           \
                          ArchTag,                                            \
                          true,                                               \
                          kQueriesPerBlock,                                   \
                          kKeysPerBlock,                                      \
                          kSingleValueIteration>;                             \
                      /* Run a more efficient kernel (with `isAligned=True`) \
                         if memory is correctly aligned*/                     \
                      bool isAligned =                                        \
                          (QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 &&   \
                           KEY.stride(2) % AlignedAK::kAlignmentK == 0 &&     \
                           VALUE.stride(2) % AlignedAK::kAlignmentV == 0);    \
                      /* TODO: Should we warn or log somewhere when we use a \
                         less efficient kernel due to wrong alignment? */     \
                      DISPATCH_BOOL(isAligned, kIsAligned, ([&]() {           \
                        using Kernel = AttentionKernel<                       \
                            scalar_t,                                         \
                            ArchTag,                                          \
                            kIsAligned,                                       \
                            kQueriesPerBlock,                                 \
                            kKeysPerBlock,                                    \
                            kSingleValueIteration>;                           \
                        FUNC();                                               \
                      }))                                                     \
                  }))                                                         \
                }));                                                          \
        }));                                                                  \
  }
// Width (in elements) of the vectorized loads/stores used by the
// transform_bias_rescale kernels below.
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
// Splits a fused QKV projection into separate q/k/v tensors, adding the bias
// and pre-scaling q by 1/sqrt(dim_per_head). One block per (batch, time)
// pair; threads stride over the model dim D = NH * DH.
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
    // [B, T, 3 * D]
    const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  // warp per DH.
  // so launch B * NH * T warps.
  auto NH = q_k_v.size(2);
  auto T = q_k_v.size(3);
  auto DH = q_k_v.size(4);
  // blockIdx.x encodes the (batch, time) pair: one block per (b, t).
  auto t = blockIdx.x % T;
  auto b = blockIdx.x / T;
  auto D = NH * DH;
  if (assume_aligned) {
    // Vectorized path: VEC elements per thread via aligned vector
    // loads/stores; host-side launcher guarantees the alignment and the
    // divisibility requirements noted below.
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      *reinterpret_cast<LoadT*>(&qkv_q) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_k) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_v) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
      // TODO: specialize for float2half2/half2float2?
      for (auto ii = 0; ii < VEC; ++ii) {
        // Accumulate in accscalar_t; only q is rescaled.
        qkv_q[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[ii]) +
             static_cast<accscalar_t>(qkv_bias_q[ii])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[ii]) +
             static_cast<accscalar_t>(qkv_bias_k[ii])));
        qkv_v[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[ii]) +
             static_cast<accscalar_t>(qkv_bias_v[ii])));
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Same as above, but we can't vectorize memory access.
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      scalar_t qkv_q = qkv[b][t][d + 0 * D];
      scalar_t qkv_k = qkv[b][t][d + 1 * D];
      scalar_t qkv_v = qkv[b][t][d + 2 * D];
      qkv_q = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_q) +
           static_cast<accscalar_t>(qkv_bias_q)) *
          static_cast<accscalar_t>(inv_sqrt_dim_per_head));
      qkv_k = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_k) +
           static_cast<accscalar_t>(qkv_bias_k)));
      qkv_v = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_v) +
           static_cast<accscalar_t>(qkv_bias_v)));
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
// Like transform_bias_rescale_qkv_kernel, but reads from a NestedTensor
// buffer and zero-pads positions past each sequence's true length.
//   qkv:         flat NestedTensor buffer, logically [B, T, 3 * D]
//   offsets:     per-batch start offsets into the flat buffer
//   input_sizes: per-batch element counts (T_i * 3 * D)
// One block per (batch, padded-time) pair; threads stride over D.
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
    // [B, T, 3 * D], but it's a NestedTensor buffer
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    const int* offsets,
    const int* input_sizes,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  // warp per DH.
  // so launch B * NH * T warps.
  const auto NH = q_k_v.size(2);
  const auto T = q_k_v.size(3);
  const auto DH = q_k_v.size(4);
  const auto t = blockIdx.x % T;
  const auto b = blockIdx.x / T;
  const auto D = NH * DH;
  const auto _3D = 3 * D;
  const auto offset_for_batch = offsets[b];
  const auto input_dim = 1;
  // sizes_i[0] is the number of valid elements in batch item b; all bounds
  // checks below compare *within-batch* offsets against it.
  const auto* sizes_i = input_sizes + b * input_dim;
  if (assume_aligned) {
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      const auto first_item_offset = t * _3D + d;
      const auto last_item_offset = first_item_offset + VEC - 1;
      const bool first_item_in_bounds = first_item_offset < sizes_i[0];
      const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      if (entire_vec_in_bounds) {
        const auto offset = offset_for_batch + first_item_offset;
        *reinterpret_cast<LoadT*>(&qkv_q) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
        *reinterpret_cast<LoadT*>(&qkv_k) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
        *reinterpret_cast<LoadT*>(&qkv_v) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
        // TODO: specialize for float2half2/half2float2?
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_q[ii]) +
               static_cast<accscalar_t>(qkv_bias_q[ii])) *
              static_cast<accscalar_t>(inv_sqrt_dim_per_head));
          qkv_k[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_k[ii]) +
               static_cast<accscalar_t>(qkv_bias_k[ii])));
          qkv_v[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_v[ii]) +
               static_cast<accscalar_t>(qkv_bias_v[ii])));
        }
      } else if (first_item_in_bounds) {
        // Partial vector: this sequence ends inside the VEC-wide chunk, so
        // fall back to scalar loads with per-element bounds checks.
        const auto offset = offset_for_batch + first_item_offset;
        qkv_q[0] = qkv[offset + 0 * D];
        qkv_k[0] = qkv[offset + 1 * D];
        qkv_v[0] = qkv[offset + 2 * D];
        qkv_q[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[0]) +
             static_cast<accscalar_t>(qkv_bias_q[0])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[0]) +
             static_cast<accscalar_t>(qkv_bias_k[0])));
        qkv_v[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[0]) +
             static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
        for (auto ii = 1; ii < VEC; ++ii) {
          // BUG FIX: bound the *within-batch* item offset
          // (first_item_offset + ii), matching first_item_in_bounds above
          // and the scalar path's `item_offset < sizes_i[0]`. The previous
          // code compared the absolute buffer offset (which includes
          // offset_for_batch) against the per-batch size, wrongly zeroing
          // valid elements for every batch item after the first.
          if (first_item_offset + ii < sizes_i[0]) {
            const auto loop_offset = offset + ii;
            qkv_q[ii] = qkv[loop_offset + 0 * D];
            qkv_k[ii] = qkv[loop_offset + 1 * D];
            qkv_v[ii] = qkv[loop_offset + 2 * D];
            qkv_q[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_q[ii]) +
                 static_cast<accscalar_t>(qkv_bias_q[ii])) *
                static_cast<accscalar_t>(inv_sqrt_dim_per_head));
            qkv_k[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_k[ii]) +
                 static_cast<accscalar_t>(qkv_bias_k[ii])));
            qkv_v[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_v[ii]) +
                 static_cast<accscalar_t>(qkv_bias_v[ii])));
          } else {
            qkv_q[ii] = 0;
            qkv_k[ii] = 0;
            qkv_v[ii] = 0;
          }
        }
      } else {
        // Entire chunk lies past the end of this sequence: emit zeros.
#pragma unroll
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = 0;
          qkv_k[ii] = 0;
          qkv_v[ii] = 0;
        }
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Scalar fallback path (no alignment assumptions).
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      const auto item_offset = t * _3D + d;
      const bool in_bounds = item_offset < sizes_i[0];
      scalar_t qkv_q, qkv_k, qkv_v;
      if (in_bounds) {
        const auto qkv_offset = offset_for_batch + item_offset;
        qkv_q = qkv[qkv_offset + 0 * D];
        qkv_k = qkv[qkv_offset + 1 * D];
        qkv_v = qkv[qkv_offset + 2 * D];
        qkv_q = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q) +
             static_cast<accscalar_t>(qkv_bias_q)) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k) +
             static_cast<accscalar_t>(qkv_bias_k)));
        qkv_v = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v) +
             static_cast<accscalar_t>(qkv_bias_v)));
      } else {
        qkv_q = 0;
        qkv_k = 0;
        qkv_v = 0;
      }
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
  // Fold dims 1 and 2 of the nested size tensor into one column by
  // multiplying the two per-sample size columns elementwise.
  const auto first_col = at::native::narrow_symint(sizes, 1, 0, 1);
  const auto second_col = at::native::narrow_symint(sizes, 1, 1, 1);
  auto merged = first_col * second_col;
  return merged.contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
//
// Splits a packed QKV projection of shape [B, T, 3*D] (dense or nested) into
// three tensors of shape [B, num_head, T, dim_per_head], adding the bias and
// pre-scaling q by 1/sqrt(dim_per_head). Dispatches to a vectorized kernel
// when pointers/dims are aligned to TRANSFORM_BIAS_RESCALE_VEC, and to a
// padding variant for nested (ragged) inputs.
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
    const Tensor& qkv,
    const Tensor& qkv_bias,
    const int64_t num_head) {
  auto B = qkv.is_nested()
      ? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0)
      : qkv.size(0);
  // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
  // this too
  auto T = qkv.is_nested()
      ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
      : qkv.size(1);
  if (qkv.is_nested()) {
    // Don't mess with non-nested case for now since it's not set up to fiddle
    // with mask size.
    // Round T up to next multiple of 8 so as to be able to utilize Tensor
    // cores. Otherwise, sometimes with padding, *no* row will have the maximum
    // sequence length and so we'll have a non-divisible-by-8 dimension even if
    // the model author chose a multiple of 8.
    T = T + (8 - (T % 8)) % 8;
  }
  auto _3D = qkv_bias.size(0);   // bias is packed as [3 * D]
  auto D = _3D / 3;
  TORCH_CHECK(D % num_head == 0);
  const auto dim_per_head = D / num_head;
  auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
// Launch helper for the dense-input kernel; assume_aligned selects the
// vectorized-load template instantiation.
#define CALL_KERNEL(assume_aligned)                                        \
  transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \
      <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(          \
          qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(),         \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),       \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
// Launch helper for the nested-input kernel, which reads the ragged buffer
// via per-batch offsets/sizes and writes zero padding past each row's length.
#define CALL_ADD_PADDING_KERNEL(assume_aligned)                         \
  transform_bias_rescale_qkv_add_padding_kernel<                        \
      scalar_t,                                                         \
      accscalar_t,                                                      \
      assume_aligned>                                                   \
      <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(       \
          nt_qkv_buffer                                                 \
              .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),     \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
          offsets_ptr,                                                  \
          sizes_ptr,                                                    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),    \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      qkv.scalar_type(),
      "transform_bias_rescale_qkv",
      [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        // One block per (batch, time) pair; threads cover D in VEC-wide chunks.
        auto threads = std::max(
            std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
        auto blocks = B * T;
        const bool aligned =
            ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
            ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
              TRANSFORM_BIAS_RESCALE_VEC) == 0);
        if (aligned) {
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
              D % TRANSFORM_BIAS_RESCALE_VEC == 0,
              "D = num_heads * dim_per_head, so we should have dim_per_head % "
              "TRANSFORM_BIAS_RESCALE_VEC == 0 => "
              "D % TRANSFORM_BIAS_RESCALE_VEC == 0");
        }
        if (qkv.is_nested()) {
          auto* nt_qkv = get_nested_tensor_impl(qkv);
          const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
          auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor());
          auto offsets =
              NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
          // Append the per-batch sizes after the offsets so one device copy
          // carries both arrays; sizes_ptr points past the offsets below.
          at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
              .copy_(sizes.reshape({-1}));
          auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
          const auto offsets_ptr = metadata.data_ptr<int>();
          const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
          const auto input_dim = sizes.sizes()[1];
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
          if (aligned &&
              ((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
                TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
            CALL_ADD_PADDING_KERNEL(true);
          } else {
            CALL_ADD_PADDING_KERNEL(false);
          }
        } else if (aligned) {
          CALL_KERNEL(true);
        } else {
          CALL_KERNEL(false);
        }
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
  // Split the leading 3*B dim back into the q, k, v views.
  auto q_k_v_s =
      at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
  return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
// Multi-head attention forward for dense or nested [B, T, D] inputs.
// qkv_weight is the packed [3*D, D] projection; proj_weight/proj_bias form the
// output projection. Returns (output [B, T, D], attention weights — undefined
// Tensor unless need_weights). When query/key/value alias the same tensor and
// a fused SDP backend is eligible, the whole computation is routed through
// _scaled_dot_product_attention instead of the explicit bmm/softmax/bmm
// pipeline below.
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const int64_t embed_dim,
    const int64_t num_head,
    const Tensor& qkv_weight,
    const Tensor& qkv_bias,
    const Tensor& proj_weight,
    const Tensor& proj_bias,
    const c10::optional<Tensor>& mask,
    bool need_weights,
    bool average_attn_weights,
    const c10::optional<int64_t> mask_type) {
  // query shape: [B, T, D]
  // qkv_weight shape: [3 * D, D]
  TORCH_CHECK(
      !mask || !query.is_nested(),
      "NestedTensor with mask is not supported yet");
  const auto D = embed_dim;
  TORCH_CHECK(
      query.dim() == 3,
      "expected 3-D `query`, got ",
      query.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || query.sizes()[2] == embed_dim,
      "passed-in embed_dim ",
      embed_dim,
      " didn't match last dim of query ",
      query.sizes()[2]);
  TORCH_CHECK(
      key.dim() == 3,
      "expected 3-D `key`, got ",
      key.dim(),
      "-D tensor");
  TORCH_CHECK(
      value.dim() == 3,
      "expected 3-D `value`, got ",
      value.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || key.is_nested() || value.is_nested() ||
          (query.sizes() == key.sizes() && key.sizes() == value.sizes()),
      "expected `query`/`key`/`value` shapes to match");
  TORCH_CHECK(
      qkv_weight.dim() == 2,
      "expected 2-D `qkv_weight`, got ",
      qkv_weight.dim(),
      "-D tensor");
  TORCH_CHECK(
      D * 3 == qkv_weight.sizes()[0],
      "expected `qkv_weight` first dim to be 3x embed_dim");
  TORCH_CHECK(
      D == qkv_weight.sizes()[1],
      "expected `qkv_weight` second dim to be embed_Dim");
  TORCH_CHECK(
      qkv_bias.dim() == 1,
      "expected 2-D `qkv_bias`, got ",
      qkv_bias.dim(),
      "-D tensor");
  TORCH_CHECK(
      qkv_bias.sizes()[0] == 3 * D,
      "expected `qkv_bias` first dim and first dim of query to be equal");
  TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
  const auto B = query.is_nested()
      ? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0)
      : query.sizes()[0];
  auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
  const auto dim_per_head = D / num_head;
  // Fast path: self-attention (q, k, v are the same tensor) with a head dim
  // the fused SDP kernels can handle (multiple of 8).
  if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 ) {
    // We have not done linear projection yet but the input for SDP
    // Is expected to be 4 dimensional. We "cheaply" create view tensors
    // That will then be used for checking hot path conditions with select_sd_backend
    auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, need_weights, false};
    auto backend = select_sdp_backend(kernel_params);
    if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) {
      // Do the packed QKV projection, split into heads, and hand off to the
      // fused kernel; finish with the output projection.
      auto x = at::linear(query, qkv_weight, qkv_bias);
      auto chunks = x.chunk(3, -1);
      auto x_size_0 = x.size(0);
      chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      auto y = at::_scaled_dot_product_attention(
          chunks[0], chunks[1], chunks[2], mask, 0.0, need_weights, false);
      auto past_sdp =
          std::get<0>(y).transpose(1, 2).reshape({x_size_0, -1, embed_dim});
      return std::make_tuple(
          at::linear(past_sdp, proj_weight, proj_bias), Tensor());
    }
    // Returned math or error lets not use it
  }
  // shape: [B, T, 3 x D]
  auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
  if (!qkv.is_nested() && qkv.numel() == 0) {
    // Empty input: return an empty (or undefined, for nested) result.
    if (query.is_nested()) {
      return std::make_tuple(Tensor(), Tensor());
    }
    return std::make_tuple(at::empty_like(query), Tensor());
  }
#ifndef NDEBUG
  if (!query.is_nested() || !qkv.is_nested()) {
    if (query.is_nested()) {
      T = qkv.size(1);
    }
    debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
  }
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  if (!qkv.is_nested()) {
    std::cerr << "qkv: " << qkv << std::endl;
  }
#endif
  // shape: 3 x [B, num_head, T, dim_per_head]
  auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
  qkv = Tensor(); // Not used any more, allow free
  auto& q = std::get<0>(q_k_v);
  const auto& k = std::get<1>(q_k_v);
  const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "q: " << q << std::endl;
  std::cerr << "k: " << k << std::endl;
  std::cerr << "v: " << v << std::endl;
#endif
  // shape: [B, num_head, T, T]
  auto qkt = bmm_nt(q, k);
  // q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
  debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, T]
  // TODO: long-term, have a kernel that works with
  // NestedTensor directly if there is no mask passed
  qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, dim_per_head]
  // reuse storage for q; we're done with it
  auto attn_ctx = bmm_nn(q, qkt, v);
  // qkv is not dead; we just reused storage for q!
  if (!need_weights) {
    // Caller doesn't want the attention weights; release them early.
    qkt = Tensor();
  }
#ifndef NDEBUG
  debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
  // shape: [B, T, D]
  // Fuse transform_0213 inside
  auto proj = transform0213_gemm_nt_bias(
      attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
  if (need_weights && average_attn_weights) {
    // weights are not needed for full transformer, so don't worry too
    // much about performance -- we implement this just to make use
    // cases that don't disable need_weights still get some speedup.
    qkt = qkt.sum(1);
    qkt /= num_head;
  }
  return std::make_tuple(std::move(proj), std::move(qkt));
}
// Adapts dense (B, num_heads, seq, head_dim) q/k/v to the packed-varlen layout
// the flash kernel expects: transpose to batch-major, flatten batch and
// sequence into one "nnz" axis, and synthesize uniform cumulative sequence
// lengths. Returns (attention in the original layout, undefined Tensor) —
// attention weights are never materialized on this path.
std::tuple<Tensor, Tensor> flash_attention_helper_dense_unpacked(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    double dropout_p,
    bool need_atten_weights,
    bool is_causal) {
  // Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
  // Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
  // Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
  const int64_t batch_size = query.size(0);
  const int64_t num_heads = query.size(1);
  const int64_t max_seqlen_batch_q = query.size(2);
  const int64_t head_dim = query.size(3);
  const int64_t max_seqlen_batch_k = key.size(2);
  const int64_t max_seqlen_batch_v = value.size(2);
  TORCH_CHECK(
      max_seqlen_batch_k == max_seqlen_batch_v,
      "Key and Value must have the same sequence length");
  // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
  // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
  // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
  Tensor q_t = query.transpose(1, 2);
  Tensor k_t = key.transpose(1, 2);
  Tensor v_t = value.transpose(1, 2);
  // Dense input => every batch row has the full sequence length, so the
  // cumulative offsets are just 0, L, 2L, ..., B*L.
  Tensor cumulative_sequence_length_q = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_q,
      max_seqlen_batch_q,
      TensorOptions().device(at::kCUDA).dtype(at::kInt));
  Tensor cumulative_sequence_length_k = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_k,
      max_seqlen_batch_k,
      TensorOptions().device(at::kCUDA).dtype(at::kInt));
  int64_t Nnz_q{batch_size * max_seqlen_batch_q};
  int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
  // For the standard MHA these will actually be views
  Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
  Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
  Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
  Tensor attention =
      at::_flash_scaled_dot_product_attention(
          query_reshaped,
          key_reshaped,
          value_reshaped,
          cumulative_sequence_length_q,
          cumulative_sequence_length_k,
          max_seqlen_batch_q,
          max_seqlen_batch_k,
          dropout_p,
          is_causal);
  // Reshape output to convert nnz to batch_size and seq_len
  attention =
      attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
  return std::tuple<Tensor, Tensor>(attention, Tensor());
}
// Thin adapter for the memory-efficient attention kernel: inputs arrive as
// (B, num_heads, seq, dim), the kernel expects (B, seq, num_heads, dim), so
// transpose on the way in and back out. Weights are never computed.
std::tuple<Tensor, Tensor> mem_eff_helper(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value){
  const Tensor query_t = query.transpose(1, 2);
  const Tensor key_t = key.transpose(1, 2);
  const Tensor value_t = value.transpose(1, 2);
  auto result = at::_efficient_attention_forward(
      query_t,
      key_t,
      value_t,
      c10::nullopt,
      c10::nullopt,
      c10::nullopt,
      /*compute_logsumexp=*/false,
      /*causal=*/false);
  Tensor attention = std::get<0>(result).transpose(1, 2);
  return std::make_tuple(attention, Tensor());
}
// Picks the best available fused kernel for scaled dot-product attention
// (flash, memory-efficient, or the composable math fallback) based on the
// runtime inputs and dispatches to it.
std::tuple<Tensor, Tensor> _scaled_dot_product_attention_forward_cuda(
    const Tensor& query_, const Tensor& key, const Tensor& value,
    const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal) {
  sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
  const auto backend = select_sdp_backend(kernel_params);
  if (backend == sdp::SDPBackend::flash_attention) {
    return flash_attention_helper_dense_unpacked(query_, key, value, dropout_p, need_attn_weights, is_causal);
  }
  if (backend == sdp::SDPBackend::efficient_attention) {
    return mem_eff_helper(query_, key , value);
  }
  if (backend == sdp::SDPBackend::math) {
    return at::_scaled_dot_product_attention_math(query_, key, value, attn_mask_, dropout_p, need_attn_weights, is_causal);
  }
  TORCH_CHECK(false, "No viable backend for scaled_dot_product_attention was found.");
  return std::make_tuple(Tensor(), Tensor());
}
// Thin wrapper over fmha::mha_fwd. Inputs are in the packed-varlen layout
// (nnz, num_heads, head_dim) with cumulative sequence-length tensors; returns
// only the attention output. Errors out when the build lacks flash attention.
Tensor flash_scaled_dot_product_attention(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const Tensor& cumulative_sequence_length_q,
    const Tensor& cumulative_sequence_length_k,
    const int64_t max_seqlen_batch_q,
    const int64_t max_seqlen_batch_k,
    double dropout_p,
    bool is_causal) {
#if defined(USE_FLASH_ATTENTION)
  // 1/sqrt(head_dim), the standard attention logit scale.
  auto softmax_scale = std::pow(query.size(-1), -0.5);
  std::vector<Tensor> output = fmha::mha_fwd(
      query,
      key,
      value,
      cumulative_sequence_length_q,
      cumulative_sequence_length_k,
      max_seqlen_batch_q,
      max_seqlen_batch_k,
      dropout_p,
      softmax_scale,
      false,
      is_causal,
      false,
      c10::nullopt);
  return output[0];
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return Tensor();
}
// Memory-efficient attention forward. Validates shapes, selects a kernel
// instantiation via DISPATCH_KERNEL, fills its Params struct (pointers,
// strides with overflow checks), raises the dynamic shared-memory limit when
// needed, and launches. Returns (output [b, seqlen, num_heads, Kv],
// logsumexp [b, num_heads, padded seqlen or 0]).
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
    const at::Tensor& query, // [b, seqlen, num_heads, K]
    const at::Tensor& key, // [b, seqlen, num_heads, K]
    const at::Tensor& value, // [b, seqlen, num_heads, Kv]
    // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
    // position of the first query token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_q,
    // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
    // position of the first key token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_k,
    // (Mode 1MHK only) Maximum sequence length across batches
    const c10::optional<int64_t> max_seqlen_q_,
    bool compute_logsumexp,
    bool causal) {
#if defined(USE_FLASH_ATTENTION)
  // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
  // machine that is >= 5.0. In practice, this is not a problem but since
  // this would avoid runtime architecture checks, we should look into it
  TORCH_CHECK(query.dim() == 4);
  TORCH_CHECK(key.dim() == 4);
  TORCH_CHECK(value.dim() == 4);
  // Batch sizes
  TORCH_CHECK(query.size(0) == key.size(0));
  TORCH_CHECK(query.size(0) == value.size(0));
  // Sequence length
  TORCH_CHECK(key.size(1) == value.size(1));
  // Num heads
  TORCH_CHECK(query.size(2) == key.size(2));
  TORCH_CHECK(query.size(2) == value.size(2));
  // Embedding per head
  TORCH_CHECK(query.size(3) == key.size(3));
  int64_t max_seqlen_q = 0, max_seqlen_k=0;
  // Either both cumulative-length tensors are given (varlen mode) or neither.
  TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
  if (cu_seqlens_q.has_value()) {
    TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
    TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
    TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
    TORCH_CHECK(max_seqlen_q_.has_value());
    max_seqlen_q = *max_seqlen_q_;
    max_seqlen_k = 0; // Will be set inside the kernel
  } else {
    max_seqlen_q = query.size(1);
    max_seqlen_k = key.size(1);
  }
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
  at::cuda::CUDAGuard device_guard(query.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  int64_t B = query.size(0);
  int64_t M = query.size(1);
  int64_t N = key.size(1);
  int64_t num_heads = query.size(-2);
  int64_t K = query.size(-1);
  int64_t Kv = value.size(-1);
  at::Tensor res;
  at::Tensor logsumexp;
  // Invoked by DISPATCH_KERNEL with the concrete Kernel type chosen for the
  // input dtype/arch; allocates outputs and launches that instantiation.
  auto launchKernel = [&](auto _k, int computeCapability) {
    using Kernel = decltype(_k);
    using scalar_t = typename Kernel::scalar_t;
    (void)_k;
    res = at::empty(
        {B, M, num_heads, Kv},
        query.options().dtype(
            TypeTraits<typename Kernel::output_t>::atScalarType()));
    // NOTE: Should be aligned (by padding) in case M is
    // not a good number for loading during backward
    constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
    logsumexp = at::empty(
        {B,
         num_heads,
         compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
        query.options().dtype(at::ScalarType::Float));
    typename Kernel::Params p;
    p.query_ptr = (scalar_t*)query.data_ptr();
    p.key_ptr = (scalar_t*)key.data_ptr();
    p.value_ptr = (scalar_t*)value.data_ptr();
    p.logsumexp_ptr = compute_logsumexp
        ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
        : nullptr;
    at::Tensor output_accum;
    if (Kernel::kNeedsOutputAccumulatorBuffer) {
      // Higher-precision scratch buffer the kernel accumulates into before
      // the final cast to output_t.
      output_accum = at::empty(
          {B, M, num_heads, Kv},
          query.options().dtype(
              TypeTraits<typename Kernel::output_accum_t>::atScalarType()));
      p.output_accum_ptr =
          (typename Kernel::output_accum_t*)output_accum.data_ptr();
    } else {
      p.output_accum_ptr = nullptr;
    }
    p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
    if (cu_seqlens_q.has_value()) {
      p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
      p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
    }
// Assign a 64-bit value into a possibly-narrower Params field, failing loudly
// instead of silently truncating.
#define ASSIGN_CHECK_OVERFLOW(A, B)                                            \
  {                                                                            \
    A = B;                                                                     \
    TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
  }
    p.num_heads = num_heads;
    p.head_dim = query.size(3);
    p.head_dim_value = value.size(3);
    p.num_queries = max_seqlen_q;
    p.num_keys = max_seqlen_k;
    p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
    p.causal = causal;
    ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
    constexpr auto kernel_fn = attention_kernel_batched<Kernel>;
    size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
    if (smem_bytes > 0xc000) {
      // Beyond the 48 KB static limit: opt in to larger dynamic shared memory
      // (only available on newer architectures).
      TORCH_INTERNAL_ASSERT(
          computeCapability >= 70,
          "This kernel requires too much shared memory on this machine!");
      AT_CUDA_CHECK(cudaFuncSetAttribute(
          kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes));
    }
    Kernel::check_supported(p);
    kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
  };
  // Dispatch to the right kernel
  DISPATCH_KERNEL(query, key, value, ([&]() {
                    launchKernel(Kernel{}, computeCapability);
                  }));
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(res, logsumexp);
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor{}, Tensor{});
}
// Stub: the real Triton implementation is registered from Python; reaching
// this native entry point is always an error.
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
  TORCH_CHECK(false, "This operator should be overridden in python before use");
  return Tensor();
}
} // namespace native
} // namespace at
|
4da55dd12deb43e8af5cb8efc0536b984d0cb877.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <common_functions.h>
#include <hip/hip_runtime.h>
#include "helper_cuda.h"
#include "svm.h"
// for get_Q()
// Module-level device state shared by the training-side Q-matrix kernel.
struct svm_node** d_x = NULL;   // device array of per-sample feature pointers (host addresses, rebased in-kernel)
double *d_x_square = NULL;      // device copy of per-sample squared norms (used by the RBF case)
signed char* d_y = NULL;        // device copy of the sample labels
svm_node* g_x_space = NULL;     // device copy of the flattened sparse feature storage
__constant__ __device__ struct svm_parameter d_svm_parameter;  // training kernel params (type, gamma, coef0, degree)
__constant__ __device__ struct svm_node* d_x_space;            // device base address of g_x_space
__constant__ __device__ struct svm_node* host_x_space;         // host base address of x_space, for pointer rebasing
// for CUDA_k_function()
svm_node** d_SV = NULL;         // cached device array of support-vector pointers
double* d_output;               // cached device output buffer for k_function
__constant__ __device__ struct svm_parameter d_model_parameter;  // prediction-time kernel params
// Sparse dot product of two svm_node lists terminated by index == -1.
// Both lists are assumed index-sorted; walks them in a merge-join and
// multiplies values only where the indices match.
__device__ double dot(const svm_node *px, const svm_node *py)
{
    double sum = 0;
    while(px->index != -1 && py->index != -1)
    {
        if(px->index == py->index)
        {
            sum += px->value * py->value;
            ++px;
            ++py;
        }
        else
        {
            // advance whichever list is behind
            if(px->index > py->index)
                ++py;
            else
                ++px;
        }
    }
    return sum;
}
// Integer power by binary exponentiation: returns base^times
// (returns 1.0 for times <= 0, matching the loop's behavior).
__device__ static double powi(double base, int times)
{
    double result = 1.0;
    double factor = base;
    int exponent = times;
    while(exponent > 0)
    {
        if(exponent & 1)
            result *= factor;
        factor *= factor;
        exponent /= 2;
    }
    return result;
}
// One thread per column y in [starty, problen): computes the entries of row x
// of the SVC Q matrix, Q[x][y] = y_x * y_y * K(sample_x, sample_y), using the
// kernel configured in d_svm_parameter. Pointers stored in CUDA_x are host
// addresses; they are rebased onto the device copy via host_x_space/d_x_space.
__global__ void get_Q(struct svm_node** CUDA_x, signed char* CUDA_y, double* CUDA_x_square, int x, int starty, int problen, float* output)
{
    int y = blockDim.x*blockIdx.x + threadIdx.x + starty;
    if( y>=problen )   // guard for the grid tail
        return;
    // rebase host-side svm_node pointers onto the device x_space copy
    const svm_node *px = d_x_space + (CUDA_x[x] - host_x_space);
    const svm_node *py = d_x_space + (CUDA_x[y] - host_x_space);
    double value = 0;
    switch(d_svm_parameter.kernel_type)
    {
    case LINEAR:
        value = dot(px,py);
        break;
    case POLY:
        value = powi(d_svm_parameter.gamma*dot(px,py)+d_svm_parameter.coef0,d_svm_parameter.degree);
        break;
    case RBF:
        // ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b, with the squared norms precomputed
        value = exp(-d_svm_parameter.gamma*(CUDA_x_square[x]+CUDA_x_square[y]-2*dot(px,py)));
        break;
    case SIGMOID:
        value = tanh(d_svm_parameter.gamma*dot(px,py)+d_svm_parameter.coef0);
        break;
    default:
        // PRECOMPUTED (or unknown) kernels fall through with value == 0
        break;
    }
    output[y-starty] = (float)(value*CUDA_y[x]*CUDA_y[y]);
}
static int has_init = 0;  // one-shot guard for device init / x_space upload
// Uploads the flattened sparse feature storage (x_space) to the device, once.
// problen is the number of samples; each sample's node list ends at an entry
// with index == -1. Also publishes the host base pointer so device code can
// rebase host-side svm_node pointers.
void CUDA_init_model(const struct svm_node* x_space, int problen)
{
    if( !has_init )
    {
        findCudaDevice(1, NULL);
        has_init = 1;
        // Count every node, including each list's -1 terminator, so the
        // device copy preserves the termination sentinels.
        size_t elements = 0;
        const struct svm_node* pNode = x_space;
        for( int i=0; i<problen; i++ )
        {
            while( pNode->index!=-1 )
            {
                elements++;
                pNode++;
            }
            pNode++;     // step over the terminator...
            elements++;  // ...but include it in the copy size
        }
        checkCudaErrors(hipMalloc((void **)&g_x_space, sizeof(struct svm_node)*elements));
        checkCudaErrors(hipMemcpy(g_x_space, x_space, sizeof(struct svm_node)*elements, hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpyToSymbol(d_x_space, &g_x_space, sizeof(g_x_space)));
        checkCudaErrors(hipMemcpyToSymbol(host_x_space, &x_space, sizeof(x_space)));
    }
}
// Releases all device allocations owned by this module.
// Fixes: the original left g_x_space dangling (a second call would
// double-free it) and never reset has_init, so a later CUDA_init_model
// silently kept stale device data instead of re-uploading.
void CUDA_uninit_model()
{
    checkCudaErrors(hipFree(g_x_space));
    checkCudaErrors(hipFree(d_output));
    checkCudaErrors(hipFree(d_SV));
    g_x_space = NULL;   // hipFree(NULL) is a no-op, so repeated calls are safe
    d_SV = NULL;
    d_output = NULL;
    has_init = 0;       // allow CUDA_init_model to upload fresh data next time
}
// Uploads the per-training-run SVC data needed by the get_Q kernel: labels,
// squared norms, and the per-sample feature pointers (host addresses, rebased
// in-kernel), plus the kernel parameters into constant memory.
// Fix: the allocation casts were `(void ***)` — a typo for `(void **)`; the
// code only compiled via hipMalloc's template overload.
void CUDA_init_SVC_Q(int problen, const struct svm_node** x, const signed char* y, double* x_square, const svm_parameter& svm_parameter)
{
    checkCudaErrors(hipMalloc((void **)&d_y, sizeof(signed char)*problen));
    checkCudaErrors(hipMemcpy(d_y, y, sizeof(signed char)*problen, hipMemcpyHostToDevice));
    checkCudaErrors(hipMalloc((void **)&d_x_square, sizeof(double)*problen));
    checkCudaErrors(hipMemcpy(d_x_square, x_square, sizeof(double)*problen, hipMemcpyHostToDevice));
    checkCudaErrors(hipMalloc((void **)&d_x, sizeof(struct svm_node*)*problen));
    checkCudaErrors(hipMemcpy(d_x, x, sizeof(struct svm_node*)*problen, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpyToSymbol(d_svm_parameter, &svm_parameter, sizeof(svm_parameter)));
}
// Frees the device buffers created by CUDA_init_SVC_Q and clears the handles
// so a later init starts from a clean state.
void CUDA_uninit_SVC_Q()
{
    checkCudaErrors(hipFree(d_x));
    checkCudaErrors(hipFree(d_x_square));
    checkCudaErrors(hipFree(d_y));
    d_x = NULL;
    d_x_square = NULL;
    d_y = NULL;
}
// Computes row x of the Q matrix for columns [starty, endy) on the GPU and
// copies the result into output[starty..endy).
// Fix: the kernel launch and device sync were unchecked, so launch-config
// errors and asynchronous kernel faults were silently dropped.
// Note: the output buffer is allocated per call; callers invoking this in a
// tight loop may want a cached buffer instead.
void CUDA_get_Q(int x, int starty, int endy, float* output)
{
    const int threadPerBlock=256;
    const int blockPerGrid=(endy-starty+threadPerBlock-1)/threadPerBlock;  // ceil-div over the column range
    float* d_output;
    checkCudaErrors(hipMalloc((void **)&d_output, sizeof(float)*(endy-starty)));
    hipLaunchKernelGGL(( get_Q), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, d_x, d_y, d_x_square, x, starty, endy-starty, d_output);
    checkCudaErrors(hipGetLastError());       // catch bad launch configuration
    checkCudaErrors(hipDeviceSynchronize());  // catch asynchronous kernel faults
    checkCudaErrors(hipMemcpy(output+starty, d_output, sizeof(float)*(endy-starty), hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_output));
}
// One thread per support vector y: evaluates K(x, SV[y]) for prediction,
// using the kernel configured in d_model_parameter. Pointers are host
// addresses rebased onto the device x_space copy. The RBF case computes
// ||x - SV[y]||^2 directly with a sparse merge-join (no precomputed norms
// are available at prediction time).
__global__ void k_function(const svm_node* x, svm_node** SV, int modellen, double* d_output)
{
    int y = blockDim.x*blockIdx.x + threadIdx.x;
    if( y>=modellen )   // guard for the grid tail
        return;
    // rebase host-side pointers onto the device x_space copy
    svm_node* px = d_x_space + (x-host_x_space);
    const svm_node *py = d_x_space + (SV[y]-host_x_space);
    switch(d_model_parameter.kernel_type)
    {
    case LINEAR:
        d_output[y] = dot(px,py);
        break;
    case POLY:
        d_output[y] = powi(d_model_parameter.gamma*dot(px,py)+d_model_parameter.coef0,d_model_parameter.degree);
        break;
    case SIGMOID:
        d_output[y] = tanh(d_model_parameter.gamma*dot(px,py)+d_model_parameter.coef0);
        break;
    case RBF:
    {
        // squared Euclidean distance over the union of both sparse index sets
        double sum = 0;
        while(px->index != -1 && py->index !=-1)
        {
            if(px->index == py->index)
            {
                double d = px->value - py->value;
                sum += d*d;
                ++px;
                ++py;
            }
            else
            {
                // index present in only one vector: the other coordinate is 0
                if(px->index > py->index)
                {
                    sum += py->value * py->value;
                    ++py;
                }
                else
                {
                    sum += px->value * px->value;
                    ++px;
                }
            }
        }
        // drain whichever list still has entries
        while(px->index != -1)
        {
            sum += px->value * px->value;
            ++px;
        }
        while(py->index != -1)
        {
            sum += py->value * py->value;
            ++py;
        }
        d_output[y] = exp(-d_model_parameter.gamma*sum);
        break;
    }
    }
}
// Evaluates K(x, SV[j]) for all modellen support vectors into output.
// The first call uploads the SV pointer table and model parameters and sizes
// the output buffer; later calls reuse them, so SV/modellen must not change
// between calls (CUDA_k_function_cleanup resets the cache).
// Fixes: `&param` had been mangled into the HTML entity `&para;m` (a
// non-compiling encoding garble); the `(void ***)` cast was a typo for
// `(void **)`; and the launch/sync were unchecked.
void CUDA_k_function(svm_node** SV, int modellen, const svm_parameter& param, const svm_node *x, double* output)
{
    const int threadPerBlock=256;
    const int blockPerGrid=(modellen+threadPerBlock-1)/threadPerBlock;
    if( d_SV==NULL )
    {
        checkCudaErrors(hipMemcpyToSymbol(d_model_parameter, &param, sizeof(param)));
        checkCudaErrors(hipMalloc((void **)&d_SV, sizeof(struct svm_node*)*modellen));
        checkCudaErrors(hipMemcpy(d_SV, SV, sizeof(struct svm_node*)*modellen, hipMemcpyHostToDevice));
        checkCudaErrors(hipMalloc((void **)&d_output, sizeof(double)*modellen));
    }
    hipLaunchKernelGGL(( k_function), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, x, d_SV, modellen, d_output);
    checkCudaErrors(hipGetLastError());       // catch bad launch configuration
    checkCudaErrors(hipDeviceSynchronize());  // catch asynchronous kernel faults
    checkCudaErrors(hipMemcpy(output, d_output, sizeof(double)*modellen, hipMemcpyDeviceToHost));
}
// Drops the cached support-vector table and output buffer so the next
// CUDA_k_function call re-uploads fresh model data.
void CUDA_k_function_cleanup()
{
    checkCudaErrors(hipFree(d_SV));
    checkCudaErrors(hipFree(d_output));
    d_output = NULL;
    d_SV = NULL;
}
| 4da55dd12deb43e8af5cb8efc0536b984d0cb877.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <common_functions.h>
#include <cuda_runtime.h>
#include "helper_cuda.h"
#include "svm.h"
// for get_Q()
// Module-level device state shared by the training-side Q-matrix kernel.
struct svm_node** d_x = NULL;   // device array of per-sample feature pointers (host addresses, rebased in-kernel)
double *d_x_square = NULL;      // device copy of per-sample squared norms (used by the RBF case)
signed char* d_y = NULL;        // device copy of the sample labels
svm_node* g_x_space = NULL;     // device copy of the flattened sparse feature storage
__constant__ __device__ struct svm_parameter d_svm_parameter;  // training kernel params (type, gamma, coef0, degree)
__constant__ __device__ struct svm_node* d_x_space;            // device base address of g_x_space
__constant__ __device__ struct svm_node* host_x_space;         // host base address of x_space, for pointer rebasing
// for CUDA_k_function()
svm_node** d_SV = NULL;         // cached device array of support-vector pointers
double* d_output;               // cached device output buffer for k_function
__constant__ __device__ struct svm_parameter d_model_parameter;  // prediction-time kernel params
// Sparse dot product of two svm_node lists terminated by index == -1.
// Both lists are assumed index-sorted; walks them in a merge-join and
// multiplies values only where the indices match.
__device__ double dot(const svm_node *px, const svm_node *py)
{
    double sum = 0;
    while(px->index != -1 && py->index != -1)
    {
        if(px->index == py->index)
        {
            sum += px->value * py->value;
            ++px;
            ++py;
        }
        else
        {
            // advance whichever list is behind
            if(px->index > py->index)
                ++py;
            else
                ++px;
        }
    }
    return sum;
}
// Integer power by binary exponentiation: returns base^times
// (returns 1.0 for times <= 0, matching the loop's behavior).
__device__ static double powi(double base, int times)
{
    double result = 1.0;
    double factor = base;
    int exponent = times;
    while(exponent > 0)
    {
        if(exponent & 1)
            result *= factor;
        factor *= factor;
        exponent /= 2;
    }
    return result;
}
// One thread per column y in [starty, problen): computes the entries of row x
// of the SVC Q matrix, Q[x][y] = y_x * y_y * K(sample_x, sample_y), using the
// kernel configured in d_svm_parameter. Pointers stored in CUDA_x are host
// addresses; they are rebased onto the device copy via host_x_space/d_x_space.
__global__ void get_Q(struct svm_node** CUDA_x, signed char* CUDA_y, double* CUDA_x_square, int x, int starty, int problen, float* output)
{
    int y = blockDim.x*blockIdx.x + threadIdx.x + starty;
    if( y>=problen )   // guard for the grid tail
        return;
    // rebase host-side svm_node pointers onto the device x_space copy
    const svm_node *px = d_x_space + (CUDA_x[x] - host_x_space);
    const svm_node *py = d_x_space + (CUDA_x[y] - host_x_space);
    double value = 0;
    switch(d_svm_parameter.kernel_type)
    {
    case LINEAR:
        value = dot(px,py);
        break;
    case POLY:
        value = powi(d_svm_parameter.gamma*dot(px,py)+d_svm_parameter.coef0,d_svm_parameter.degree);
        break;
    case RBF:
        // ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b, with the squared norms precomputed
        value = exp(-d_svm_parameter.gamma*(CUDA_x_square[x]+CUDA_x_square[y]-2*dot(px,py)));
        break;
    case SIGMOID:
        value = tanh(d_svm_parameter.gamma*dot(px,py)+d_svm_parameter.coef0);
        break;
    default:
        // PRECOMPUTED (or unknown) kernels fall through with value == 0
        break;
    }
    output[y-starty] = (float)(value*CUDA_y[x]*CUDA_y[y]);
}
static int has_init = 0;  // one-shot guard for device init / x_space upload
// Uploads the flattened sparse feature storage (x_space) to the device, once.
// problen is the number of samples; each sample's node list ends at an entry
// with index == -1. Also publishes the host base pointer so device code can
// rebase host-side svm_node pointers.
void CUDA_init_model(const struct svm_node* x_space, int problen)
{
    if( !has_init )
    {
        findCudaDevice(1, NULL);
        has_init = 1;
        // Count every node, including each list's -1 terminator, so the
        // device copy preserves the termination sentinels.
        size_t elements = 0;
        const struct svm_node* pNode = x_space;
        for( int i=0; i<problen; i++ )
        {
            while( pNode->index!=-1 )
            {
                elements++;
                pNode++;
            }
            pNode++;     // step over the terminator...
            elements++;  // ...but include it in the copy size
        }
        checkCudaErrors(cudaMalloc((void **)&g_x_space, sizeof(struct svm_node)*elements));
        checkCudaErrors(cudaMemcpy(g_x_space, x_space, sizeof(struct svm_node)*elements, cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMemcpyToSymbol(d_x_space, &g_x_space, sizeof(g_x_space)));
        checkCudaErrors(cudaMemcpyToSymbol(host_x_space, &x_space, sizeof(x_space)));
    }
}
// Releases all device allocations owned by this module.
// Fixes: the original left g_x_space dangling (a second call would
// double-free it) and never reset has_init, so a later CUDA_init_model
// silently kept stale device data instead of re-uploading.
void CUDA_uninit_model()
{
    checkCudaErrors(cudaFree(g_x_space));
    checkCudaErrors(cudaFree(d_output));
    checkCudaErrors(cudaFree(d_SV));
    g_x_space = NULL;   // cudaFree(NULL) is a no-op, so repeated calls are safe
    d_SV = NULL;
    d_output = NULL;
    has_init = 0;       // allow CUDA_init_model to upload fresh data next time
}
// Uploads the per-training-run SVC data needed by the get_Q kernel: labels,
// squared norms, and the per-sample feature pointers (host addresses, rebased
// in-kernel), plus the kernel parameters into constant memory.
// Fix: the allocation casts were `(void ***)` — a typo for `(void **)`; the
// code only compiled via cudaMalloc's template overload.
void CUDA_init_SVC_Q(int problen, const struct svm_node** x, const signed char* y, double* x_square, const svm_parameter& svm_parameter)
{
    checkCudaErrors(cudaMalloc((void **)&d_y, sizeof(signed char)*problen));
    checkCudaErrors(cudaMemcpy(d_y, y, sizeof(signed char)*problen, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMalloc((void **)&d_x_square, sizeof(double)*problen));
    checkCudaErrors(cudaMemcpy(d_x_square, x_square, sizeof(double)*problen, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMalloc((void **)&d_x, sizeof(struct svm_node*)*problen));
    checkCudaErrors(cudaMemcpy(d_x, x, sizeof(struct svm_node*)*problen, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpyToSymbol(d_svm_parameter, &svm_parameter, sizeof(svm_parameter)));
}
// Frees the device buffers created by CUDA_init_SVC_Q and clears the handles
// so a later init starts from a clean state.
void CUDA_uninit_SVC_Q()
{
    checkCudaErrors(cudaFree(d_x));
    checkCudaErrors(cudaFree(d_x_square));
    checkCudaErrors(cudaFree(d_y));
    d_x = NULL;
    d_x_square = NULL;
    d_y = NULL;
}
// Computes row x of the Q matrix for columns [starty, endy) on the GPU and
// copies the result into output[starty..endy).
// Fix: the kernel launch and device sync were unchecked, so launch-config
// errors and asynchronous kernel faults were silently dropped.
// Note: the output buffer is allocated per call; callers invoking this in a
// tight loop may want a cached buffer instead.
void CUDA_get_Q(int x, int starty, int endy, float* output)
{
    const int threadPerBlock=256;
    const int blockPerGrid=(endy-starty+threadPerBlock-1)/threadPerBlock;  // ceil-div over the column range
    float* d_output;
    checkCudaErrors(cudaMalloc((void **)&d_output, sizeof(float)*(endy-starty)));
    get_Q<<<blockPerGrid, threadPerBlock>>>(d_x, d_y, d_x_square, x, starty, endy-starty, d_output);
    checkCudaErrors(cudaGetLastError());       // catch bad launch configuration
    checkCudaErrors(cudaDeviceSynchronize());  // catch asynchronous kernel faults
    checkCudaErrors(cudaMemcpy(output+starty, d_output, sizeof(float)*(endy-starty), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_output));
}
// One thread per support vector y: evaluates K(x, SV[y]) for prediction,
// using the kernel configured in d_model_parameter. Pointers are host
// addresses rebased onto the device x_space copy. The RBF case computes
// ||x - SV[y]||^2 directly with a sparse merge-join (no precomputed norms
// are available at prediction time).
__global__ void k_function(const svm_node* x, svm_node** SV, int modellen, double* d_output)
{
    int y = blockDim.x*blockIdx.x + threadIdx.x;
    if( y>=modellen )   // guard for the grid tail
        return;
    // rebase host-side pointers onto the device x_space copy
    svm_node* px = d_x_space + (x-host_x_space);
    const svm_node *py = d_x_space + (SV[y]-host_x_space);
    switch(d_model_parameter.kernel_type)
    {
    case LINEAR:
        d_output[y] = dot(px,py);
        break;
    case POLY:
        d_output[y] = powi(d_model_parameter.gamma*dot(px,py)+d_model_parameter.coef0,d_model_parameter.degree);
        break;
    case SIGMOID:
        d_output[y] = tanh(d_model_parameter.gamma*dot(px,py)+d_model_parameter.coef0);
        break;
    case RBF:
    {
        // squared Euclidean distance over the union of both sparse index sets
        double sum = 0;
        while(px->index != -1 && py->index !=-1)
        {
            if(px->index == py->index)
            {
                double d = px->value - py->value;
                sum += d*d;
                ++px;
                ++py;
            }
            else
            {
                // index present in only one vector: the other coordinate is 0
                if(px->index > py->index)
                {
                    sum += py->value * py->value;
                    ++py;
                }
                else
                {
                    sum += px->value * px->value;
                    ++px;
                }
            }
        }
        // drain whichever list still has entries
        while(px->index != -1)
        {
            sum += px->value * px->value;
            ++px;
        }
        while(py->index != -1)
        {
            sum += py->value * py->value;
            ++py;
        }
        d_output[y] = exp(-d_model_parameter.gamma*sum);
        break;
    }
    }
}
// Evaluates K(x, SV[j]) for all modellen support vectors into output.
// The first call uploads the SV pointer table and model parameters and sizes
// the output buffer; later calls reuse them, so SV/modellen must not change
// between calls (CUDA_k_function_cleanup resets the cache).
// Fixes: `&param` had been mangled into the HTML entity `&para;m` (a
// non-compiling encoding garble); the `(void ***)` cast was a typo for
// `(void **)`; and the launch/sync were unchecked.
void CUDA_k_function(svm_node** SV, int modellen, const svm_parameter& param, const svm_node *x, double* output)
{
    const int threadPerBlock=256;
    const int blockPerGrid=(modellen+threadPerBlock-1)/threadPerBlock;
    if( d_SV==NULL )
    {
        checkCudaErrors(cudaMemcpyToSymbol(d_model_parameter, &param, sizeof(param)));
        checkCudaErrors(cudaMalloc((void **)&d_SV, sizeof(struct svm_node*)*modellen));
        checkCudaErrors(cudaMemcpy(d_SV, SV, sizeof(struct svm_node*)*modellen, cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMalloc((void **)&d_output, sizeof(double)*modellen));
    }
    k_function<<<blockPerGrid, threadPerBlock>>>(x, d_SV, modellen, d_output);
    checkCudaErrors(cudaGetLastError());       // catch bad launch configuration
    checkCudaErrors(cudaDeviceSynchronize());  // catch asynchronous kernel faults
    checkCudaErrors(cudaMemcpy(output, d_output, sizeof(double)*modellen, cudaMemcpyDeviceToHost));
}
// Release the lazily-allocated device buffers used by CUDA_k_function and
// reset the cached pointers so a later call re-initializes them.
void CUDA_k_function_cleanup()
{
checkCudaErrors(cudaFree(d_SV));
d_SV = NULL;
checkCudaErrors(cudaFree(d_output));
d_output = NULL;
}
|
e0948f94d2280ca532c1d98cafc4012e4b8e9cb0.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=64 --no-inline
#include "hip/hip_runtime.h"
// GPUVerify test helper: rebinding the local copy of the array parameter to
// a function-local address is intentional and has no effect on the caller.
__device__ void baz (int p []){
int a;
p = &a;
}
// GPUVerify test helper: deliberately a no-op for the caller -- only the
// local pointer copy is reassigned.
__device__ void bar (int *p){
int a;
p = &a;
}
// GPUVerify test kernel (marked "pass" in the header flags): passes global
// and __shared__ arrays through device helpers that mutate only their local
// pointer copies, so the kernel has no observable effect.
__global__ void foo (int* p, int* q){
__shared__ int sharedArr [100];
__shared__ int sharedArr2 [50];
bar(p);
baz (sharedArr);
bar(q);
// Divergent branch on *q; harmless here since baz has no side effects.
if (*q){
baz(sharedArr2);
}
}
| e0948f94d2280ca532c1d98cafc4012e4b8e9cb0.cu | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include "cuda.h"
// GPUVerify test helper: rebinding the local copy of the array parameter to
// a function-local address is intentional and has no effect on the caller.
__device__ void baz (int p []){
int a;
p = &a;
}
// GPUVerify test helper: deliberately a no-op for the caller -- only the
// local pointer copy is reassigned.
__device__ void bar (int *p){
int a;
p = &a;
}
// GPUVerify test kernel (marked "pass" in the header flags): passes global
// and __shared__ arrays through device helpers that mutate only their local
// pointer copies, so the kernel has no observable effect.
__global__ void foo (int* p, int* q){
__shared__ int sharedArr [100];
__shared__ int sharedArr2 [50];
bar(p);
baz (sharedArr);
bar(q);
// Divergent branch on *q; harmless here since baz has no side effects.
if (*q){
baz(sharedArr2);
}
}
|
deefe69fc579d460df108ca24c8f63665f9aa05f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
// Thread-block tile dimensions used by the separable convolution kernel.
static const int THREADS_X = 16;
static const int THREADS_Y = 16;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const int MAX_SCONV_FILTER_LEN = 31;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
// Raw-byte constant-memory staging buffer for the filter taps, sized for the
// widest element type (double) so one buffer serves every accType.
__constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)];
// One pass of a separable 2D convolution: conv_dim==0 convolves along rows,
// conv_dim==1 along columns. Filter taps are read from the constant-memory
// buffer sFilter; fLen is a template parameter so the tap loop can unroll
// fully. Each THREADS_X x THREADS_Y block stages its input tile plus a
// 2*(fLen-1) halo (along the convolved dimension only) into shared memory.
// Batched inputs pack dims 2/3 into the grid: nBBS0/nBBS1 are the number of
// blocks per image along grid x/y.
template<typename T, typename accType, int conv_dim, bool expand, int fLen>
__global__
void convolve2_separable(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1)
{
// Shared tile size depends on which dimension carries the halo.
const int smem_len = (conv_dim==0 ?
(THREADS_X+2*(fLen-1))* THREADS_Y:
(THREADS_Y+2*(fLen-1))* THREADS_X);
__shared__ T shrdMem[smem_len];
const int radius = fLen-1;
const int padding = 2*radius;
const int s0 = signal.strides[0];
const int s1 = signal.strides[1];
const int d0 = signal.dims[0];
const int d1 = signal.dims[1];
// Row pitch of the shared tile: padded only for the row-convolution pass.
const int shrdLen = THREADS_X + (conv_dim==0 ? padding : 0);
// Recover batch indices from the packed grid coordinates.
unsigned b2 = blockIdx.x/nBBS0;
unsigned b3 = blockIdx.y/nBBS1;
T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]);
const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]);
const accType *impulse = (const accType *)sFilter;
int lx = threadIdx.x;
int ly = threadIdx.y;
int ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx;
int oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly;
int gx = ox;
int gy = oy;
// below if-else statement is based on template parameter
// Cooperative halo load: each thread copies one or more strided elements;
// out-of-range positions are zero-filled.
if (conv_dim==0) {
gx += (expand ? 0 : fLen>>1);
int endX = ((fLen-1)<<1) + THREADS_X;
#pragma unroll
for(int lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) {
int i = glb_x - radius;
int j = gy;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
} else if (conv_dim==1) {
gy += (expand ? 0 : fLen>>1);
int endY = ((fLen-1)<<1) + THREADS_Y;
#pragma unroll
for(int ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) {
int i = gx;
int j = glb_y - radius;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
}
// Barrier between the cooperative tile load and the tap loop below;
// reached by all threads in the block (the branches above do not return).
__syncthreads();
if (ox<out.dims[0] && oy<out.dims[1]) {
// below conditional statement is based on template parameter
int i = (conv_dim==0 ? lx : ly) + radius;
accType accum = scalar<accType>(0);
#pragma unroll
for(int f=0; f<fLen; ++f) {
accType f_val = impulse[f];
// below conditional statement is based on template parameter
int s_idx = (conv_dim==0 ? (ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx));
T s_val = shrdMem[s_idx];
accum = accum + s_val*f_val;
}
dst[oy*out.strides[1]+ox] = (T)accum;
}
}
// Dispatch shim: launches the separable convolution kernel with the runtime
// filter length baked in as a template parameter (enables full unrolling of
// the tap loop inside the kernel).
template<typename T, typename aT, int cDim, bool expand, int f>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, int nBBS0, int nBBS1)
{
CUDA_LAUNCH((convolve2_separable<T, aT, cDim, expand, f>), blks, thrds, out, sig, nBBS0, nBBS1);
}
// Host driver for one separable-convolution pass. Uploads the filter taps to
// the constant-memory buffer sFilter on the active stream, then dispatches
// to the kernel instantiation matching the runtime filter length.
// NOTE(review): filter lengths 0 and 1 fall through to the default branch
// and raise CUDA_NOT_SUPPORTED -- confirm callers never pass fLen < 2.
template<typename T, typename accType, int conv_dim, bool expand>
void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter)
{
int fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3];
if(fLen > kernel::MAX_SCONV_FILTER_LEN) {
// call upon fft
CUDA_NOT_SUPPORTED();
}
dim3 threads(THREADS_X, THREADS_Y);
int blk_x = divup(out.dims[0], threads.x);
int blk_y = divup(out.dims[1], threads.y);
// One tile per 16x16 output block, replicated across the batch dimensions
// (dims 2 and 3) packed into the grid x/y extents.
dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]);
// FIX ME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(hipMemcpyToSymbolAsync(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0,
hipMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())));
// Literal switch maps the runtime length onto the template-int parameter.
switch(fLen) {
case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break;
case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break;
case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break;
case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break;
case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break;
case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break;
case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break;
case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break;
case 10: conv2Helper<T, accType, conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break;
case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break;
case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break;
case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break;
case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break;
case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break;
case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break;
case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break;
case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break;
case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break;
case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break;
case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break;
case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break;
case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break;
case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break;
case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break;
case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break;
case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break;
case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break;
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break;
case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break;
case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break;
default: CUDA_NOT_SUPPORTED();
}
POST_LAUNCH_CHECK();
}
// Explicit instantiations: both separable passes (conv_dim 0 = rows,
// conv_dim 1 = columns), each with and without output expansion, for every
// supported signal/accumulator type pairing.
// NOTE: the macro must NOT end with a line-continuation backslash --
// otherwise the first INSTANTIATE() invocation below is swallowed into the
// macro body and every expansion breaks.
#define INSTANTIATE(T, accType) \
    template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
    template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
    template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
    template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter);

INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float  , float)
INSTANTIATE(uint   , float)
INSTANTIATE(int    , float)
INSTANTIATE(uchar  , float)
INSTANTIATE(char   , float)
INSTANTIATE(ushort , float)
INSTANTIATE(short  , float)
}
}
| deefe69fc579d460df108ca24c8f63665f9aa05f.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
// Thread-block tile dimensions used by the separable convolution kernel.
static const int THREADS_X = 16;
static const int THREADS_Y = 16;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const int MAX_SCONV_FILTER_LEN = 31;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
// Raw-byte constant-memory staging buffer for the filter taps, sized for the
// widest element type (double) so one buffer serves every accType.
__constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)];
// One pass of a separable 2D convolution: conv_dim==0 convolves along rows,
// conv_dim==1 along columns. Filter taps are read from the constant-memory
// buffer sFilter; fLen is a template parameter so the tap loop can unroll
// fully. Each THREADS_X x THREADS_Y block stages its input tile plus a
// 2*(fLen-1) halo (along the convolved dimension only) into shared memory.
// Batched inputs pack dims 2/3 into the grid: nBBS0/nBBS1 are the number of
// blocks per image along grid x/y.
template<typename T, typename accType, int conv_dim, bool expand, int fLen>
__global__
void convolve2_separable(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1)
{
// Shared tile size depends on which dimension carries the halo.
const int smem_len = (conv_dim==0 ?
(THREADS_X+2*(fLen-1))* THREADS_Y:
(THREADS_Y+2*(fLen-1))* THREADS_X);
__shared__ T shrdMem[smem_len];
const int radius = fLen-1;
const int padding = 2*radius;
const int s0 = signal.strides[0];
const int s1 = signal.strides[1];
const int d0 = signal.dims[0];
const int d1 = signal.dims[1];
// Row pitch of the shared tile: padded only for the row-convolution pass.
const int shrdLen = THREADS_X + (conv_dim==0 ? padding : 0);
// Recover batch indices from the packed grid coordinates.
unsigned b2 = blockIdx.x/nBBS0;
unsigned b3 = blockIdx.y/nBBS1;
T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]);
const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]);
const accType *impulse = (const accType *)sFilter;
int lx = threadIdx.x;
int ly = threadIdx.y;
int ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx;
int oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly;
int gx = ox;
int gy = oy;
// below if-else statement is based on template parameter
// Cooperative halo load: each thread copies one or more strided elements;
// out-of-range positions are zero-filled.
if (conv_dim==0) {
gx += (expand ? 0 : fLen>>1);
int endX = ((fLen-1)<<1) + THREADS_X;
#pragma unroll
for(int lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) {
int i = glb_x - radius;
int j = gy;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
} else if (conv_dim==1) {
gy += (expand ? 0 : fLen>>1);
int endY = ((fLen-1)<<1) + THREADS_Y;
#pragma unroll
for(int ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) {
int i = gx;
int j = glb_y - radius;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
}
// Barrier between the cooperative tile load and the tap loop below;
// reached by all threads in the block (the branches above do not return).
__syncthreads();
if (ox<out.dims[0] && oy<out.dims[1]) {
// below conditional statement is based on template parameter
int i = (conv_dim==0 ? lx : ly) + radius;
accType accum = scalar<accType>(0);
#pragma unroll
for(int f=0; f<fLen; ++f) {
accType f_val = impulse[f];
// below conditional statement is based on template parameter
int s_idx = (conv_dim==0 ? (ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx));
T s_val = shrdMem[s_idx];
accum = accum + s_val*f_val;
}
dst[oy*out.strides[1]+ox] = (T)accum;
}
}
// Dispatch shim: launches the separable convolution kernel with the runtime
// filter length baked in as a template parameter (enables full unrolling of
// the tap loop inside the kernel).
template<typename T, typename aT, int cDim, bool expand, int f>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, int nBBS0, int nBBS1)
{
CUDA_LAUNCH((convolve2_separable<T, aT, cDim, expand, f>), blks, thrds, out, sig, nBBS0, nBBS1);
}
// Host driver for one separable-convolution pass. Uploads the filter taps to
// the constant-memory buffer sFilter on the active stream, then dispatches
// to the kernel instantiation matching the runtime filter length.
// NOTE(review): filter lengths 0 and 1 fall through to the default branch
// and raise CUDA_NOT_SUPPORTED -- confirm callers never pass fLen < 2.
template<typename T, typename accType, int conv_dim, bool expand>
void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter)
{
int fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3];
if(fLen > kernel::MAX_SCONV_FILTER_LEN) {
// call upon fft
CUDA_NOT_SUPPORTED();
}
dim3 threads(THREADS_X, THREADS_Y);
int blk_x = divup(out.dims[0], threads.x);
int blk_y = divup(out.dims[1], threads.y);
// One tile per 16x16 output block, replicated across the batch dimensions
// (dims 2 and 3) packed into the grid x/y extents.
dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]);
// FIX ME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(cudaMemcpyToSymbolAsync(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0,
cudaMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())));
// Literal switch maps the runtime length onto the template-int parameter.
switch(fLen) {
case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break;
case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break;
case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break;
case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break;
case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break;
case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break;
case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break;
case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break;
case 10: conv2Helper<T, accType, conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break;
case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break;
case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break;
case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break;
case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break;
case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break;
case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break;
case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break;
case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break;
case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break;
case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break;
case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break;
case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break;
case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break;
case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break;
case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break;
case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break;
case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break;
case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break;
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break;
case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break;
case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break;
default: CUDA_NOT_SUPPORTED();
}
POST_LAUNCH_CHECK();
}
// Explicit instantiations: both separable passes (conv_dim 0 = rows,
// conv_dim 1 = columns), each with and without output expansion, for every
// supported signal/accumulator type pairing.
// NOTE: the macro must NOT end with a line-continuation backslash --
// otherwise the first INSTANTIATE() invocation below is swallowed into the
// macro body and every expansion breaks.
#define INSTANTIATE(T, accType) \
    template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
    template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
    template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
    template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter);

INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float  , float)
INSTANTIATE(uint   , float)
INSTANTIATE(int    , float)
INSTANTIATE(uchar  , float)
INSTANTIATE(char   , float)
INSTANTIATE(ushort , float)
INSTANTIATE(short  , float)
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.