hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
61adbddee1c39eec107277b6dfb3340378268dd5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 16
// Matrixes Multiplcation (Global Memory)
// Dense matrix multiply C = A * B in global memory. One thread computes
// one output element C[idy][idx].
// NOTE(review): the inner loop length and both operand strides are ncol,
// so this is only correct for square matrices (nrow == ncol) -- which is
// how the driver calls it; confirm before reusing with rectangular inputs.
__global__ void multiply_gm(float *C,float *A,float *B, int nrow,int ncol)
{
// Global 2D thread coordinates: idx = output column, idy = output row.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index=idy*ncol+idx;
// Bounds guard: the grid is rounded up, so edge threads must bail out.
if (idy<nrow && idx<ncol){
float sum=0.0f;
// Dot product of row idy of A with column idx of B.
for(int k=0;k<ncol;k++){
sum+=A[idy*ncol+k]*B[k*ncol+idx];
}
C[index] = sum;
}
}
// Ceiling division: the number of blocks of size b needed to cover a.
int div_up(int a,int b){
    int quotient = a / b;
    if (a % b != 0) {
        quotient += 1;
    }
    return quotient;
}
/**
 * Driver: builds two square nrow x nrow matrices on the host, multiplies
 * them on the device with multiply_gm, and prints the kernel time in ms.
 *
 * Usage: program <matrix_dimension>
 * Returns 0 on success, 1 on bad usage or failed host allocation.
 */
int main(int argc, char* argv[]){
    // Validate the command line before touching argv[1]; the original
    // dereferenced argv[1] unconditionally and crashed when run bare.
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <matrix_dimension>\n", argv[0]);
        return 1;
    }
    float *A_h,*B_h,*C_h; // Host matrices
    float *A_d,*B_d,*C_d; // Device matrices
    int nrow = atoi(argv[1]); // rows
    int ncol = nrow;          // cols (square matrix)
    if (nrow <= 0) {
        fprintf(stderr, "matrix dimension must be a positive integer\n");
        return 1;
    }
    // Element count in integer arithmetic; the original stored it in a
    // float, which loses precision once nrow*ncol exceeds 2^24.
    size_t N = (size_t)nrow * (size_t)ncol;
    size_t size = N * sizeof(float);
    // GPU timing
    hipEvent_t start, stop;
    float time;
    A_h = (float *)malloc(size);
    B_h = (float *)malloc(size);
    C_h = (float *)malloc(size);
    if (A_h == NULL || B_h == NULL || C_h == NULL) {
        fprintf(stderr, "host allocation of %zu bytes failed\n", size);
        free(A_h); free(B_h); free(C_h);
        return 1;
    }
    // Initializing host matrices with fixed values (every element of the
    // product is then 2 * ncol, which makes eyeball verification easy).
    for (int i=0; i<nrow; i++){
        for(int j=0;j<ncol;j++){
            A_h[i*ncol+j] = 1.0f;
            B_h[i*ncol+j] = 2.0f;
        }
    }
    hipMalloc((void **) &A_d,size);
    hipMalloc((void **) &B_d,size);
    hipMalloc((void **) &C_d,size);
    // Host to Device transfer
    hipMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B_h, size, hipMemcpyHostToDevice);
    // One thread per output element, grid rounded up to whole blocks.
    dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
    dim3 n_blocks(div_up(ncol,block_size.x),div_up(nrow,block_size.y));
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);
    hipLaunchKernelGGL(( multiply_gm), dim3(n_blocks), dim3(block_size) , 0, 0, C_d,A_d,B_d,nrow,ncol);
    hipEventRecord(stop,0);
    // hipEventSynchronize blocks until the kernel has finished, so the
    // elapsed time below covers the full kernel execution.
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    printf("Time : %f ms\n",time);
    // Result from Device to Host
    hipMemcpy(C_h, C_d, size,hipMemcpyDeviceToHost);
    system("sleep 1"); // kept from original: brief pause before teardown
    free(A_h);
    free(B_h);
    free(C_h);
    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);
    // Destroy the timing events (leaked in the original).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 61adbddee1c39eec107277b6dfb3340378268dd5.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 16
// Matrixes Multiplcation (Global Memory)
// Dense matrix multiply C = A * B in global memory. One thread computes
// one output element C[idy][idx].
// NOTE(review): the inner loop length and both operand strides are ncol,
// so this is only correct for square matrices (nrow == ncol) -- which is
// how the driver calls it; confirm before reusing with rectangular inputs.
__global__ void multiply_gm(float *C,float *A,float *B, int nrow,int ncol)
{
// Global 2D thread coordinates: idx = output column, idy = output row.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index=idy*ncol+idx;
// Bounds guard: the grid is rounded up, so edge threads must bail out.
if (idy<nrow && idx<ncol){
float sum=0.0f;
// Dot product of row idy of A with column idx of B.
for(int k=0;k<ncol;k++){
sum+=A[idy*ncol+k]*B[k*ncol+idx];
}
C[index] = sum;
}
}
// Ceiling division: how many chunks of size b are required to hold a.
int div_up(int a,int b){
    int rem = a % b;
    int q = a / b;
    return rem == 0 ? q : q + 1;
}
/**
 * Driver: builds two square nrow x nrow matrices on the host, multiplies
 * them on the device with multiply_gm, and prints the kernel time in ms.
 *
 * Usage: program <matrix_dimension>
 * Returns 0 on success, 1 on bad usage or failed host allocation.
 */
int main(int argc, char* argv[]){
    // Validate the command line before touching argv[1]; the original
    // dereferenced argv[1] unconditionally and crashed when run bare.
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <matrix_dimension>\n", argv[0]);
        return 1;
    }
    float *A_h,*B_h,*C_h; // Host matrices
    float *A_d,*B_d,*C_d; // Device matrices
    int nrow = atoi(argv[1]); // rows
    int ncol = nrow;          // cols (square matrix)
    if (nrow <= 0) {
        fprintf(stderr, "matrix dimension must be a positive integer\n");
        return 1;
    }
    // Element count in integer arithmetic; the original stored it in a
    // float, which loses precision once nrow*ncol exceeds 2^24.
    size_t N = (size_t)nrow * (size_t)ncol;
    size_t size = N * sizeof(float);
    // GPU timing
    cudaEvent_t start, stop;
    float time;
    A_h = (float *)malloc(size);
    B_h = (float *)malloc(size);
    C_h = (float *)malloc(size);
    if (A_h == NULL || B_h == NULL || C_h == NULL) {
        fprintf(stderr, "host allocation of %zu bytes failed\n", size);
        free(A_h); free(B_h); free(C_h);
        return 1;
    }
    // Initializing host matrices with fixed values (every element of the
    // product is then 2 * ncol, which makes eyeball verification easy).
    for (int i=0; i<nrow; i++){
        for(int j=0;j<ncol;j++){
            A_h[i*ncol+j] = 1.0f;
            B_h[i*ncol+j] = 2.0f;
        }
    }
    cudaMalloc((void **) &A_d,size);
    cudaMalloc((void **) &B_d,size);
    cudaMalloc((void **) &C_d,size);
    // Host to Device transfer
    cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
    // One thread per output element, grid rounded up to whole blocks.
    dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
    dim3 n_blocks(div_up(ncol,block_size.x),div_up(nrow,block_size.y));
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    multiply_gm<<< n_blocks, block_size >>> (C_d,A_d,B_d,nrow,ncol);
    cudaEventRecord(stop,0);
    // cudaEventSynchronize blocks until the kernel has finished, so the
    // elapsed time below covers the full kernel execution.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time : %f ms\n",time);
    // Result from Device to Host
    cudaMemcpy(C_h, C_d, size,cudaMemcpyDeviceToHost);
    system("sleep 1"); // kept from original: brief pause before teardown
    free(A_h);
    free(B_h);
    free(C_h);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    // Destroy the timing events (leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
ae9ee04324893b7b94edd406f0939b3fb18d6f77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CSR sparse matrix-vector product: y[i] = sum of dataArr[j] * x[index[j]]
// over the nonzeros of row i, where row i's nonzeros occupy
// dataArr[ptr[i] .. ptr[i+1]-1] and index[] holds their column numbers.
// NOTE(review): there is no bounds guard on i, so the launch must supply
// exactly one thread per CSR row and ptr must have (thread count + 1)
// entries; the host builder drops all-zero rows from ptr, so thread count
// and row count can drift apart -- verify against the launcher.
__global__ void func(int *dataArr,int *x,int *ptr,int *y,int *index) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int val = 0;
// Walk the nonzeros of row i.
for(int j = ptr[i]; j < ptr[i+1]; j++) {
val = val + dataArr[j]*x[index[j]];
}
y[i] = val;
}
int main() {
int m,n,i,j;
int *d_a,*d_x,*d_y,*d_ptr,*d_index;
printf("Enter value of m: ");
scanf("%d",&m);
printf("Enter value of n: ");
scanf("%d",&n);
int size = sizeof(int);
int a[m][n];
int x[n];
int y[n];
int ptr[100];
int ptrid = 0;
int index[100];
int indexid = 0;
int dataArr[100];
int temp,f;
printf("Enter input matrix: ");
for(i = 0; i<m; i++) {
f = 0;
for(j = 0; j<n; j++) {
scanf("%d",&a[i][j]);
if(a[i][j] != 0) {
if(f == 0)
temp = indexid;
f = 1;
index[indexid] = j;
dataArr[indexid] = a[i][j];
indexid++;
}
}
if(f == 1) {
ptr[ptrid] = temp;
ptrid++;
}
}
ptr[ptrid] = indexid;
ptrid++;
printf("Enter input vector: ");
for(i = 0; i<n; i++) {
scanf("%d",&x[i]);
}
hipMalloc((void **)&d_a,size*indexid);
hipMalloc((void **)&d_x,size*n);
hipMalloc((void **)&d_y,size*n);
hipMalloc((void **)&d_ptr,size*ptrid);
hipMalloc((void **)&d_index,size*indexid);
hipMemcpy(d_a,dataArr,size*indexid,hipMemcpyHostToDevice);
hipMemcpy(d_x,x,size*n,hipMemcpyHostToDevice);
hipMemcpy(d_ptr,ptr,size*ptrid,hipMemcpyHostToDevice);
hipMemcpy(d_index,index,size*indexid,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( func), dim3(1),dim3(n), 0, 0, d_a,d_x,d_ptr,d_y,d_index);
hipMemcpy(y,d_y,size*n,hipMemcpyDeviceToHost);
printf("Result vector is: \n");
for(j = 0; j < n; j++) {
printf("%d\t",y[j]);
}
printf("\n");
hipFree(d_a);
hipFree(d_x);
hipFree(d_y);
hipFree(d_ptr);
hipFree(d_index);
return 0;
} | ae9ee04324893b7b94edd406f0939b3fb18d6f77.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CSR sparse matrix-vector product: y[i] = sum of dataArr[j] * x[index[j]]
// over the nonzeros of row i, where row i's nonzeros occupy
// dataArr[ptr[i] .. ptr[i+1]-1] and index[] holds their column numbers.
// NOTE(review): there is no bounds guard on i, so the launch must supply
// exactly one thread per CSR row and ptr must have (thread count + 1)
// entries; the host builder drops all-zero rows from ptr, so thread count
// and row count can drift apart -- verify against the launcher.
__global__ void func(int *dataArr,int *x,int *ptr,int *y,int *index) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int val = 0;
// Walk the nonzeros of row i.
for(int j = ptr[i]; j < ptr[i+1]; j++) {
val = val + dataArr[j]*x[index[j]];
}
y[i] = val;
}
int main() {
int m,n,i,j;
int *d_a,*d_x,*d_y,*d_ptr,*d_index;
printf("Enter value of m: ");
scanf("%d",&m);
printf("Enter value of n: ");
scanf("%d",&n);
int size = sizeof(int);
int a[m][n];
int x[n];
int y[n];
int ptr[100];
int ptrid = 0;
int index[100];
int indexid = 0;
int dataArr[100];
int temp,f;
printf("Enter input matrix: ");
for(i = 0; i<m; i++) {
f = 0;
for(j = 0; j<n; j++) {
scanf("%d",&a[i][j]);
if(a[i][j] != 0) {
if(f == 0)
temp = indexid;
f = 1;
index[indexid] = j;
dataArr[indexid] = a[i][j];
indexid++;
}
}
if(f == 1) {
ptr[ptrid] = temp;
ptrid++;
}
}
ptr[ptrid] = indexid;
ptrid++;
printf("Enter input vector: ");
for(i = 0; i<n; i++) {
scanf("%d",&x[i]);
}
cudaMalloc((void **)&d_a,size*indexid);
cudaMalloc((void **)&d_x,size*n);
cudaMalloc((void **)&d_y,size*n);
cudaMalloc((void **)&d_ptr,size*ptrid);
cudaMalloc((void **)&d_index,size*indexid);
cudaMemcpy(d_a,dataArr,size*indexid,cudaMemcpyHostToDevice);
cudaMemcpy(d_x,x,size*n,cudaMemcpyHostToDevice);
cudaMemcpy(d_ptr,ptr,size*ptrid,cudaMemcpyHostToDevice);
cudaMemcpy(d_index,index,size*indexid,cudaMemcpyHostToDevice);
func<<<1,n>>>(d_a,d_x,d_ptr,d_y,d_index);
cudaMemcpy(y,d_y,size*n,cudaMemcpyDeviceToHost);
printf("Result vector is: \n");
for(j = 0; j < n; j++) {
printf("%d\t",y[j]);
}
printf("\n");
cudaFree(d_a);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_ptr);
cudaFree(d_index);
return 0;
} |
6a605de5c6ca685d30fe311968027e4ef3816ed9.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2017 LGPL, Inc. All Rights Reserved
* @author Chen Qian (chinahbcq@qq.com)
* @date 2017.04.22 14:32:13
* @author Fred Ware (fred.w.ware@gmail.com)
* @date 2019.08.24
* @brief modified work
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand.h>
#include "yuv2bgr.h"
// Converts one NV12 frame (A) to packed BGR24 (B). One thread per pixel:
// IDX is the linear pixel index. The luma plane uses `linesize` bytes per
// row; the interleaved UV plane follows it at offset linesize * height.
// NOTE(review): the conversion coefficients are double literals, so the
// per-pixel math runs in double precision on the device -- confirm that
// is intended before narrowing them to float.
__global__ void
cvtNV12_BGR(unsigned char *A,
unsigned char *B,
const int height,
const int width,
const int linesize)
{
int IDX = blockDim.x * blockIdx.x + threadIdx.x;
long len = width * height;
if (IDX < len) {
// Recover 2D pixel coordinates from the linear index.
int j = IDX % width;
int i = (IDX - j) / width;
int bgr[3];
int yIdx, uvIdx, idx;
int y,u,v;
yIdx = i * linesize + j;
// One interleaved (U,V) pair serves a 2x2 luma block; j - j%2 snaps to
// the even column where the pair starts.
uvIdx = linesize * height + (i / 2) * linesize + j - j % 2;
y = A[yIdx];
u = A[uvIdx];
v = A[uvIdx + 1];
bgr[0] = y + 1.772 * (u-128);
bgr[1] = y - 0.34414 * (u -128) - 0.71414 * (v-128);
bgr[2] = y + 1.402 * (v - 128);
// Clamp each channel to [0, 255] and store interleaved B,G,R.
for (int k = 0; k < 3; k++) {
idx = (i * width + j) * 3 + k;
if (bgr[k] >=0 && bgr[k] < 255) {
B[idx] = bgr[k];
} else {
B[idx] = bgr[k] < 0 ? 0 : 255;
}
}
}
}
// Launches the NV12 -> packed BGR conversion over `resolution` pixels.
// d_req holds the device-side NV12 frame; d_res receives the BGR output.
// Always returns 0; launch errors are not checked here.
int cvtColor(
unsigned char *d_req,
unsigned char *d_res,
int resolution,
int height,
int width,
int linesize) {
    const int kThreadsPerBlock = 256;
    // Round up so every pixel gets a thread.
    const int kBlocks = (resolution + kThreadsPerBlock - 1) / kThreadsPerBlock;
    hipLaunchKernelGGL(( cvtNV12_BGR), dim3(kBlocks), dim3(kThreadsPerBlock), 0, 0, d_req, d_res, height, width, linesize);
    return 0;
}
| 6a605de5c6ca685d30fe311968027e4ef3816ed9.cu | /**
* Copyright (c) 2017 LGPL, Inc. All Rights Reserved
* @author Chen Qian (chinahbcq@qq.com)
* @date 2017.04.22 14:32:13
* @author Fred Ware (fred.w.ware@gmail.com)
* @date 2019.08.24
* @brief modified work
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <curand.h>
#include "yuv2bgr.h"
// Converts one NV12 frame (A) to packed BGR24 (B). One thread per pixel:
// IDX is the linear pixel index. The luma plane uses `linesize` bytes per
// row; the interleaved UV plane follows it at offset linesize * height.
// NOTE(review): the conversion coefficients are double literals, so the
// per-pixel math runs in double precision on the device -- confirm that
// is intended before narrowing them to float.
__global__ void
cvtNV12_BGR(unsigned char *A,
unsigned char *B,
const int height,
const int width,
const int linesize)
{
int IDX = blockDim.x * blockIdx.x + threadIdx.x;
long len = width * height;
if (IDX < len) {
// Recover 2D pixel coordinates from the linear index.
int j = IDX % width;
int i = (IDX - j) / width;
int bgr[3];
int yIdx, uvIdx, idx;
int y,u,v;
yIdx = i * linesize + j;
// One interleaved (U,V) pair serves a 2x2 luma block; j - j%2 snaps to
// the even column where the pair starts.
uvIdx = linesize * height + (i / 2) * linesize + j - j % 2;
y = A[yIdx];
u = A[uvIdx];
v = A[uvIdx + 1];
bgr[0] = y + 1.772 * (u-128);
bgr[1] = y - 0.34414 * (u -128) - 0.71414 * (v-128);
bgr[2] = y + 1.402 * (v - 128);
// Clamp each channel to [0, 255] and store interleaved B,G,R.
for (int k = 0; k < 3; k++) {
idx = (i * width + j) * 3 + k;
if (bgr[k] >=0 && bgr[k] < 255) {
B[idx] = bgr[k];
} else {
B[idx] = bgr[k] < 0 ? 0 : 255;
}
}
}
}
// Launches the NV12 -> packed BGR conversion over `resolution` pixels.
// d_req holds the device-side NV12 frame; d_res receives the BGR output.
// Always returns 0; launch errors are not checked here.
int cvtColor(
unsigned char *d_req,
unsigned char *d_res,
int resolution,
int height,
int width,
int linesize) {
    const int kThreadsPerBlock = 256;
    // Round up so every pixel gets a thread.
    const int kBlocks = (resolution + kThreadsPerBlock - 1) / kThreadsPerBlock;
    cvtNV12_BGR<<<kBlocks, kThreadsPerBlock>>>(d_req, d_res, height, width, linesize);
    return 0;
}
|
875007edd02df39b9e3f203f395d67ecd2cb7007.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/cuda/warp_perspective/common.h"
#include <cstdio>
#include "megdnn/dtype.h"
#include "src/common/rounding_converter.cuh"
#include "src/cuda/error_info.cuh"
#include "src/cuda/integer_subbyte_utils.cuh"
#include "src/cuda/kernel_common/diagnostic_prologue.cuh"
#include "src/cuda/utils.cuh"
#include "src/cuda/warp_perspective/common.cuh"
using namespace megdnn;
using namespace cuda;
using namespace warp_perspective;
using namespace integer_subbyte;
namespace {
// Maps each element type to its storage width in bits. Used below to turn
// element counts into byte offsets; the 4-bit quantized types pack two
// elements per byte, which is why offsets are computed as bits / 8.
template <typename ctype>
struct CtypeHelper;
template <>
struct CtypeHelper<float> {
static constexpr int bit_width = 32;
};
template <>
struct CtypeHelper<dt_float16> {
static constexpr int bit_width = 16;
};
template <>
struct CtypeHelper<dt_uint8> {
static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_int8> {
static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_qint4> {
static constexpr int bit_width = 4;
};
template <>
struct CtypeHelper<dt_quint4> {
static constexpr int bit_width = 4;
};
// Source accessor for batches stored back to back in one buffer (ptr) or
// addressed through a per-batch pointer table (ptrs).
// get(batch, im_size) returns the start of batch `batch`, where im_size is
// the per-batch element count; get(batch) uses the pointer table instead.
template <typename ctype>
struct DirectSrcVisitor {
const void* ptr;
const void** ptrs;
__device__ __forceinline__ const ctype* get(int batch, int im_size) {
// 64-bit arithmetic so batch * im_size cannot overflow for large inputs;
// bit_width / 8 converts the element count to bytes (sub-byte types pack).
return (ctype*)((char*)ptr + static_cast<int64_t>(batch) * static_cast<int64_t>(im_size) * CtypeHelper<ctype>::bit_width / 8);
}
__device__ __forceinline__ const ctype* get(int batch) {
return (ctype*)(ptrs[batch]);
}
// Host-side: advance the base pointer past `batch` batches of im_size
// elements each (used when a large job is split into sub-launches).
void move_batch(size_t batch, size_t im_size) {
ptr = (char*)ptr + batch * im_size * CtypeHelper<ctype>::bit_width / 8;
}
};
// Source accessor with an extra indirection: idx maps each output batch to
// a source batch (mat_idx). Out-of-range indices report an async error and
// fall back to source batch 0 so the kernel can keep running.
template <typename ctype>
struct IndexedSrcVisitor {
const void* ptr;
const void** ptrs;
const int* idx;
// Number of source batches; used to validate idx entries.
int N_SRC;
AsyncErrorInfo* error_info;
void* error_tracker;
__device__ __forceinline__ const ctype* get(int batch, int im_size) {
int orig_batch = batch;
batch = idx[batch];
if (batch < 0 || batch >= N_SRC) {
set_async_error_info(
error_info, error_tracker,
"mat_idx out of bound: mat_idx[%d]=%d src_batch=%d", orig_batch,
batch, N_SRC);
// Fall back to batch 0 rather than reading out of bounds.
batch = 0;
}
return (ctype*)((char*)ptr + static_cast<int64_t>(batch) * static_cast<int64_t>(im_size) * CtypeHelper<ctype>::bit_width / 8);
}
__device__ __forceinline__ const ctype* get(int batch) {
int orig_batch = batch;
batch = idx[batch];
if (batch < 0 || batch >= N_SRC) {
set_async_error_info(
error_info, error_tracker,
"mat_idx out of bound: mat_idx[%d]=%d src_batch=%d", orig_batch,
batch, N_SRC);
// Fall back to batch 0 rather than reading out of bounds.
batch = 0;
}
return (ctype*)(ptrs[batch]);
}
// Host-side: skip `batch` entries of the index table.
void move_batch(size_t batch, size_t) { idx += batch; }
};
// Bilinear warp-perspective, NCHW layout, sources fetched through the
// visitor's per-batch pointer overload. One thread per output pixel
// (ow, oh); blockIdx.z selects the batch. The per-batch 3x3 matrix maps
// output coords to fractional input coords; Getter maps out-of-range
// integer coords back into [0, IW)/[0, IH) (border policy), and the four
// neighbours are blended with bilinear weights, one channel plane at a time.
template <
typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general_multi_src(
SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW) {
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = srcs.get(blockIdx.z);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
// Bilinear weights from the fractional parts.
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
dst[oh * OW + ow] = output_converter(
sptr[ih0 * IW + iw0] * nalpha * nbeta +
sptr[ih0 * IW + iw1] * nalpha * pbeta +
sptr[ih1 * IW + iw0] * palpha * nbeta +
sptr[ih1 * IW + iw1] * palpha * pbeta);
// Step both pointers to the next channel plane.
sptr += IH * IW;
dst += OH * OW;
}
}
}
// Bilinear warp-perspective, NCHW layout, contiguous source batches
// (visitor addressed by batch index and per-batch element count).
// One thread per output pixel (ow, oh); blockIdx.z selects the batch.
// Getter applies the non-constant border policy to out-of-range coords.
template <
typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general(
SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW) {
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
// Bilinear weights from the fractional parts.
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
dst[oh * OW + ow] = output_converter(
sptr[ih0 * IW + iw0] * nalpha * nbeta +
sptr[ih0 * IW + iw1] * nalpha * pbeta +
sptr[ih1 * IW + iw0] * palpha * nbeta +
sptr[ih1 * IW + iw1] * palpha * pbeta);
// Step both pointers to the next channel plane.
sptr += IH * IW;
dst += OH * OW;
}
}
}
// Bilinear warp-perspective for the NCHW4 layout: channels are packed in
// groups of 4, so spatial offsets are shifted left by 2 and the inner
// unrolled loop handles the 4 channels of each group.
template <
typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general_nchw4(
SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW) {
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
// Bilinear weights from the fractional parts.
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
// << 2 == * 4: each spatial position holds a group of 4 channels.
int o_coor = (oh * OW + ow) << 2;
int i_coor_00 = (ih0 * IW + iw0) << 2;
int i_coor_01 = (ih0 * IW + iw1) << 2;
int i_coor_10 = (ih1 * IW + iw0) << 2;
int i_coor_11 = (ih1 * IW + iw1) << 2;
for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
for (int c1 = 0; c1 < 4; ++c1) {
dst[o_coor + c1] = output_converter(
sptr[i_coor_00 + c1] * nalpha * nbeta +
sptr[i_coor_01 + c1] * nalpha * pbeta +
sptr[i_coor_10 + c1] * palpha * nbeta +
sptr[i_coor_11 + c1] * palpha * pbeta);
}
sptr += IH * IW * 4;
dst += OH * OW * 4;
}
}
}
// Interpolates 8 lanes of unpacked 4-bit values (s00..s11 are the four
// bilinear neighbours, w00..w11 the weights), converts each result through
// output_converter, and re-packs the 8 results into one 32-bit word of
// 4-bit elements (signedness selects int4 vs uint4 packing).
template <bool signedness, typename OutputConverter>
MEGDNN_DEVICE __forceinline__ int pack_output_func(
OutputConverter& output_converter, int (&s00)[8], int (&s01)[8], int (&s10)[8],
int (&s11)[8], float w00, float w01, float w10, float w11) {
#define warp_perspective_transform(idx) \
static_cast<int>( \
output_converter( \
s00[idx] * w00 + s01[idx] * w01 + s10[idx] * w10 + s11[idx] * w11) \
.as_storage())
return transform_int8_to_b4x8<signedness>(
warp_perspective_transform(0), warp_perspective_transform(1),
warp_perspective_transform(2), warp_perspective_transform(3),
warp_perspective_transform(4), warp_perspective_transform(5),
warp_perspective_transform(6), warp_perspective_transform(7));
#undef warp_perspective_transform
}
// Bilinear warp-perspective for the NCHW64 layout with 4-bit quantized
// elements: 64 channels per group, two 16-byte int4 vectors (32 nibbles
// each) per spatial position. Each thread handles one int4 half of a
// group, selected by c1 = ow % 2; the 4-bit lanes are unpacked to int,
// interpolated, and re-packed via pack_output_func.
template <
typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general_nchw64(
SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW) {
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
Getter getter;
OutputConverter output_converter;
// x dimension is over-provisioned 2x: even/odd threads split the two
// int4 halves of each output position.
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int c1 = ow % 2;
ow = ow / 2;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW / 2;
mat += blockIdx.z * 3 * 3;
const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
int4* dst_int4 = reinterpret_cast<int4*>(dst);
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
// Bilinear weights from the fractional parts.
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
// << 1 == * 2: two int4 vectors per spatial position.
int o_coor = (oh * OW + ow) << 1;
int i_coor_00 = (ih0 * IW + iw0) << 1;
int i_coor_01 = (ih0 * IW + iw1) << 1;
int i_coor_10 = (ih1 * IW + iw0) << 1;
int i_coor_11 = (ih1 * IW + iw1) << 1;
int s00[8], s01[8], s10[8], s11[8];
int4 s[4], d;
for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
// __ldg: read-only-cache loads of the four neighbour vectors.
s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
// Process the int4 one 32-bit component (8 nibbles) at a time.
transform_b4x8_to_int8<signedness>(s00, s[0].x);
transform_b4x8_to_int8<signedness>(s01, s[1].x);
transform_b4x8_to_int8<signedness>(s10, s[2].x);
transform_b4x8_to_int8<signedness>(s11, s[3].x);
d.x = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].y);
transform_b4x8_to_int8<signedness>(s01, s[1].y);
transform_b4x8_to_int8<signedness>(s10, s[2].y);
transform_b4x8_to_int8<signedness>(s11, s[3].y);
d.y = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].z);
transform_b4x8_to_int8<signedness>(s01, s[1].z);
transform_b4x8_to_int8<signedness>(s10, s[2].z);
transform_b4x8_to_int8<signedness>(s11, s[3].z);
d.z = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].w);
transform_b4x8_to_int8<signedness>(s01, s[1].w);
transform_b4x8_to_int8<signedness>(s10, s[2].w);
transform_b4x8_to_int8<signedness>(s11, s[3].w);
d.w = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
dst_int4[o_coor + c1] = d;
// Advance by one 64-channel group (2 int4 per position).
sptr_int4 += IH * IW * 2;
dst_int4 += OH * OW * 2;
}
}
}
// Bilinear warp-perspective with CONSTANT border mode, NCHW layout,
// sources fetched through the visitor's per-batch pointer overload:
// neighbours falling outside the input are replaced by bval instead of
// being remapped by a Getter.
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_multi_src(
SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, ctype bval) {
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = srcs.get(blockIdx.z);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
// Validity of each neighbour coordinate; invalid ones use bval below.
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);
ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);
ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);
ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);
ctype val = output_converter(
v00 * nalpha * nbeta + v01 * nalpha * pbeta + v10 * palpha * nbeta +
v11 * palpha * pbeta);
dst[oh * OW + ow] = val;
// Step both pointers to the next channel plane.
sptr += IH * IW;
dst += OH * OW;
}
}
}
// Bilinear warp-perspective with CONSTANT border mode, NCHW layout,
// contiguous source batches: neighbours falling outside the input are
// replaced by bval instead of being remapped by a Getter.
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border(
SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, ctype bval) {
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
// Validity of each neighbour coordinate; invalid ones use bval below.
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);
ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);
ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);
ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);
ctype val = output_converter(
v00 * nalpha * nbeta + v01 * nalpha * pbeta + v10 * palpha * nbeta +
v11 * palpha * pbeta);
dst[oh * OW + ow] = val;
// Step both pointers to the next channel plane.
sptr += IH * IW;
dst += OH * OW;
}
}
}
// Bilinear warp-perspective with CONSTANT border mode for the NCHW4
// layout: channels packed in groups of 4 (spatial offsets << 2), and
// out-of-range neighbours replaced by bval.
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw4(
SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, ctype bval) {
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
// Validity of each neighbour coordinate; invalid ones use bval below.
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
// << 2 == * 4: each spatial position holds a group of 4 channels.
int i_coor_00 = (ih0 * IW + iw0) << 2;
int i_coor_01 = (ih0 * IW + iw1) << 2;
int i_coor_10 = (ih1 * IW + iw0) << 2;
int i_coor_11 = (ih1 * IW + iw1) << 2;
int o_coor = (oh * OW + ow) << 2;
for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
for (int c1 = 0; c1 < 4; ++c1) {
ctype v00 = (okh0 && okw0 ? sptr[i_coor_00 + c1] : bval);
ctype v01 = (okh0 && okw1 ? sptr[i_coor_01 + c1] : bval);
ctype v10 = (okh1 && okw0 ? sptr[i_coor_10 + c1] : bval);
ctype v11 = (okh1 && okw1 ? sptr[i_coor_11 + c1] : bval);
ctype val = output_converter(
v00 * nalpha * nbeta + v01 * nalpha * pbeta +
v10 * palpha * nbeta + v11 * palpha * pbeta);
dst[o_coor + c1] = val;
}
sptr += IH * IW * 4;
dst += OH * OW * 4;
}
}
}
// Bilinear warp-perspective with CONSTANT border mode for the NCHW64
// 4-bit layout: 64 channels per group, two int4 vectors per spatial
// position, each thread handling one half (c1 = ow % 2). Out-of-range
// neighbour vectors are substituted with an int4 whose nibbles all hold
// bval before unpack/interpolate/re-pack.
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw64(
SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, ctype bval) {
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
OutputConverter output_converter;
// x dimension is over-provisioned 2x: even/odd threads split the two
// int4 halves of each output position.
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int c1 = ow % 2;
ow = ow / 2;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW / 2;
mat += blockIdx.z * 3 * 3;
const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
int4* dst_int4 = reinterpret_cast<int4*>(dst);
if (ow < OW && oh < OH) {
// Perspective transform: homogeneous coords divided by the denominator.
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
// Validity of each neighbour coordinate; invalid ones use bval below.
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
// << 1 == * 2: two int4 vectors per spatial position.
int o_coor = (oh * OW + ow) << 1;
int i_coor_00 = (ih0 * IW + iw0) << 1;
int i_coor_01 = (ih0 * IW + iw1) << 1;
int i_coor_10 = (ih1 * IW + iw0) << 1;
int i_coor_11 = (ih1 * IW + iw1) << 1;
bool flag00 = okh0 && okw0, flag01 = okh0 && okw1, flag10 = okh1 && okw0,
flag11 = okh1 && okw1;
// Broadcast bval's low nibble into a full int4 of packed 4-bit values.
int8_t bval_4 = bval.as_storage() & 0xF;
int bval_8 = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
int4 bval_int4;
bval_int4.x = bval_8;
bval_int4.y = bval_8;
bval_int4.z = bval_8;
bval_int4.w = bval_8;
int s00[8], s01[8], s10[8], s11[8];
int4 s[4], d;
for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
// Each neighbour: read-only-cache load when in bounds, bval otherwise.
if (flag00) {
s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
} else {
s[0] = bval_int4;
}
if (flag01) {
s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
} else {
s[1] = bval_int4;
}
if (flag10) {
s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
} else {
s[2] = bval_int4;
}
if (flag11) {
s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
} else {
s[3] = bval_int4;
}
// Process the int4 one 32-bit component (8 nibbles) at a time.
transform_b4x8_to_int8<signedness>(s00, s[0].x);
transform_b4x8_to_int8<signedness>(s01, s[1].x);
transform_b4x8_to_int8<signedness>(s10, s[2].x);
transform_b4x8_to_int8<signedness>(s11, s[3].x);
d.x = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].y);
transform_b4x8_to_int8<signedness>(s01, s[1].y);
transform_b4x8_to_int8<signedness>(s10, s[2].y);
transform_b4x8_to_int8<signedness>(s11, s[3].y);
d.y = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].z);
transform_b4x8_to_int8<signedness>(s01, s[1].z);
transform_b4x8_to_int8<signedness>(s10, s[2].z);
transform_b4x8_to_int8<signedness>(s11, s[3].z);
d.z = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].w);
transform_b4x8_to_int8<signedness>(s01, s[1].w);
transform_b4x8_to_int8<signedness>(s10, s[2].w);
transform_b4x8_to_int8<signedness>(s11, s[3].w);
d.w = pack_output_func<signedness>(
output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
dst_int4[o_coor + c1] = d;
// Advance by one 64-channel group (2 int4 per position).
sptr_int4 += IH * IW * 2;
dst_int4 += OH * OW * 2;
}
}
}
// Per-pixel interpolation core for NHWC kernels, specialised on pack_c
// (channels handled per call). This primary template is the scalar case:
// reads one ctype at byte `offset` from each of the four neighbour rows,
// blends with the bilinear weights, and writes one ctype to dst.
// srcN_ok flags select bval (constant border) over the actual load.
template <typename ctype, typename OutputConverter, int pack_c>
struct KernCoreNHWC {
MEGDNN_DEVICE __forceinline__ static void func(
char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
const char* src_ptr2, const char* src_ptr3, const int offset, float w00,
float w01, float w10, float w11, OutputConverter& output_converter,
const bool src0_ok, const bool src1_ok, const bool src2_ok,
const bool src3_ok, const ctype bval) {
static_assert(pack_c == 1, "static_assert pack_c == 1");
ctype v00 = src0_ok ? *(ctype*)(src_ptr0 + offset) : bval;
ctype v01 = src1_ok ? *(ctype*)(src_ptr1 + offset) : bval;
ctype v10 = src2_ok ? *(ctype*)(src_ptr2 + offset) : bval;
ctype v11 = src3_ok ? *(ctype*)(src_ptr3 + offset) : bval;
ctype res = output_converter(v00 * w00 + v01 * w01 + v10 * w10 + v11 * w11);
*(ctype*)(dst_ptr + offset) = res;
}
};
// pack_c == 8 specialisation: handles 8 packed 4-bit channels per call
// (one 32-bit word). Only valid for the 4-bit quantized types; the word
// is unpacked to eight ints, interpolated, and re-packed. Out-of-bounds
// neighbours use a word whose eight nibbles all hold bval.
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 8> {
MEGDNN_DEVICE __forceinline__ static void func(
char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
const char* src_ptr2, const char* src_ptr3, const int offset, float w00,
float w01, float w10, float w11, OutputConverter& output_converter,
const bool src0_ok, const bool src1_ok, const bool src2_ok,
const bool src3_ok, const ctype bval) {
static_assert(
std::is_same<ctype, dt_quint4>::value ||
std::is_same<ctype, dt_qint4>::value,
"assert qu4 or q4");
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
// Broadcast bval's low nibble into a 32-bit word of packed 4-bit values.
int8_t bval_4 = bval.as_storage() & 0xF;
const int bval_int = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
int src_ori[4];
src_ori[0] = src0_ok ? *(int*)(src_ptr0 + offset) : bval_int;
src_ori[1] = src1_ok ? *(int*)(src_ptr1 + offset) : bval_int;
src_ori[2] = src2_ok ? *(int*)(src_ptr2 + offset) : bval_int;
src_ori[3] = src3_ok ? *(int*)(src_ptr3 + offset) : bval_int;
int src[4][8];
transform_b4x8_to_int8<signedness>(src[0], src_ori[0]);
transform_b4x8_to_int8<signedness>(src[1], src_ori[1]);
transform_b4x8_to_int8<signedness>(src[2], src_ori[2]);
transform_b4x8_to_int8<signedness>(src[3], src_ori[3]);
int res = pack_output_func<signedness>(
output_converter, src[0], src[1], src[2], src[3], w00, w01, w10, w11);
*(int*)(dst_ptr + offset) = res;
}
};
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 16> {
    // Specialization processing 16 packed 4-bit channels (one int2, i.e. two
    // 32-bit words, per bilinear tap); only instantiable for dt_qint4 /
    // dt_quint4.
    MEGDNN_DEVICE __forceinline__ static void func(
            char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
            const char* src_ptr2, const char* src_ptr3, const int offset, float w00,
            float w01, float w10, float w11, OutputConverter& output_converter,
            const bool src0_ok, const bool src1_ok, const bool src2_ok,
            const bool src3_ok, const ctype bval) {
        static_assert(
                std::is_same<ctype, dt_quint4>::value ||
                        std::is_same<ctype, dt_qint4>::value,
                "assert qu4 or q4");
        constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
        // Broadcast the 4-bit border value into all 16 nibbles of an int2 so
        // an out-of-range tap can be substituted with a single vector value.
        int8_t bval_4 = bval.as_storage() & 0xF;
        const int bval_int_temp = transform_int8_to_b4x8<signedness>(
                bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
        const int2 bval_int{bval_int_temp, bval_int_temp};
        // One 64-bit (int2) load per tap, border value when the tap is not ok.
        int2 src_ori[4];
        src_ori[0] = src0_ok ? *(int2*)(src_ptr0 + offset) : bval_int;
        src_ori[1] = src1_ok ? *(int2*)(src_ptr1 + offset) : bval_int;
        src_ori[2] = src2_ok ? *(int2*)(src_ptr2 + offset) : bval_int;
        src_ori[3] = src3_ok ? *(int2*)(src_ptr3 + offset) : bval_int;
        // Unpack: src[0..3] hold the low words (.x) of the four taps,
        // src[4..7] the high words (.y); each row is 8 int lanes.
        int src[8][8];
        transform_b4x8_to_int8<signedness>(src[0], src_ori[0].x);
        transform_b4x8_to_int8<signedness>(src[1], src_ori[1].x);
        transform_b4x8_to_int8<signedness>(src[2], src_ori[2].x);
        transform_b4x8_to_int8<signedness>(src[3], src_ori[3].x);
        transform_b4x8_to_int8<signedness>(src[4], src_ori[0].y);
        transform_b4x8_to_int8<signedness>(src[5], src_ori[1].y);
        transform_b4x8_to_int8<signedness>(src[6], src_ori[2].y);
        transform_b4x8_to_int8<signedness>(src[7], src_ori[3].y);
        // Blend low and high halves independently and store as one int2.
        int2 res;
        res.x = pack_output_func<signedness>(
                output_converter, src[0], src[1], src[2], src[3], w00, w01, w10, w11);
        res.y = pack_output_func<signedness>(
                output_converter, src[4], src[5], src[6], src[7], w00, w01, w10, w11);
        *(int2*)(dst_ptr + offset) = res;
    }
};
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc_multi_src(
        SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    // Warp-perspective, NHWC layout, one source pointer per batch (srcs.get).
    // Each thread computes one output pixel (all C channels, pack_c channels
    // per KernCoreNHWC call); Getter remaps out-of-range source coordinates
    // according to the border mode.
    Getter getter;
    OutputConverter output_converter;
    // bit_width may be < 8 (4-bit quantized types), hence the "* bit_width / 8"
    // byte conversions below.
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    // blockIdx.z selects the batch: its own source, destination and 3x3 matrix.
    const ctype* __restrict sptr = srcs.get(blockIdx.z);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Perspective transform: fractional source coordinates (iw, ih).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        // Bilinear weights from the fractional parts.
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        float w00 = nalpha * nbeta;
        float w01 = nalpha * pbeta;
        float w10 = palpha * nbeta;
        float w11 = palpha * pbeta;
        // Byte addresses of the four neighbouring source pixels.
        const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
        const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
        char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
        // Getter already clamped/wrapped coordinates, so every tap is valid
        // (all ok-flags true) and the border value is unused.
        for (int c = 0; c < C; c += pack_c) {
            KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                    dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8,
                    w00, w01, w10, w11, output_converter, true, true, true, true,
                    (ctype)0);
        }
    }
}
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    // Warp-perspective kernel for NHWC layout: each thread produces one output
    // pixel (all C channels, pack_c channels per KernCoreNHWC call). Getter
    // remaps out-of-range source coordinates according to the border mode.
    Getter coord_getter;
    OutputConverter out_cvt;
    // bit_width may be < 8 (4-bit quantized types); "* bit_width / 8" converts
    // element counts to byte offsets.
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    // blockIdx.z selects the batch: its own source, destination and 3x3 matrix.
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Perspective transform: fractional source coordinates (iw, ih).
        const float denom = mat[6] * ow + mat[7] * oh + mat[8];
        const float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denom;
        const float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denom;
        const float fw = floor(iw);
        const float fh = floor(ih);
        const int iw0 = coord_getter(fw + 0, IW);
        const int iw1 = coord_getter(fw + 1, IW);
        const int ih0 = coord_getter(fh + 0, IH);
        const int ih1 = coord_getter(fh + 1, IH);
        // Bilinear weights from the fractional parts.
        const float palpha = ih - fh;
        const float pbeta = iw - fw;
        const float nalpha = 1.0f - palpha;
        const float nbeta = 1.0f - pbeta;
        const float w00 = nalpha * nbeta;
        const float w01 = nalpha * pbeta;
        const float w10 = palpha * nbeta;
        const float w11 = palpha * pbeta;
        // Byte addresses of the four neighbouring source pixels.
        const char* tap00 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
        const char* tap01 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
        const char* tap10 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
        const char* tap11 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
        char* out_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
        // Getter already clamped/wrapped coordinates, so every tap is valid
        // (all ok-flags true) and the border value is unused.
        for (int c = 0; c < C; c += pack_c) {
            KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                    out_ptr, tap00, tap01, tap10, tap11, c * bit_width / 8, w00,
                    w01, w10, w11, out_cvt, true, true, true, true, (ctype)0);
        }
    }
}
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc_const_multi_src(
        SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    // Warp-perspective, NHWC, BORDER_CONSTANT variant for multiple source
    // pointers: out-of-image taps are replaced by bval instead of being
    // remapped. Getter is expected to be a pass-through here (ConstGetter;
    // see the dispatcher) — bounds are checked explicitly below.
    Getter getter;
    OutputConverter output_converter;
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    // blockIdx.z selects the batch: its own source, destination and 3x3 matrix.
    const ctype* __restrict sptr = srcs.get(blockIdx.z);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Perspective transform: fractional source coordinates (iw, ih).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        // Bilinear weights from the fractional parts.
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        float w00 = nalpha * nbeta;
        float w01 = nalpha * pbeta;
        float w10 = palpha * nbeta;
        float w11 = palpha * pbeta;
        // Tap addresses may point outside the image; the ok-flags below keep
        // KernCoreNHWC from dereferencing any invalid one.
        const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
        const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
        char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
        // Per-tap validity: inside the image in both dimensions.
        bool okw0 = (iw0 >= 0 && iw0 < IW);
        bool okw1 = (iw1 >= 0 && iw1 < IW);
        bool okh0 = (ih0 >= 0 && ih0 < IH);
        bool okh1 = (ih1 >= 0 && ih1 < IH);
        bool src0_ok = okh0 && okw0;
        bool src1_ok = okh0 && okw1;
        bool src2_ok = okh1 && okw0;
        bool src3_ok = okh1 && okw1;
        for (int c = 0; c < C; c += pack_c) {
            KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                    dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8,
                    w00, w01, w10, w11, output_converter, src0_ok, src1_ok, src2_ok,
                    src3_ok, bval);
        }
    }
}
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc_const(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    // Warp-perspective, NHWC, BORDER_CONSTANT variant: out-of-image taps are
    // replaced by bval instead of being remapped. Getter is expected to be a
    // pass-through here (ConstGetter; see the dispatcher) — bounds are
    // checked explicitly below.
    Getter getter;
    OutputConverter output_converter;
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    // blockIdx.z selects the batch: its own source, destination and 3x3 matrix.
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Perspective transform: fractional source coordinates (iw, ih).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        // Bilinear weights from the fractional parts.
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        float w00 = nalpha * nbeta;
        float w01 = nalpha * pbeta;
        float w10 = palpha * nbeta;
        float w11 = palpha * pbeta;
        // Tap addresses may point outside the image; the ok-flags below keep
        // KernCoreNHWC from dereferencing any invalid one.
        const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
        const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
        char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
        // Per-tap validity: inside the image in both dimensions.
        bool okw0 = (iw0 >= 0 && iw0 < IW);
        bool okw1 = (iw1 >= 0 && iw1 < IW);
        bool okh0 = (ih0 >= 0 && ih0 < IH);
        bool okh1 = (ih1 >= 0 && ih1 < IH);
        bool src0_ok = okh0 && okw0;
        bool src1_ok = okh0 && okw1;
        bool src2_ok = okh1 && okw0;
        bool src3_ok = okh1 && okw1;
        for (int c = 0; c < C; c += pack_c) {
            KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                    dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8,
                    w00, w01, w10, w11, output_converter, src0_ok, src1_ok, src2_ok,
                    src3_ok, bval);
        }
    }
}
// Host-side dispatcher (multi-source variant): selects the kernel from the
// border mode and the layout flag, then launches it in batch chunks of at
// most 65535 (the gridDim.z limit), advancing the visitor, matrices and
// destination between chunks.
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_multi_src(
        bool is_nhwc, SrcVisitor srcs, const float* mat, ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        hipStream_t stream) {
    constexpr int pack_c = 1;
    const int BY = 16, BX = 32;
#define DISPATCH(Getter)                                                      \
    do {                                                                      \
        if (is_nhwc) {                                                        \
            hipLaunchKernelGGL(( kern_general_nhwc_multi_src<                                    \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>, \
                    pack_c>), dim3(blocks), dim3(threads), 0, stream,         \
                    srcs, mat, dst, C, IH, IW, OH, OW);                       \
        } else {                                                              \
            hipLaunchKernelGGL(( kern_general_multi_src<                                         \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>>) \
                    , dim3(blocks), dim3(threads), 0, stream,                 \
                    srcs, mat, dst, C, IH, IW, OH, OW);                       \
        }                                                                     \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One thread per output pixel; z-dimension indexes the batch.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
#undef DISPATCH
            // BORDER_CONSTANT uses dedicated kernels that take bval.
            case BORDER_CONSTANT:
                if (is_nhwc) {
                    hipLaunchKernelGGL(( kern_general_nhwc_const_multi_src<
                            ctype, ConstGetter, SrcVisitor,
                            rounding::RoundingConverter<ctype>, pack_c>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            srcs, mat, dst, C, IH, IW, OH, OW, bval);
                } else {
                    hipLaunchKernelGGL(( kern_const_border_multi_src<
                            ctype, SrcVisitor, rounding::RoundingConverter<ctype>>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            srcs, mat, dst, C, IH, IW, OH, OW, bval);
                }
                break;
            default:
                break;
        }
        // Advance to the next batch chunk.
        N -= curr_batch_size;
        srcs.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
// Host-side dispatcher: selects the kernel from the border mode and the
// layout flag, then launches it in batch chunks of at most 65535 (the
// gridDim.z limit), advancing the visitor, matrices and destination between
// chunks.
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor(
        bool is_nhwc, SrcVisitor src, const float* mat, ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        hipStream_t stream) {
    constexpr int pack_c = 1;
    const int BY = 16, BX = 32;
#define DISPATCH(Getter)                                                      \
    do {                                                                      \
        if (is_nhwc) {                                                        \
            hipLaunchKernelGGL(( kern_general_nhwc<                                              \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>, \
                    pack_c>), dim3(blocks), dim3(threads), 0, stream,         \
                    src, mat, dst, C, IH, IW, OH, OW);                        \
        } else {                                                              \
            hipLaunchKernelGGL(( kern_general<                                                   \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>>) \
                    , dim3(blocks), dim3(threads), 0, stream,                 \
                    src, mat, dst, C, IH, IW, OH, OW);                        \
        }                                                                     \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One thread per output pixel; z-dimension indexes the batch.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
#undef DISPATCH
            // BORDER_CONSTANT uses dedicated kernels that take bval.
            case BORDER_CONSTANT:
                if (is_nhwc) {
                    hipLaunchKernelGGL(( kern_general_nhwc_const<
                            ctype, ConstGetter, SrcVisitor,
                            rounding::RoundingConverter<ctype>, pack_c>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            src, mat, dst, C, IH, IW, OH, OW, bval);
                } else {
                    hipLaunchKernelGGL(( kern_const_border<
                            ctype, SrcVisitor, rounding::RoundingConverter<ctype>>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            src, mat, dst, C, IH, IW, OH, OW, bval);
                }
                break;
            default:
                break;
        }
        // Advance to the next batch chunk.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
// Host-side dispatcher for NHWC 4-bit element types (pack_c channels handled
// per KernCoreNHWC call). Launches in batch chunks of at most 65535 (the
// gridDim.z limit); strides use "/ 2" because two 4-bit elements share a byte.
template <typename ctype, typename SrcVisitor, int pack_c>
void dispatch_with_visitor_nhwc_bit4(
        SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW,
        int OH, int OW, ctype bval, BorderMode bmode, hipStream_t stream) {
    const int BY = 16, BX = 32;
#define DISPATCH(Getter)                                                          \
    do {                                                                          \
        hipLaunchKernelGGL(( kern_general_nhwc<                                                      \
                ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>, pack_c>) \
                , dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, OH, OW); \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One thread per output pixel; z-dimension indexes the batch.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
            // BORDER_CONSTANT uses the dedicated kernel that takes bval.
            case BORDER_CONSTANT: {
                hipLaunchKernelGGL(( kern_general_nhwc_const<
                        ctype, ConstGetter, SrcVisitor,
                        rounding::RoundingConverter<ctype>, pack_c>)
                        , dim3(blocks), dim3(threads), 0, stream,
                        src, mat, dst, C, IH, IW, OH, OW, bval);
            } break;
            default:
                break;
        }
#undef DISPATCH
        // Advance to the next batch chunk; element counts halved for 4-bit.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW / 2);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW / 2;
    }
}
// Host-side dispatcher for NCHW4 layout: selects the NCHW4 kernel from the
// border mode and launches it in batch chunks of at most 65535 (the
// gridDim.z limit).
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw4(
        SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW,
        int OH, int OW, ctype bval, BorderMode bmode, hipStream_t stream) {
    const int BY = 16, BX = 32;
#define DISPATCH(Getter)                                                          \
    do {                                                                          \
        hipLaunchKernelGGL(( kern_general_nchw4<                                                     \
                ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>>)         \
                , dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, OH, OW); \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One thread per output pixel; z-dimension indexes the batch.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
#undef DISPATCH
            // BORDER_CONSTANT uses the dedicated kernel that takes bval.
            case BORDER_CONSTANT:
                hipLaunchKernelGGL(( kern_const_border_nchw4<
                        ctype, SrcVisitor, rounding::RoundingConverter<ctype>>)
                        , dim3(blocks), dim3(threads), 0, stream,
                        src, mat, dst, C, IH, IW, OH, OW, bval);
                break;
            default:
                break;
        }
        // Advance to the next batch chunk.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
// Host-side dispatcher for NCHW64 layout (4-bit elements): selects the NCHW64
// kernel from the border mode and launches it in batch chunks of at most
// 65535 (the gridDim.z limit). Note the grid x-dimension covers OW * 2, and
// batch strides use "/ 2" because two 4-bit elements share a byte.
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw64(
        SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW,
        int OH, int OW, ctype bval, BorderMode bmode, hipStream_t stream) {
    const int BY = 16, BX = 32;
#define DISPATCH(Getter)                                                          \
    do {                                                                          \
        hipLaunchKernelGGL(( kern_general_nchw64<                                                    \
                ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>>)         \
                , dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, OH, OW); \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // x-dimension spans OW * 2 threads per row (two threads per output
        // pixel for this layout); z-dimension indexes the batch.
        dim3 blocks((OW * 2 + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
#undef DISPATCH
            // BORDER_CONSTANT uses the dedicated kernel that takes bval.
            case BORDER_CONSTANT:
                hipLaunchKernelGGL(( kern_const_border_nchw64<
                        ctype, SrcVisitor, rounding::RoundingConverter<ctype>>)
                        , dim3(blocks), dim3(threads), 0, stream,
                        src, mat, dst, C, IH, IW, OH, OW, bval);
                break;
            default:
                break;
        }
        // Advance to the next batch chunk; element counts halved for 4-bit.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW / 2);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW / 2;
    }
}
// Converts a quantized source element to a destination element type;
// specialized per (source dtype, destination ctype) pair below.
template <typename SrcType, typename DstType>
struct CudaTypeCvt;
template <>
struct CudaTypeCvt<dt_quint8, int8_t> {
    CudaDTypeParamImpl<dt_quint8> m_src_param;
    // Keep the quantization parameters so the zero point can be removed later.
    CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) : m_src_param(src_param) {}
    // Convert asymmetric quint8 to symmetric int8 by subtracting the zero
    // point (no rescaling).
    inline __device__ int8_t operator()(uint8_t val) {
        return val - m_src_param.zero_point;
    }
};
template <>
struct CudaTypeCvt<dt_quint8, float> {
    CudaDTypeParamImpl<dt_quint8> m_src_param;
    // Keep the quantization parameters needed for dequantization.
    CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) : m_src_param(src_param) {}
    // Dequantize a raw quint8 storage value to float.
    __device__ __forceinline__ float operator()(uint8_t val) {
        return m_src_param.dequantize(dt_quint8(val));
    }
};
// Macro-generated kernel: warp-perspective on a single-channel (C == 1)
// quantized source, writing NCHW4 output with the interpolated value in
// channel x and channels y/z/w zeroed. Border handling is delegated to
// Getter. (Comments cannot go inside the macro body: '//' would splice the
// continuation backslash into the comment.)
#define INST(dst_ctype, vec_dst_type)                                            \
    template <                                                                   \
            typename src_dtype, typename src_ctype, typename Getter,             \
            typename SrcVisitor>                                                 \
    __global__ void kern_general_quint8_nhw_nchw4(                               \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW,                                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        Getter getter;                                                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW);         \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = getter(floor(iw) + 0, IW);                                 \
            int iw1 = getter(floor(iw) + 1, IW);                                 \
            int ih0 = getter(floor(ih) + 0, IH);                                 \
            int ih1 = getter(floor(ih) + 1, IH);                                 \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype val_x = warp_out_converter(                                \
                    sptr[ih0 * IW + iw0] * nalpha * nbeta +                      \
                    sptr[ih0 * IW + iw1] * nalpha * pbeta +                      \
                    sptr[ih1 * IW + iw0] * palpha * nbeta +                      \
                    sptr[ih1 * IW + iw1] * palpha * pbeta);                      \
            result.x = type_cvt(val_x);                                          \
            result.y = result.z = result.w = 0;                                  \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
// Macro-generated kernel: BORDER_CONSTANT counterpart of the single-channel
// (C == 1) quantized NCHW4 kernel — out-of-image taps are replaced by bval,
// with explicit bounds checks instead of a Getter.
#define INST(dst_ctype, vec_dst_type)                                            \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>       \
    __global__ void kern_const_border_quint8_nhw_nchw4(                          \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW, src_ctype bval,                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW);         \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = floor(iw) + 0;                                             \
            int iw1 = floor(iw) + 1;                                             \
            int ih0 = floor(ih) + 0;                                             \
            int ih1 = floor(ih) + 1;                                             \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                  \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                  \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                  \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                  \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);        \
            src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);        \
            src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);        \
            src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);        \
            src_ctype val_x = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            result.x = type_cvt(val_x);                                          \
            result.y = result.z = result.w = 0;                                  \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
// Macro-generated kernel: warp-perspective on a 3-channel planar (NCHW,
// plane stride IH*IW) quantized source, writing NCHW4 output with the three
// interpolated channels in x/y/z and w zeroed. Border handling via Getter.
#define INST(dst_ctype, vec_dst_type)                                            \
    template <                                                                   \
            typename src_dtype, typename src_ctype, typename Getter,             \
            typename SrcVisitor>                                                 \
    __global__ void kern_general_quint8_n3hw_nchw4(                              \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW,                                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        Getter getter;                                                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);     \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = getter(floor(iw) + 0, IW);                                 \
            int iw1 = getter(floor(iw) + 1, IW);                                 \
            int ih0 = getter(floor(ih) + 0, IH);                                 \
            int ih1 = getter(floor(ih) + 1, IH);                                 \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype val_x = warp_out_converter(                                \
                    sptr[ih0 * IW + iw0] * nalpha * nbeta +                      \
                    sptr[ih0 * IW + iw1] * nalpha * pbeta +                      \
                    sptr[ih1 * IW + iw0] * palpha * nbeta +                      \
                    sptr[ih1 * IW + iw1] * palpha * pbeta);                      \
            src_ctype val_y = warp_out_converter(                                \
                    sptr[IW * IH + ih0 * IW + iw0] * nalpha * nbeta +            \
                    sptr[IW * IH + ih0 * IW + iw1] * nalpha * pbeta +            \
                    sptr[IW * IH + ih1 * IW + iw0] * palpha * nbeta +            \
                    sptr[IW * IH + ih1 * IW + iw1] * palpha * pbeta);            \
            src_ctype val_z = warp_out_converter(                                \
                    sptr[2 * IW * IH + ih0 * IW + iw0] * nalpha * nbeta +        \
                    sptr[2 * IW * IH + ih0 * IW + iw1] * nalpha * pbeta +        \
                    sptr[2 * IW * IH + ih1 * IW + iw0] * palpha * nbeta +        \
                    sptr[2 * IW * IH + ih1 * IW + iw1] * palpha * pbeta);        \
            result.x = type_cvt(val_x);                                          \
            result.y = type_cvt(val_y);                                          \
            result.z = type_cvt(val_z);                                          \
            result.w = 0;                                                        \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
// Macro-generated kernel: BORDER_CONSTANT counterpart of the 3-channel planar
// (NCHW) quantized NCHW4 kernel — out-of-image taps in every plane are
// replaced by bval, with explicit bounds checks instead of a Getter.
#define INST(dst_ctype, vec_dst_type)                                            \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>       \
    __global__ void kern_const_border_quint8_n3hw_nchw4(                         \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW, src_ctype bval,                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);     \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = floor(iw) + 0;                                             \
            int iw1 = floor(iw) + 1;                                             \
            int ih0 = floor(ih) + 0;                                             \
            int ih1 = floor(ih) + 1;                                             \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                  \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                  \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                  \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                  \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype v00, v01, v10, v11;                                        \
            v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);                  \
            v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);                  \
            v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);                  \
            v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);                  \
            src_ctype val_x = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            v00 = (okh0 && okw0 ? sptr[IH * IW + ih0 * IW + iw0] : bval);        \
            v01 = (okh0 && okw1 ? sptr[IH * IW + ih0 * IW + iw1] : bval);        \
            v10 = (okh1 && okw0 ? sptr[IH * IW + ih1 * IW + iw0] : bval);        \
            v11 = (okh1 && okw1 ? sptr[IH * IW + ih1 * IW + iw1] : bval);        \
            src_ctype val_y = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            v00 = (okh0 && okw0 ? sptr[2 * IH * IW + ih0 * IW + iw0] : bval);    \
            v01 = (okh0 && okw1 ? sptr[2 * IH * IW + ih0 * IW + iw1] : bval);    \
            v10 = (okh1 && okw0 ? sptr[2 * IH * IW + ih1 * IW + iw0] : bval);    \
            v11 = (okh1 && okw1 ? sptr[2 * IH * IW + ih1 * IW + iw1] : bval);    \
            src_ctype val_z = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            result.x = type_cvt(val_x);                                          \
            result.y = type_cvt(val_y);                                          \
            result.z = type_cvt(val_z);                                          \
            result.w = 0;                                                        \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
// Macro-generated kernel: warp-perspective on a 3-channel interleaved (NHWC,
// "* 3" pixel stride) quantized source, writing NCHW4 output with the three
// interpolated channels in x/y/z and w zeroed. Border handling via Getter.
#define INST(dst_ctype, vec_dst_type)                                            \
    template <                                                                   \
            typename src_dtype, typename src_ctype, typename Getter,             \
            typename SrcVisitor>                                                 \
    __global__ void kern_general_quint8_nhw3_nchw4(                              \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW,                                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        Getter getter;                                                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);     \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = getter(floor(iw) + 0, IW);                                 \
            int iw1 = getter(floor(iw) + 1, IW);                                 \
            int ih0 = getter(floor(ih) + 0, IH);                                 \
            int ih1 = getter(floor(ih) + 1, IH);                                 \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype val_x = warp_out_converter(                                \
                    sptr[(ih0 * IW + iw0) * 3] * nalpha * nbeta +                \
                    sptr[(ih0 * IW + iw1) * 3] * nalpha * pbeta +                \
                    sptr[(ih1 * IW + iw0) * 3] * palpha * nbeta +                \
                    sptr[(ih1 * IW + iw1) * 3] * palpha * pbeta);                \
            src_ctype val_y = warp_out_converter(                                \
                    sptr[(ih0 * IW + iw0) * 3 + 1] * nalpha * nbeta +            \
                    sptr[(ih0 * IW + iw1) * 3 + 1] * nalpha * pbeta +            \
                    sptr[(ih1 * IW + iw0) * 3 + 1] * palpha * nbeta +            \
                    sptr[(ih1 * IW + iw1) * 3 + 1] * palpha * pbeta);            \
            src_ctype val_z = warp_out_converter(                                \
                    sptr[(ih0 * IW + iw0) * 3 + 2] * nalpha * nbeta +            \
                    sptr[(ih0 * IW + iw1) * 3 + 2] * nalpha * pbeta +            \
                    sptr[(ih1 * IW + iw0) * 3 + 2] * palpha * nbeta +            \
                    sptr[(ih1 * IW + iw1) * 3 + 2] * palpha * pbeta);            \
            result.x = type_cvt(val_x);                                          \
            result.y = type_cvt(val_y);                                          \
            result.z = type_cvt(val_z);                                          \
            result.w = 0;                                                        \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
// Macro-generated kernel: BORDER_CONSTANT counterpart of the 3-channel
// interleaved (NHWC) quantized NCHW4 kernel — out-of-image taps are replaced
// by bval, with explicit bounds checks instead of a Getter.
#define INST(dst_ctype, vec_dst_type)                                            \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>       \
    __global__ void kern_const_border_quint8_nhw3_nchw4(                         \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW, src_ctype bval,                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);     \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = floor(iw) + 0;                                             \
            int iw1 = floor(iw) + 1;                                             \
            int ih0 = floor(ih) + 0;                                             \
            int ih1 = floor(ih) + 1;                                             \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                  \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                  \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                  \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                  \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype v00, v01, v10, v11;                                        \
            v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3] : bval);            \
            v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3] : bval);            \
            v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3] : bval);            \
            v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3] : bval);            \
            src_ctype val_x = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 1] : bval);        \
            v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 1] : bval);        \
            v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 1] : bval);        \
            v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 1] : bval);        \
            src_ctype val_y = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 2] : bval);        \
            v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 2] : bval);        \
            v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 2] : bval);        \
            v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 2] : bval);        \
            src_ctype val_z = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            result.x = type_cvt(val_x);                                          \
            result.y = type_cvt(val_y);                                          \
            result.z = type_cvt(val_z);                                          \
            result.w = 0;                                                        \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
// Host-side dispatcher for the fused quint8 dimshuffle + typecvt -> NCHW4
// path: picks the kernel variant by channel count / source layout (C == 1,
// NHWC interleaved, or NCHW planar) and border mode, launching in batch
// chunks of at most 65535 (the gridDim.z limit).
template <
        typename src_dtype, typename src_ctype, typename dst_ctype, typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
        bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, src_ctype bval,
        CudaDTypeParamImpl<src_dtype> param, BorderMode bmode, hipStream_t stream) {
    const int BY = 16, BX = 32;
    // Converter carrying the quantization parameters, passed to the kernels.
    CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
#define DISPATCH(Getter)                                                            \
    do {                                                                            \
        if (C == 1) {                                                               \
            hipLaunchKernelGGL(( kern_general_quint8_nhw_nchw4<src_dtype, src_ctype, Getter, SrcVisitor>) \
                    , dim3(blocks), dim3(threads), 0, stream,                       \
                    src, mat, dst, IH, IW, OH, OW, type_cvt);                       \
        } else if (is_nhwc) {                                                       \
            hipLaunchKernelGGL(( kern_general_quint8_nhw3_nchw4<src_dtype, src_ctype, Getter, SrcVisitor>) \
                    , dim3(blocks), dim3(threads), 0, stream,                       \
                    src, mat, dst, IH, IW, OH, OW, type_cvt);                       \
        } else {                                                                    \
            hipLaunchKernelGGL(( kern_general_quint8_n3hw_nchw4<src_dtype, src_ctype, Getter, SrcVisitor>) \
                    , dim3(blocks), dim3(threads), 0, stream,                       \
                    src, mat, dst, IH, IW, OH, OW, type_cvt);                       \
        }                                                                           \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One thread per output pixel; z-dimension indexes the batch.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
#undef DISPATCH
            // BORDER_CONSTANT uses the dedicated kernels that take bval.
            case BORDER_CONSTANT:
                if (C == 1) {
                    hipLaunchKernelGGL(( kern_const_border_quint8_nhw_nchw4<src_dtype, src_ctype, SrcVisitor>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            src, mat, dst, IH, IW, OH, OW, bval, type_cvt);
                } else if (is_nhwc) {
                    hipLaunchKernelGGL(( kern_const_border_quint8_nhw3_nchw4<
                            src_dtype, src_ctype, SrcVisitor>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            src, mat, dst, IH, IW, OH, OW, bval, type_cvt);
                } else {
                    hipLaunchKernelGGL(( kern_const_border_quint8_n3hw_nchw4<
                            src_dtype, src_ctype, SrcVisitor>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            src, mat, dst, IH, IW, OH, OW, bval, type_cvt);
                }
                break;
            default:
                break;
        }
        // Advance to the next batch chunk; destination is NCHW4 (4 channels).
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * 4 * OH * OW;
    }
}
// Generates kern_general_quint8_nchw for each instantiated dst_ctype:
// batched perspective warp of NCHW quantized input with bilinear sampling.
// blockIdx.z = batch; one thread per output pixel (oh, ow), looping over all
// C channel planes. Getter applies the border rule (replicate/reflect/wrap)
// to out-of-range sample coordinates. Comments are kept outside the #define
// because a '//' comment followed by a trailing '\' would splice the next
// macro line into the comment.
#define INST(dst_ctype)                                                           \
    template <                                                                    \
            typename src_dtype, typename src_ctype, typename Getter,              \
            typename SrcVisitor>                                                  \
    __global__ void kern_general_quint8_nchw(                                     \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW,                                \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                         \
        Getter getter;                                                            \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                           \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                           \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);      \
        dst += blockIdx.z * C * OH * OW;                                          \
        mat += blockIdx.z * 3 * 3;                                                \
        if (ow < OW && oh < OH) {                                                 \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];               \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;        \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;        \
            int iw0 = getter(floor(iw) + 0, IW);                                  \
            int iw1 = getter(floor(iw) + 1, IW);                                  \
            int ih0 = getter(floor(ih) + 0, IH);                                  \
            int ih1 = getter(floor(ih) + 1, IH);                                  \
            float palpha = ih - floor(ih);                                        \
            float pbeta = iw - floor(iw);                                         \
            float nalpha = 1.0f - palpha;                                         \
            float nbeta = 1.0f - pbeta;                                           \
            for (int c = 0; c < C; ++c) {                                         \
                src_ctype val = warp_out_converter(                               \
                        sptr[ih0 * IW + iw0] * nalpha * nbeta +                   \
                        sptr[ih0 * IW + iw1] * nalpha * pbeta +                   \
                        sptr[ih1 * IW + iw0] * palpha * nbeta +                   \
                        sptr[ih1 * IW + iw1] * palpha * pbeta);                   \
                dst_ctype result;                                                 \
                result = type_cvt(val);                                           \
                dst[oh * OW + ow] = result;                                       \
                sptr += IH * IW;                                                  \
                dst += OH * OW;                                                   \
            }                                                                     \
        }                                                                         \
    }
INST(float)
#undef INST
// Generates kern_const_border_quint8_nchw for each instantiated dst_ctype:
// same bilinear perspective warp as kern_general_quint8_nchw but for
// BORDER_CONSTANT — samples that fall outside the image read bval instead of
// memory (the okh*/okw* flags guard each of the four taps).
// blockIdx.z = batch; one thread per output pixel across all C planes.
#define INST(dst_ctype)                                                           \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>        \
    __global__ void kern_const_border_quint8_nchw(                                \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW, src_ctype bval,                \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                         \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                           \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                           \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);      \
        dst += blockIdx.z * C * OH * OW;                                          \
        mat += blockIdx.z * 3 * 3;                                                \
        if (ow < OW && oh < OH) {                                                 \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];               \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;        \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;        \
            int iw0 = floor(iw) + 0;                                              \
            int iw1 = floor(iw) + 1;                                              \
            int ih0 = floor(ih) + 0;                                              \
            int ih1 = floor(ih) + 1;                                              \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                   \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                   \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                   \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                   \
            float palpha = ih - floor(ih);                                        \
            float pbeta = iw - floor(iw);                                         \
            float nalpha = 1.0f - palpha;                                         \
            float nbeta = 1.0f - pbeta;                                           \
            for (int c = 0; c < C; ++c) {                                         \
                src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);     \
                src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);     \
                src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);     \
                src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);     \
                src_ctype val = warp_out_converter(                               \
                        v00 * nalpha * nbeta + v01 * nalpha * pbeta +             \
                        v10 * palpha * nbeta + v11 * palpha * pbeta);             \
                dst_ctype result;                                                 \
                result = type_cvt(val);                                           \
                dst[oh * OW + ow] = result;                                       \
                sptr += IH * IW;                                                  \
                dst += OH * OW;                                                   \
            }                                                                     \
        }                                                                         \
    }
INST(float)
#undef INST
// Generates kern_general_quint8_nhwc_nchw for each instantiated dst_ctype:
// bilinear perspective warp reading NHWC (channel-last) quantized input and
// writing channel-major planes (NCHW) — note the `* C + c` source indexing
// versus the `dst += OH * OW` per-channel plane stride on output.
// blockIdx.z = batch; Getter applies the border rule.
#define INST(dst_ctype)                                                           \
    template <                                                                    \
            typename src_dtype, typename src_ctype, typename Getter,              \
            typename SrcVisitor>                                                  \
    __global__ void kern_general_quint8_nhwc_nchw(                                \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW,                                \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                         \
        Getter getter;                                                            \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                           \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                           \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);      \
        dst += blockIdx.z * C * OH * OW;                                          \
        mat += blockIdx.z * 3 * 3;                                                \
        if (ow < OW && oh < OH) {                                                 \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];               \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;        \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;        \
            int iw0 = getter(floor(iw) + 0, IW);                                  \
            int iw1 = getter(floor(iw) + 1, IW);                                  \
            int ih0 = getter(floor(ih) + 0, IH);                                  \
            int ih1 = getter(floor(ih) + 1, IH);                                  \
            float palpha = ih - floor(ih);                                        \
            float pbeta = iw - floor(iw);                                         \
            float nalpha = 1.0f - palpha;                                         \
            float nbeta = 1.0f - pbeta;                                           \
            for (int c = 0; c < C; ++c) {                                         \
                src_ctype val = warp_out_converter(                               \
                        sptr[(ih0 * IW + iw0) * C + c] * nalpha * nbeta +         \
                        sptr[(ih0 * IW + iw1) * C + c] * nalpha * pbeta +         \
                        sptr[(ih1 * IW + iw0) * C + c] * palpha * nbeta +         \
                        sptr[(ih1 * IW + iw1) * C + c] * palpha * pbeta);         \
                dst_ctype result;                                                 \
                result = type_cvt(val);                                           \
                dst[oh * OW + ow] = result;                                       \
                dst += OH * OW;                                                   \
            }                                                                     \
        }                                                                         \
    }
INST(float)
#undef INST
// Generates kern_const_border_quint8_nhwc_nchw for each instantiated
// dst_ctype: BORDER_CONSTANT perspective warp reading NHWC quantized input
// and writing channel-major planes (NCHW). Out-of-image taps read bval.
// blockIdx.z = batch; one thread per output pixel across all C channels.
// Fix: `val` is declared src_ctype (was float) to match every sibling kernel
// in this file (e.g. kern_const_border_quint8_nchw) —
// RoundingConverter<src_ctype> already yields a rounded src_ctype, so the
// float detour only added a redundant conversion; the stored value is
// numerically identical.
#define INST(dst_ctype)                                                           \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>        \
    __global__ void kern_const_border_quint8_nhwc_nchw(                           \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW, src_ctype bval,                \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                         \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                           \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                           \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);      \
        dst += blockIdx.z * C * OH * OW;                                          \
        mat += blockIdx.z * 3 * 3;                                                \
        if (ow < OW && oh < OH) {                                                 \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];               \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;        \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;        \
            int iw0 = floor(iw) + 0;                                              \
            int iw1 = floor(iw) + 1;                                              \
            int ih0 = floor(ih) + 0;                                              \
            int ih1 = floor(ih) + 1;                                              \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                   \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                   \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                   \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                   \
            float palpha = ih - floor(ih);                                        \
            float pbeta = iw - floor(iw);                                         \
            float nalpha = 1.0f - palpha;                                         \
            float nbeta = 1.0f - pbeta;                                           \
            for (int c = 0; c < C; ++c) {                                         \
                src_ctype v00 =                                                   \
                        (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * C + c] : bval);   \
                src_ctype v01 =                                                   \
                        (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * C + c] : bval);   \
                src_ctype v10 =                                                   \
                        (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * C + c] : bval);   \
                src_ctype v11 =                                                   \
                        (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * C + c] : bval);   \
                src_ctype val = warp_out_converter(                               \
                        v00 * nalpha * nbeta + v01 * nalpha * pbeta +             \
                        v10 * palpha * nbeta + v11 * palpha * pbeta);             \
                dst_ctype result;                                                 \
                result = type_cvt(val);                                           \
                dst[oh * OW + ow] = result;                                       \
                dst += OH * OW;                                                   \
            }                                                                     \
        }                                                                         \
    }
INST(float)
#undef INST
// Host-side dispatcher: warps quantized-uint8 input (NHWC or NCHW) into an
// NCHW dst of dst_ctype, converting dtype through CudaTypeCvt. The batch
// dimension is sliced because gridDim.z is capped at 65535.
template <
        typename src_dtype, typename src_ctype, typename dst_ctype, typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
        bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, src_ctype bval,
        CudaDTypeParamImpl<src_dtype> param, BorderMode bmode, hipStream_t stream) {
    // Thread-block shape: 32x16 output pixels per block.
    const int BY = 16, BX = 32;
    CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
// Picks the input-layout kernel variant; Getter supplies the border rule for
// the non-constant border modes.
#define DISPATCH(Getter)                                                          \
    do {                                                                          \
        if (is_nhwc) {                                                            \
            hipLaunchKernelGGL(( kern_general_quint8_nhwc_nchw<src_dtype, src_ctype, Getter, SrcVisitor>) \
            , dim3(blocks), dim3(threads), 0, stream,                             \
                    src, mat, dst, C, IH, IW, OH, OW, type_cvt);                  \
        } else {                                                                  \
            hipLaunchKernelGGL(( kern_general_quint8_nchw<src_dtype, src_ctype, Getter, SrcVisitor>) \
            , dim3(blocks), dim3(threads), 0, stream,                             \
                    src, mat, dst, C, IH, IW, OH, OW, type_cvt);                  \
        }                                                                         \
    } while (0)
    // gridDim.z limit: at most 65535 batches per launch.
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
// DISPATCH is only needed for the Getter-based border modes above.
#undef DISPATCH
            case BORDER_CONSTANT:
                // Constant border uses dedicated kernels that take bval.
                if (is_nhwc) {
                    hipLaunchKernelGGL(( kern_const_border_quint8_nhwc_nchw<src_dtype, src_ctype, SrcVisitor>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            src, mat, dst, C, IH, IW, OH, OW, bval, type_cvt);
                } else {
                    hipLaunchKernelGGL(( kern_const_border_quint8_nchw<src_dtype, src_ctype, SrcVisitor>)
                            , dim3(blocks), dim3(threads), 0, stream,
                            src, mat, dst, C, IH, IW, OH, OW, bval, type_cvt);
                }
                break;
            default:
                break;
        }
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace warp_perspective {
// Entry point for warp-perspective with one source tensor per batch (array of
// pointers). With mat_idx, matrix i samples source batch mat_idx[i];
// otherwise matrix i pairs with source batch i. Border handling, layout and
// batching are delegated to dispatch_with_visitor_multi_src.
template <typename ctype>
void forward_proxy_multi_src(
        bool is_nhwc, const ctype** srcs, const float* mat, const int* mat_idx,
        ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        ctype bval, BorderMode bmode, megcore::AsyncErrorInfo* error_info,
        void* error_tracker, hipStream_t stream) {
    if (mat_idx != nullptr) {
        // Indexed path: out-of-range indices are reported via error_info.
        IndexedSrcVisitor<ctype> vis;
        vis.ptrs = reinterpret_cast<const void**>(srcs);
        vis.ptr = srcs;
        vis.N_SRC = N_SRC;
        vis.idx = mat_idx;
        vis.error_tracker = error_tracker;
        vis.error_info = error_info;
        dispatch_with_visitor_multi_src(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    } else {
        // Direct path: identity mapping between matrices and source batches.
        DirectSrcVisitor<ctype> vis;
        vis.ptrs = reinterpret_cast<const void**>(srcs);
        vis.ptr = srcs;
        dispatch_with_visitor_multi_src(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    }
    after_kernel_launch();
}
// Entry point for warp-perspective on a single contiguous source tensor.
// With mat_idx, matrix i samples source batch mat_idx[i]; otherwise matrix i
// pairs with source batch i. Border handling, layout and batching are
// delegated to dispatch_with_visitor.
template <typename ctype>
void forward_proxy(
        bool is_nhwc, const ctype* src, const float* mat, const int* mat_idx,
        ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        ctype bval, BorderMode bmode, megcore::AsyncErrorInfo* error_info,
        void* error_tracker, hipStream_t stream) {
    if (mat_idx != nullptr) {
        // Indexed path: out-of-range indices are reported via error_info.
        IndexedSrcVisitor<ctype> vis;
        vis.ptr = src;
        vis.N_SRC = N_SRC;
        vis.idx = mat_idx;
        vis.error_tracker = error_tracker;
        vis.error_info = error_info;
        dispatch_with_visitor(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    } else {
        // Direct path: identity mapping between matrices and source batches.
        DirectSrcVisitor<ctype> vis;
        vis.ptr = src;
        dispatch_with_visitor(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    }
    after_kernel_launch();
}
// Entry point for warp-perspective on 4-bit NHWC data packed pack_c channels
// at a time. Visitor choice mirrors forward_proxy: indexed when mat_idx is
// given, direct otherwise; work is delegated to dispatch_with_visitor_nhwc_bit4.
template <typename ctype, int pack_c>
void forward_proxy_nhwc_bit4(
        const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC,
        int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, hipStream_t stream) {
    if (mat_idx != nullptr) {
        // Indexed path: out-of-range indices are reported via error_info.
        IndexedSrcVisitor<ctype> vis;
        vis.ptr = src;
        vis.N_SRC = N_SRC;
        vis.idx = mat_idx;
        vis.error_tracker = error_tracker;
        vis.error_info = error_info;
        dispatch_with_visitor_nhwc_bit4<ctype, IndexedSrcVisitor<ctype>, pack_c>(
                vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    } else {
        // Direct path: identity mapping between matrices and source batches.
        DirectSrcVisitor<ctype> vis;
        vis.ptr = src;
        dispatch_with_visitor_nhwc_bit4<ctype, DirectSrcVisitor<ctype>, pack_c>(
                vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    }
    after_kernel_launch();
}
// Entry point for warp-perspective on NCHW4-laid-out data (4 channels packed
// innermost). Visitor choice mirrors forward_proxy; work is delegated to
// dispatch_with_visitor_nchw4.
template <typename ctype>
void forward_proxy_nchw4(
        const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC,
        int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, hipStream_t stream) {
    if (mat_idx != nullptr) {
        // Indexed path: out-of-range indices are reported via error_info.
        IndexedSrcVisitor<ctype> vis;
        vis.ptr = src;
        vis.N_SRC = N_SRC;
        vis.idx = mat_idx;
        vis.error_tracker = error_tracker;
        vis.error_info = error_info;
        dispatch_with_visitor_nchw4(
                vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    } else {
        // Direct path: identity mapping between matrices and source batches.
        DirectSrcVisitor<ctype> vis;
        vis.ptr = src;
        dispatch_with_visitor_nchw4(
                vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    }
    after_kernel_launch();
}
// Entry point for warp-perspective on NCHW64-laid-out 4-bit data (64 channels
// packed innermost). Visitor choice mirrors forward_proxy; work is delegated
// to dispatch_with_visitor_nchw64.
template <typename ctype>
void forward_proxy_nchw64(
        const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC,
        int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, hipStream_t stream) {
    if (mat_idx != nullptr) {
        // Indexed path: out-of-range indices are reported via error_info.
        IndexedSrcVisitor<ctype> vis;
        vis.ptr = src;
        vis.N_SRC = N_SRC;
        vis.idx = mat_idx;
        vis.error_tracker = error_tracker;
        vis.error_info = error_info;
        dispatch_with_visitor_nchw64(
                vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    } else {
        // Direct path: identity mapping between matrices and source batches.
        DirectSrcVisitor<ctype> vis;
        vis.ptr = src;
        dispatch_with_visitor_nchw64(
                vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    }
    after_kernel_launch();
}
// Explicit template instantiations of forward_proxy for the element types
// served by the plain NCHW/NHWC path.
#define INST(ctype)                                                               \
    template void forward_proxy(                                                  \
            bool, const ctype*, const float*, const int*, ctype*, int, int, int, int, \
            int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,    \
            hipStream_t);
INST(float)
INST(uint8_t)
#if !MEGDNN_DISABLE_FLOAT16
INST(dt_float16)
#endif
INST(int8_t)
#undef INST
// Instantiations of the multi-source variant (pointer-array input).
#define INST(ctype)                                                               \
    template void forward_proxy_multi_src(                                        \
            bool, const ctype**, const float*, const int*, ctype*, int, int, int, int, \
            int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,    \
            hipStream_t);
INST(float)
#if !MEGDNN_DISABLE_FLOAT16
INST(dt_float16)
#endif
#undef INST
// Instantiations of the NCHW4-layout variant.
#define INST(ctype)                                                               \
    template void forward_proxy_nchw4(                                            \
            const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \
            int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,         \
            hipStream_t);
INST(int8_t)
#undef INST
// Instantiations of the NCHW64-layout variant (4-bit element types).
#define INST(ctype)                                                               \
    template void forward_proxy_nchw64(                                           \
            const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \
            int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,         \
            hipStream_t);
INST(dt_qint4)
INST(dt_quint4)
#undef INST
// Instantiations of the 4-bit NHWC variant for both pack widths.
#define INST(ctype, pack_c)                                                       \
    template void forward_proxy_nhwc_bit4<ctype, pack_c>(                         \
            const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \
            int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,         \
            hipStream_t);
INST(dt_qint4, 8)
INST(dt_quint4, 8)
INST(dt_qint4, 16)
INST(dt_quint4, 16)
#undef INST
// Entry point for the fused warp-perspective + dimshuffle + typecvt path that
// produces NCHW4 output from quantized-uint8 input. Wraps the host dtype
// param into its CUDA form, then delegates to
// dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4 with the appropriate
// source visitor (indexed when mat_idx is given, direct otherwise).
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw4(
        bool is_nhwc, const src_ctype* src, const float* mat, const int* mat_idx,
        dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        src_ctype bval, DTypeParamImpl<src_dtype> param, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, hipStream_t stream) {
    CudaDTypeParamImpl<src_dtype> cuda_param(param);
    if (mat_idx != nullptr) {
        // Indexed path: out-of-range indices are reported via error_info.
        IndexedSrcVisitor<src_ctype> vis;
        vis.ptr = src;
        vis.N_SRC = N_SRC;
        vis.idx = mat_idx;
        vis.error_tracker = error_tracker;
        vis.error_info = error_info;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    } else {
        // Direct path: identity mapping between matrices and source batches.
        DirectSrcVisitor<src_ctype> vis;
        vis.ptr = src;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    }
    after_kernel_launch();
}
// Explicit instantiation: quantized-uint8 input -> int8 NCHW4 output.
#define INST(src_dtype, src_ctype, dst_ctype)                                     \
    template void forward_proxy_quint8_dimshuffle_typecvt_nchw4(                  \
            bool is_nhwc, const src_ctype*, const float*, const int*, dst_ctype*, int, \
            int, int, int, int, int, int, src_ctype, DTypeParamImpl<src_dtype> param, \
            BorderMode, megcore::AsyncErrorInfo*, void*, hipStream_t);
INST(dt_quint8, uint8_t, int8_t)
#undef INST
// Entry point for the fused warp-perspective + dimshuffle + typecvt path that
// produces NCHW output from quantized-uint8 input. Wraps the host dtype param
// into its CUDA form, then delegates to
// dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw with the appropriate
// source visitor (indexed when mat_idx is given, direct otherwise).
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw(
        bool is_nhwc, const src_ctype* src, const float* mat, const int* mat_idx,
        dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        src_ctype bval, DTypeParamImpl<src_dtype> param, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, hipStream_t stream) {
    CudaDTypeParamImpl<src_dtype> cuda_param(param);
    if (mat_idx != nullptr) {
        // Indexed path: out-of-range indices are reported via error_info.
        IndexedSrcVisitor<src_ctype> vis;
        vis.ptr = src;
        vis.N_SRC = N_SRC;
        vis.idx = mat_idx;
        vis.error_tracker = error_tracker;
        vis.error_info = error_info;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    } else {
        // Direct path: identity mapping between matrices and source batches.
        DirectSrcVisitor<src_ctype> vis;
        vis.ptr = src;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
                is_nhwc, vis, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    }
    after_kernel_launch();
}
// Explicit instantiation: quantized-uint8 input -> float NCHW output.
#define INST(src_dtype, src_ctype, dst_ctype)                                     \
    template void forward_proxy_quint8_dimshuffle_typecvt_nchw(                   \
            bool is_nhwc, const src_ctype*, const float*, const int*, dst_ctype*, int, \
            int, int, int, int, int, int, src_ctype, DTypeParamImpl<src_dtype> param, \
            BorderMode, megcore::AsyncErrorInfo*, void*, hipStream_t);
INST(dt_quint8, uint8_t, float)
#undef INST
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
#include "src/cuda/kernel_common/diagnostic_epilogue.cuh"
// vim: syntax=cpp.doxygen
| 875007edd02df39b9e3f203f395d67ecd2cb7007.cu | #include "src/cuda/warp_perspective/common.h"
#include <cstdio>
#include "megdnn/dtype.h"
#include "src/common/rounding_converter.cuh"
#include "src/cuda/error_info.cuh"
#include "src/cuda/integer_subbyte_utils.cuh"
#include "src/cuda/kernel_common/diagnostic_prologue.cuh"
#include "src/cuda/utils.cuh"
#include "src/cuda/warp_perspective/common.cuh"
using namespace megdnn;
using namespace cuda;
using namespace warp_perspective;
using namespace integer_subbyte;
namespace {
// Storage width in bits of one element of each supported ctype. The source
// visitors below use this to turn (batch index, element count) into a byte
// offset; the 4-bit types pack two elements per byte, hence the
// "* bit_width / 8" arithmetic at the use sites.
template <typename ctype>
struct CtypeHelper;
template <>
struct CtypeHelper<float> {
    static constexpr int bit_width = 32;
};
template <>
struct CtypeHelper<dt_float16> {
    static constexpr int bit_width = 16;
};
template <>
struct CtypeHelper<dt_uint8> {
    static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_int8> {
    static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_qint4> {
    static constexpr int bit_width = 4;
};
template <>
struct CtypeHelper<dt_quint4> {
    static constexpr int bit_width = 4;
};
// Source visitor for the direct (no mat_idx) case: batch i of the source is
// used by matrix i.
template <typename ctype>
struct DirectSrcVisitor {
    // Base of a contiguous batched tensor (single-source path).
    const void* ptr;
    // Per-batch pointer table (multi-source path).
    const void** ptrs;
    // Pointer to batch `batch` in the contiguous tensor; im_size is the
    // per-batch element count. 64-bit arithmetic avoids int overflow for
    // large tensors; bit_width/8 handles sub-byte element types.
    __device__ __forceinline__ const ctype* get(int batch, int im_size) {
        return (ctype*)((char*)ptr + static_cast<int64_t>(batch) * static_cast<int64_t>(im_size) * CtypeHelper<ctype>::bit_width / 8);
    }
    // Multi-source path: each batch has its own base pointer.
    __device__ __forceinline__ const ctype* get(int batch) {
        return (ctype*)(ptrs[batch]);
    }
    // Host side: advance the contiguous base past `batch` images, used when a
    // large batch is split into gridDim.z-sized launch slices.
    void move_batch(size_t batch, size_t im_size) {
        ptr = (char*)ptr + batch * im_size * CtypeHelper<ctype>::bit_width / 8;
    }
};
// Source visitor for the mat_idx case: matrix i reads source batch idx[i].
// An out-of-range index raises an async error through error_info and falls
// back to batch 0 so the kernel can proceed without faulting.
template <typename ctype>
struct IndexedSrcVisitor {
    // Base of a contiguous batched tensor (single-source path).
    const void* ptr;
    // Per-batch pointer table (multi-source path).
    const void** ptrs;
    // Device-visible mapping from matrix index to source batch.
    const int* idx;
    // Number of valid source batches (upper bound for idx values).
    int N_SRC;
    AsyncErrorInfo* error_info;
    void* error_tracker;
    // Pointer to the source batch selected by idx[batch]; im_size is the
    // per-batch element count.
    __device__ __forceinline__ const ctype* get(int batch, int im_size) {
        int orig_batch = batch;
        batch = idx[batch];
        if (batch < 0 || batch >= N_SRC) {
            set_async_error_info(
                    error_info, error_tracker,
                    "mat_idx out of bound: mat_idx[%d]=%d src_batch=%d", orig_batch,
                    batch, N_SRC);
            batch = 0;
        }
        return (ctype*)((char*)ptr + static_cast<int64_t>(batch) * static_cast<int64_t>(im_size) * CtypeHelper<ctype>::bit_width / 8);
    }
    // Multi-source path: same bound check, pointer-table lookup.
    __device__ __forceinline__ const ctype* get(int batch) {
        int orig_batch = batch;
        batch = idx[batch];
        if (batch < 0 || batch >= N_SRC) {
            set_async_error_info(
                    error_info, error_tracker,
                    "mat_idx out of bound: mat_idx[%d]=%d src_batch=%d", orig_batch,
                    batch, N_SRC);
            batch = 0;
        }
        return (ctype*)(ptrs[batch]);
    }
    // Host side: advance the index array when a launch is sliced; the source
    // tensor itself is not moved (indices are absolute into it).
    void move_batch(size_t batch, size_t) { idx += batch; }
};
// Perspective warp with bilinear sampling, NCHW layout, one source tensor per
// batch (srcs.get(blockIdx.z) returns that batch's own base pointer).
// Grid: x/y tile the output image, blockIdx.z = batch. `mat` is a batched
// 3x3 matrix mapping output pixel -> source coordinate; Getter applies the
// border rule to the four sample taps.
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general_multi_src(
        SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    Getter getter;
    OutputConverter output_converter;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = srcs.get(blockIdx.z);
    dst += blockIdx.z * C * OH * OW;
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Project the output pixel into source space (homogeneous divide).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        // 2x2 neighborhood, border-mapped by Getter.
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        // Bilinear weights from the fractional parts.
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        for (int c = 0; c < C; ++c) {
            dst[oh * OW + ow] = output_converter(
                    sptr[ih0 * IW + iw0] * nalpha * nbeta +
                    sptr[ih0 * IW + iw1] * nalpha * pbeta +
                    sptr[ih1 * IW + iw0] * palpha * nbeta +
                    sptr[ih1 * IW + iw1] * palpha * pbeta);
            sptr += IH * IW;  // next channel plane
            dst += OH * OW;
        }
    }
}
// Perspective warp with bilinear sampling, NCHW layout, contiguous batched
// source (src.get(blockIdx.z, C*IH*IW) computes the batch offset).
// Grid: x/y tile the output image, blockIdx.z = batch; Getter applies the
// border rule to the four sample taps.
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    Getter getter;
    OutputConverter output_converter;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst += blockIdx.z * C * OH * OW;
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Project the output pixel into source space (homogeneous divide).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        // 2x2 neighborhood, border-mapped by Getter.
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        // Bilinear weights from the fractional parts.
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        for (int c = 0; c < C; ++c) {
            dst[oh * OW + ow] = output_converter(
                    sptr[ih0 * IW + iw0] * nalpha * nbeta +
                    sptr[ih0 * IW + iw1] * nalpha * pbeta +
                    sptr[ih1 * IW + iw0] * palpha * nbeta +
                    sptr[ih1 * IW + iw1] * palpha * pbeta);
            sptr += IH * IW;  // next channel plane
            dst += OH * OW;
        }
    }
}
// Perspective warp with bilinear sampling for NCHW4 layout: 4 channels are
// packed innermost, so all spatial indices are shifted left by 2 (<< 2) and
// each thread processes 4 channels per C/4 group.
// Grid: x/y tile the output image, blockIdx.z = batch; Getter applies the
// border rule.
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general_nchw4(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    Getter getter;
    OutputConverter output_converter;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst += blockIdx.z * C * OH * OW;
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Project the output pixel into source space (homogeneous divide).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        // Element offsets: each (h, w) position holds 4 packed channels.
        int o_coor = (oh * OW + ow) << 2;
        int i_coor_00 = (ih0 * IW + iw0) << 2;
        int i_coor_01 = (ih0 * IW + iw1) << 2;
        int i_coor_10 = (ih1 * IW + iw0) << 2;
        int i_coor_11 = (ih1 * IW + iw1) << 2;
        for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
            for (int c1 = 0; c1 < 4; ++c1) {
                dst[o_coor + c1] = output_converter(
                        sptr[i_coor_00 + c1] * nalpha * nbeta +
                        sptr[i_coor_01 + c1] * nalpha * pbeta +
                        sptr[i_coor_10 + c1] * palpha * nbeta +
                        sptr[i_coor_11 + c1] * palpha * pbeta);
            }
            sptr += IH * IW * 4;  // next group of 4 channel planes
            dst += OH * OW * 4;
        }
    }
}
// Bilinearly combines 8 unpacked sub-byte lanes (s00/s01/s10/s11 hold the
// four taps, already expanded to int by transform_b4x8_to_int8) with weights
// w00..w11, rounds each lane through output_converter, and repacks the 8
// results into one 32-bit word of 4-bit values (b4x8). `signedness` selects
// signed vs unsigned 4-bit packing.
template <bool signedness, typename OutputConverter>
MEGDNN_DEVICE __forceinline__ int pack_output_func(
        OutputConverter& output_converter, int (&s00)[8], int (&s01)[8], int (&s10)[8],
        int (&s11)[8], float w00, float w01, float w10, float w11) {
// Per-lane weighted sum -> rounded quantized storage value.
#define warp_perspective_transform(idx)                                       \
    static_cast<int>(                                                         \
            output_converter(                                                 \
                    s00[idx] * w00 + s01[idx] * w01 + s10[idx] * w10 + s11[idx] * w11) \
                    .as_storage())
    return transform_int8_to_b4x8<signedness>(
            warp_perspective_transform(0), warp_perspective_transform(1),
            warp_perspective_transform(2), warp_perspective_transform(3),
            warp_perspective_transform(4), warp_perspective_transform(5),
            warp_perspective_transform(6), warp_perspective_transform(7));
#undef warp_perspective_transform
}
// Perspective warp with bilinear sampling for NCHW64 layout of 4-bit values:
// each (h, w) position stores 64 packed nibbles = 32 bytes = two int4 loads.
// Each thread owns one int4 half (c1 selects which) of one output pixel; the
// x launch dimension is therefore 2 * OW. The four taps are loaded as int4,
// unpacked 8 nibbles at a time, bilinearly combined and repacked via
// pack_output_func.
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter>
__global__ void kern_general_nchw64(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
    Getter getter;
    OutputConverter output_converter;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    // Split the x index into (output column, which-int4-half).
    int c1 = ow % 2;
    ow = ow / 2;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    // C * OH * OW nibbles = C * OH * OW / 2 bytes per batch.
    dst += blockIdx.z * C * OH * OW / 2;
    mat += blockIdx.z * 3 * 3;
    const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
    int4* dst_int4 = reinterpret_cast<int4*>(dst);
    if (ow < OW && oh < OH) {
        // Project the output pixel into source space (homogeneous divide).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        // Bilinear weights, one per tap.
        float w00 = nalpha * nbeta;
        float w01 = nalpha * pbeta;
        float w10 = palpha * nbeta;
        float w11 = palpha * pbeta;
        // int4-granularity offsets: two int4 per (h, w) position.
        int o_coor = (oh * OW + ow) << 1;
        int i_coor_00 = (ih0 * IW + iw0) << 1;
        int i_coor_01 = (ih0 * IW + iw1) << 1;
        int i_coor_10 = (ih1 * IW + iw0) << 1;
        int i_coor_11 = (ih1 * IW + iw1) << 1;
        int s00[8], s01[8], s10[8], s11[8];
        int4 s[4], d;
        for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
            // Read-only cache loads of the four taps' int4 halves.
            s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
            s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
            s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
            s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
            // Process each 32-bit lane: unpack 8 nibbles, blend, repack.
            transform_b4x8_to_int8<signedness>(s00, s[0].x);
            transform_b4x8_to_int8<signedness>(s01, s[1].x);
            transform_b4x8_to_int8<signedness>(s10, s[2].x);
            transform_b4x8_to_int8<signedness>(s11, s[3].x);
            d.x = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            transform_b4x8_to_int8<signedness>(s00, s[0].y);
            transform_b4x8_to_int8<signedness>(s01, s[1].y);
            transform_b4x8_to_int8<signedness>(s10, s[2].y);
            transform_b4x8_to_int8<signedness>(s11, s[3].y);
            d.y = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            transform_b4x8_to_int8<signedness>(s00, s[0].z);
            transform_b4x8_to_int8<signedness>(s01, s[1].z);
            transform_b4x8_to_int8<signedness>(s10, s[2].z);
            transform_b4x8_to_int8<signedness>(s11, s[3].z);
            d.z = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            transform_b4x8_to_int8<signedness>(s00, s[0].w);
            transform_b4x8_to_int8<signedness>(s01, s[1].w);
            transform_b4x8_to_int8<signedness>(s10, s[2].w);
            transform_b4x8_to_int8<signedness>(s11, s[3].w);
            d.w = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            dst_int4[o_coor + c1] = d;
            sptr_int4 += IH * IW * 2;  // next 64-channel group
            dst_int4 += OH * OW * 2;
        }
    }
}
// BORDER_CONSTANT perspective warp, NCHW layout, one source tensor per batch
// (srcs.get(blockIdx.z)). Taps that fall outside the image read bval instead
// of memory (guarded by the okh*/okw* flags).
// Grid: x/y tile the output image, blockIdx.z = batch.
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_multi_src(
        SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    OutputConverter output_converter;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = srcs.get(blockIdx.z);
    dst += blockIdx.z * C * OH * OW;
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Project the output pixel into source space (homogeneous divide).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = floor(iw) + 0;
        int iw1 = floor(iw) + 1;
        int ih0 = floor(ih) + 0;
        int ih1 = floor(ih) + 1;
        // In-bounds flags for the four taps.
        bool okw0 = (iw0 >= 0 && iw0 < IW);
        bool okw1 = (iw1 >= 0 && iw1 < IW);
        bool okh0 = (ih0 >= 0 && ih0 < IH);
        bool okh1 = (ih1 >= 0 && ih1 < IH);
        // Bilinear weights from the fractional parts.
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        for (int c = 0; c < C; ++c) {
            ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);
            ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);
            ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);
            ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);
            ctype val = output_converter(
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta + v10 * palpha * nbeta +
                    v11 * palpha * pbeta);
            dst[oh * OW + ow] = val;
            sptr += IH * IW;  // next channel plane
            dst += OH * OW;
        }
    }
}
// BORDER_CONSTANT perspective warp, NCHW layout, contiguous batched source
// (src.get(blockIdx.z, C*IH*IW)). Taps that fall outside the image read bval
// instead of memory (guarded by the okh*/okw* flags).
// Grid: x/y tile the output image, blockIdx.z = batch.
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    OutputConverter output_converter;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst += blockIdx.z * C * OH * OW;
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Project the output pixel into source space (homogeneous divide).
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = floor(iw) + 0;
        int iw1 = floor(iw) + 1;
        int ih0 = floor(ih) + 0;
        int ih1 = floor(ih) + 1;
        // In-bounds flags for the four taps.
        bool okw0 = (iw0 >= 0 && iw0 < IW);
        bool okw1 = (iw1 >= 0 && iw1 < IW);
        bool okh0 = (ih0 >= 0 && ih0 < IH);
        bool okh1 = (ih1 >= 0 && ih1 < IH);
        // Bilinear weights from the fractional parts.
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        for (int c = 0; c < C; ++c) {
            ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);
            ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);
            ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);
            ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);
            ctype val = output_converter(
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta + v10 * palpha * nbeta +
                    v11 * palpha * pbeta);
            dst[oh * OW + ow] = val;
            sptr += IH * IW;  // next channel plane
            dst += OH * OW;
        }
    }
}
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw4(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    // NCHW4 variant of the constant-border perspective warp: channels are packed
    // in groups of 4 along the innermost dimension, so spatial offsets are scaled
    // by 4 (<< 2) and the outer channel loop walks C / 4 groups.
    OutputConverter output_converter;
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst += blockIdx.z * C * OH * OW;
    mat += blockIdx.z * 3 * 3;
    if (ow >= OW || oh >= OH)
        return;
    // Back-project (ow, oh) through the 3x3 homography.
    const float denominator = mat[6] * ow + mat[7] * oh + mat[8];
    const float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
    const float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
    const float fw = floor(iw);
    const float fh = floor(ih);
    const int iw0 = static_cast<int>(fw);
    const int iw1 = static_cast<int>(fw + 1);
    const int ih0 = static_cast<int>(fh);
    const int ih1 = static_cast<int>(fh + 1);
    const bool okw0 = iw0 >= 0 && iw0 < IW;
    const bool okw1 = iw1 >= 0 && iw1 < IW;
    const bool okh0 = ih0 >= 0 && ih0 < IH;
    const bool okh1 = ih1 >= 0 && ih1 < IH;
    const float palpha = ih - fh;
    const float pbeta = iw - fw;
    const float nalpha = 1.0f - palpha;
    const float nbeta = 1.0f - pbeta;
    // Element offsets of the four taps and the output pixel, in ctype units
    // (4 channels per spatial location).
    const int i_coor_00 = (ih0 * IW + iw0) << 2;
    const int i_coor_01 = (ih0 * IW + iw1) << 2;
    const int i_coor_10 = (ih1 * IW + iw0) << 2;
    const int i_coor_11 = (ih1 * IW + iw1) << 2;
    const int o_coor = (oh * OW + ow) << 2;
    for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
        for (int c1 = 0; c1 < 4; ++c1) {
            const ctype v00 = okh0 && okw0 ? sptr[i_coor_00 + c1] : bval;
            const ctype v01 = okh0 && okw1 ? sptr[i_coor_01 + c1] : bval;
            const ctype v10 = okh1 && okw0 ? sptr[i_coor_10 + c1] : bval;
            const ctype v11 = okh1 && okw1 ? sptr[i_coor_11 + c1] : bval;
            dst[o_coor + c1] = output_converter(
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +
                    v10 * palpha * nbeta + v11 * palpha * pbeta);
        }
        sptr += IH * IW * 4;  // next group of 4 input channels
        dst += OH * OW * 4;   // next group of 4 output channels
    }
}
// Constant-border perspective warp for the NCHW64 layout: each spatial location
// stores 64 channels of 4-bit quantized values (two nibbles per byte). The x
// grid dimension is oversubscribed by 2 (the launcher sizes blocks.x from
// OW * 2): even/odd threads (c1 == 0/1) each handle one int4 (16 bytes == 32
// nibbles), i.e. half of the 64 channels of one output pixel.
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw64(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    // dt_qint4 selects the signed variants of the pack/unpack helpers below.
    constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
    OutputConverter output_converter;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int c1 = ow % 2;  // which int4 half of the 64-channel group this thread owns
    ow = ow / 2;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    // 4-bit elements: C * OH * OW nibbles occupy half as many addressable units.
    dst += blockIdx.z * C * OH * OW / 2;
    mat += blockIdx.z * 3 * 3;
    const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
    int4* dst_int4 = reinterpret_cast<int4*>(dst);
    if (ow < OW && oh < OH) {
        // Back-project (ow, oh) through the 3x3 homography.
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = floor(iw) + 0;
        int iw1 = floor(iw) + 1;
        int ih0 = floor(ih) + 0;
        int ih1 = floor(ih) + 1;
        bool okw0 = (iw0 >= 0 && iw0 < IW);
        bool okw1 = (iw1 >= 0 && iw1 < IW);
        bool okh0 = (ih0 >= 0 && ih0 < IH);
        bool okh1 = (ih1 >= 0 && ih1 < IH);
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        // Bilinear weights shared by all 64 channels of this pixel.
        float w00 = nalpha * nbeta;
        float w01 = nalpha * pbeta;
        float w10 = palpha * nbeta;
        float w11 = palpha * pbeta;
        // Offsets in int4 units: each spatial location spans two int4s.
        int o_coor = (oh * OW + ow) << 1;
        int i_coor_00 = (ih0 * IW + iw0) << 1;
        int i_coor_01 = (ih0 * IW + iw1) << 1;
        int i_coor_10 = (ih1 * IW + iw0) << 1;
        int i_coor_11 = (ih1 * IW + iw1) << 1;
        bool flag00 = okh0 && okw0, flag01 = okh0 && okw1, flag10 = okh1 && okw0,
             flag11 = okh1 && okw1;
        // Replicate the 4-bit border value into all 32 nibbles of an int4 so
        // out-of-range taps can be substituted wholesale.
        int8_t bval_4 = bval.as_storage() & 0xF;
        int bval_8 = transform_int8_to_b4x8<signedness>(
                bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
        int4 bval_int4;
        bval_int4.x = bval_8;
        bval_int4.y = bval_8;
        bval_int4.z = bval_8;
        bval_int4.w = bval_8;
        int s00[8], s01[8], s10[8], s11[8];
        int4 s[4], d;
        // One iteration per 64-channel group of the input/output.
        for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
            // Load this thread's int4 half of each of the four taps through the
            // read-only cache, or substitute the replicated border value.
            if (flag00) {
                s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
            } else {
                s[0] = bval_int4;
            }
            if (flag01) {
                s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
            } else {
                s[1] = bval_int4;
            }
            if (flag10) {
                s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
            } else {
                s[2] = bval_int4;
            }
            if (flag11) {
                s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
            } else {
                s[3] = bval_int4;
            }
            // For each 32-bit lane: unpack 8 nibbles per tap, blend with
            // w00..w11, and re-pack into the corresponding output lane.
            transform_b4x8_to_int8<signedness>(s00, s[0].x);
            transform_b4x8_to_int8<signedness>(s01, s[1].x);
            transform_b4x8_to_int8<signedness>(s10, s[2].x);
            transform_b4x8_to_int8<signedness>(s11, s[3].x);
            d.x = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            transform_b4x8_to_int8<signedness>(s00, s[0].y);
            transform_b4x8_to_int8<signedness>(s01, s[1].y);
            transform_b4x8_to_int8<signedness>(s10, s[2].y);
            transform_b4x8_to_int8<signedness>(s11, s[3].y);
            d.y = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            transform_b4x8_to_int8<signedness>(s00, s[0].z);
            transform_b4x8_to_int8<signedness>(s01, s[1].z);
            transform_b4x8_to_int8<signedness>(s10, s[2].z);
            transform_b4x8_to_int8<signedness>(s11, s[3].z);
            d.z = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            transform_b4x8_to_int8<signedness>(s00, s[0].w);
            transform_b4x8_to_int8<signedness>(s01, s[1].w);
            transform_b4x8_to_int8<signedness>(s10, s[2].w);
            transform_b4x8_to_int8<signedness>(s11, s[3].w);
            d.w = pack_output_func<signedness>(
                    output_converter, s00, s01, s10, s11, w00, w01, w10, w11);
            dst_int4[o_coor + c1] = d;
            // Advance one 64-channel slab: 64 * IH * IW nibbles == 2 * IH * IW int4.
            sptr_int4 += IH * IW * 2;
            dst_int4 += OH * OW * 2;
        }
    }
}
template <typename ctype, typename OutputConverter, int pack_c>
struct KernCoreNHWC {
    // Scalar (pack_c == 1) bilinear blend: reads the four taps at byte offset
    // `offset`, substituting `bval` wherever the matching *_ok flag is false,
    // combines them with weights w00..w11 and stores the converted result.
    MEGDNN_DEVICE __forceinline__ static void func(
            char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
            const char* src_ptr2, const char* src_ptr3, const int offset, float w00,
            float w01, float w10, float w11, OutputConverter& output_converter,
            const bool src0_ok, const bool src1_ok, const bool src2_ok,
            const bool src3_ok, const ctype bval) {
        static_assert(pack_c == 1, "static_assert pack_c == 1");
        const ctype tap0 = src0_ok ? *(const ctype*)(src_ptr0 + offset) : bval;
        const ctype tap1 = src1_ok ? *(const ctype*)(src_ptr1 + offset) : bval;
        const ctype tap2 = src2_ok ? *(const ctype*)(src_ptr2 + offset) : bval;
        const ctype tap3 = src3_ok ? *(const ctype*)(src_ptr3 + offset) : bval;
        *(ctype*)(dst_ptr + offset) =
                output_converter(tap0 * w00 + tap1 * w01 + tap2 * w10 + tap3 * w11);
    }
};
// pack_c == 8 specialization: ctype is a 4-bit quantized type, so 8 packed
// elements fit exactly in one 32-bit int. Each tap is loaded as a single int
// (or replaced by the replicated 4-bit border value), unpacked to 8 ints,
// blended with w00..w11 and re-packed into one int store.
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 8> {
    MEGDNN_DEVICE __forceinline__ static void func(
            char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
            const char* src_ptr2, const char* src_ptr3, const int offset, float w00,
            float w01, float w10, float w11, OutputConverter& output_converter,
            const bool src0_ok, const bool src1_ok, const bool src2_ok,
            const bool src3_ok, const ctype bval) {
        static_assert(
                std::is_same<ctype, dt_quint4>::value ||
                        std::is_same<ctype, dt_qint4>::value,
                "assert qu4 or q4");
        // dt_qint4 selects the signed pack/unpack helpers.
        constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
        // Border value replicated into all 8 nibbles of an int.
        int8_t bval_4 = bval.as_storage() & 0xF;
        const int bval_int = transform_int8_to_b4x8<signedness>(
                bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
        int src_ori[4];
        src_ori[0] = src0_ok ? *(int*)(src_ptr0 + offset) : bval_int;
        src_ori[1] = src1_ok ? *(int*)(src_ptr1 + offset) : bval_int;
        src_ori[2] = src2_ok ? *(int*)(src_ptr2 + offset) : bval_int;
        src_ori[3] = src3_ok ? *(int*)(src_ptr3 + offset) : bval_int;
        // Unpack each tap's 8 nibbles to plain ints for the blend.
        int src[4][8];
        transform_b4x8_to_int8<signedness>(src[0], src_ori[0]);
        transform_b4x8_to_int8<signedness>(src[1], src_ori[1]);
        transform_b4x8_to_int8<signedness>(src[2], src_ori[2]);
        transform_b4x8_to_int8<signedness>(src[3], src_ori[3]);
        int res = pack_output_func<signedness>(
                output_converter, src[0], src[1], src[2], src[3], w00, w01, w10, w11);
        *(int*)(dst_ptr + offset) = res;
    }
};
// pack_c == 16 specialization: 16 packed 4-bit elements per step, i.e. one
// int2 (two 32-bit lanes of 8 nibbles each). Same unpack/blend/pack scheme as
// the pack_c == 8 case, applied to both lanes.
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 16> {
    MEGDNN_DEVICE __forceinline__ static void func(
            char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
            const char* src_ptr2, const char* src_ptr3, const int offset, float w00,
            float w01, float w10, float w11, OutputConverter& output_converter,
            const bool src0_ok, const bool src1_ok, const bool src2_ok,
            const bool src3_ok, const ctype bval) {
        static_assert(
                std::is_same<ctype, dt_quint4>::value ||
                        std::is_same<ctype, dt_qint4>::value,
                "assert qu4 or q4");
        // dt_qint4 selects the signed pack/unpack helpers.
        constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
        // Border value replicated into all 16 nibbles of an int2.
        int8_t bval_4 = bval.as_storage() & 0xF;
        const int bval_int_temp = transform_int8_to_b4x8<signedness>(
                bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
        const int2 bval_int{bval_int_temp, bval_int_temp};
        int2 src_ori[4];
        src_ori[0] = src0_ok ? *(int2*)(src_ptr0 + offset) : bval_int;
        src_ori[1] = src1_ok ? *(int2*)(src_ptr1 + offset) : bval_int;
        src_ori[2] = src2_ok ? *(int2*)(src_ptr2 + offset) : bval_int;
        src_ori[3] = src3_ok ? *(int2*)(src_ptr3 + offset) : bval_int;
        // src[0..3]: .x lanes of the four taps; src[4..7]: .y lanes.
        int src[8][8];
        transform_b4x8_to_int8<signedness>(src[0], src_ori[0].x);
        transform_b4x8_to_int8<signedness>(src[1], src_ori[1].x);
        transform_b4x8_to_int8<signedness>(src[2], src_ori[2].x);
        transform_b4x8_to_int8<signedness>(src[3], src_ori[3].x);
        transform_b4x8_to_int8<signedness>(src[4], src_ori[0].y);
        transform_b4x8_to_int8<signedness>(src[5], src_ori[1].y);
        transform_b4x8_to_int8<signedness>(src[6], src_ori[2].y);
        transform_b4x8_to_int8<signedness>(src[7], src_ori[3].y);
        int2 res;
        res.x = pack_output_func<signedness>(
                output_converter, src[0], src[1], src[2], src[3], w00, w01, w10, w11);
        res.y = pack_output_func<signedness>(
                output_converter, src[4], src[5], src[6], src[7], w00, w01, w10, w11);
        *(int2*)(dst_ptr + offset) = res;
    }
};
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc_multi_src(
        SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    // NHWC perspective warp for non-constant border modes: out-of-range taps are
    // remapped into [0, IW) / [0, IH) by `Getter` (replicate/reflect/wrap), so no
    // validity flags are needed. One thread handles all C channels of one output
    // pixel, pack_c elements per KernCoreNHWC call; blockIdx.z selects the batch
    // item, each with its own source fetched from `srcs`.
    Getter getter;
    OutputConverter output_converter;
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = srcs.get(blockIdx.z);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow >= OW || oh >= OH)
        return;
    // Back-project (ow, oh) through the 3x3 homography.
    const float denominator = mat[6] * ow + mat[7] * oh + mat[8];
    const float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
    const float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
    const float fw = floor(iw);
    const float fh = floor(ih);
    const int iw0 = getter(fw, IW);
    const int iw1 = getter(fw + 1, IW);
    const int ih0 = getter(fh, IH);
    const int ih1 = getter(fh + 1, IH);
    const float palpha = ih - fh;
    const float pbeta = iw - fw;
    const float nalpha = 1.0f - palpha;
    const float nbeta = 1.0f - pbeta;
    // Bilinear weights shared by every channel of this pixel.
    const float w00 = nalpha * nbeta;
    const float w01 = nalpha * pbeta;
    const float w10 = palpha * nbeta;
    const float w11 = palpha * pbeta;
    const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
    const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
    const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
    const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
    char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
    // All taps are in range after Getter remapping, hence the `true` flags and
    // a dummy border value.
    for (int c = 0; c < C; c += pack_c) {
        KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8,
                w00, w01, w10, w11, output_converter, true, true, true, true,
                (ctype)0);
    }
}
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW) {
    // NHWC perspective warp for non-constant border modes: `Getter` folds
    // out-of-range coordinates back into [0, IW) / [0, IH) (replicate, reflect,
    // wrap), so every tap is valid. One thread covers all C channels of one
    // output pixel, pack_c elements at a time; blockIdx.z is the batch index.
    Getter getter;
    OutputConverter output_converter;
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow >= OW || oh >= OH)
        return;
    // Back-project (ow, oh) through the 3x3 homography.
    const float denominator = mat[6] * ow + mat[7] * oh + mat[8];
    const float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
    const float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
    const float fw = floor(iw);
    const float fh = floor(ih);
    const int iw0 = getter(fw, IW);
    const int iw1 = getter(fw + 1, IW);
    const int ih0 = getter(fh, IH);
    const int ih1 = getter(fh + 1, IH);
    const float palpha = ih - fh;
    const float pbeta = iw - fw;
    const float nalpha = 1.0f - palpha;
    const float nbeta = 1.0f - pbeta;
    // Bilinear weights shared by every channel of this pixel.
    const float w00 = nalpha * nbeta;
    const float w01 = nalpha * pbeta;
    const float w10 = palpha * nbeta;
    const float w11 = palpha * pbeta;
    const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
    const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
    const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
    const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
    char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
    // All taps are in range after Getter remapping, hence the `true` flags and
    // a dummy border value.
    for (int c = 0; c < C; c += pack_c) {
        KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8,
                w00, w01, w10, w11, output_converter, true, true, true, true,
                (ctype)0);
    }
}
// NHWC perspective warp with BORDER_CONSTANT and one source per batch item:
// taps that fall outside the input read `bval`. The dispatchers instantiate
// this with Getter = ConstGetter, which is expected to leave coordinates
// unchanged so the explicit bounds checks below decide validity
// (NOTE(review): confirm ConstGetter is the identity).
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc_const_multi_src(
        SrcVisitor srcs, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    Getter getter;
    OutputConverter output_converter;
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = srcs.get(blockIdx.z);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Back-project (ow, oh) through the 3x3 homography.
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        // Bilinear weights shared by every channel of this pixel.
        float w00 = nalpha * nbeta;
        float w01 = nalpha * pbeta;
        float w10 = palpha * nbeta;
        float w11 = palpha * pbeta;
        const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
        const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
        char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
        // Per-tap validity: out-of-range taps are replaced with bval inside
        // KernCoreNHWC.
        bool okw0 = (iw0 >= 0 && iw0 < IW);
        bool okw1 = (iw1 >= 0 && iw1 < IW);
        bool okh0 = (ih0 >= 0 && ih0 < IH);
        bool okh1 = (ih1 >= 0 && ih1 < IH);
        bool src0_ok = okh0 && okw0;
        bool src1_ok = okh0 && okw1;
        bool src2_ok = okh1 && okw0;
        bool src3_ok = okh1 && okw1;
        for (int c = 0; c < C; c += pack_c) {
            KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                    dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8,
                    w00, w01, w10, w11, output_converter, src0_ok, src1_ok, src2_ok,
                    src3_ok, bval);
        }
    }
}
// NHWC perspective warp with BORDER_CONSTANT (single shared SrcVisitor): taps
// outside the input read `bval`. Instantiated by the dispatchers with
// Getter = ConstGetter, which is expected to leave coordinates unchanged so
// the explicit bounds checks below decide validity (NOTE(review): confirm
// ConstGetter is the identity).
template <
        typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter,
        int pack_c>
__global__ void kern_general_nhwc_const(
        SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C,
        int IH, int IW, int OH, int OW, ctype bval) {
    Getter getter;
    OutputConverter output_converter;
    constexpr int bit_width = CtypeHelper<ctype>::bit_width;
    int ow = blockIdx.x * blockDim.x + threadIdx.x;
    int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
    dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
    mat += blockIdx.z * 3 * 3;
    if (ow < OW && oh < OH) {
        // Back-project (ow, oh) through the 3x3 homography.
        float denominator = mat[6] * ow + mat[7] * oh + mat[8];
        float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
        float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
        int iw0 = getter(floor(iw) + 0, IW);
        int iw1 = getter(floor(iw) + 1, IW);
        int ih0 = getter(floor(ih) + 0, IH);
        int ih1 = getter(floor(ih) + 1, IH);
        float palpha = ih - floor(ih);
        float pbeta = iw - floor(iw);
        float nalpha = 1.0f - palpha;
        float nbeta = 1.0f - pbeta;
        // Bilinear weights shared by every channel of this pixel.
        float w00 = nalpha * nbeta;
        float w01 = nalpha * pbeta;
        float w10 = palpha * nbeta;
        float w11 = palpha * pbeta;
        const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
        const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
        const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
        char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
        // Per-tap validity: out-of-range taps are replaced with bval inside
        // KernCoreNHWC.
        bool okw0 = (iw0 >= 0 && iw0 < IW);
        bool okw1 = (iw1 >= 0 && iw1 < IW);
        bool okh0 = (ih0 >= 0 && ih0 < IH);
        bool okh1 = (ih1 >= 0 && ih1 < IH);
        bool src0_ok = okh0 && okw0;
        bool src1_ok = okh0 && okw1;
        bool src2_ok = okh1 && okw0;
        bool src3_ok = okh1 && okw1;
        for (int c = 0; c < C; c += pack_c) {
            KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                    dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8,
                    w00, w01, w10, w11, output_converter, src0_ok, src1_ok, src2_ok,
                    src3_ok, bval);
        }
    }
}
// Host-side launcher (multi-source variant): selects the NHWC or NCHW kernel
// specialized for the requested border mode and walks the batch in chunks of
// at most 65535 items — the CUDA gridDim.z limit — advancing srcs/mat/dst
// between chunks.
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_multi_src(
        bool is_nhwc, SrcVisitor srcs, const float* mat, ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        cudaStream_t stream) {
    constexpr int pack_c = 1;
    const int BY = 16, BX = 32;
// Expands to the layout-appropriate kernel launch for one getter-based border
// mode; undefined again right after its last use inside the switch below.
#define DISPATCH(Getter)                                                          \
    do {                                                                          \
        if (is_nhwc) {                                                            \
            kern_general_nhwc_multi_src<                                          \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>, \
                    pack_c><<<blocks, threads, 0, stream>>>(                      \
                    srcs, mat, dst, C, IH, IW, OH, OW);                           \
        } else {                                                                  \
            kern_general_multi_src<                                              \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>> \
                    <<<blocks, threads, 0, stream>>>(                             \
                            srcs, mat, dst, C, IH, IW, OH, OW);                   \
        }                                                                         \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One z-slice of the grid per batch item in this chunk.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
// DISPATCH only serves the getter-based modes above; BORDER_CONSTANT launches
// its dedicated kernels directly.
#undef DISPATCH
            case BORDER_CONSTANT:
                if (is_nhwc) {
                    kern_general_nhwc_const_multi_src<
                            ctype, ConstGetter, SrcVisitor,
                            rounding::RoundingConverter<ctype>, pack_c>
                            <<<blocks, threads, 0, stream>>>(
                                    srcs, mat, dst, C, IH, IW, OH, OW, bval);
                } else {
                    kern_const_border_multi_src<
                            ctype, SrcVisitor, rounding::RoundingConverter<ctype>>
                            <<<blocks, threads, 0, stream>>>(
                                    srcs, mat, dst, C, IH, IW, OH, OW, bval);
                }
                break;
            default:
                break;
        }
        // Advance all per-batch cursors past the chunk just launched.
        N -= curr_batch_size;
        srcs.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
// Host-side launcher: selects the NHWC or NCHW kernel specialized for the
// requested border mode and walks the batch in chunks of at most 65535 items —
// the CUDA gridDim.z limit — advancing src/mat/dst between chunks.
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor(
        bool is_nhwc, SrcVisitor src, const float* mat, ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        cudaStream_t stream) {
    constexpr int pack_c = 1;
    const int BY = 16, BX = 32;
// Expands to the layout-appropriate kernel launch for one getter-based border
// mode; undefined again right after its last use inside the switch below.
#define DISPATCH(Getter)                                                        \
    do {                                                                        \
        if (is_nhwc) {                                                          \
            kern_general_nhwc<                                                  \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>, \
                    pack_c><<<blocks, threads, 0, stream>>>(                    \
                    src, mat, dst, C, IH, IW, OH, OW);                          \
        } else {                                                                \
            kern_general<                                                       \
                    ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>> \
                    <<<blocks, threads, 0, stream>>>(                           \
                            src, mat, dst, C, IH, IW, OH, OW);                  \
        }                                                                       \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One z-slice of the grid per batch item in this chunk.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
// DISPATCH only serves the getter-based modes above; BORDER_CONSTANT launches
// its dedicated kernels directly.
#undef DISPATCH
            case BORDER_CONSTANT:
                if (is_nhwc) {
                    kern_general_nhwc_const<
                            ctype, ConstGetter, SrcVisitor,
                            rounding::RoundingConverter<ctype>, pack_c>
                            <<<blocks, threads, 0, stream>>>(
                                    src, mat, dst, C, IH, IW, OH, OW, bval);
                } else {
                    kern_const_border<
                            ctype, SrcVisitor, rounding::RoundingConverter<ctype>>
                            <<<blocks, threads, 0, stream>>>(
                                    src, mat, dst, C, IH, IW, OH, OW, bval);
                }
                break;
            default:
                break;
        }
        // Advance all per-batch cursors past the chunk just launched.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
// Host-side launcher for NHWC tensors of 4-bit elements (two per byte): always
// uses the NHWC kernels with the caller-chosen pack_c, and advances per-batch
// cursors in byte units (hence the / 2 on element counts). Batches are chunked
// to at most 65535 items — the CUDA gridDim.z limit.
template <typename ctype, typename SrcVisitor, int pack_c>
void dispatch_with_visitor_nhwc_bit4(
        SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW,
        int OH, int OW, ctype bval, BorderMode bmode, cudaStream_t stream) {
    const int BY = 16, BX = 32;
// Expands to the kernel launch for one getter-based border mode.
#define DISPATCH(Getter)                                                           \
    do {                                                                           \
        kern_general_nhwc<                                                         \
                ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>, pack_c> \
                <<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH, OW); \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One z-slice of the grid per batch item in this chunk.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
            case BORDER_CONSTANT: {
                kern_general_nhwc_const<
                        ctype, ConstGetter, SrcVisitor,
                        rounding::RoundingConverter<ctype>, pack_c>
                        <<<blocks, threads, 0, stream>>>(
                                src, mat, dst, C, IH, IW, OH, OW, bval);
            } break;
            default:
                break;
        }
#undef DISPATCH
        // 4-bit elements: two per byte, so cursors advance by half the element
        // count.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW / 2);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW / 2;
    }
}
// Host-side launcher for the NCHW4 layout: picks the NCHW4 kernel specialized
// for the border mode and walks the batch in chunks of at most 65535 items —
// the CUDA gridDim.z limit.
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw4(
        SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW,
        int OH, int OW, ctype bval, BorderMode bmode, cudaStream_t stream) {
    const int BY = 16, BX = 32;
// Expands to the kernel launch for one getter-based border mode; undefined
// again right after its last use inside the switch below.
#define DISPATCH(Getter)                                                            \
    do {                                                                            \
        kern_general_nchw4<                                                         \
                ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>>      \
                <<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH, OW); \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // One z-slice of the grid per batch item in this chunk.
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
// DISPATCH only serves the getter-based modes above; BORDER_CONSTANT launches
// its dedicated kernel directly.
#undef DISPATCH
            case BORDER_CONSTANT:
                kern_const_border_nchw4<
                        ctype, SrcVisitor, rounding::RoundingConverter<ctype>>
                        <<<blocks, threads, 0, stream>>>(
                                src, mat, dst, C, IH, IW, OH, OW, bval);
                break;
            default:
                break;
        }
        // Advance all per-batch cursors past the chunk just launched.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
// Host-side launcher for the NCHW64 layout (4-bit elements, two per byte).
// blocks.x is sized from OW * 2 because each output pixel is handled by two
// threads (one per int4 half of the 64 channels — see kern_const_border_nchw64),
// and per-batch cursors advance in byte units (the / 2). Batches are chunked
// to at most 65535 items — the CUDA gridDim.z limit.
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw64(
        SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW,
        int OH, int OW, ctype bval, BorderMode bmode, cudaStream_t stream) {
    const int BY = 16, BX = 32;
// Expands to the kernel launch for one getter-based border mode; undefined
// again right after its last use inside the switch below.
#define DISPATCH(Getter)                                                            \
    do {                                                                            \
        kern_general_nchw64<                                                        \
                ctype, Getter, SrcVisitor, rounding::RoundingConverter<ctype>>      \
                <<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH, OW); \
    } while (0)
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        // Two threads per output pixel in x; one grid z-slice per batch item.
        dim3 blocks((OW * 2 + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
// DISPATCH only serves the getter-based modes above; BORDER_CONSTANT launches
// its dedicated kernel directly.
#undef DISPATCH
            case BORDER_CONSTANT:
                kern_const_border_nchw64<
                        ctype, SrcVisitor, rounding::RoundingConverter<ctype>>
                        <<<blocks, threads, 0, stream>>>(
                                src, mat, dst, C, IH, IW, OH, OW, bval);
                break;
            default:
                break;
        }
        // 4-bit elements: two per byte, so cursors advance by half the element
        // count.
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW / 2);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW / 2;
    }
}
// Converts a stored quantized source value into DstType on the device; only
// the specializations below are defined.
template <typename SrcType, typename DstType>
struct CudaTypeCvt;
template <>
struct CudaTypeCvt<dt_quint8, int8_t> {
    // Quantization parameters (zero point) of the source dtype.
    CudaDTypeParamImpl<dt_quint8> m_src_param;
    CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) { m_src_param = src_param; };
    // Re-centers the unsigned value around the zero point, yielding a signed
    // int8 at the same scale (no dequantization).
    inline __device__ int8_t operator()(uint8_t val) {
        return val - m_src_param.zero_point;
    }
};
template <>
struct CudaTypeCvt<dt_quint8, float> {
    // Quantization parameters (scale / zero point) of the source dtype.
    CudaDTypeParamImpl<dt_quint8> m_src_param;
    CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) { m_src_param = src_param; };
    // Full dequantization to float via the stored parameters.
    __device__ __forceinline__ float operator()(uint8_t val) {
        return m_src_param.dequantize(dt_quint8(val));
    }
};
// Warps a single-channel (NHW) quint8 image into NCHW4 output: the bilinear
// result is converted through `type_cvt` into lane .x of the vector dst type
// and the remaining lanes are zero-filled. `Getter` remaps out-of-range taps
// (replicate/reflect/wrap). Wrapped in a macro so dst element / vector type
// pairs can be instantiated together; currently only (int8_t, char4).
// Comments are kept outside the macro body to avoid interfering with the
// line continuations.
#define INST(dst_ctype, vec_dst_type)                                               \
    template <                                                                      \
            typename src_dtype, typename src_ctype, typename Getter,                \
            typename SrcVisitor>                                                    \
    __global__ void kern_general_quint8_nhw_nchw4(                                  \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW,                                         \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                           \
        Getter getter;                                                              \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                  \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                             \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                             \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW);            \
        dst += blockIdx.z * OH * OW * 4;                                            \
        mat += blockIdx.z * 3 * 3;                                                  \
        if (ow < OW && oh < OH) {                                                   \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];                 \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;          \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;          \
            int iw0 = getter(floor(iw) + 0, IW);                                    \
            int iw1 = getter(floor(iw) + 1, IW);                                    \
            int ih0 = getter(floor(ih) + 0, IH);                                    \
            int ih1 = getter(floor(ih) + 1, IH);                                    \
            float palpha = ih - floor(ih);                                          \
            float pbeta = iw - floor(iw);                                           \
            float nalpha = 1.0f - palpha;                                           \
            float nbeta = 1.0f - pbeta;                                             \
            vec_dst_type result;                                                    \
            src_ctype val_x = warp_out_converter(                                   \
                    sptr[ih0 * IW + iw0] * nalpha * nbeta +                         \
                    sptr[ih0 * IW + iw1] * nalpha * pbeta +                         \
                    sptr[ih1 * IW + iw0] * palpha * nbeta +                         \
                    sptr[ih1 * IW + iw1] * palpha * pbeta);                         \
            result.x = type_cvt(val_x);                                             \
            result.y = result.z = result.w = 0;                                     \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                          \
        }                                                                           \
    }
INST(int8_t, char4)
#undef INST
// BORDER_CONSTANT counterpart of kern_general_quint8_nhw_nchw4: taps outside
// the single-channel quint8 input read `bval`; the blended value is converted
// through `type_cvt` into lane .x of the vector dst type, other lanes zeroed.
// Wrapped in a macro for (dst element, vector) pairs; currently (int8_t, char4).
#define INST(dst_ctype, vec_dst_type)                                               \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>          \
    __global__ void kern_const_border_quint8_nhw_nchw4(                             \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW, src_ctype bval,                         \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                  \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                             \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                             \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW);            \
        dst += blockIdx.z * OH * OW * 4;                                            \
        mat += blockIdx.z * 3 * 3;                                                  \
        if (ow < OW && oh < OH) {                                                   \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];                 \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;          \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;          \
            int iw0 = floor(iw) + 0;                                                \
            int iw1 = floor(iw) + 1;                                                \
            int ih0 = floor(ih) + 0;                                                \
            int ih1 = floor(ih) + 1;                                                \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                     \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                     \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                     \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                     \
            float palpha = ih - floor(ih);                                          \
            float pbeta = iw - floor(iw);                                           \
            float nalpha = 1.0f - palpha;                                           \
            float nbeta = 1.0f - pbeta;                                             \
            vec_dst_type result;                                                    \
            src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);           \
            src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);           \
            src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);           \
            src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);           \
            src_ctype val_x = warp_out_converter(                                   \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                   \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                   \
            result.x = type_cvt(val_x);                                             \
            result.y = result.z = result.w = 0;                                     \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                          \
        }                                                                           \
    }
INST(int8_t, char4)
#undef INST
// Warps a 3-channel planar (N3HW) quint8 image into NCHW4 output: each of the
// three input planes is interpolated independently and converted through
// `type_cvt` into lanes .x/.y/.z of the vector dst type; lane .w is zeroed.
// `Getter` remaps out-of-range taps. Macro-wrapped for (dst element, vector)
// pairs; currently (int8_t, char4).
#define INST(dst_ctype, vec_dst_type)                                               \
    template <                                                                      \
            typename src_dtype, typename src_ctype, typename Getter,                \
            typename SrcVisitor>                                                    \
    __global__ void kern_general_quint8_n3hw_nchw4(                                 \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW,                                         \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                           \
        Getter getter;                                                              \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                  \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                             \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                             \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);        \
        dst += blockIdx.z * OH * OW * 4;                                            \
        mat += blockIdx.z * 3 * 3;                                                  \
        if (ow < OW && oh < OH) {                                                   \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];                 \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;          \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;          \
            int iw0 = getter(floor(iw) + 0, IW);                                    \
            int iw1 = getter(floor(iw) + 1, IW);                                    \
            int ih0 = getter(floor(ih) + 0, IH);                                    \
            int ih1 = getter(floor(ih) + 1, IH);                                    \
            float palpha = ih - floor(ih);                                          \
            float pbeta = iw - floor(iw);                                           \
            float nalpha = 1.0f - palpha;                                           \
            float nbeta = 1.0f - pbeta;                                             \
            vec_dst_type result;                                                    \
            src_ctype val_x = warp_out_converter(                                   \
                    sptr[ih0 * IW + iw0] * nalpha * nbeta +                         \
                    sptr[ih0 * IW + iw1] * nalpha * pbeta +                         \
                    sptr[ih1 * IW + iw0] * palpha * nbeta +                         \
                    sptr[ih1 * IW + iw1] * palpha * pbeta);                         \
            src_ctype val_y = warp_out_converter(                                   \
                    sptr[IW * IH + ih0 * IW + iw0] * nalpha * nbeta +               \
                    sptr[IW * IH + ih0 * IW + iw1] * nalpha * pbeta +               \
                    sptr[IW * IH + ih1 * IW + iw0] * palpha * nbeta +               \
                    sptr[IW * IH + ih1 * IW + iw1] * palpha * pbeta);               \
            src_ctype val_z = warp_out_converter(                                   \
                    sptr[2 * IW * IH + ih0 * IW + iw0] * nalpha * nbeta +           \
                    sptr[2 * IW * IH + ih0 * IW + iw1] * nalpha * pbeta +           \
                    sptr[2 * IW * IH + ih1 * IW + iw0] * palpha * nbeta +           \
                    sptr[2 * IW * IH + ih1 * IW + iw1] * palpha * pbeta);           \
            result.x = type_cvt(val_x);                                             \
            result.y = type_cvt(val_y);                                             \
            result.z = type_cvt(val_z);                                             \
            result.w = 0;                                                           \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                          \
        }                                                                           \
    }
INST(int8_t, char4)
#undef INST
// BORDER_CONSTANT counterpart of kern_general_quint8_n3hw_nchw4: the three
// planar quint8 channels are interpolated independently with out-of-range taps
// reading `bval`, then converted through `type_cvt` into lanes .x/.y/.z of the
// vector dst type; lane .w is zeroed. Macro-wrapped; currently (int8_t, char4).
#define INST(dst_ctype, vec_dst_type)                                               \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>          \
    __global__ void kern_const_border_quint8_n3hw_nchw4(                            \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW, src_ctype bval,                         \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;                  \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                             \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                             \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);        \
        dst += blockIdx.z * OH * OW * 4;                                            \
        mat += blockIdx.z * 3 * 3;                                                  \
        if (ow < OW && oh < OH) {                                                   \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];                 \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;          \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;          \
            int iw0 = floor(iw) + 0;                                                \
            int iw1 = floor(iw) + 1;                                                \
            int ih0 = floor(ih) + 0;                                                \
            int ih1 = floor(ih) + 1;                                                \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                     \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                     \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                     \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                     \
            float palpha = ih - floor(ih);                                          \
            float pbeta = iw - floor(iw);                                           \
            float nalpha = 1.0f - palpha;                                           \
            float nbeta = 1.0f - pbeta;                                             \
            vec_dst_type result;                                                    \
            src_ctype v00, v01, v10, v11;                                           \
            v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);                     \
            v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);                     \
            v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);                     \
            v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);                     \
            src_ctype val_x = warp_out_converter(                                   \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                   \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                   \
            v00 = (okh0 && okw0 ? sptr[IH * IW + ih0 * IW + iw0] : bval);           \
            v01 = (okh0 && okw1 ? sptr[IH * IW + ih0 * IW + iw1] : bval);           \
            v10 = (okh1 && okw0 ? sptr[IH * IW + ih1 * IW + iw0] : bval);           \
            v11 = (okh1 && okw1 ? sptr[IH * IW + ih1 * IW + iw1] : bval);           \
            src_ctype val_y = warp_out_converter(                                   \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                   \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                   \
            v00 = (okh0 && okw0 ? sptr[2 * IH * IW + ih0 * IW + iw0] : bval);       \
            v01 = (okh0 && okw1 ? sptr[2 * IH * IW + ih0 * IW + iw1] : bval);       \
            v10 = (okh1 && okw0 ? sptr[2 * IH * IW + ih1 * IW + iw0] : bval);       \
            v11 = (okh1 && okw1 ? sptr[2 * IH * IW + ih1 * IW + iw1] : bval);       \
            src_ctype val_z = warp_out_converter(                                   \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                   \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                   \
            result.x = type_cvt(val_x);                                             \
            result.y = type_cvt(val_y);                                             \
            result.z = type_cvt(val_z);                                             \
            result.w = 0;                                                           \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                          \
        }                                                                           \
    }
INST(int8_t, char4)
#undef INST
/* Warp-perspective kernel: quantized-u8 source in interleaved 3-channel
 * (NHW3) layout -> NCHW4-packed destination (4th lane written as 0).
 * Border handling is delegated to the `Getter` functor (replicate / reflect /
 * reflect101 / wrap), which clamps each neighbor index into [0, dim).
 * One thread per output pixel (ow, oh); blockIdx.z selects the batch. */
#define INST(dst_ctype, vec_dst_type)                                            \
    template <                                                                   \
            typename src_dtype, typename src_ctype, typename Getter,             \
            typename SrcVisitor>                                                 \
    __global__ void kern_general_quint8_nhw3_nchw4(                              \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW,                                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        Getter getter;                                                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);     \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = getter(floor(iw) + 0, IW);                                 \
            int iw1 = getter(floor(iw) + 1, IW);                                 \
            int ih0 = getter(floor(ih) + 0, IH);                                 \
            int ih1 = getter(floor(ih) + 1, IH);                                 \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype val_x = warp_out_converter(                                \
                    sptr[(ih0 * IW + iw0) * 3] * nalpha * nbeta +                \
                    sptr[(ih0 * IW + iw1) * 3] * nalpha * pbeta +                \
                    sptr[(ih1 * IW + iw0) * 3] * palpha * nbeta +                \
                    sptr[(ih1 * IW + iw1) * 3] * palpha * pbeta);                \
            src_ctype val_y = warp_out_converter(                                \
                    sptr[(ih0 * IW + iw0) * 3 + 1] * nalpha * nbeta +            \
                    sptr[(ih0 * IW + iw1) * 3 + 1] * nalpha * pbeta +            \
                    sptr[(ih1 * IW + iw0) * 3 + 1] * palpha * nbeta +            \
                    sptr[(ih1 * IW + iw1) * 3 + 1] * palpha * pbeta);            \
            src_ctype val_z = warp_out_converter(                                \
                    sptr[(ih0 * IW + iw0) * 3 + 2] * nalpha * nbeta +            \
                    sptr[(ih0 * IW + iw1) * 3 + 2] * nalpha * pbeta +            \
                    sptr[(ih1 * IW + iw0) * 3 + 2] * palpha * nbeta +            \
                    sptr[(ih1 * IW + iw1) * 3 + 2] * palpha * pbeta);            \
            result.x = type_cvt(val_x);                                          \
            result.y = type_cvt(val_y);                                          \
            result.z = type_cvt(val_z);                                          \
            result.w = 0;                                                        \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
/* Warp-perspective kernel: quantized-u8 source in interleaved 3-channel
 * (NHW3) layout -> NCHW4-packed destination (4th lane written as 0), with
 * BORDER_CONSTANT semantics: each out-of-bound bilinear tap is replaced by
 * `bval` before the blend, which is rounded back to src_ctype and converted
 * via `type_cvt`.  One thread per output pixel; blockIdx.z is the batch. */
#define INST(dst_ctype, vec_dst_type)                                            \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>       \
    __global__ void kern_const_border_quint8_nhw3_nchw4(                         \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int IH, int IW, int OH, int OW, src_ctype bval,                      \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW);     \
        dst += blockIdx.z * OH * OW * 4;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = floor(iw) + 0;                                             \
            int iw1 = floor(iw) + 1;                                             \
            int ih0 = floor(ih) + 0;                                             \
            int ih1 = floor(ih) + 1;                                             \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                  \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                  \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                  \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                  \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            vec_dst_type result;                                                 \
            src_ctype v00, v01, v10, v11;                                        \
            v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3] : bval);            \
            v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3] : bval);            \
            v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3] : bval);            \
            v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3] : bval);            \
            src_ctype val_x = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 1] : bval);        \
            v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 1] : bval);        \
            v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 1] : bval);        \
            v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 1] : bval);        \
            src_ctype val_y = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 2] : bval);        \
            v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 2] : bval);        \
            v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 2] : bval);        \
            v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 2] : bval);        \
            src_ctype val_z = warp_out_converter(                                \
                    v00 * nalpha * nbeta + v01 * nalpha * pbeta +                \
                    v10 * palpha * nbeta + v11 * palpha * pbeta);                \
            result.x = type_cvt(val_x);                                          \
            result.y = type_cvt(val_y);                                          \
            result.z = type_cvt(val_z);                                          \
            result.w = 0;                                                        \
            *((vec_dst_type*)dst + oh * OW + ow) = result;                       \
        }                                                                        \
    }
INST(int8_t, char4)
#undef INST
/* Host-side dispatcher for the quint8 dimshuffle+typecvt -> NCHW4 kernels.
 * Selects the kernel variant by channel count / layout (C==1 -> nhw,
 * is_nhwc -> nhw3, otherwise n3hw) and by border mode.  Batches larger than
 * the grid-z limit (65535) are processed in chunks, advancing the source
 * visitor, the matrix pointer and the destination pointer between launches. */
template <
        typename src_dtype, typename src_ctype, typename dst_ctype, typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
        bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, src_ctype bval,
        CudaDTypeParamImpl<src_dtype> param, BorderMode bmode, cudaStream_t stream) {
    const int BY = 16, BX = 32;  // one thread per output pixel, 32x16 tiles
    CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
// DISPATCH instantiates the "general" (Getter-based) kernel for one border
// Getter; the constant-border case has its own kernels and is handled inline.
#define DISPATCH(Getter)                                                       \
    do {                                                                       \
        if (C == 1) {                                                          \
            kern_general_quint8_nhw_nchw4<src_dtype, src_ctype, Getter, SrcVisitor> \
                    <<<blocks, threads, 0, stream>>>(                          \
                            src, mat, dst, IH, IW, OH, OW, type_cvt);          \
        } else if (is_nhwc) {                                                  \
            kern_general_quint8_nhw3_nchw4<src_dtype, src_ctype, Getter, SrcVisitor> \
                    <<<blocks, threads, 0, stream>>>(                          \
                            src, mat, dst, IH, IW, OH, OW, type_cvt);          \
        } else {                                                               \
            kern_general_quint8_n3hw_nchw4<src_dtype, src_ctype, Getter, SrcVisitor> \
                    <<<blocks, threads, 0, stream>>>(                          \
                            src, mat, dst, IH, IW, OH, OW, type_cvt);          \
        }                                                                      \
    } while (0)
    // grid.z is limited to 65535; loop over the batch in chunks of that size
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
// DISPATCH is undefined after its last use above; BORDER_CONSTANT below
// launches the dedicated const-border kernels directly.
#undef DISPATCH
            case BORDER_CONSTANT:
                if (C == 1) {
                    kern_const_border_quint8_nhw_nchw4<src_dtype, src_ctype, SrcVisitor>
                            <<<blocks, threads, 0, stream>>>(
                                    src, mat, dst, IH, IW, OH, OW, bval, type_cvt);
                } else if (is_nhwc) {
                    kern_const_border_quint8_nhw3_nchw4<
                            src_dtype, src_ctype, SrcVisitor>
                            <<<blocks, threads, 0, stream>>>(
                                    src, mat, dst, IH, IW, OH, OW, bval, type_cvt);
                } else {
                    kern_const_border_quint8_n3hw_nchw4<
                            src_dtype, src_ctype, SrcVisitor>
                            <<<blocks, threads, 0, stream>>>(
                                    src, mat, dst, IH, IW, OH, OW, bval, type_cvt);
                }
                break;
            default:
                break;
        }
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        // output is NCHW4: 4 packed channels per pixel regardless of C
        dst += curr_batch_size * 4 * OH * OW;
    }
}
/* Warp-perspective kernel: quantized-u8 planar NCHW source -> NCHW
 * destination, one output value per channel (no vector packing).  Border
 * handling is delegated to the `Getter` functor.  The channel loop walks
 * sptr/dst one plane (IH*IW / OH*OW elements) at a time. */
#define INST(dst_ctype)                                                          \
    template <                                                                   \
            typename src_dtype, typename src_ctype, typename Getter,             \
            typename SrcVisitor>                                                 \
    __global__ void kern_general_quint8_nchw(                                    \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW,                               \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        Getter getter;                                                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);     \
        dst += blockIdx.z * C * OH * OW;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = getter(floor(iw) + 0, IW);                                 \
            int iw1 = getter(floor(iw) + 1, IW);                                 \
            int ih0 = getter(floor(ih) + 0, IH);                                 \
            int ih1 = getter(floor(ih) + 1, IH);                                 \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            for (int c = 0; c < C; ++c) {                                        \
                src_ctype val = warp_out_converter(                              \
                        sptr[ih0 * IW + iw0] * nalpha * nbeta +                  \
                        sptr[ih0 * IW + iw1] * nalpha * pbeta +                  \
                        sptr[ih1 * IW + iw0] * palpha * nbeta +                  \
                        sptr[ih1 * IW + iw1] * palpha * pbeta);                  \
                dst_ctype result;                                                \
                result = type_cvt(val);                                          \
                dst[oh * OW + ow] = result;                                      \
                sptr += IH * IW;                                                 \
                dst += OH * OW;                                                  \
            }                                                                    \
        }                                                                        \
    }
INST(float)
#undef INST
/* BORDER_CONSTANT variant of kern_general_quint8_nchw: planar NCHW source ->
 * NCHW destination.  Out-of-bound bilinear taps are replaced by `bval`;
 * the blend is rounded back to src_ctype and converted with `type_cvt`.
 * The channel loop advances sptr/dst one plane at a time. */
#define INST(dst_ctype)                                                          \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>       \
    __global__ void kern_const_border_quint8_nchw(                               \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW, src_ctype bval,               \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);     \
        dst += blockIdx.z * C * OH * OW;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = floor(iw) + 0;                                             \
            int iw1 = floor(iw) + 1;                                             \
            int ih0 = floor(ih) + 0;                                             \
            int ih1 = floor(ih) + 1;                                             \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                  \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                  \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                  \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                  \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            for (int c = 0; c < C; ++c) {                                        \
                src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);    \
                src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);    \
                src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);    \
                src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);    \
                src_ctype val = warp_out_converter(                              \
                        v00 * nalpha * nbeta + v01 * nalpha * pbeta +            \
                        v10 * palpha * nbeta + v11 * palpha * pbeta);            \
                dst_ctype result;                                                \
                result = type_cvt(val);                                          \
                dst[oh * OW + ow] = result;                                      \
                sptr += IH * IW;                                                 \
                dst += OH * OW;                                                  \
            }                                                                    \
        }                                                                        \
    }
INST(float)
#undef INST
/* Warp-perspective kernel: quantized-u8 interleaved NHWC source -> planar
 * NCHW destination.  Border handling via the `Getter` functor.  The channel
 * loop indexes the interleaved source with `* C + c` (sptr is not advanced)
 * while dst steps one output plane per channel. */
#define INST(dst_ctype)                                                          \
    template <                                                                   \
            typename src_dtype, typename src_ctype, typename Getter,             \
            typename SrcVisitor>                                                 \
    __global__ void kern_general_quint8_nhwc_nchw(                               \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW,                               \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        Getter getter;                                                           \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);     \
        dst += blockIdx.z * C * OH * OW;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = getter(floor(iw) + 0, IW);                                 \
            int iw1 = getter(floor(iw) + 1, IW);                                 \
            int ih0 = getter(floor(ih) + 0, IH);                                 \
            int ih1 = getter(floor(ih) + 1, IH);                                 \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            for (int c = 0; c < C; ++c) {                                        \
                src_ctype val = warp_out_converter(                              \
                        sptr[(ih0 * IW + iw0) * C + c] * nalpha * nbeta +        \
                        sptr[(ih0 * IW + iw1) * C + c] * nalpha * pbeta +        \
                        sptr[(ih1 * IW + iw0) * C + c] * palpha * nbeta +        \
                        sptr[(ih1 * IW + iw1) * C + c] * palpha * pbeta);        \
                dst_ctype result;                                                \
                result = type_cvt(val);                                          \
                dst[oh * OW + ow] = result;                                      \
                dst += OH * OW;                                                  \
            }                                                                    \
        }                                                                        \
    }
INST(float)
#undef INST
/* BORDER_CONSTANT warp-perspective kernel: quantized-u8 interleaved NHWC
 * source -> planar NCHW destination.  Out-of-bound bilinear taps use `bval`;
 * the blend is rounded back to src_ctype and converted with `type_cvt`.
 * One thread per output pixel (ow, oh); blockIdx.z is the batch index.
 * Fix: hold the rounded blend in `src_ctype` (not `float`) before type_cvt,
 * matching every sibling kernel in this file — warp_out_converter already
 * returns src_ctype, so widening to float only round-tripped the value. */
#define INST(dst_ctype)                                                          \
    template <typename src_dtype, typename src_ctype, typename SrcVisitor>       \
    __global__ void kern_const_border_quint8_nhwc_nchw(                          \
            SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \
            int C, int IH, int IW, int OH, int OW, src_ctype bval,               \
            CudaTypeCvt<src_dtype, dst_ctype> type_cvt) {                        \
        rounding::RoundingConverter<src_ctype> warp_out_converter;               \
        int ow = blockIdx.x * blockDim.x + threadIdx.x;                          \
        int oh = blockIdx.y * blockDim.y + threadIdx.y;                          \
        const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);     \
        dst += blockIdx.z * C * OH * OW;                                         \
        mat += blockIdx.z * 3 * 3;                                               \
        if (ow < OW && oh < OH) {                                                \
            float denominator = mat[6] * ow + mat[7] * oh + mat[8];              \
            float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;       \
            float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;       \
            int iw0 = floor(iw) + 0;                                             \
            int iw1 = floor(iw) + 1;                                             \
            int ih0 = floor(ih) + 0;                                             \
            int ih1 = floor(ih) + 1;                                             \
            bool okw0 = (iw0 >= 0 && iw0 < IW);                                  \
            bool okw1 = (iw1 >= 0 && iw1 < IW);                                  \
            bool okh0 = (ih0 >= 0 && ih0 < IH);                                  \
            bool okh1 = (ih1 >= 0 && ih1 < IH);                                  \
            float palpha = ih - floor(ih);                                       \
            float pbeta = iw - floor(iw);                                        \
            float nalpha = 1.0f - palpha;                                        \
            float nbeta = 1.0f - pbeta;                                          \
            for (int c = 0; c < C; ++c) {                                        \
                src_ctype v00 =                                                  \
                        (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * C + c] : bval);  \
                src_ctype v01 =                                                  \
                        (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * C + c] : bval);  \
                src_ctype v10 =                                                  \
                        (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * C + c] : bval);  \
                src_ctype v11 =                                                  \
                        (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * C + c] : bval);  \
                src_ctype val = warp_out_converter(                              \
                        v00 * nalpha * nbeta + v01 * nalpha * pbeta +            \
                        v10 * palpha * nbeta + v11 * palpha * pbeta);            \
                dst_ctype result;                                                \
                result = type_cvt(val);                                          \
                dst[oh * OW + ow] = result;                                      \
                dst += OH * OW;                                                  \
            }                                                                    \
        }                                                                        \
    }
INST(float)
#undef INST
/* Host-side dispatcher for the quint8 dimshuffle+typecvt -> NCHW kernels.
 * Picks the NHWC- or NCHW-source kernel by `is_nhwc` and the kernel family
 * by border mode; batches beyond the grid-z limit (65535) are processed in
 * chunks, advancing visitor / mat / dst between launches. */
template <
        typename src_dtype, typename src_ctype, typename dst_ctype, typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
        bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N, int C,
        int IH, int IW, int OH, int OW, src_ctype bval,
        CudaDTypeParamImpl<src_dtype> param, BorderMode bmode, cudaStream_t stream) {
    const int BY = 16, BX = 32;  // one thread per output pixel, 32x16 tiles
    CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
// DISPATCH instantiates the Getter-based kernel for one border mode; the
// constant-border case launches its dedicated kernels inline below.
#define DISPATCH(Getter)                                                       \
    do {                                                                       \
        if (is_nhwc) {                                                         \
            kern_general_quint8_nhwc_nchw<src_dtype, src_ctype, Getter, SrcVisitor> \
                    <<<blocks, threads, 0, stream>>>(                          \
                            src, mat, dst, C, IH, IW, OH, OW, type_cvt);       \
        } else {                                                               \
            kern_general_quint8_nchw<src_dtype, src_ctype, Getter, SrcVisitor> \
                    <<<blocks, threads, 0, stream>>>(                          \
                            src, mat, dst, C, IH, IW, OH, OW, type_cvt);       \
        }                                                                      \
    } while (0)
    // grid.z is limited to 65535; loop over the batch in chunks of that size
    const int max_batch_size = 65535;
    while (N) {
        size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
        dim3 threads(BX, BY);
        dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
        switch (bmode) {
            case BORDER_REPLICATE:
                DISPATCH(ReplicateGetter);
                break;
            case BORDER_REFLECT:
                DISPATCH(ReflectGetter);
                break;
            case BORDER_REFLECT_101:
                DISPATCH(Reflect101Getter);
                break;
            case BORDER_WRAP:
                DISPATCH(WrapGetter);
                break;
// DISPATCH is undefined here, after its last use above.
#undef DISPATCH
            case BORDER_CONSTANT:
                if (is_nhwc) {
                    kern_const_border_quint8_nhwc_nchw<src_dtype, src_ctype, SrcVisitor>
                            <<<blocks, threads, 0, stream>>>(
                                    src, mat, dst, C, IH, IW, OH, OW, bval, type_cvt);
                } else {
                    kern_const_border_quint8_nchw<src_dtype, src_ctype, SrcVisitor>
                            <<<blocks, threads, 0, stream>>>(
                                    src, mat, dst, C, IH, IW, OH, OW, bval, type_cvt);
                }
                break;
            default:
                break;
        }
        N -= curr_batch_size;
        src.move_batch(curr_batch_size, C * IH * IW);
        mat += curr_batch_size * 3 * 3;
        dst += curr_batch_size * C * OH * OW;
    }
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace warp_perspective {
/* Public entry point: warp-perspective forward over an array of source
 * images (`srcs`), one 3x3 matrix per output batch.  When `mat_idx` is
 * non-null, matrix i samples source image mat_idx[i] (validated against
 * N_SRC by the indexed visitor); otherwise sources are visited in order. */
template <typename ctype>
void forward_proxy_multi_src(
        bool is_nhwc, const ctype** srcs, const float* mat, const int* mat_idx,
        ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        ctype bval, BorderMode bmode, megcore::AsyncErrorInfo* error_info,
        void* error_tracker, cudaStream_t stream) {
    if (mat_idx) {
        IndexedSrcVisitor<ctype> visitor;
        // NOTE(review): both `ptrs` and `ptr` are assigned from `srcs` —
        // presumably a type-erased and a typed view of the same array;
        // confirm against the visitor definition.
        visitor.ptrs = reinterpret_cast<const void**>(srcs);
        visitor.ptr = srcs;
        visitor.idx = mat_idx;
        visitor.N_SRC = N_SRC;
        visitor.error_info = error_info;
        visitor.error_tracker = error_tracker;
        dispatch_with_visitor_multi_src(
                is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    } else {
        DirectSrcVisitor<ctype> visitor;
        visitor.ptrs = reinterpret_cast<const void**>(srcs);
        visitor.ptr = srcs;
        dispatch_with_visitor_multi_src(
                is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    }
    // surface any asynchronous launch error immediately
    after_kernel_launch();
}
/* Public entry point: single-source warp-perspective forward.  A non-null
 * `mat_idx` makes each of the N_MAT matrices pick its source batch by
 * index; otherwise matrices and source batches correspond one-to-one. */
template <typename ctype>
void forward_proxy(
        bool is_nhwc, const ctype* src, const float* mat, const int* mat_idx,
        ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        ctype bval, BorderMode bmode, megcore::AsyncErrorInfo* error_info,
        void* error_tracker, cudaStream_t stream) {
    if (mat_idx == nullptr) {
        DirectSrcVisitor<ctype> direct;
        direct.ptr = src;
        dispatch_with_visitor(
                is_nhwc, direct, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    } else {
        IndexedSrcVisitor<ctype> indexed;
        indexed.ptr = src;
        indexed.idx = mat_idx;
        indexed.N_SRC = N_SRC;
        indexed.error_info = error_info;
        indexed.error_tracker = error_tracker;
        dispatch_with_visitor(
                is_nhwc, indexed, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
                stream);
    }
    after_kernel_launch();  // surface launch errors immediately
}
/* Public entry point: warp-perspective forward on 4-bit NHWC data packed
 * `pack_c` elements per storage unit.  Non-null `mat_idx` selects source
 * batches by index; otherwise sources are taken in order. */
template <typename ctype, int pack_c>
void forward_proxy_nhwc_bit4(
        const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC,
        int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) {
    if (mat_idx == nullptr) {
        DirectSrcVisitor<ctype> direct;
        direct.ptr = src;
        dispatch_with_visitor_nhwc_bit4<ctype, DirectSrcVisitor<ctype>, pack_c>(
                direct, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    } else {
        IndexedSrcVisitor<ctype> indexed;
        indexed.ptr = src;
        indexed.idx = mat_idx;
        indexed.N_SRC = N_SRC;
        indexed.error_info = error_info;
        indexed.error_tracker = error_tracker;
        dispatch_with_visitor_nhwc_bit4<ctype, IndexedSrcVisitor<ctype>, pack_c>(
                indexed, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    }
    after_kernel_launch();  // surface launch errors immediately
}
/* Public entry point: warp-perspective forward on an NCHW4-layout tensor.
 * Non-null `mat_idx` selects source batches by index; otherwise sources
 * are taken in order. */
template <typename ctype>
void forward_proxy_nchw4(
        const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC,
        int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) {
    if (mat_idx == nullptr) {
        DirectSrcVisitor<ctype> direct;
        direct.ptr = src;
        dispatch_with_visitor_nchw4(
                direct, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    } else {
        IndexedSrcVisitor<ctype> indexed;
        indexed.ptr = src;
        indexed.idx = mat_idx;
        indexed.N_SRC = N_SRC;
        indexed.error_info = error_info;
        indexed.error_tracker = error_tracker;
        dispatch_with_visitor_nchw4(
                indexed, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    }
    after_kernel_launch();  // surface launch errors immediately
}
/* Public entry point: warp-perspective forward on an NCHW64-layout tensor
 * (used for 4-bit quantized types).  Non-null `mat_idx` selects source
 * batches by index; otherwise sources are taken in order. */
template <typename ctype>
void forward_proxy_nchw64(
        const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC,
        int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) {
    if (mat_idx == nullptr) {
        DirectSrcVisitor<ctype> direct;
        direct.ptr = src;
        dispatch_with_visitor_nchw64(
                direct, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    } else {
        IndexedSrcVisitor<ctype> indexed;
        indexed.ptr = src;
        indexed.idx = mat_idx;
        indexed.N_SRC = N_SRC;
        indexed.error_info = error_info;
        indexed.error_tracker = error_tracker;
        dispatch_with_visitor_nchw64(
                indexed, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream);
    }
    after_kernel_launch();  // surface launch errors immediately
}
// Explicit template instantiations of the public proxy entry points for the
// concrete element types used by the operator implementation.
#define INST(ctype)                                                               \
    template void forward_proxy(                                                  \
            bool, const ctype*, const float*, const int*, ctype*, int, int, int, int, \
            int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,    \
            cudaStream_t);
INST(float)
INST(uint8_t)
#if !MEGDNN_DISABLE_FLOAT16
INST(dt_float16)
#endif
INST(int8_t)
#undef INST
// Multi-source variant: only float (and fp16 when enabled) are supported.
#define INST(ctype)                                                               \
    template void forward_proxy_multi_src(                                        \
            bool, const ctype**, const float*, const int*, ctype*, int, int, int, int, \
            int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,    \
            cudaStream_t);
INST(float)
#if !MEGDNN_DISABLE_FLOAT16
INST(dt_float16)
#endif
#undef INST
// NCHW4 layout: int8 only.
#define INST(ctype)                                                               \
    template void forward_proxy_nchw4(                                            \
            const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \
            int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,         \
            cudaStream_t);
INST(int8_t)
#undef INST
// NCHW64 layout: signed/unsigned 4-bit quantized types.
#define INST(ctype)                                                               \
    template void forward_proxy_nchw64(                                           \
            const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \
            int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,         \
            cudaStream_t);
INST(dt_qint4)
INST(dt_quint4)
#undef INST
// NHWC 4-bit: both 4-bit types at pack widths 8 and 16.
#define INST(ctype, pack_c)                                                       \
    template void forward_proxy_nhwc_bit4<ctype, pack_c>(                         \
            const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \
            int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*,         \
            cudaStream_t);
INST(dt_qint4, 8)
INST(dt_quint4, 8)
INST(dt_qint4, 16)
INST(dt_quint4, 16)
#undef INST
/* Public entry point: quantized-u8 warp perspective with fused layout
 * shuffle and type conversion to an NCHW4 destination.  `param` carries the
 * quantization parameters; a non-null `mat_idx` selects source batches by
 * index. */
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw4(
        bool is_nhwc, const src_ctype* src, const float* mat, const int* mat_idx,
        dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        src_ctype bval, DTypeParamImpl<src_dtype> param, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) {
    // device-side copy of the quantization parameters
    CudaDTypeParamImpl<src_dtype> cuda_param(param);
    if (mat_idx == nullptr) {
        DirectSrcVisitor<src_ctype> direct;
        direct.ptr = src;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
                is_nhwc, direct, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    } else {
        IndexedSrcVisitor<src_ctype> indexed;
        indexed.ptr = src;
        indexed.idx = mat_idx;
        indexed.N_SRC = N_SRC;
        indexed.error_info = error_info;
        indexed.error_tracker = error_tracker;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
                is_nhwc, indexed, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    }
    after_kernel_launch();  // surface launch errors immediately
}
// Explicit instantiation: quint8 -> int8 NCHW4 fused dimshuffle/typecvt.
#define INST(src_dtype, src_ctype, dst_ctype)                                     \
    template void forward_proxy_quint8_dimshuffle_typecvt_nchw4(                  \
            bool is_nhwc, const src_ctype*, const float*, const int*, dst_ctype*, int, \
            int, int, int, int, int, int, src_ctype, DTypeParamImpl<src_dtype> param, \
            BorderMode, megcore::AsyncErrorInfo*, void*, cudaStream_t);
INST(dt_quint8, uint8_t, int8_t)
#undef INST
/* Public entry point: quantized-u8 warp perspective with fused layout
 * shuffle and type conversion to a planar NCHW destination.  `param`
 * carries the quantization parameters; a non-null `mat_idx` selects source
 * batches by index. */
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw(
        bool is_nhwc, const src_ctype* src, const float* mat, const int* mat_idx,
        dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW,
        src_ctype bval, DTypeParamImpl<src_dtype> param, BorderMode bmode,
        megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) {
    // device-side copy of the quantization parameters
    CudaDTypeParamImpl<src_dtype> cuda_param(param);
    if (mat_idx == nullptr) {
        DirectSrcVisitor<src_ctype> direct;
        direct.ptr = src;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
                is_nhwc, direct, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    } else {
        IndexedSrcVisitor<src_ctype> indexed;
        indexed.ptr = src;
        indexed.idx = mat_idx;
        indexed.N_SRC = N_SRC;
        indexed.error_info = error_info;
        indexed.error_tracker = error_tracker;
        dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
                is_nhwc, indexed, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, cuda_param,
                bmode, stream);
    }
    after_kernel_launch();  // surface launch errors immediately
}
// Explicit instantiation: quint8 -> float NCHW fused dimshuffle/typecvt.
#define INST(src_dtype, src_ctype, dst_ctype)                                     \
    template void forward_proxy_quint8_dimshuffle_typecvt_nchw(                   \
            bool is_nhwc, const src_ctype*, const float*, const int*, dst_ctype*, int, \
            int, int, int, int, int, int, src_ctype, DTypeParamImpl<src_dtype> param, \
            BorderMode, megcore::AsyncErrorInfo*, void*, cudaStream_t);
INST(dt_quint8, uint8_t, float)
#undef INST
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
#include "src/cuda/kernel_common/diagnostic_epilogue.cuh"
// vim: syntax=cpp.doxygen
|
261c66a15cf2294c2706f6fa9039488da65e48fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "../include/init.h"
#include "../include/dynamic.h"
// Abort with a diagnostic when the GPU does not have enough free memory to
// hold the working set implied by the requested grid size.  The estimate
// assumes 8 double2 device buffers (16 bytes per grid point each), plus 4
// more when the energy calculation is enabled.
// Fix: compute the element count in size_t — xDim*yDim*zDim overflows a
// 32-bit int for large 3-D grids, which previously made the check pass (or
// fail) with a garbage requirement.
void check_memory(Grid &par){
    int xDim = par.ival("xDim");
    int yDim = par.ival("yDim");
    int zDim = par.ival("zDim");
    bool energy_calc = par.bval("energy_calc");

    // widen before multiplying to avoid 32-bit overflow
    size_t gSize = (size_t)xDim * (size_t)yDim * (size_t)zDim;
    size_t free = 0;
    size_t total = 0;
    cudaHandleError( hipMemGetInfo(&free, &total) );

    // Note that this check is specifically for the case where we need to keep
    // 8 double2* values on the GPU. This is not the case for dynamic fields
    // and the test should be updated accordingly as these are used more.
    size_t req_memory = 16*8*gSize;
    if (energy_calc){
        req_memory += 4*16*gSize;
    }

    if (free < req_memory){
        std::cout << "Not enough GPU memory for gridsize!\n";
        std::cout << "Free memory is: " << free << '\n';
        std::cout << "Required memory is: " << req_memory << '\n';
        if (energy_calc){
            std::cout << "Required memory for energy calc is: "
                      << 4*16*gSize << '\n';
        }
        std::cout << "xDim is: " << xDim << '\n';
        std::cout << "yDim is: " << yDim << '\n';
        std::cout << "zDim is: " << zDim << '\n';
        std::cout << "gSize is: " << gSize << '\n';
        exit(1);
    }
}
int init(Grid &par){
check_memory(par);
set_fns(par);
// Re-establishing variables from parsed Grid class
// Initializes uninitialized variables to 0 values
std::string data_dir = par.sval("data_dir");
int dimnum = par.ival("dimnum");
int N = par.ival("atoms");
int xDim = par.ival("xDim");
int yDim = par.ival("yDim");
int zDim = par.ival("zDim");
bool write_file = par.bval("write_file");
bool cyl_coord = par.bval("cyl_coord");
bool corotating = par.bval("corotating");
dim3 threads;
unsigned int gSize = xDim;
if (dimnum > 1){
gSize *= yDim;
}
if (dimnum > 2){
gSize *= zDim;
}
double gdt = par.dval("gdt");
double dt = par.dval("dt");
double omegaX = par.dval("omegaX");
double omegaY = par.dval("omegaY");
double omegaZ = par.dval("omegaZ");
double gammaY = par.dval("gammaY"); //Aspect ratio of trapping geometry.
double winding = par.dval("winding");
double box_size = par.dval("box_size");
double *Energy;
double *r;
double *V_opt;
hipfftDoubleComplex *wfc;
if (par.bval("read_wfc") == true){
wfc = par.cufftDoubleComplexval("wfc");
}
hipfftDoubleComplex *EV_opt;
hipfftDoubleComplex *EappliedField;
std::cout << "gSize is: " << gSize << '\n';
hipfftHandle plan_1d;
hipfftHandle plan_2d;
hipfftHandle plan_3d;
hipfftHandle plan_other2d;
hipfftHandle plan_dim2;
hipfftHandle plan_dim3;
std::string buffer;
double Rxy; //Condensate scaling factor.
double a0x, a0y, a0z; //Harmonic oscillator length in x and y directions
generate_grid(par);
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
double mass = 1.4431607e-25; //Rb 87 mass, kg
par.store("mass",mass);
double a_s = 4.76e-9;
par.store("a_s",a_s);
double sum = 0.0;
a0x = sqrt(HBAR/(2*mass*omegaX));
a0y = sqrt(HBAR/(2*mass*omegaY));
a0z = sqrt(HBAR/(2*mass*omegaZ));
par.store("a0x",a0x);
par.store("a0y",a0y);
par.store("a0z",a0z);
// Let's go ahead and define the gDensConst here
// N*4*HBAR*HBAR*PI*(4.67e-9/mass)*sqrt(mass*(omegaZ)/(2*PI*HBAR)
double gDenConst = N*4*HBAR*HBAR*PI*(a_s/mass);
if (dimnum == 2){
gDenConst*= sqrt(mass*(omegaZ)/(2*PI*HBAR));
}
par.store("gDenConst", gDenConst);
Rxy = pow(15,0.2)*pow(N*a_s*sqrt(mass*omegaZ/HBAR),0.2);
par.store("Rxy",Rxy);
//std::cout << "Rxy is: " << Rxy << '\n';
double xMax, yMax, zMax;
if (box_size > 0){
xMax = box_size;
yMax = box_size;
zMax = box_size;
}
else{
xMax = 6*Rxy*a0x;
yMax = 6*Rxy*a0y;
zMax = 6*Rxy*a0z;
}
par.store("xMax",xMax);
par.store("yMax",yMax);
par.store("zMax",zMax);
double pxMax, pyMax, pzMax;
pxMax = (PI/xMax)*(xDim>>1);
pyMax = (PI/yMax)*(yDim>>1);
pzMax = (PI/zMax)*(zDim>>1);
par.store("pyMax",pyMax);
par.store("pxMax",pxMax);
par.store("pzMax",pzMax);
double dx = xMax/(xDim>>1);
double dy = yMax/(yDim>>1);
double dz = zMax/(zDim>>1);
if (dimnum < 3){
dz = 1;
}
if (dimnum < 2){
dy = 1;
}
par.store("dx",dx);
par.store("dy",dy);
par.store("dz",dz);
double dpx, dpy, dpz;
dpx = PI/(xMax);
dpy = PI/(yMax);
dpz = PI/(zMax);
//std::cout << "yMax is: " << yMax << '\t' << "xMax is: " << xMax << '\n';
//std::cout << "dpx and dpy are:" << '\n';
//std::cout << dpx << '\t' << dpy << '\n';
par.store("dpx",dpx);
par.store("dpy",dpy);
par.store("dpz",dpz);
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
/* Initialise wavefunction, momentum, position, angular momentum,
imaginary and real-time evolution operators . */
Energy = (double*) malloc(sizeof(double) * gSize);
r = (double *) malloc(sizeof(double) * gSize);
V_opt = (double *) malloc(sizeof(double) * gSize);
EV_opt = (hipfftDoubleComplex *) malloc(sizeof(hipfftDoubleComplex) * gSize);
EappliedField = (hipfftDoubleComplex *) malloc(sizeof(hipfftDoubleComplex) *
gSize);
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
/*
#ifdef __linux
int cores = omp_get_num_procs();
par.store("Cores_Total",cores);
// Assuming dev system specifics (Xeon with HT -> cores detected / 2)
par.store("Cores_Max",cores/2);
omp_set_num_threads(cores/2);
//#pragma omp parallel for private(j)
#endif
*/
par.store("gSize", xDim*yDim*zDim);
if (par.bval("use_param_file")){
parse_param_file(par);
}
generate_fields(par);
double *K = par.dsval("K");
double *Ax = par.dsval("Ax");
double *Ay = par.dsval("Ay");
double *Az = par.dsval("Az");
double *V = par.dsval("V");
double *pAx = par.dsval("pAx");
double *pAy = par.dsval("pAy");
double *pAz = par.dsval("pAz");
double *x = par.dsval("x");
double *y = par.dsval("y");
double *z = par.dsval("z");
double2 *GpAx = par.cufftDoubleComplexval("GpAx");
double2 *GpAy = par.cufftDoubleComplexval("GpAy");
double2 *GpAz = par.cufftDoubleComplexval("GpAz");
double2 *EpAx = par.cufftDoubleComplexval("EpAx");
double2 *EpAy = par.cufftDoubleComplexval("EpAy");
double2 *EpAz = par.cufftDoubleComplexval("EpAz");
double2 *GV = par.cufftDoubleComplexval("GV");
double2 *EV = par.cufftDoubleComplexval("EV");
double2 *GK = par.cufftDoubleComplexval("GK");
double2 *EK = par.cufftDoubleComplexval("EK");
wfc = par.cufftDoubleComplexval("wfc");
for(int i=0; i < gSize; i++ ){
sum+=sqrt(wfc[i].x*wfc[i].x + wfc[i].y*wfc[i].y);
}
if (write_file){
double *Bz;
double *Bx;
double *By;
if (dimnum == 2){
Bz = curl2d(par, Ax, Ay);
}
if (dimnum == 3){
std::cout << "Calculating the 3d curl..." << '\n';
Bx = curl3d_x(par, Ax, Ay, Az);
By = curl3d_y(par, Ax, Ay, Az);
Bz = curl3d_z(par, Ax, Ay, Az);
std::cout << "Finished calculating Curl" << '\n';
}
std::cout << "writing initial variables to file..." << '\n';
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
//hdfWriteDouble(xDim, V, 0, "V_0"); //HDF COMING SOON!
//hdfWriteComplex(xDim, wfc, 0, "wfc_0");
if (cyl_coord && dimnum > 2){
double *Br = curl3d_r(par, Bx, By);
double *Bphi = curl3d_phi(par, Bx, By);
FileIO::writeOutDouble(buffer, data_dir + "Br",Br,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "Bphi",Bphi,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "Bz",Bz,gSize,0);
free(Br);
free(Bx);
free(By);
free(Bz);
free(Bphi);
}
else{
if (dimnum > 1){
FileIO::writeOutDouble(buffer, data_dir + "Bz",Bz,gSize,0);
free(Bz);
}
if (dimnum > 2){
FileIO::writeOutDouble(buffer, data_dir + "Bx",Bx,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "By",By,gSize,0);
free(Bx);
free(By);
}
}
FileIO::writeOutDouble(buffer, data_dir + "V",V,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "K",K,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "pAy",pAy,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "pAx",pAx,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "Ax",Ax,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "Ay",Ay,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "Az",Az,gSize,0);
FileIO::writeOutDouble(buffer, data_dir + "x",x,xDim,0);
FileIO::writeOutDouble(buffer, data_dir + "y",y,yDim,0);
FileIO::writeOutDouble(buffer, data_dir + "z",z,zDim,0);
FileIO::writeOut(buffer, data_dir + "WFC",wfc,gSize,0);
FileIO::writeOut(buffer, data_dir + "EpAz",EpAz,gSize,0);
FileIO::writeOut(buffer, data_dir + "EpAy",EpAy,gSize,0);
FileIO::writeOut(buffer, data_dir + "EpAx",EpAx,gSize,0);
FileIO::writeOut(buffer, data_dir + "GK",GK,gSize,0);
FileIO::writeOut(buffer, data_dir + "GV",GV,gSize,0);
FileIO::writeOut(buffer, data_dir + "GpAx",GpAx,gSize,0);
FileIO::writeOut(buffer, data_dir + "GpAy",GpAy,gSize,0);
FileIO::writeOut(buffer, data_dir + "GpAz",GpAz,gSize,0);
}
if (par.bval("read_wfc") == false){
sum=sqrt(sum*dx*dy*dz);
for (int i = 0; i < gSize; i++){
wfc[i].x = (wfc[i].x)/(sum);
wfc[i].y = (wfc[i].y)/(sum);
}
}
cufftHandleError( hipfftPlan2d(&plan_2d, xDim, yDim, HIPFFT_Z2Z) );
generate_plan_other3d(&plan_1d, par, 0);
if (dimnum == 2){
generate_plan_other2d(&plan_other2d, par);
}
if (dimnum == 3){
generate_plan_other3d(&plan_dim3, par, 2);
generate_plan_other3d(&plan_dim2, par, 1);
}
cufftHandleError( hipfftPlan3d(&plan_3d, xDim, yDim, zDim, HIPFFT_Z2Z) );
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
//std::cout << GV[0].x << '\t' << GK[0].x << '\t'
// << pAy[0] << '\t' << pAx[0] << '\n';
//std::cout << "storing variables..." << '\n';
// Storing variables that have been initialized
// Re-establishing variables from parsed Grid class
// Initializes uninitialized variables to 0 values
par.store("Energy", Energy);
par.store("r", r);
par.store("wfc", wfc);
par.store("EV_opt", EV_opt);
par.store("V_opt", V_opt);
par.store("EappliedField", EappliedField);
par.store("plan_1d", plan_1d);
par.store("plan_2d", plan_2d);
par.store("plan_other2d", plan_other2d);
par.store("plan_3d", plan_3d);
par.store("plan_dim2", plan_dim2);
par.store("plan_dim3", plan_dim3);
// Parameters for time-depd variables.
par.store("K_time", false);
par.store("V_time", false);
par.store("Ax_time", false);
par.store("Ay_time", false);
par.store("Az_time", false);
std::cout << "variables stored" << '\n';
return 0;
}
// Allocates device buffers for the time-INdependent evolution operators and
// uploads them together with the current wavefunction.
//
// ev_type selects the operator set:
//   0 -> imaginary-time (ground-state) operators "G*"
//   1 -> real-time operators "E*"
// Device pointers are stored back into `par` under "K_gpu", "V_gpu",
// "pAx_gpu", "pAy_gpu", "pAz_gpu" and "wfc_gpu".
void set_variables(Grid &par, bool ev_type){
    // Re-establishing variables from parsed Grid class
    // Note that 3d variables are set to nullptr's unless needed
    // This might need to be fixed later
    double dx = par.dval("dx");
    double dy = par.dval("dy");
    double *V_opt = par.dsval("V_opt");
    double *pAy = par.dsval("pAy");
    double *pAx = par.dsval("pAx");
    // Fix: nullptr-initialize every device pointer. These were previously
    // uninitialized, and the unconditional par.store() calls below could
    // record indeterminate pointers whenever an allocation branch is skipped
    // (a *_time flag set, or pAy/pAz in lower-dimensional runs).
    double2 *pAy_gpu = nullptr;
    double2 *pAx_gpu = nullptr;
    double2 *pAz_gpu = nullptr;
    double2 *V_gpu = nullptr;
    double2 *K_gpu = nullptr;
    hipfftDoubleComplex *wfc = par.cufftDoubleComplexval("wfc");
    hipfftDoubleComplex *wfc_gpu = par.cufftDoubleComplexval("wfc_gpu");
    int dimnum = par.ival("dimnum");
    int xDim = par.ival("xDim");
    int yDim = par.ival("yDim");
    int zDim = par.ival("zDim");
    // Total node count depends on the simulation's dimensionality.
    int gsize = xDim;
    // Special variables for the 3d case
    if (dimnum > 1){
        gsize *= yDim;
    }
    if (dimnum > 2){
        gsize *= zDim;
    }
    // Only time-independent operators live permanently on the device; the
    // time-dependent ones are regenerated per step elsewhere.
    if(!par.bval("V_time")){
        cudaHandleError( hipMalloc((void**) &V_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("K_time")){
        cudaHandleError( hipMalloc((void**) &K_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("Ax_time")){
        cudaHandleError( hipMalloc((void**) &pAx_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("Ay_time") && dimnum > 1){
        cudaHandleError( hipMalloc((void**) &pAy_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("Az_time") && dimnum > 2){
        cudaHandleError( hipMalloc((void**) &pAz_gpu, sizeof(double2)*gsize) );
    }
    if (ev_type == 0){
        // Imaginary-time (ground-state) operator set.
        hipfftDoubleComplex *GK = par.cufftDoubleComplexval("GK");
        hipfftDoubleComplex *GV = par.cufftDoubleComplexval("GV");
        hipfftDoubleComplex *GpAx = par.cufftDoubleComplexval("GpAx");
        hipfftDoubleComplex *GpAy = nullptr;
        hipfftDoubleComplex *GpAz = nullptr;
        if(!par.bval("K_time")){
            cudaHandleError( hipMemcpy(K_gpu, GK, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
        }
        if(!par.bval("V_time")){
            cudaHandleError( hipMemcpy(V_gpu, GV, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
        }
        if(!par.bval("Ax_time")){
            cudaHandleError( hipMemcpy(pAx_gpu, GpAx, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
        }
        cudaHandleError( hipMemcpy(wfc_gpu, wfc, sizeof(hipfftDoubleComplex)*gsize,
                                   hipMemcpyHostToDevice) );
        par.store("K_gpu", K_gpu);
        par.store("V_gpu", V_gpu);
        par.store("wfc_gpu", wfc_gpu);
        par.store("pAy_gpu", pAy_gpu);
        par.store("pAx_gpu", pAx_gpu);
        // Special cases for 2d/3d: upload the momentum-space gauge fields
        // only when the dimensionality actually uses them.
        if (dimnum > 1 && !par.bval("Ay_time")){
            GpAy = par.cufftDoubleComplexval("GpAy");
            cudaHandleError( hipMemcpy(pAy_gpu, GpAy, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
            par.store("pAy_gpu", pAy_gpu);
        }
        if (dimnum > 2 && !par.bval("Az_time")){
            GpAz = par.cufftDoubleComplexval("GpAz");
            cudaHandleError( hipMemcpy(pAz_gpu, GpAz, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
            par.store("pAz_gpu", pAz_gpu);
        }
        // Host copies are no longer needed once resident on the device.
        // free(nullptr) is a safe no-op for the never-fetched pointers.
        free(GV); free(GK); free(GpAy); free(GpAx); free(GpAz);
    }
    else if (ev_type == 1){
        // Real-time operator set.
        hipfftDoubleComplex *EV = par.cufftDoubleComplexval("EV");
        hipfftDoubleComplex *EK = par.cufftDoubleComplexval("EK");
        hipfftDoubleComplex *EpAx = par.cufftDoubleComplexval("EpAx");
        hipfftDoubleComplex *EpAy = nullptr;
        hipfftDoubleComplex *EpAz = nullptr;
        if (!par.bval("K_time")){
            cudaHandleError( hipMemcpy(K_gpu, EK, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
            par.store("K_gpu", K_gpu);
        }
        if(!par.bval("Ax_time")){
            cudaHandleError( hipMemcpy(pAx_gpu, EpAx, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
            par.store("pAx_gpu", pAx_gpu);
        }
        if (!par.bval("V_time")){
            cudaHandleError( hipMemcpy(V_gpu, EV, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
            par.store("V_gpu", V_gpu);
        }
        cudaHandleError( hipMemcpy(wfc_gpu, wfc, sizeof(hipfftDoubleComplex)*gsize,
                                   hipMemcpyHostToDevice) );
        par.store("wfc_gpu", wfc_gpu);
        // Special variables / instructions for 2/3d case
        if (dimnum > 1 && !par.bval("Ay_time")){
            EpAy = par.cufftDoubleComplexval("EpAy");
            cudaHandleError( hipMemcpy(pAy_gpu, EpAy, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
            par.store("pAy_gpu", pAy_gpu);
        }
        if (dimnum > 2 && !par.bval("Az_time")){
            EpAz = par.cufftDoubleComplexval("EpAz");
            cudaHandleError( hipMemcpy(pAz_gpu, EpAz, sizeof(hipfftDoubleComplex)*gsize,
                                       hipMemcpyHostToDevice) );
            par.store("pAz_gpu", pAz_gpu);
        }
        free(EV);
        free(EK);
        free(EpAy);
        free(EpAx);
        free(EpAz);
    }
}
int main(int argc, char **argv){
Grid par = parseArgs(argc,argv);
//Grid par2 = parseArgs(argc,argv);
int device = par.ival("device");
int dimnum = par.ival("dimnum");
cudaHandleError( hipSetDevice(device) );
std::string buffer;
time_t start,fin;
time(&start);
printf("Start: %s\n", ctime(&start));
//************************************************************//
/*
* Initialise the Params data structure to track params and variables
*/
//************************************************************//
// If we want to read in a wfc, we may also need to imprint a phase. This
// will be done in the init_2d and init_3d functions
// We need a number of parameters for now
int xDim = par.ival("xDim");
int yDim = par.ival("yDim");
int zDim = par.ival("zDim");
if(par.bval("read_wfc") == true){
// Initializing the wfc
int gSize = xDim * yDim * zDim;
hipfftDoubleComplex *wfc;
std::string infile = par.sval("infile");
std::string infilei = par.sval("infilei");
printf("Loading wavefunction...");
wfc=FileIO::readIn(infile,infilei,gSize);
par.store("wfc",wfc);
printf("Wavefunction loaded.\n");
//std::string data_dir = par.sval("data_dir");
//FileIO::writeOut(buffer, data_dir + "WFC_CHECK",wfc,gSize,0);
}
init(par);
int gsteps = par.ival("gsteps");
int esteps = par.ival("esteps");
std::string data_dir = par.sval("data_dir");
std::cout << "variables re-established" << '\n';
if (par.bval("write_file")){
FileIO::writeOutParam(buffer, par, data_dir + "Params.dat");
}
if(gsteps > 0){
std::cout << "Imaginary-time evolution started..." << '\n';
set_variables(par, 0);
evolve(par, gsteps, 0, buffer);
}
if(esteps > 0){
std::cout << "real-time evolution started..." << '\n';
set_variables(par, 1);
evolve(par, esteps, 1, buffer);
}
std::cout << "done evolving" << '\n';
time(&fin);
printf("Finish: %s\n", ctime(&fin));
printf("Total time: %ld seconds\n ",(long)fin-start);
std::cout << '\n';
return 0;
}
| 261c66a15cf2294c2706f6fa9039488da65e48fe.cu | #include "../include/init.h"
#include "../include/dynamic.h"
// Verifies that the current GPU has enough free memory for the requested
// grid before any allocations are attempted; exits the program otherwise.
//
// The estimate assumes 8 double2 (16-byte) fields of gSize elements must be
// resident simultaneously, plus 4 more when the energy calculation is on.
void check_memory(Grid &par){
    int xDim = par.ival("xDim");
    int yDim = par.ival("yDim");
    int zDim = par.ival("zDim");
    bool energy_calc = par.bval("energy_calc");
    // Fix: compute the node count in size_t. The previous `int` product
    // overflows for grids of 2^31 nodes or more, making the later
    // (size_t)gSize cast wrap and the requirement estimate bogus.
    size_t gSize = (size_t)xDim * yDim * zDim;
    size_t free = 0;
    size_t total = 0;
    cudaHandleError( cudaMemGetInfo(&free, &total) );
    // Note that this check is specifically for the case where we need to keep
    // 8 double2* values on the GPU. This is not the case for dynamic fields
    // and the test should be updated accordingly as these are used more.
    size_t req_memory = 16*8*gSize;
    if (energy_calc){
        req_memory += 4*16*gSize;
    }
    if (free < req_memory){
        std::cout << "Not enough GPU memory for gridsize!\n";
        std::cout << "Free memory is: " << free << '\n';
        std::cout << "Required memory is: " << req_memory << '\n';
        if (energy_calc){
            std::cout << "Required memory for energy calc is: "
                      << 4*16*gSize << '\n';
        }
        std::cout << "xDim is: " << xDim << '\n';
        std::cout << "yDim is: " << yDim << '\n';
        std::cout << "zDim is: " << zDim << '\n';
        std::cout << "gSize is: " << gSize << '\n';
        exit(1);
    }
}
// Builds the complete simulation state: grids, trap/gauge fields, the
// imaginary- and real-time evolution operators, cuFFT plans, and the
// (optionally normalized) initial wavefunction. All results are stored back
// into `par`. Returns 0 on success; fatal conditions exit inside helpers.
int init(Grid &par){
    check_memory(par);
    set_fns(par);
    // Re-establishing variables from parsed Grid class
    // Initializes uninitialized variables to 0 values
    std::string data_dir = par.sval("data_dir");
    int dimnum = par.ival("dimnum");
    int N = par.ival("atoms");
    int xDim = par.ival("xDim");
    int yDim = par.ival("yDim");
    int zDim = par.ival("zDim");
    bool write_file = par.bval("write_file");
    bool cyl_coord = par.bval("cyl_coord");
    // NOTE(review): corotating, gdt, dt, omegaX/Y (directly), gammaY,
    // winding, V_opt, threads and dx/dy/dz (beyond normalization) are read
    // but not all used here — presumably consumed by evolve(); confirm.
    bool corotating = par.bval("corotating");
    dim3 threads;
    // Total node count; grows with dimensionality.
    unsigned int gSize = xDim;
    if (dimnum > 1){
        gSize *= yDim;
    }
    if (dimnum > 2){
        gSize *= zDim;
    }
    double gdt = par.dval("gdt");
    double dt = par.dval("dt");
    double omegaX = par.dval("omegaX");
    double omegaY = par.dval("omegaY");
    double omegaZ = par.dval("omegaZ");
    double gammaY = par.dval("gammaY"); //Aspect ratio of trapping geometry.
    double winding = par.dval("winding");
    double box_size = par.dval("box_size");
    double *Energy;
    double *r;
    double *V_opt;
    cufftDoubleComplex *wfc;
    if (par.bval("read_wfc") == true){
        // Wavefunction was loaded from disk in main(); fetch it.
        wfc = par.cufftDoubleComplexval("wfc");
    }
    cufftDoubleComplex *EV_opt;
    cufftDoubleComplex *EappliedField;
    std::cout << "gSize is: " << gSize << '\n';
    // FFT plans for the various dimensional splittings of the operators.
    cufftHandle plan_1d;
    cufftHandle plan_2d;
    cufftHandle plan_3d;
    cufftHandle plan_other2d;
    cufftHandle plan_dim2;
    cufftHandle plan_dim3;
    std::string buffer;
    double Rxy; //Condensate scaling factor.
    double a0x, a0y, a0z; //Harmonic oscillator length in x and y directions
    generate_grid(par);
    //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
    // Physical constants for the Rb-87 condensate.
    double mass = 1.4431607e-25; //Rb 87 mass, kg
    par.store("mass",mass);
    double a_s = 4.76e-9; // s-wave scattering length, m
    par.store("a_s",a_s);
    double sum = 0.0;
    a0x = sqrt(HBAR/(2*mass*omegaX));
    a0y = sqrt(HBAR/(2*mass*omegaY));
    a0z = sqrt(HBAR/(2*mass*omegaZ));
    par.store("a0x",a0x);
    par.store("a0y",a0y);
    par.store("a0z",a0z);
    // Let's go ahead and define the gDensConst here
    // N*4*HBAR*HBAR*PI*(4.67e-9/mass)*sqrt(mass*(omegaZ)/(2*PI*HBAR)
    double gDenConst = N*4*HBAR*HBAR*PI*(a_s/mass);
    if (dimnum == 2){
        // 2D reduction absorbs the z confinement into the coupling constant.
        gDenConst*= sqrt(mass*(omegaZ)/(2*PI*HBAR));
    }
    par.store("gDenConst", gDenConst);
    // Thomas-Fermi radius estimate used to size the computational box.
    Rxy = pow(15,0.2)*pow(N*a_s*sqrt(mass*omegaZ/HBAR),0.2);
    par.store("Rxy",Rxy);
    //std::cout << "Rxy is: " << Rxy << '\n';
    double xMax, yMax, zMax;
    if (box_size > 0){
        // Explicit user-specified (cubic) box.
        xMax = box_size;
        yMax = box_size;
        zMax = box_size;
    }
    else{
        // Default: 6x the Thomas-Fermi radius in each direction.
        xMax = 6*Rxy*a0x;
        yMax = 6*Rxy*a0y;
        zMax = 6*Rxy*a0z;
    }
    par.store("xMax",xMax);
    par.store("yMax",yMax);
    par.store("zMax",zMax);
    // Momentum-space extents and grid spacings (x>>1 == half the grid).
    double pxMax, pyMax, pzMax;
    pxMax = (PI/xMax)*(xDim>>1);
    pyMax = (PI/yMax)*(yDim>>1);
    pzMax = (PI/zMax)*(zDim>>1);
    par.store("pyMax",pyMax);
    par.store("pxMax",pxMax);
    par.store("pzMax",pzMax);
    double dx = xMax/(xDim>>1);
    double dy = yMax/(yDim>>1);
    double dz = zMax/(zDim>>1);
    // Collapse unused dimensions so volume elements stay correct.
    if (dimnum < 3){
        dz = 1;
    }
    if (dimnum < 2){
        dy = 1;
    }
    par.store("dx",dx);
    par.store("dy",dy);
    par.store("dz",dz);
    double dpx, dpy, dpz;
    dpx = PI/(xMax);
    dpy = PI/(yMax);
    dpz = PI/(zMax);
    //std::cout << "yMax is: " << yMax << '\t' << "xMax is: " << xMax << '\n';
    //std::cout << "dpx and dpy are:" << '\n';
    //std::cout << dpx << '\t' << dpy << '\n';
    par.store("dpx",dpx);
    par.store("dpy",dpy);
    par.store("dpz",dpz);
    //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
    /* Initialise wavefunction, momentum, position, angular momentum,
       imaginary and real-time evolution operators . */
    Energy = (double*) malloc(sizeof(double) * gSize);
    r = (double *) malloc(sizeof(double) * gSize);
    V_opt = (double *) malloc(sizeof(double) * gSize);
    EV_opt = (cufftDoubleComplex *) malloc(sizeof(cufftDoubleComplex) * gSize);
    EappliedField = (cufftDoubleComplex *) malloc(sizeof(cufftDoubleComplex) *
                                                  gSize);
    //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
    /*
    #ifdef __linux
    int cores = omp_get_num_procs();
    par.store("Cores_Total",cores);
    // Assuming dev system specifics (Xeon with HT -> cores detected / 2)
    par.store("Cores_Max",cores/2);
    omp_set_num_threads(cores/2);
    //#pragma omp parallel for private(j)
    #endif
    */
    par.store("gSize", xDim*yDim*zDim);
    if (par.bval("use_param_file")){
        parse_param_file(par);
    }
    // Builds K, V, the gauge fields A*, and the G*/E* evolution operators.
    generate_fields(par);
    double *K = par.dsval("K");
    double *Ax = par.dsval("Ax");
    double *Ay = par.dsval("Ay");
    double *Az = par.dsval("Az");
    double *V = par.dsval("V");
    double *pAx = par.dsval("pAx");
    double *pAy = par.dsval("pAy");
    double *pAz = par.dsval("pAz");
    double *x = par.dsval("x");
    double *y = par.dsval("y");
    double *z = par.dsval("z");
    double2 *GpAx = par.cufftDoubleComplexval("GpAx");
    double2 *GpAy = par.cufftDoubleComplexval("GpAy");
    double2 *GpAz = par.cufftDoubleComplexval("GpAz");
    double2 *EpAx = par.cufftDoubleComplexval("EpAx");
    double2 *EpAy = par.cufftDoubleComplexval("EpAy");
    double2 *EpAz = par.cufftDoubleComplexval("EpAz");
    double2 *GV = par.cufftDoubleComplexval("GV");
    double2 *EV = par.cufftDoubleComplexval("EV");
    double2 *GK = par.cufftDoubleComplexval("GK");
    double2 *EK = par.cufftDoubleComplexval("EK");
    wfc = par.cufftDoubleComplexval("wfc");
    // Accumulate the normalization sum over all nodes.
    // NOTE(review): this sums |wfc| (not |wfc|^2) before the sqrt below —
    // confirm this is the intended normalization convention.
    for(int i=0; i < gSize; i++ ){
        sum+=sqrt(wfc[i].x*wfc[i].x + wfc[i].y*wfc[i].y);
    }
    if (write_file){
        // Dump the initial fields (and magnetic field via curl of A) to disk.
        double *Bz;
        double *Bx;
        double *By;
        if (dimnum == 2){
            Bz = curl2d(par, Ax, Ay);
        }
        if (dimnum == 3){
            std::cout << "Calculating the 3d curl..." << '\n';
            Bx = curl3d_x(par, Ax, Ay, Az);
            By = curl3d_y(par, Ax, Ay, Az);
            Bz = curl3d_z(par, Ax, Ay, Az);
            std::cout << "Finished calculating Curl" << '\n';
        }
        std::cout << "writing initial variables to file..." << '\n';
        //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
        //hdfWriteDouble(xDim, V, 0, "V_0"); //HDF COMING SOON!
        //hdfWriteComplex(xDim, wfc, 0, "wfc_0");
        if (cyl_coord && dimnum > 2){
            // Cylindrical output: convert the Cartesian curl to (r, phi, z).
            double *Br = curl3d_r(par, Bx, By);
            double *Bphi = curl3d_phi(par, Bx, By);
            FileIO::writeOutDouble(buffer, data_dir + "Br",Br,gSize,0);
            FileIO::writeOutDouble(buffer, data_dir + "Bphi",Bphi,gSize,0);
            FileIO::writeOutDouble(buffer, data_dir + "Bz",Bz,gSize,0);
            free(Br);
            free(Bx);
            free(By);
            free(Bz);
            free(Bphi);
        }
        else{
            if (dimnum > 1){
                FileIO::writeOutDouble(buffer, data_dir + "Bz",Bz,gSize,0);
                free(Bz);
            }
            if (dimnum > 2){
                FileIO::writeOutDouble(buffer, data_dir + "Bx",Bx,gSize,0);
                FileIO::writeOutDouble(buffer, data_dir + "By",By,gSize,0);
                free(Bx);
                free(By);
            }
        }
        FileIO::writeOutDouble(buffer, data_dir + "V",V,gSize,0);
        FileIO::writeOutDouble(buffer, data_dir + "K",K,gSize,0);
        FileIO::writeOutDouble(buffer, data_dir + "pAy",pAy,gSize,0);
        FileIO::writeOutDouble(buffer, data_dir + "pAx",pAx,gSize,0);
        FileIO::writeOutDouble(buffer, data_dir + "Ax",Ax,gSize,0);
        FileIO::writeOutDouble(buffer, data_dir + "Ay",Ay,gSize,0);
        FileIO::writeOutDouble(buffer, data_dir + "Az",Az,gSize,0);
        FileIO::writeOutDouble(buffer, data_dir + "x",x,xDim,0);
        FileIO::writeOutDouble(buffer, data_dir + "y",y,yDim,0);
        FileIO::writeOutDouble(buffer, data_dir + "z",z,zDim,0);
        FileIO::writeOut(buffer, data_dir + "WFC",wfc,gSize,0);
        FileIO::writeOut(buffer, data_dir + "EpAz",EpAz,gSize,0);
        FileIO::writeOut(buffer, data_dir + "EpAy",EpAy,gSize,0);
        FileIO::writeOut(buffer, data_dir + "EpAx",EpAx,gSize,0);
        FileIO::writeOut(buffer, data_dir + "GK",GK,gSize,0);
        FileIO::writeOut(buffer, data_dir + "GV",GV,gSize,0);
        FileIO::writeOut(buffer, data_dir + "GpAx",GpAx,gSize,0);
        FileIO::writeOut(buffer, data_dir + "GpAy",GpAy,gSize,0);
        FileIO::writeOut(buffer, data_dir + "GpAz",GpAz,gSize,0);
    }
    if (par.bval("read_wfc") == false){
        // Normalize the freshly-generated wavefunction (a loaded one is
        // assumed to already be normalized).
        sum=sqrt(sum*dx*dy*dz);
        for (int i = 0; i < gSize; i++){
            wfc[i].x = (wfc[i].x)/(sum);
            wfc[i].y = (wfc[i].y)/(sum);
        }
    }
    // Create FFT plans for every splitting the evolution kernels need.
    cufftHandleError( cufftPlan2d(&plan_2d, xDim, yDim, CUFFT_Z2Z) );
    generate_plan_other3d(&plan_1d, par, 0);
    if (dimnum == 2){
        generate_plan_other2d(&plan_other2d, par);
    }
    if (dimnum == 3){
        generate_plan_other3d(&plan_dim3, par, 2);
        generate_plan_other3d(&plan_dim2, par, 1);
    }
    cufftHandleError( cufftPlan3d(&plan_3d, xDim, yDim, zDim, CUFFT_Z2Z) );
    //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%//
    //std::cout << GV[0].x << '\t' << GK[0].x << '\t'
    //          << pAy[0] << '\t' << pAx[0] << '\n';
    //std::cout << "storing variables..." << '\n';
    // Storing variables that have been initialized
    // Re-establishing variables from parsed Grid class
    // Initializes uninitialized variables to 0 values
    par.store("Energy", Energy);
    par.store("r", r);
    par.store("wfc", wfc);
    par.store("EV_opt", EV_opt);
    par.store("V_opt", V_opt);
    par.store("EappliedField", EappliedField);
    par.store("plan_1d", plan_1d);
    par.store("plan_2d", plan_2d);
    par.store("plan_other2d", plan_other2d);
    par.store("plan_3d", plan_3d);
    par.store("plan_dim2", plan_dim2);
    par.store("plan_dim3", plan_dim3);
    // Parameters for time-depd variables: all fields start time-independent.
    par.store("K_time", false);
    par.store("V_time", false);
    par.store("Ax_time", false);
    par.store("Ay_time", false);
    par.store("Az_time", false);
    std::cout << "variables stored" << '\n';
    return 0;
}
// Allocates device buffers for the time-INdependent evolution operators and
// uploads them together with the current wavefunction.
//
// ev_type selects the operator set:
//   0 -> imaginary-time (ground-state) operators "G*"
//   1 -> real-time operators "E*"
// Device pointers are stored back into `par` under "K_gpu", "V_gpu",
// "pAx_gpu", "pAy_gpu", "pAz_gpu" and "wfc_gpu".
void set_variables(Grid &par, bool ev_type){
    // Re-establishing variables from parsed Grid class
    // Note that 3d variables are set to nullptr's unless needed
    // This might need to be fixed later
    double dx = par.dval("dx");
    double dy = par.dval("dy");
    double *V_opt = par.dsval("V_opt");
    double *pAy = par.dsval("pAy");
    double *pAx = par.dsval("pAx");
    // Fix: nullptr-initialize every device pointer. These were previously
    // uninitialized, and the unconditional par.store() calls below could
    // record indeterminate pointers whenever an allocation branch is skipped
    // (a *_time flag set, or pAy/pAz in lower-dimensional runs).
    double2 *pAy_gpu = nullptr;
    double2 *pAx_gpu = nullptr;
    double2 *pAz_gpu = nullptr;
    double2 *V_gpu = nullptr;
    double2 *K_gpu = nullptr;
    cufftDoubleComplex *wfc = par.cufftDoubleComplexval("wfc");
    cufftDoubleComplex *wfc_gpu = par.cufftDoubleComplexval("wfc_gpu");
    int dimnum = par.ival("dimnum");
    int xDim = par.ival("xDim");
    int yDim = par.ival("yDim");
    int zDim = par.ival("zDim");
    // Total node count depends on the simulation's dimensionality.
    int gsize = xDim;
    // Special variables for the 3d case
    if (dimnum > 1){
        gsize *= yDim;
    }
    if (dimnum > 2){
        gsize *= zDim;
    }
    // Only time-independent operators live permanently on the device; the
    // time-dependent ones are regenerated per step elsewhere.
    if(!par.bval("V_time")){
        cudaHandleError( cudaMalloc((void**) &V_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("K_time")){
        cudaHandleError( cudaMalloc((void**) &K_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("Ax_time")){
        cudaHandleError( cudaMalloc((void**) &pAx_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("Ay_time") && dimnum > 1){
        cudaHandleError( cudaMalloc((void**) &pAy_gpu, sizeof(double2)*gsize) );
    }
    if(!par.bval("Az_time") && dimnum > 2){
        cudaHandleError( cudaMalloc((void**) &pAz_gpu, sizeof(double2)*gsize) );
    }
    if (ev_type == 0){
        // Imaginary-time (ground-state) operator set.
        cufftDoubleComplex *GK = par.cufftDoubleComplexval("GK");
        cufftDoubleComplex *GV = par.cufftDoubleComplexval("GV");
        cufftDoubleComplex *GpAx = par.cufftDoubleComplexval("GpAx");
        cufftDoubleComplex *GpAy = nullptr;
        cufftDoubleComplex *GpAz = nullptr;
        if(!par.bval("K_time")){
            cudaHandleError( cudaMemcpy(K_gpu, GK, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
        }
        if(!par.bval("V_time")){
            cudaHandleError( cudaMemcpy(V_gpu, GV, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
        }
        if(!par.bval("Ax_time")){
            cudaHandleError( cudaMemcpy(pAx_gpu, GpAx, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
        }
        cudaHandleError( cudaMemcpy(wfc_gpu, wfc, sizeof(cufftDoubleComplex)*gsize,
                                    cudaMemcpyHostToDevice) );
        par.store("K_gpu", K_gpu);
        par.store("V_gpu", V_gpu);
        par.store("wfc_gpu", wfc_gpu);
        par.store("pAy_gpu", pAy_gpu);
        par.store("pAx_gpu", pAx_gpu);
        // Special cases for 2d/3d: upload the momentum-space gauge fields
        // only when the dimensionality actually uses them.
        if (dimnum > 1 && !par.bval("Ay_time")){
            GpAy = par.cufftDoubleComplexval("GpAy");
            cudaHandleError( cudaMemcpy(pAy_gpu, GpAy, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
            par.store("pAy_gpu", pAy_gpu);
        }
        if (dimnum > 2 && !par.bval("Az_time")){
            GpAz = par.cufftDoubleComplexval("GpAz");
            cudaHandleError( cudaMemcpy(pAz_gpu, GpAz, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
            par.store("pAz_gpu", pAz_gpu);
        }
        // Host copies are no longer needed once resident on the device.
        // free(nullptr) is a safe no-op for the never-fetched pointers.
        free(GV); free(GK); free(GpAy); free(GpAx); free(GpAz);
    }
    else if (ev_type == 1){
        // Real-time operator set.
        cufftDoubleComplex *EV = par.cufftDoubleComplexval("EV");
        cufftDoubleComplex *EK = par.cufftDoubleComplexval("EK");
        cufftDoubleComplex *EpAx = par.cufftDoubleComplexval("EpAx");
        cufftDoubleComplex *EpAy = nullptr;
        cufftDoubleComplex *EpAz = nullptr;
        if (!par.bval("K_time")){
            cudaHandleError( cudaMemcpy(K_gpu, EK, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
            par.store("K_gpu", K_gpu);
        }
        if(!par.bval("Ax_time")){
            cudaHandleError( cudaMemcpy(pAx_gpu, EpAx, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
            par.store("pAx_gpu", pAx_gpu);
        }
        if (!par.bval("V_time")){
            cudaHandleError( cudaMemcpy(V_gpu, EV, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
            par.store("V_gpu", V_gpu);
        }
        cudaHandleError( cudaMemcpy(wfc_gpu, wfc, sizeof(cufftDoubleComplex)*gsize,
                                    cudaMemcpyHostToDevice) );
        par.store("wfc_gpu", wfc_gpu);
        // Special variables / instructions for 2/3d case
        if (dimnum > 1 && !par.bval("Ay_time")){
            EpAy = par.cufftDoubleComplexval("EpAy");
            cudaHandleError( cudaMemcpy(pAy_gpu, EpAy, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
            par.store("pAy_gpu", pAy_gpu);
        }
        if (dimnum > 2 && !par.bval("Az_time")){
            EpAz = par.cufftDoubleComplexval("EpAz");
            cudaHandleError( cudaMemcpy(pAz_gpu, EpAz, sizeof(cufftDoubleComplex)*gsize,
                                        cudaMemcpyHostToDevice) );
            par.store("pAz_gpu", pAz_gpu);
        }
        free(EV);
        free(EK);
        free(EpAy);
        free(EpAx);
        free(EpAz);
    }
}
int main(int argc, char **argv){
Grid par = parseArgs(argc,argv);
//Grid par2 = parseArgs(argc,argv);
int device = par.ival("device");
int dimnum = par.ival("dimnum");
cudaHandleError( cudaSetDevice(device) );
std::string buffer;
time_t start,fin;
time(&start);
printf("Start: %s\n", ctime(&start));
//************************************************************//
/*
* Initialise the Params data structure to track params and variables
*/
//************************************************************//
// If we want to read in a wfc, we may also need to imprint a phase. This
// will be done in the init_2d and init_3d functions
// We need a number of parameters for now
int xDim = par.ival("xDim");
int yDim = par.ival("yDim");
int zDim = par.ival("zDim");
if(par.bval("read_wfc") == true){
// Initializing the wfc
int gSize = xDim * yDim * zDim;
cufftDoubleComplex *wfc;
std::string infile = par.sval("infile");
std::string infilei = par.sval("infilei");
printf("Loading wavefunction...");
wfc=FileIO::readIn(infile,infilei,gSize);
par.store("wfc",wfc);
printf("Wavefunction loaded.\n");
//std::string data_dir = par.sval("data_dir");
//FileIO::writeOut(buffer, data_dir + "WFC_CHECK",wfc,gSize,0);
}
init(par);
int gsteps = par.ival("gsteps");
int esteps = par.ival("esteps");
std::string data_dir = par.sval("data_dir");
std::cout << "variables re-established" << '\n';
if (par.bval("write_file")){
FileIO::writeOutParam(buffer, par, data_dir + "Params.dat");
}
if(gsteps > 0){
std::cout << "Imaginary-time evolution started..." << '\n';
set_variables(par, 0);
evolve(par, gsteps, 0, buffer);
}
if(esteps > 0){
std::cout << "real-time evolution started..." << '\n';
set_variables(par, 1);
evolve(par, esteps, 1, buffer);
}
std::cout << "done evolving" << '\n';
time(&fin);
printf("Finish: %s\n", ctime(&fin));
printf("Total time: %ld seconds\n ",(long)fin-start);
std::cout << '\n';
return 0;
}
|
ba455ebfa2728b9bc5c78eca72b0fbea1ef9be4b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <stdexcept>
#include <vector>
#include <string>
#include <cmath>
#define __CUDA_INTERNAL_COMPILATION__
#include <math_functions.h>
#undef __CUDA_INTERNAL_COMPILATION__
//#include <hip/hip_runtime.h>
//#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <helper_functions.h>
#include <device_double_functions.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
typedef float2 Complex;
#include "const.h"
#include "cochlea_common.h"
#include "cochlea.cuh"
//#include <thrust\device_vector.h>
//#include <thrust\host_vector.h>
//#include <thrust\fill.h>
#ifdef CUDA_MEX_PROJECT
#include <mex.h>
#endif
// nvcc does not seem to like variadic macros, so we have to define
// one for each kernel parameter list:
// Launch-configuration helper macros so a kernel call can be written as
// `kernel KERNEL_ARGS2(grid, block) (args)`. The hipify conversion mangled
// both branches (stray `hipLaunchKernelGGL((` fragments and a missing
// parenthesis in the host-only KERNEL_ARGS2); restored here so the macros
// expand to a valid triple-chevron launch under a HIP device compiler and to
// nothing (stripping the launch config) under a plain host compiler.
#ifdef __HIPCC__
#define KERNEL_ARGS2(grid, block) <<< dim3(grid), dim3(block), 0, 0 >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< dim3(grid), dim3(block), sh_mem, 0 >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< dim3(grid), dim3(block), sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
#ifndef gpuAssert
#define gpuAssert( condition ) { if( (condition) != 0 ) { printf( "\n FAILURE %s in %s, line %d\n", hipGetErrorString(condition), __FILE__, __LINE__ ); throw std::runtime_error("GPU Failure aborts..."); } }
#endif
// Holder for per-block device arrays generated during the cochlear model run
// (convergence tolerances, output/end sample indices, and computed power).
// Allocation is idempotent: buffers are created once and reused until
// releaseGenerated() is called.
struct cudaHolderGeneratedData {
    float *generated_model_max_m1_sp_tolerance;
    float *generated_model_throw_tolerance;
    float *generated_calculated_power_array; // in case of loaded from input boundary calculation write max power in dBSPL
    int *generated_model_out_sample_index;
    int *generated_model_end_sample_index;
    // Nonzero (== MAX_NUMBER_OF_BLOCKS) once the device buffers exist.
    int generated_sections = 0;
    // Allocates one float/int per block for each generated array; no-op if
    // already allocated.
    void allocateGenerated() {
        if (generated_sections == 0) {
            generated_sections = MAX_NUMBER_OF_BLOCKS;
            int blocks_pointers_size = generated_sections * sizeof(float);
            int blocks_pointers_size_int = generated_sections * sizeof(int);
            // generated array for blocks thresholds
            gpuAssert(hipMalloc((void **)&generated_calculated_power_array, blocks_pointers_size));
            gpuAssert(hipMalloc((void **)&generated_model_max_m1_sp_tolerance, blocks_pointers_size));
            gpuAssert(hipMalloc((void **)&generated_model_throw_tolerance, blocks_pointers_size));
            gpuAssert(hipMalloc((void **)&generated_model_out_sample_index, blocks_pointers_size_int));
            gpuAssert(hipMalloc((void **)&generated_model_end_sample_index, blocks_pointers_size_int));
        }
    }
    // Frees the device buffers and resets the holder so allocateGenerated()
    // can be called again; no-op when nothing is allocated.
    void releaseGenerated() {
        if (generated_sections > 0) {
            generated_sections = 0;
            // generated array for blocks thresholds
            gpuAssert(hipFree(generated_calculated_power_array));
            generated_calculated_power_array = NULL;
            gpuAssert(hipFree(generated_model_max_m1_sp_tolerance));
            generated_model_max_m1_sp_tolerance = NULL;
            gpuAssert(hipFree(generated_model_throw_tolerance));
            generated_model_throw_tolerance = NULL;
            gpuAssert(hipFree(generated_model_end_sample_index));
            generated_model_end_sample_index = NULL;
            gpuAssert(hipFree(generated_model_out_sample_index));
            generated_model_out_sample_index = NULL;
        }
    }
} cudaHolderGeneratedData;
// Tracks whether the AIHC coefficient table is resident in the device symbol
// model_Aihc, uploading it at most once until explicitly re-enabled.
struct cudaModelAihc {
    int aihc_loaded = 0;  // 1 once model_Aihc holds the host data

    // Returns nonzero when the table has already been uploaded.
    int is_loaded() { return aihc_loaded; }

    // Uploads SECTIONS*LAMBDA_COUNT floats into model_Aihc; a no-op when a
    // previous upload is still marked valid.
    void loadAihc(float *Aihc) {
        if (is_loaded()) {
            return;
        }
        gpuAssert(hipMemcpyToSymbol(model_Aihc, Aihc,
                                    SECTIONS * LAMBDA_COUNT * sizeof(float),
                                    0, hipMemcpyHostToDevice));
        aihc_loaded = 1;
    }

    // Invalidates the cached state so the next loadAihc() uploads again.
    void enableLoadAihc() {
        aihc_loaded = 0;
    }
} cudaModelAihc;
struct cudaHolderData {
int cochlear_parametrs_initialized = 0;
int cochlea_sections;
float *cuda_input_samples;
float *cuda_saved_speeds;
float *cuda_Rd;
float *cuda_Sd;
float *cuda_Qd;
float *cuda_Yd;
float *cuda_gammad;
float *converge_speed;
float *converge_speed_blocks;
float *cuda_massd;
float *cuda_Md;
float *cuda_Ud;
float *cuda_Ld;
float *cuda_S_ohcd;
float *cuda_S_tmd;
float *cuda_R_tmd;
int *time_filter_params;
int last_saved_nodes_per_time_block_for_cuda;
int *cuda_Failed_Converged_Time_Node;
int *cuda_Failed_Converged_Blocks;
float *cuda_Converged_Time_Node;
float *cuda_Converged_Blocks;
float *cuda_convergence_jacoby_loops_per_iteration;
float *cuda_convergence_jacoby_loops_per_iteration_blocks;
size_t numBlocks_data = 0;
size_t inputBufferNodes_data = 0;
size_t resultBufferNodes_data = 0;
// Allocates one float per cochlear section for each model parameter array on
// the device. Idempotent: a second call is a no-op until
// releaseCochlearData() resets the holder.
void allocateCochlearData(const int& Sections) {
    if (cochlear_parametrs_initialized == 0) {
        cochlea_sections = Sections;
        cochlear_parametrs_initialized = 1;
        // Bytes per per-section float array.
        int cochlear_allocated = cochlea_sections * sizeof(float);
        // cuda Rd,Sd,Qd,Yd
        gpuAssert(hipMalloc((void **)&cuda_Rd, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_Sd, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_Qd, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_Yd, cochlear_allocated));
        // cuda S_ohcd,S_tmd,gammad,R_tmd
        gpuAssert(hipMalloc((void **)&cuda_S_ohcd, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_S_tmd, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_gammad, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_R_tmd, cochlear_allocated));
        // cuda massd,Md,Ud,Ld
        gpuAssert(hipMalloc((void **)&cuda_massd, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_Md, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_Ud, cochlear_allocated));
        gpuAssert(hipMalloc((void **)&cuda_Ld, cochlear_allocated));
    }
}
// Frees every per-section device array allocated by allocateCochlearData()
// and nulls the pointers; a no-op when nothing is allocated.
void releaseCochlearData() {
    if (cochlear_parametrs_initialized == 1) {
        // cuda Rd,Sd,Qd,Yd
        gpuAssert(hipFree(cuda_Rd));
        cuda_Rd = NULL;
        gpuAssert(hipFree(cuda_Sd));
        cuda_Sd = NULL;
        gpuAssert(hipFree(cuda_Qd));
        cuda_Qd = NULL;
        gpuAssert(hipFree(cuda_Yd));
        cuda_Yd = NULL;
        // cuda S_ohcd,S_tmd,gammad,R_tmd
        gpuAssert(hipFree(cuda_S_ohcd));
        cuda_S_ohcd = NULL;
        gpuAssert(hipFree(cuda_S_tmd));
        cuda_S_tmd = NULL;
        gpuAssert(hipFree(cuda_gammad));
        cuda_gammad = NULL;
        gpuAssert(hipFree(cuda_R_tmd));
        cuda_R_tmd = NULL;
        // cuda massd,Md,Ud,Ld
        gpuAssert(hipFree(cuda_massd));
        cuda_massd = NULL;
        gpuAssert(hipFree(cuda_Md));
        cuda_Md = NULL;
        gpuAssert(hipFree(cuda_Ud));
        cuda_Ud = NULL;
        gpuAssert(hipFree(cuda_Ld));
        cuda_Ld = NULL;
        // Allow a subsequent allocateCochlearData() call.
        cochlear_parametrs_initialized = 0;
    }
}
// Upload the host-side cochlear parameter vectors into their matching device
// buffers. Silently does nothing if allocateCochlearData() was never called.
void loadCochlearData(float *S_ohc, float *S_tm, float *gamma, float *R_tm, float *mass, float *M, float *U, float *L, float *R, float *S,float *Q ) {
	if (cochlear_parametrs_initialized != 1) return;
	int bytes_per_vector = cochlea_sections * sizeof(float);
	struct { float *dst; const float *src; } transfers[] = {
		{ cuda_S_ohcd, S_ohc }, { cuda_S_tmd, S_tm }, { cuda_gammad, gamma },
		{ cuda_R_tmd, R_tm },   { cuda_massd, mass }, { cuda_Md, M },
		{ cuda_Ud, U },         { cuda_Ld, L },       { cuda_Rd, R },
		{ cuda_Sd, S },         { cuda_Qd, Q }
	};
	for (auto &t : transfers) {
		gpuAssert(hipMemcpy(t.dst, (const void *)t.src, bytes_per_vector, hipMemcpyHostToDevice));
	}
}
// Capacity predicates: "sufficent" checks whether the current allocation can
// hold test_nodes entries; "allocated" is sufficiency for a single entry.
int isInputMemorySufficent(size_t test_nodes) { return inputBufferNodes_data >= test_nodes; }
int isInputMemoryAllocated() { return isInputMemorySufficent(1); }
int isOutputMemorySufficent(size_t test_nodes) { return resultBufferNodes_data >= test_nodes; }
int isOutputMemoryAllocated() { return isOutputMemorySufficent(1); }
int isBlocksMemorySufficent(size_t test_nodes) { return numBlocks_data >= test_nodes; }
int isBlocksMemoryAllocated() { return isBlocksMemorySufficent(1); }
// Grow (never shrink) the input-side and output-side device buffers so they
// hold at least the requested node counts. Sufficient existing buffers are
// kept untouched; insufficient ones are released and reallocated.
void allocateOHCIOData(const size_t& inputBufferNodes,const size_t& resultBufferNodes) {
	if (!isInputMemorySufficent(inputBufferNodes)) {
		releaseInputData();
		size_t tsizep = inputBufferNodes * sizeof(float);
		size_t isizep = inputBufferNodes * sizeof(int);
		inputBufferNodes_data = inputBufferNodes;
		// input samples and results
		gpuAssert(hipMalloc((void **)&cuda_input_samples, tsizep));
		gpuAssert(hipMalloc((void **)&cuda_Failed_Converged_Time_Node, isizep));
		gpuAssert(hipMalloc((void **)&cuda_Converged_Time_Node, tsizep));
		gpuAssert(hipMalloc((void **)&cuda_convergence_jacoby_loops_per_iteration, tsizep));
		gpuAssert(hipMalloc((void **)&converge_speed, tsizep));
	}
	if (!isOutputMemorySufficent(resultBufferNodes)) {
		releaseOutputData();
		size_t ssizep = resultBufferNodes * sizeof(float);
		resultBufferNodes_data = resultBufferNodes;
		// BUG FIX: ssizep is size_t — printing it with %d is undefined behavior
		// on 64-bit targets; %zu is the correct conversion specifier.
		printf("allocated %zu bytes for BM velocity\n", ssizep);
		gpuAssert(hipMalloc((void **)&cuda_saved_speeds, ssizep));
	}
}
// Release the input-side device buffers (no-op when nothing is allocated),
// freeing in the same order as the original and nulling every pointer.
void releaseInputData() {
	if (!isInputMemoryAllocated()) return;
	inputBufferNodes_data = 0;
	gpuAssert(hipFree(cuda_input_samples));
	gpuAssert(hipFree(cuda_Failed_Converged_Time_Node));
	gpuAssert(hipFree(converge_speed));
	gpuAssert(hipFree(cuda_Converged_Time_Node));
	gpuAssert(hipFree(cuda_convergence_jacoby_loops_per_iteration));
	cuda_input_samples = NULL;
	cuda_Failed_Converged_Time_Node = NULL;
	converge_speed = NULL;
	cuda_Converged_Time_Node = NULL;
	cuda_convergence_jacoby_loops_per_iteration = NULL;
}
// Free the BM-velocity results buffer, if one is currently held.
void releaseOutputData() {
	if (!isOutputMemoryAllocated()) return;
	resultBufferNodes_data = 0;
	gpuAssert(hipFree(cuda_saved_speeds));
	cuda_saved_speeds = NULL;
}
// Convenience wrapper: drop both the input samples and the results buffers.
void releaseOHCIOData() {
	// input samples and results
	releaseInputData();
	releaseOutputData();
}
// Ensure the per-block convergence statistics arrays can hold `nodes` blocks;
// regrows (release + reallocate) only when the current capacity is too small.
void allocateBlocksConverganceArray(size_t nodes) {
	if (isBlocksMemorySufficent(nodes)) return;
	releaseBlocksConverganceArray();
	numBlocks_data = nodes;
	size_t float_bytes = numBlocks_data * sizeof(float);
	gpuAssert(hipMalloc((void **)&cuda_Failed_Converged_Blocks, numBlocks_data * sizeof(int)));
	gpuAssert(hipMalloc((void **)&converge_speed_blocks, float_bytes));
	gpuAssert(hipMalloc((void **)&cuda_Converged_Blocks, float_bytes));
	gpuAssert(hipMalloc((void **)&cuda_convergence_jacoby_loops_per_iteration_blocks, float_bytes));
}
// Free the per-block convergence arrays and mark the holder empty.
void releaseBlocksConverganceArray() {
	if (!isBlocksMemoryAllocated()) return;
	gpuAssert(hipFree(cuda_Failed_Converged_Blocks));
	gpuAssert(hipFree(converge_speed_blocks));
	gpuAssert(hipFree(cuda_Converged_Blocks));
	gpuAssert(hipFree(cuda_convergence_jacoby_loops_per_iteration_blocks));
	cuda_Failed_Converged_Blocks = NULL;
	converge_speed_blocks = NULL;
	cuda_Converged_Blocks = NULL;
	cuda_convergence_jacoby_loops_per_iteration_blocks = NULL;
	numBlocks_data = 0;
}
} cudaHolderData;
// Holder for the IHC/lambda device buffers used by the lambda stage.
// Declaration-only body: the allocate/release members are defined below.
struct cudaLambdaHolderData {
	// cuda IHC constant size data
	double *cuda_nIHC = NULL;              // per-section IHC vector (doubles) on the device
	int cochlea_sections = SECTIONS;       // number of cochlear sections
	int allocatedIHCDataVar = 0;           // guard flag for cuda_nIHC
	inline int isIHCDataAllocated() { return allocatedIHCDataVar; }
	void allocateIHCData();
	void releaseIHCData();
	size_t allocatedLambdaNodes = 0; // to know if data need, allocated or reallocated
	// NOTE(review): returns size_t here while the analogous buffer predicates
	// below return int — harmless in boolean context, but inconsistent; confirm.
	inline size_t isLambdaMemorySufficent(size_t test_nodes) { return allocatedLambdaNodes >= LAMBDA_COUNT*test_nodes; }
	inline size_t isLambdaMemoryAllocated() { return isLambdaMemorySufficent(1); }
	size_t allocatedBufferNodes = 0; // to know if data need, allocated or reallocated
	inline int isBufferMemorySufficent(size_t test_nodes) { return allocatedBufferNodes >= test_nodes; }
	inline int isBufferMemoryAllocated() { return isBufferMemorySufficent(1); }
	JNDFloat *cuda_JND_Lambda = NULL; // will use as buffer too for memory conversions purposes
	JNDFloat *cuda_Buffer1 = NULL;    // scratch buffer (allocatedBufferNodes entries)
	// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
	void allocateLambdaMemory(size_t nodes);
	void releaseLambdaMemory();
	// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
	void allocateBufferMemory(size_t nodes);
	void releaseBufferMemory();
} cudaLambdaHolderData;
// Free the device-side IHC vector, if it exists, and reset the guard flag.
void cudaLambdaHolderData::releaseIHCData() {
	if (!isIHCDataAllocated()) return;
	gpuAssert(hipFree(cuda_nIHC));
	cuda_nIHC = NULL;
	allocatedIHCDataVar = 0;
}
// One-shot allocation of the per-section IHC vector (doubles) on the device.
void cudaLambdaHolderData::allocateIHCData() {
	if (isIHCDataAllocated()) return; // already present
	gpuAssert(hipMalloc((void **)&cuda_nIHC, cochlea_sections * sizeof(double)));
	allocatedIHCDataVar = 1;
}
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
// Ensure the lambda buffer can hold `nodes` time nodes for all LAMBDA_COUNT
// lambdas; regrows only when the current capacity is insufficient.
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
void cudaLambdaHolderData::allocateLambdaMemory(size_t nodes) {
	if (!isLambdaMemorySufficent(nodes)) {
		releaseLambdaMemory();
		allocatedLambdaNodes = nodes*LAMBDA_COUNT; // now its sufficent
		size_t lambda_memory_size_in_bytes = allocatedLambdaNodes * sizeof(JNDFloat);
		// FIX: %zu is the portable specifier for size_t (%lu is wrong on LLP64/Win64).
		printf("allocated %zu bytes for lambda nodes\n", lambda_memory_size_in_bytes);
		gpuAssert(hipMalloc((void **)&cuda_JND_Lambda, lambda_memory_size_in_bytes));
	}
}
// Free the lambda buffer and reset the recorded capacity to zero.
void cudaLambdaHolderData::releaseLambdaMemory() {
	if (!isLambdaMemoryAllocated()) return;
	gpuAssert(hipFree(cuda_JND_Lambda));
	cuda_JND_Lambda = NULL;
	allocatedLambdaNodes = 0;
}
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
// Ensure the scratch buffer can hold `nodes` entries; regrows only when the
// current capacity is insufficient.
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
void cudaLambdaHolderData::allocateBufferMemory(size_t nodes) {
	if (!isBufferMemorySufficent(nodes)) {
		releaseBufferMemory();
		allocatedBufferNodes = nodes; // now its sufficent
		size_t lambda_memory_size_in_bytes = allocatedBufferNodes * sizeof(JNDFloat);
		// FIX: %zu is the portable specifier for size_t (%lu is wrong on LLP64/Win64).
		printf("allocated %zu bytes for buffer nodes\n", lambda_memory_size_in_bytes);
		gpuAssert(hipMalloc((void **)&cuda_Buffer1, lambda_memory_size_in_bytes));
	}
}
// Free the scratch buffer and reset the recorded capacity to zero.
void cudaLambdaHolderData::releaseBufferMemory() {
	if (!isBufferMemoryAllocated()) return;
	gpuAssert(hipFree(cuda_Buffer1));
	cuda_Buffer1 = NULL;
	allocatedBufferNodes = 0;
}
// ---- C-linkage accessors exposing the holder-owned device pointers to
// ---- callers in other translation units. Returned pointers are device
// ---- memory and remain owned by the holders (do not free them).
extern "C" JNDFloat *getCudaBuffer() {
	return cudaLambdaHolderData.cuda_Buffer1;
}
extern "C" JNDFloat *getCudaLambda() {
	return cudaLambdaHolderData.cuda_JND_Lambda;
}
extern "C" int *getCudaFailedTimeNodes() {
	return cudaHolderData.cuda_Failed_Converged_Time_Node;
}
extern "C" int *getCudaFailedBlocks() {
	return cudaHolderData.cuda_Failed_Converged_Blocks;
}
extern "C" float *getCudaConvergedTimeNodes() {
	return cudaHolderData.cuda_Converged_Time_Node;
}
extern "C" float *getCudaConvergedJacobyLoopsPerIteration() {
	return cudaHolderData.cuda_convergence_jacoby_loops_per_iteration;
}
extern "C" float *getCudaConvergedJacobyLoopsPerIterationBlocks() {
	return cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks;
}
extern "C" float *getCudaConvergedBlocks() {
	return cudaHolderData.cuda_Converged_Blocks;
}
// Forward the Aihc table to the model holder (defined elsewhere in this file).
extern "C" void loadAihc(float *Aihc) noexcept(false) {
	cudaModelAihc.loadAihc(Aihc);
}
// Enable (re-)loading of the Aihc table on the model holder.
extern "C" void enableloadAihc() noexcept(false) {
	cudaModelAihc.enableLoadAihc();
}
// Copy `nodes` convergence-speed values from the device back to the host
// array `convergence_times` (blocking device-to-host transfer).
extern "C" void extractConvergenceTimes(float *convergence_times, size_t nodes) {
	gpuAssert(hipMemcpy(convergence_times, cudaHolderData.converge_speed, nodes * sizeof(float), hipMemcpyDeviceToHost));
}
// Host-side time-filter arrays. NOTE(review): not referenced in this chunk — confirm usage.
int *host_params_time_filter,*host_time_filters;
// Holder for JND (just-noticeable-difference) computation buffers: the host/
// device parameter structures, per-interval bookkeeping arrays, and the
// Fisher-information / mean-rate node buffers.
struct cudaJNDHolder {
	//JNDFloat *cuda_dLambda = NULL;
	JNDFloat *cuda_MeanRate = NULL; // device mean-rate buffer (see allocateMeanNodes)
	/**
	* pointer to parameters array of structures on the host
	*/
	device_params *host_local_param;
	/**
	* pointer to parameters array of structures on global device memory
	*/
	device_params *global_device_params;
	vectors_sum_linear_coefficents *vectors_sums_coefficents; // device-side linear-sum coefficients (2 entries)
	int cuda_device_jnd_structs_allocated = 0; // guard flag for the structures above
	int isDeviceStructsAllocated() { return cuda_device_jnd_structs_allocated; }
	// Free the host/device parameter structures (safe when not allocated).
	void ReleaseDeviceStructs() {
		if (isDeviceStructsAllocated()) {
			cuda_device_jnd_structs_allocated = 0;
			delete[](host_local_param);
			gpuAssert(hipFree(global_device_params));
			global_device_params = NULL;
			gpuAssert(hipFree(vectors_sums_coefficents));
			vectors_sums_coefficents = NULL;
		}
	}
	// Allocate the fixed-size (2-element) host/device parameter structures once.
	void allocateDeviceStructs() {
		if (!isDeviceStructsAllocated()) {
			gpuAssert(hipMalloc((void **)&vectors_sums_coefficents, 2 * sizeof(vectors_sum_linear_coefficents)));
			gpuAssert(hipMalloc((void **)&global_device_params, 2 * sizeof(device_params))); // just one set filter includes its own size
			host_local_param = new device_params[2];
			cuda_device_jnd_structs_allocated = 1;
		}
	}
	size_t cuda_jnd_intervals_num = 0; // capacity (intervals) of the arrays below
	int isSufficentIntervalAllocated(size_t nodes) { return cuda_jnd_intervals_num >= nodes; }
	int isIntervalsAllocated() { return isSufficentIntervalAllocated(1); }
	device_jnd_params *cuda_jnd_params = NULL;
	int *cuda_JND_Serial_Intervals_Positions = NULL;
	int *cuda_JND_Interval_To_Reference = NULL;
	int *cuda_JND_Calculated_Intervals = NULL;
	int *cuda_JND_Refrence_Intervals = NULL;
	// Free all interval bookkeeping arrays and reset the capacity.
	void releaseIntervals() {
		if (isIntervalsAllocated()) {
			// BUG FIX: cuda_jnd_intervals_num is size_t — %d was undefined
			// behavior on 64-bit targets; %zu is the correct specifier.
			printf("clearing %zu params\n", cuda_jnd_intervals_num);
			gpuAssert(hipFree(cuda_JND_Serial_Intervals_Positions));
			gpuAssert(hipFree(cuda_JND_Interval_To_Reference));
			gpuAssert(hipFree(cuda_JND_Calculated_Intervals));
			gpuAssert(hipFree(cuda_JND_Refrence_Intervals));
			gpuAssert(hipFree(cuda_jnd_params));
			cuda_JND_Serial_Intervals_Positions = NULL;
			cuda_JND_Interval_To_Reference = NULL;
			cuda_JND_Calculated_Intervals = NULL;
			cuda_JND_Refrence_Intervals = NULL;
			cuda_jnd_params = NULL;
			cuda_jnd_intervals_num = 0;
		}
	}
	// Ensure capacity for `intervals_num` intervals; regrows only when needed.
	void allocateIntervals(int intervals_num) {
		if (!isSufficentIntervalAllocated(intervals_num)) {
			releaseIntervals();
			cuda_jnd_intervals_num = intervals_num;
			// NOTE(review): the int arrays below are sized with
			// sizeof(device_jnd_params), which over-allocates relative to
			// sizeof(int). Kept as-is (harmless but wasteful) — confirm intent.
			size_t jndRefrencesSizeInBytes = cuda_jnd_intervals_num * sizeof(device_jnd_params);
			size_t dA_size_in_bytes = cuda_jnd_intervals_num * sizeof(device_jnd_params);
			gpuAssert(hipMalloc((void **)&cuda_jnd_params, dA_size_in_bytes));
			gpuAssert(hipMalloc((void **)&cuda_JND_Serial_Intervals_Positions, jndRefrencesSizeInBytes));
			gpuAssert(hipMalloc((void **)&cuda_JND_Interval_To_Reference, jndRefrencesSizeInBytes));
			gpuAssert(hipMalloc((void **)&cuda_JND_Calculated_Intervals, jndRefrencesSizeInBytes));
			gpuAssert(hipMalloc((void **)&cuda_JND_Refrence_Intervals, jndRefrencesSizeInBytes));
		}
	}
	JNDFloat *cuda_FisherAISum = NULL;
	JNDFloat *cuda_F_RA = NULL;
	size_t cuda_fisher_size = 0; // capacity (nodes) of the two Fisher buffers
	int isSufficentFisherNodesAllocated(size_t nodes) { return cuda_fisher_size >= nodes; }
	int isFisherNodesAllocated() { return isSufficentFisherNodesAllocated(1); }
	void allocateFisherNodes(size_t nodes);
	void releaseFisherNodes();
	size_t cuda_mean_nodes = 0; // capacity (nodes) of cuda_MeanRate
	int isSufficentMeanNodesAllocated(size_t nodes) { return cuda_mean_nodes >= nodes; }
	int isMeanNodesAllocated() { return isSufficentMeanNodesAllocated(1); }
	void allocateMeanNodes(size_t nodes);
	void releaseMeanNodes();
} cudaJNDHolder;
// Free the Fisher-information device buffers and reset the capacity.
void cudaJNDHolder::releaseFisherNodes() {
	if (isFisherNodesAllocated()) {
		// BUG FIX: cuda_fisher_size is size_t — %d was undefined behavior on
		// 64-bit targets; %zu is the correct specifier.
		printf("clearing %zu fisher nodes\n", cuda_fisher_size);
		gpuAssert(hipFree(cuda_FisherAISum));
		gpuAssert(hipFree(cuda_F_RA));
		cuda_FisherAISum = NULL;
		cuda_F_RA = NULL;
		cuda_fisher_size = 0;
	}
}
// Ensure the two Fisher-information buffers can hold `nodes` entries each;
// regrows (release + reallocate) only when the current capacity is too small.
void cudaJNDHolder::allocateFisherNodes(size_t nodes) {
	if (isSufficentFisherNodesAllocated(nodes)) return;
	releaseFisherNodes();
	cuda_fisher_size = nodes;
	size_t fisher_size_in_bytes = cuda_fisher_size * sizeof(JNDFloat);
	gpuAssert(hipMalloc((void **)&cuda_FisherAISum, fisher_size_in_bytes));
	//gpuAssert(hipMalloc((void **)&cuda_AvgMeanRate, fisher_size_in_bytes));
	gpuAssert(hipMalloc((void **)&cuda_F_RA, fisher_size_in_bytes));
}
// Free the mean-rate device buffer and reset the capacity.
void cudaJNDHolder::releaseMeanNodes() {
	if (isMeanNodesAllocated()) {
		// BUG FIX: cuda_mean_nodes is size_t — %d was undefined behavior on
		// 64-bit targets; %zu is the correct specifier.
		printf("clearing %zu mean nodes\n", cuda_mean_nodes);
		gpuAssert(hipFree(cuda_MeanRate));
		cuda_MeanRate = NULL;
		cuda_mean_nodes = 0;
	}
}
// Ensure the mean-rate buffer can hold `nodes` entries; regrows only when the
// current capacity is insufficient.
void cudaJNDHolder::allocateMeanNodes(size_t nodes) {
	if (isSufficentMeanNodesAllocated(nodes)) return;
	releaseMeanNodes();
	cuda_mean_nodes = nodes;
	size_t mean_size_in_bytes = cuda_mean_nodes * sizeof(JNDFloat);
	gpuAssert(hipMalloc((void **)&cuda_MeanRate, mean_size_in_bytes));
}
// C-linkage accessor for the device-side mean-rate buffer (owned by the holder).
extern "C" JNDFloat *getCudaMeanRate() {
	return cudaJNDHolder.cuda_MeanRate;
}
// Holder for the generated-input device buffers (white noise + signal).
struct cudaSignalHolder {
	size_t cuda_signal_nodes = 0;  // capacity (floats) of each buffer below
	float *cuda_WN = NULL; // white noise for input generation
	float *cuda_Signal = NULL; // generated input signal
	int isSufficentSignalNodesAllocated(size_t nodes) { return cuda_signal_nodes >= nodes; }
	int isSignalNodesAllocated() { return isSufficentSignalNodesAllocated(1); }
	// Ensure both buffers can hold `nodes` samples; regrows only when needed.
	void allocateSignalNodes(int nodes) {
		if (!isSufficentSignalNodesAllocated(nodes)) {
			releaseSignalNodes();
			cuda_signal_nodes = nodes;
			size_t wn_length_bytes = cuda_signal_nodes * sizeof(float);
			gpuAssert(hipMalloc((void **)&cuda_WN, wn_length_bytes));
			gpuAssert(hipMalloc((void **)&cuda_Signal, wn_length_bytes));
		}
	}
	// BUG FIX: the original only reset the counter and never freed the device
	// buffers, leaking cuda_WN/cuda_Signal on every regrow; free them here.
	void releaseSignalNodes() {
		if (isSignalNodesAllocated()) {
			gpuAssert(hipFree(cuda_WN));
			cuda_WN = NULL;
			gpuAssert(hipFree(cuda_Signal));
			cuda_Signal = NULL;
			cuda_signal_nodes = 0;
		}
	}
} cudaSignalHolder;
/****
* cochlea cu global variables
*
*
**/
//float *deviceBackupSpeeds; // save backup speeds from previous ihc run on the device
float *BM_host; // host-side BM buffer. NOTE(review): not referenced in this chunk — confirm usage.
// Check-and-throw helper: reports any pending HIP error and, after a device
// synchronize, any asynchronous kernel-execution error. Throws on failure.
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
	// More careful checking. However, this will affect performance.
	// Comment away if needed.
	hipError_t err = hipGetLastError();
	if ( hipSuccess != err )
	{
		printf("cudaCheckError() failed at %s:%i : %s\n",
			file, line, hipGetErrorString(err));
		/**
		fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
		file, line, hipGetErrorString( err ) );
		exit( -1 );*/
		throw std::runtime_error("Cuda check pre device synchronize failed");
	}
	// More careful checking. However, this will affect performance.
	// Comment away if needed.
	err = hipDeviceSynchronize();
	if( hipSuccess != err )
	{
		/**
		fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
		file, line, hipGetErrorString( err ) );
		exit( -1 );
		*/
		printf("cudaCheckError() with sync failed at %s:%i : %s\n",
			file, line, hipGetErrorString(err));
		throw std::runtime_error("Cuda check post device synchronize failed");
	}
#endif
	return;
}
// Check-and-continue helper: reports (but does not throw) a pending HIP
// error, clearing the sticky error state via hipGetLastError().
inline void __cudaClearError( const char *file, const int line )
{
	// More careful checking. However, this will affect performance.
	// Comment away if needed.
	hipError_t err = hipGetLastError();
	if ( hipSuccess != err )
	{
		/**
		fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
		file, line, hipGetErrorString( err ) );*/
		printf("cudaCheckError() failed at %s:%i : %s\n",
			file, line, hipGetErrorString(err));
	}
	return;
}
// Render a dim3 as "(x, y,z)" — spacing kept identical to the original output.
std::string showDIM3(dim3 d3) {
	std::stringstream formatter;
	formatter << "(" << d3.x << ", " << d3.y << "," << d3.z << ")";
	return formatter.str();
}
// Create the start/stop timing events when `condition` is non-zero (used to
// gate profiling on the Show_Run_Time flags).
extern "C" void cudaEventsCreate(hipEvent_t& start, hipEvent_t& stop, int condition) noexcept(false) {
	if (condition) {
		gpuAssert(hipEventCreate(&start));
		gpuAssert(hipEventCreate(&stop));
	}
}
// Record the start event when `condition` is non-zero (`stop` is unused here;
// kept in the signature for symmetry with the other event helpers).
extern "C" void cudaEventsStartTimer(hipEvent_t& start, hipEvent_t& stop, int condition) noexcept(false) {
	if (condition) {
		gpuAssert(hipEventRecord(start));
	}
}
// Print free/total GPU memory (MB) when bit 4 of `flags` is set.
extern "C" void viewGPUStatus(int flags, const std::string& prefix) noexcept(false) {
	if (flags & 16) {
		size_t free_memory;
		size_t total_memory;
		gpuAssert(hipMemGetInfo(&free_memory, &total_memory));
		printf("%s : GPU Memory, Free(%d MB) / Total(%d MB)\n",prefix.c_str(), static_cast<int>((free_memory / (1024 * 1024))), static_cast<int>(total_memory / (1024 * 1024)));
	}
}
// Record the stop event when `condition` is non-zero (no synchronization).
extern "C" void cudaEventsStopTimer(hipEvent_t& start, hipEvent_t& stop, int condition) noexcept(false) {
	if (condition) {
		gpuAssert(hipEventRecord(stop));
	}
}
// C-linkage accessor for the per-block convergence-speed device buffer.
extern "C" float *getCudaConvergeSpeedBlocks() {
	return cudaHolderData.converge_speed_blocks;
}
// Record + synchronize the stop event and return the elapsed time in
// milliseconds (0 when `condition` is zero); also prints it with `prefix`.
extern "C" float cudaEventsStopQueryTimer(hipEvent_t& start, hipEvent_t& stop, int condition, const std::string& prefix) noexcept(false) {
	cudaEventsStopTimer(start, stop, condition);
	float milliseconds = 0.0f;
	if (condition) {
		gpuAssert(hipEventSynchronize(stop));
		gpuAssert(hipEventElapsedTime(&milliseconds, start, stop));
		printf("%s : %.2f (msec) \n", prefix.c_str(), milliseconds);
	}
	return milliseconds;
}
// Explicit template instantiations for the copy/lambda helpers (definitions
// elsewhere in this file) so other translation units can link against them.
template void GeneralKernel_Copy_Results_Template<double>(double *target, double *src, size_t size);
template void GeneralKernel_Copy_Results_Template<float>(float *target, float *src, size_t size);
template void GeneralKernel_Copy_Results_Template<int>(int *target, int *src, size_t size);
template void GeneralKernel_Copy_Results_Template<double>(double *target, double *src, size_t size, size_t offset);
template void GeneralKernel_Copy_Results_Template<float>(float *target, float *src, size_t size, size_t offset);
template void GeneralKernel_Copy_Results_Template<int>(int *target, int *src, size_t size, size_t offset);
template void ReverseKernel_Copy_Results_Template<float>(float *cpu_src, float *cuda_target, size_t start_time_node, size_t time_nodes, int sections);
template void ReverseKernel_Copy_Results_Template<double>(double *cpu_src, double *cuda_target, size_t start_time_node, size_t time_nodes, int sections);
template void updateCUDALambdaArray<float>(float *lambda_array, float *cuda_buffer, size_t calc_time_nodes, int sections, int Show_Run_Time, int Show_Device_Data, int cuda_buffer_update, Log &outer_log);
template void updateCUDALambdaArray<double>(double *lambda_array,double *cuda_buffer, size_t calc_time_nodes, int sections, int Show_Run_Time, int Show_Device_Data, int cuda_buffer_update,Log &outer_log);
// Allocate (or regrow) all device buffers needed for a BM/OHC run and upload
// the cochlear parameter vectors. Timing of this setup is reported when bit 1
// of Show_Run_Time is set.
extern "C" void BMOHCKernel_Init(
	float *gamma,
	float *mass,
	float *M,
	float *U,
	float *L,
	float *R,
	float *S,
	float *Q,
	float *S_ohc,
	float *S_tm,
	float *R_tm,
	float num_frequencies,
	float dbA,
	size_t inputBufferNodes,
	size_t resultBufferNodes,
	size_t lambdaBufferNodes,
	bool first_time,
	int Show_Run_Time,
	int Show_Device_Data,
	Log &outer_log
){
	hipEvent_t start, stop;
	size_t ssizep = resultBufferNodes*sizeof(float);
	cudaEventsCreate(start, stop, Show_Run_Time & 2);
	cudaEventsStartTimer(start, stop, Show_Run_Time & 2);
	if ( first_time ) {
		if (Show_Device_Data & 8) {
			// BUG FIX: both arguments are size_t — %d was undefined behavior on
			// 64-bit targets; %zu is the correct specifier.
			// NOTE(review): "/ 256" as a node count looks odd for 4-byte floats
			// (nodes would be ssizep / 4) — kept as-is; confirm intent.
			printf("Saved speeeds allocated size = (%zu KB), %zu Nodes\n", (ssizep / 1024), (ssizep / 256));
		}
	} // end of first time memory allocations
	cudaHolderGeneratedData.allocateGenerated();
	cudaHolderData.allocateOHCIOData(inputBufferNodes, resultBufferNodes);
	//printf("memory uploads lambda program arrays\n");
	cudaHolderData.allocateCochlearData(SECTIONS);
	cudaHolderData.loadCochlearData(S_ohc, S_tm, gamma, R_tm, mass, M, U, L, R, S, Q);
	//printf("cuda malloc fisher program arrays\n");
	outer_log.timeAtFlag(0,cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 2,"Initialize and allocate Memory for BM run"), Show_Run_Time & 2);
}
// Block the host until all queued device work has completed.
extern "C" void BMOHCKernel_Wait_Threads() noexcept(false)
{
	gpuAssert(hipDeviceSynchronize());
}
// Copy `resultNodes` BM-velocity floats from the device results buffer
// (starting at `offset` elements) into the host array `target`.
extern "C" void BMOHCKernel_Copy_Results(float *target, size_t resultNodes, size_t offset) noexcept(false) {
	size_t ssize = resultNodes*sizeof(float);
	gpuAssert(hipMemcpy((void *)(target), cudaHolderData.cuda_saved_speeds+offset, ssize, hipMemcpyDeviceToHost));
}
// Upload `size` floats from host array `src` into the device results buffer.
extern "C" void ReverseKernel_Copy_Results(float *src, size_t size) noexcept(false) {
	gpuAssert(hipMemcpy((void *)cudaHolderData.cuda_saved_speeds,src, size*sizeof(float), hipMemcpyHostToDevice));
}
// Copy `lambdaNodes` values from the device scratch buffer (starting at
// `offset` elements) into the host array `target`.
extern "C" void BMOHCKernel_Copy_Lambda(JNDFloat *target, size_t lambdaNodes, int offset) noexcept(false) {
	size_t lsizep = lambdaNodes*sizeof(JNDFloat);
	gpuAssert(hipMemcpy((void *)(target), cudaLambdaHolderData.cuda_Buffer1+offset, lsizep, hipMemcpyDeviceToHost));
}
// Release every device buffer owned by the BM/OHC stage.
extern "C" void BMOHCKernel_Free(
) noexcept(false) {
	cudaHolderData.releaseCochlearData();
	cudaHolderGeneratedData.releaseGenerated();
	cudaHolderData.releaseOHCIOData();
	cudaHolderData.releaseBlocksConverganceArray();
}
// non adjusted version for relative error parmeters as 0
// Kernel: broadcasts the constant M1-SP and tolerance thresholds (from
// constant memory) into the per-block threshold arrays. Expects a 1-D launch
// with one thread per block entry.
__global__ void CudaCalculateThresholdBoundariesForNonGeneratedInputSimple(
	float *m1_sp_maximum,
	float *tolerance_maximum
) {
	m1_sp_maximum[threadIdx.x] = model_constants[23];
	tolerance_maximum[threadIdx.x] = model_constants[24];
	__syncthreads();
}
// calculate threshold boundaries based on read input from file
// Kernel: per CUDA block, finds the maximum absolute input sample of that
// block's input segment via a shared-memory tree reduction, then thread 0
// writes the relative power (model_constants[34] * max) for the block.
// NOTE: the reduction is only correct when blockDim.x is a power of two —
// the host launcher rounds the thread count down to a power of two.
// m1_sp_maximum/tolerance_maximum are not written here (host computes them
// from power_calculated_array).
__global__ void CudaCalculateThresholdBoundariesForNonGeneratedInput(
	float *input_samples,
	float *m1_sp_maximum,
	float *tolerance_maximum,
	float *power_calculated_array
) {
	__shared__ float blockMaximum[1024];
	//__shared__ float loader[1024];
	int start_input = model_constants_integers[8] * blockIdx.x;
	int bdim = blockDim.x;
	float load_data = input_samples[start_input + threadIdx.x];
	load_data = abs(load_data);
	blockMaximum[threadIdx.x] = load_data;
	// Tree reduction over shared memory; barrier before each halving step.
	for (int t_i = (bdim >> 1); t_i >= 1; t_i >>= 1) {
		__syncthreads();
		if (threadIdx.x < t_i ) {
			blockMaximum[threadIdx.x] = fmax(blockMaximum[threadIdx.x], blockMaximum[threadIdx.x + t_i]);
		}
	}
	__syncthreads();
	if (threadIdx.x ==0) {
		// calculate thresholds
		// first calculate power relative to SPLRef
		float power_calculated = model_constants[34] * blockMaximum[0];
		power_calculated_array[blockIdx.x] = power_calculated;
	}
	__syncthreads();
}
// calculate threshold boundaries based on read input from file
// Kernel: single-block variant — each thread scans one input segment
// sequentially ([block_size*tid, gen_model_end_sample_index[tid])) for its
// maximum and writes rSPLRefVal * max to power_calculated_array[tid].
// The M1_SP/tolerance parameters are currently unused here (host computes
// the thresholds from the power array).
__global__ void CudaCalculateThresholdBoundariesForNonGeneratedInputBlocked(
	volatile float *input_samples,
	volatile float *power_calculated_array,
	int block_size,
	float rSPLRefVal,
	float M1_SP_Fix_Factor,
	float Tolerance_Fix_Factor,
	float Max_M1_SP_Error_Parameter,
	float Max_Tolerance_Parameter,
	int *gen_model_end_sample_index
) {
	//__shared__ float loader[1024];
	int start_input = block_size * threadIdx.x;
	int end_output = gen_model_end_sample_index[threadIdx.x];
	//int end_output = (model_constants_integers[8]+1) * threadIdx.x;
	// determine max output for boundaries conditions
	float load_data = 0.0f;
	load_data = input_samples[start_input];
	for (int t_i = start_input; t_i <end_output; t_i++) {
		load_data = fmax(load_data, input_samples[t_i]);
	}
	// calculate thresholds
	// first calculate power relative to SPLRef
	//float power_calculated = rSPLRefVal * load_data;
	power_calculated_array[threadIdx.x] = rSPLRefVal * load_data;
	__syncthreads();
}
// Host driver: computes per-block M1-SP / tolerance threshold parameters for
// input that was read from file (not generated). With relative errors
// disabled it launches the simple broadcast kernel; otherwise it runs the
// per-block max-power reduction kernel, copies the powers back, and derives
// the thresholds on the host.
// NOTE(review): host_generated_m1_sp_array / host_generated_tolerance_array
// are computed (and optionally printed) but not visibly uploaded to the
// device in this function — confirm the thresholds are consumed elsewhere.
extern "C" void calculateBoundariesForNonGeneratedInput(
	int Relative_Error_Parameters,
	int max_block_length,
	int Show_Calculated_Power,
	float M1_SP_Fix_Factor,
	float Tolerance_Fix_Factor,
	float Max_M1_SP_Error_Parameter,
	float Max_Tolerance_Parameter,
	float rSPLRefVal,
	int block_size,
	dim3 inputBlockDivision
) noexcept(false) {
	dim3 singleton(1, 1, 1);
	if (Relative_Error_Parameters == 0) {
		printf("Calculating simple configuration for %d blocks\n", inputBlockDivision.x);
		CudaCalculateThresholdBoundariesForNonGeneratedInputSimple KERNEL_ARGS2(singleton, inputBlockDivision)(cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance, cudaHolderGeneratedData.generated_model_throw_tolerance);
	} else {
		float host_generated_m1_sp_array[MAX_NUMBER_OF_BLOCKS];
		float host_generated_tolerance_array[MAX_NUMBER_OF_BLOCKS];
		float host_generated_calculated_power_array[MAX_NUMBER_OF_BLOCKS];
		// Round the thread count down to a power of two — required by the
		// shared-memory reduction in the kernel below.
		int threadsNumber = min(max_block_length, 1024);
		if (threadsNumber < 1024) {
			threadsNumber = static_cast<int>(powf(2.0f, floor(log2f(static_cast<float>(threadsNumber)))));
		}
		dim3 threadsDivision(threadsNumber, 1, 1);
		printf("Calculating alternate complex configuration size of %d\n", inputBlockDivision.x); // " blocks divided to threads at " << showDIM3(threadsDivision) << std::endl;
		CudaCalculateThresholdBoundariesForNonGeneratedInput KERNEL_ARGS2(inputBlockDivision, threadsDivision)(cudaHolderData.cuda_input_samples, cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance, cudaHolderGeneratedData.generated_model_throw_tolerance, cudaHolderGeneratedData.generated_calculated_power_array);
		gpuAssert(hipMemcpy(host_generated_calculated_power_array, cudaHolderGeneratedData.generated_calculated_power_array, static_cast<int>(inputBlockDivision.x)*sizeof(float), hipMemcpyDeviceToHost));
		for (int i = 0; i < static_cast<int>(inputBlockDivision.x); i++) {
			// Convert the relative amplitude to dB (guarding log of non-positive
			// values), then scale the error/tolerance parameters accordingly.
			float power_calculated = host_generated_calculated_power_array[i] > 0.0f ? 20 * log10f(0.1f*host_generated_calculated_power_array[i]) : 0.0f;
			host_generated_m1_sp_array[i] = Max_M1_SP_Error_Parameter*powf(10.0f, M1_SP_Fix_Factor * power_calculated);
			host_generated_tolerance_array[i] = Max_Tolerance_Parameter*powf(10.0f, Tolerance_Fix_Factor * power_calculated);
			if (Show_Calculated_Power&1) {
				printf("generated_calculated_power_array[%d]=%.3e\n",i, power_calculated);
				printf("generated_m1_sp_array[%d]=%.3e\n", i, host_generated_m1_sp_array[i]);
				printf("generated_tolerance_array[%d]=%.3e\n", i, host_generated_tolerance_array[i]);
			}
		}
	}
}
/////////////////////////////////////////////////
// Report the theoretical SM occupancy achievable for `MyKernel` launched with
// `blockSize` threads per block (synchronizes the device first so the query
// reflects a quiescent state).
void cudaOccupancyIndicator(int blockSize, const void *MyKernel, hipDeviceProp_t &deviceProp) {
	int maxActiveBlocks;
	gpuAssert(hipDeviceSynchronize());
	gpuAssert(hipOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks,
		MyKernel, blockSize,
		0));
	// Integer divisions intentionally mirror the original arithmetic:
	// warps resident with this config vs. warps an SM can hold.
	int residentWarps = maxActiveBlocks * blockSize / deviceProp.warpSize;
	int capacityWarps = deviceProp.maxThreadsPerMultiProcessor / deviceProp.warpSize;
	float occupancy = residentWarps / (float)capacityWarps;
	printf("Launched blocks of size %d. Theoretical occupancy: %f,maxActive Blocks: %d\n",
		blockSize, occupancy, maxActiveBlocks);
}
extern "C" void BMOHCNewKernel(
float *input_samples,
bool override_input_samples, // true if input generated, will not upload
float w_ohc,
float time_step,
float time_step_out,
float delta_x,
float alpha_r,
int enable_psi,
int enable_OW,
int base_index,
float Ts,
float _ohc_alpha_l,
float _ohc_alpha_s,
float model_Gme,
float model_a0,
float model_a1,
float model_a2,
float sigma_ow,
float eta_1,
float eta_2,
float *tres,
int Time_Blocks,
int samplesBufferLengthP1,
int overlap_nodes_for_block,
long overlapTimeMicroSec,
int show_transient, // always 1 and will be ignored than
float cuda_max_time_step,
float cuda_min_time_step,
int Decouple_Filter,
float Max_M1_SP_Error_Parameter,
float Max_Tolerance_Parameter,
int Relative_Error_Parameters,
int calculate_boundary_conditions, // if true will calculate max tolerance and max m1 sp error from input, should be used if input is not generated within the program
float M1_SP_Fix_Factor,
float Tolerance_Fix_Factor,
float SPLREfVal,
int Show_Calculated_Power,
int Show_Device_Data,
int Show_Run_Time,
int JACOBBY_Loops_Fast, // number of jcoby loops to perform on fast approximation
int JACOBBY_Loops_Slow, // number of jcoby loops to perform on slow approximation
int Cuda_Outern_Loops, // max control loops
int Run_Fast_BM_Calculation, // will run BM calculation with relaxed memory requirements
int BMOHC_Kernel_Configuration,
hipEvent_t& start,
hipEvent_t& stop,
hipDeviceProp_t deviceProp,
Log &outer_log
) noexcept(false) {
cudaEventsCreate(start, stop, Show_Run_Time & 1);
dim3 threads(FIRST_STAGE_WIDTH, 1);
dim3 grid(Time_Blocks/*FIRST_STAGE_BLOCKS*/, 1);
cudaHolderData.allocateBlocksConverganceArray(Time_Blocks);
//last_saved_nodes_per_time_block_for_cuda = last_saved_nodes_per_block; // this setup for later copy data, fix indexes
int tsizep = (samplesBufferLengthP1)*sizeof(float);
//std::cout << "inputing " << samplesBufferLengthP1 << " nodes\n"<<tsizep<<" Bytes\n";
// allocate memory on device
// copy data to device
// TODO - allocate & memcopy only neccesary data according to enable_ow and enable_psi
//int numBlocks = grid.x; // Occupancy in terms of active blocks
//int activeWarps;
//int maxWarps;
std::string run_mode_name;
if (Run_Fast_BM_Calculation == 3 ) {
run_mode_name = "fast no self analysis";
} else if (Run_Fast_BM_Calculation == 2) {
run_mode_name = "Impercise";
} else if (Run_Fast_BM_Calculation == 1) {
run_mode_name = "Fast";
} else {
run_mode_name = "Precise";
}
/*
if (Show_Device_Data & 2) {
if (Run_Fast_BM_Calculation) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, BMOHC_FAST_kernel, threads.x, 0);
} else {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, BMOHC_NEW_kernel, threads.x, 0);
}
activeWarps = deviceProp.multiProcessorCount* numBlocks * threads.x / deviceProp.warpSize;
maxWarps = deviceProp.multiProcessorCount*deviceProp.maxThreadsPerMultiProcessor / deviceProp.warpSize;
std::cout << "activeWarps: " << activeWarps << std::endl
<< "maxWarps: " << maxWarps << std::endl
<< "GPU Blocks processing capability of BM calculations ("<<run_mode_name<<") is : " << (deviceProp.multiProcessorCount*numBlocks) << std::endl
<< "current num blocks: " << grid.x << std::endl
<< "Occupancy: " << (double)activeWarps / maxWarps * 100 << "%" << std::endl;
}
*/
// Copy the host input sample buffer to the device, unless the caller has
// already staged samples on the GPU (override_input_samples set).
if (!override_input_samples) {
gpuAssert(hipMemcpy(cudaHolderData.cuda_input_samples, input_samples, tsizep, hipMemcpyHostToDevice));
}
// load GPU parameters: stage model constants in host arrays, then mirror them
// into the device __constant__ symbols below via hipMemcpyToSymbol.
float host_model_constants[MODEL_FLOATS_CONSTANTS_SIZE];
int host_model_constants_integers[MODEL_INTEGERS_CONSTANTS_SIZE];
// NOTE(review): declared as int[], but copied later with
// MODEL_LONGS_CONSTANTS_SIZE*sizeof(long) — on LP64 platforms (sizeof(long)==8)
// that copy over-reads this buffer. Confirm intended element type.
int host_model_constants_longs[MODEL_LONGS_CONSTANTS_SIZE];
float base_time = 0;
// Precompute negations/ratios/reciprocals on the host so the kernels avoid
// per-thread divisions and sign flips.
host_model_constants[0] = Ts;
host_model_constants[1] = static_cast<float>(1.0/static_cast<double>(Ts)); // Fs
host_model_constants[2] = _ohc_alpha_l;
host_model_constants[3] = _ohc_alpha_s;
host_model_constants[4] = -1.0f*_ohc_alpha_l;
host_model_constants[5] = -1.0f*_ohc_alpha_s;
host_model_constants[6] = static_cast<float>(static_cast<double>(_ohc_alpha_l) / static_cast<double>(_ohc_alpha_s));
host_model_constants[7] = static_cast<float>(1.0/static_cast<double>(sigma_ow));
host_model_constants[8] = delta_x;
host_model_constants[9] = delta_x*delta_x; // dx_pow2
host_model_constants[10] = model_a0;
host_model_constants[11] = model_a1;
host_model_constants[12] = model_a2;
host_model_constants[13] = eta_1;
host_model_constants[14] = eta_2;
host_model_constants[15] = model_Gme;
// Split Gme by the oval-window enable flag: [16] is the OW-enabled share,
// [17] the complementary (disabled) share; exactly one of them is non-zero.
host_model_constants[16] = enable_OW*model_Gme;
host_model_constants[17] = (1 - enable_OW)*model_Gme;
host_model_constants[18] = -w_ohc; // negated, so all negatives in GPU become positive
host_model_constants[19] = time_step;
host_model_constants[20] = time_step_out;
//std::cout << "host_model_constants[20] = time_step_out = " << time_step_out << std::endl;
host_model_constants[21] = cuda_min_time_step;
host_model_constants[22] = cuda_max_time_step;
host_model_constants[23] = Max_M1_SP_Error_Parameter;
host_model_constants[24] = Max_Tolerance_Parameter;
//printf("model_constants[25] = alpha_r=%.2e\n", alpha_r);
host_model_constants[25] = alpha_r;
host_model_constants[26] = host_model_constants[8] * host_model_constants[10]; // delta_x * model_a0
// NOTE(review): indices 27-30 are never written here yet the whole array is
// copied to the device — confirm they are set elsewhere or genuinely unused.
host_model_constants[31] = -model_a1;
host_model_constants[32] = -model_a2;
host_model_constants[33] = SPLREfVal;
host_model_constants[34] = 1.0f/SPLREfVal;
host_model_constants[35] = M1_SP_Fix_Factor;
host_model_constants[36] = Tolerance_Fix_Factor;
// integer constants
host_model_constants_integers[0] = Decouple_Filter;
//std::cout << "Decouple_Filter = " << Decouple_Filter << std::endl;
host_model_constants_integers[1] = enable_OW;
host_model_constants_integers[2] = enable_psi;
host_model_constants_integers[3] = Time_Blocks;
host_model_constants_integers[4] = samplesBufferLengthP1;
host_model_constants_integers[5] = overlap_nodes_for_block;
host_model_constants_integers[6] = show_transient;
host_model_constants_integers[7] = Relative_Error_Parameters;
// [8] = samples handled per time block (overlap excluded), used below to
// partition the input across CUDA blocks.
host_model_constants_integers[8] = (samplesBufferLengthP1 - overlap_nodes_for_block) / (Time_Blocks + 1);
host_model_constants_integers[9] = JACOBBY_Loops_Fast;
host_model_constants_integers[10] = JACOBBY_Loops_Slow;
host_model_constants_integers[11] = Cuda_Outern_Loops;
// Per-CUDA-block output windows: first and one-past-last output sample index
// each block is responsible for writing.
int host_model_out_sample_index[MAX_NUMBER_OF_BLOCKS];
int host_model_end_sample_index[MAX_NUMBER_OF_BLOCKS];
int max_block_length = 0;
// calculate division of timed blocks: which input sample each CUDA block
// starts processing, and the output range it owns.
// host_model_constants_integers[0] is Decouple_Filter; the short-circuit
// ordering below guarantees the modulo is never evaluated when it is 0.
for (int bindex = 0; bindex < static_cast<int>(grid.x); bindex++) {
// transient_offset: block continues a coupled run, so its leading overlap
// region is transient (warm-up) rather than real output.
int transient_offset = (bindex > 0 && (host_model_constants_integers[0] == 0 || (host_model_constants_integers[0] != 1 && bindex%host_model_constants_integers[0] != 0)));
// preDecoupled: last block before a decouple boundary (no trailing overlap);
// postDecoupled: first block after a decouple boundary.
// Semantics inferred from usage — TODO confirm against the kernel code.
int preDecoupled = host_model_constants_integers[0]>0 && (host_model_constants_integers[0] == 1 || ((bindex + 1) % host_model_constants_integers[0] == 0)) && bindex != grid.x - 1;
int postDecoupled = host_model_constants_integers[0] > 0 && (bindex % host_model_constants_integers[0] == 0);
int input_sample = bindex*host_model_constants_integers[8];
// calculates out_sample: skip the overlap region unless this block is
// transient-offset or immediately follows a decouple boundary.
host_model_out_sample_index[bindex] = input_sample + ::max((1 - transient_offset - postDecoupled),0)*host_model_constants_integers[5]; // use as start output
// calculate end output as constant for convenience
int block_length = host_model_constants_integers[8] + (1 - preDecoupled)*host_model_constants_integers[5];
max_block_length = max(block_length, max_block_length);
host_model_end_sample_index[bindex] = input_sample + block_length;
if (bindex > 0) {
// Clamp so consecutive blocks' output windows never overlap.
host_model_out_sample_index[bindex] = max(host_model_out_sample_index[bindex], host_model_end_sample_index[bindex-1]);
}
// Optional partition diagnostics, gated by Show_Calculated_Power bit flags.
if (Show_Calculated_Power & 2) {
std::cout << "Block[" << bindex << "] ={'start_input'=" << input_sample << ",'start_output'=" << host_model_out_sample_index[bindex] << ",'end_output'=" << host_model_end_sample_index[bindex] << "}" << std::endl;
}
if (Show_Calculated_Power & 4) {
std::cout << "Block[" << bindex << "] ={'preDecoupled'=" << preDecoupled << ",'transient_offset'=" << transient_offset << ",'block_length'=" << block_length << "}" << std::endl;
}
}
// long constants
host_model_constants_longs[0] = overlapTimeMicroSec;
// prepare symbols: upload the staged constants into device __constant__
// memory, and the per-block index tables into global device buffers.
gpuAssert(hipMemcpyToSymbol(model_constants, host_model_constants, MODEL_FLOATS_CONSTANTS_SIZE*sizeof(float)));
gpuAssert(hipMemcpyToSymbol(model_constants_integers, host_model_constants_integers, MODEL_INTEGERS_CONSTANTS_SIZE*sizeof(int)));
gpuAssert(hipMemcpy(cudaHolderGeneratedData.generated_model_out_sample_index, host_model_out_sample_index, MAX_NUMBER_OF_BLOCKS*sizeof(int),hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(cudaHolderGeneratedData.generated_model_end_sample_index, host_model_end_sample_index, MAX_NUMBER_OF_BLOCKS*sizeof(int), hipMemcpyHostToDevice));
// NOTE(review): host_model_constants_longs is an int array, but this copies
// MODEL_LONGS_CONSTANTS_SIZE*sizeof(long) bytes — on platforms where
// sizeof(long)==8 this reads past the end of the host buffer. Confirm the
// declaration/symbol element types match.
gpuAssert(hipMemcpyToSymbol(model_constants_longs, host_model_constants_longs, MODEL_LONGS_CONSTANTS_SIZE*sizeof(long)))_
// calculate convergence criteria
if (calculate_boundary_conditions) {
calculateBoundariesForNonGeneratedInput(
Relative_Error_Parameters,
max_block_length,
Show_Calculated_Power,
M1_SP_Fix_Factor,
Tolerance_Fix_Factor,
Max_M1_SP_Error_Parameter,
Max_Tolerance_Parameter,
host_model_constants[34], // 1/SPLREfVal
host_model_constants_integers[8], // samples per time block
grid // tell us block partition of current run
);
}
/** Legacy driver-API launch path, kept for reference (parameter marshalling
for hipModuleLaunchKernel):
void *params[] = { cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
&w_ohc,
&time_step,
&time_step_out,
&delta_x,
&alpha_r,
&enable_psi,
&enable_OW,
&base_time,
&Ts,
&_ohc_alpha_l,
&_ohc_alpha_s,
&model_Gme,
&model_a0,
&model_a1,
&model_a2,
&sigma_ow,
&eta_1,
&eta_2,
&samplesBufferLengthP1,
&overlap_nodes_for_block,
&cuda_min_time_step,
&cuda_max_time_step };
hipModuleLaunchKernel(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B, threads.x, threads.y, threads.z, grid.x, grid.y, grid.z, 0, NULL, params, NULL);
*/
// choose and execute kernel version
printf("prefered BMOHC_Kernel_Configuration: %d\n", BMOHC_Kernel_Configuration);
// Start GPU timing events; display gated by Show_Run_Time bit 0.
cudaEventsStartTimer(start, stop, Show_Run_Time & 1);
if (Run_Fast_BM_Calculation == 18) {
hipFuncSetCacheConfig(BMOHC_Triple_Aggragation_FAST_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_Triple_Aggragation_FAST_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_Triple_Aggragation_FAST_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_Triple_Aggragation_FAST_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 17) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations, deviceProp);
} else if (Run_Fast_BM_Calculation == 16) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations, deviceProp);
} else if (Run_Fast_BM_Calculation == 15) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized, deviceProp);
} else if (Run_Fast_BM_Calculation == 7) {
hipFuncSetCacheConfig(BMOHC_OLD_2017_01_13_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_OLD_2017_01_13_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_OLD_2017_01_13_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
static_cast<int>(1000000*Ts*overlap_nodes_for_block),
1,
cuda_max_time_step,
cuda_min_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_OLD_2017_01_13_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 8) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B, deviceProp);
} else if (Run_Fast_BM_Calculation == 9) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B, deviceProp);
} else if (Run_Fast_BM_Calculation == 10) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B, deviceProp);
} else if (Run_Fast_BM_Calculation == 14) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B, deviceProp);
} else if (Run_Fast_BM_Calculation == 13) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B, deviceProp);
} else if (Run_Fast_BM_Calculation == 12) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B, deviceProp);
} else if (Run_Fast_BM_Calculation == 11) {
hipFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B, deviceProp);
} else if (Run_Fast_BM_Calculation == 6) {
hipFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 5) {
hipFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 4) {
hipFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 3) {
hipFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 2) {
hipFuncSetCacheConfig(BMOHC_IMPERCISE_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_IMPERCISE_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_IMPERCISE_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_IMPERCISE_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 1) {
//if (deviceProp.major < 5) { // correct only for Kepler architecture
hipFuncSetCacheConfig(BMOHC_FAST_kernel, static_cast<hipFuncCache_t>(BMOHC_Kernel_Configuration));
//}
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_kernel, deviceProp);
} else {
if (Show_Calculated_Power & 16) {
printf("BMOHC_NEW_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_NEW_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_NEW_kernel, deviceProp);
}
std::ostringstream oss("");
oss << "BM (" << run_mode_name << ") run time";
outer_log.timeAtFlag(33, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 1, oss.str()), Show_Run_Time & 1);
// copy results to host
//std::cout << "passed kernel...\n";
cutilCheckMsg("OHCBM_kernel<<<>>> execution failed\n");
}
// Allocates GPU memory through one of two modes: a plain device allocation
// when advanced handling is disabled, otherwise managed (unified) memory.
// gpuAssert aborts/throws on allocation failure.
extern "C" void cudaMallocatingByMode(void **ptr, size_t bytes_num, bool disable_advanced_mode) noexcept(false) {
    if (!disable_advanced_mode) {
        // advanced mode: unified memory, accessible from both host and device
        gpuAssert(hipMallocManaged(ptr, bytes_num));
        return;
    }
    // plain device-side allocation
    gpuAssert(hipMalloc(ptr, bytes_num));
}
/* IHC combined variables: file-scope sizes and cached arguments that
   IHCNewKernel computes once and later IHC / lambda stages reuse */
size_t input_max_size;                         // BM-speed nodes allocated: allocateTime*cochlea_sections + max_backup_nodes_len
size_t input_max_size_in_bytes;                // input_max_size in float bytes
size_t double_input_max_size_in_bytes;         // input_max_size in double bytes
size_t lambda_float_input_max_size_in_bytes;   // input_max_size in lambdaFloat bytes
size_t lambda_max_size;                        // LAMBDA_COUNT * input_max_size nodes
size_t lambda_max_size_in_bytes;               // lambda_max_size in float bytes
size_t lambda_double_max_size_in_bytes;        // lambda_max_size in lambdaFloat bytes
size_t lambda_forced_double_max_size_in_bytes; // lambda_max_size in JNDFloat bytes
size_t backup_speeds_size_in_bytes;            // max_backup_nodes_len in float bytes
size_t matrixSizeOfBackupTime; // calcTime*cochlea_sections - backup_speeds_length; marks the position the backup array is taken from
size_t matrixSizeOfBackupTimeInBytes; // matrixSizeOfBackupTime in float bytes
size_t matrixSizeOfCalcTime; // calcTime*cochlea_sections nodes
size_t matrixSizeOfCalcTimeInBytes; // byte size for the calc-time matrix
size_t matrixSizeOfWriteTime;        // writeTime*cochlea_sections nodes
size_t matrixSizeOfWriteTimeInBytes; // matrixSizeOfWriteTime in float bytes
bool localloadedFromHD;               // cached loadedFromHD argument of IHCNewKernel
bool local_first_time_for_param_set;  // cached first_time_for_param_set argument of IHCNewKernel
bool local_CalculateJNDOnGPU;         // cached CalculateJNDOnGPU argument of IHCNewKernel
float *local_backup_speeds;           // cached backup_speeds pointer of IHCNewKernel
size_t local_backup_speeds_length;    // cached backup_speeds_length of IHCNewKernel
/**
 * Unified AC/DC time filter in device constant memory.
 * Packed by IHCNewKernel: each sub-filter stores its size first, followed by
 * its coefficients (index 0 holds the DC filter order).
 */
__constant__ double device_time_filter[DEVICE_MAX_FILTER_ORDER];
/**
 * IHC damage vector, one factor per cochlea section (nerves converted to 0 to 10^8)
 */
__constant__ double CUDA_IHC_DAMAGE[SECTIONS];
/**
 * nerve cluster parameters: A, W and spont per lambda cluster (nerves converted to 0 to 10^8)
 */
__constant__ float CUDA_Nerves_Clusters[3*LAMBDA_COUNT];
/**
 * host-side mirror of the packed AC/DC filter array; like device_time_filter,
 * each sub-filter stores its size in the first slot
 */
double host_filter[DEVICE_MAX_FILTER_ORDER];
/* end of IHC combined variables */
// positions of the DC/AC filter sizes in the packed parameter layout
// (presumably indices into a params array -- TODO confirm against callers)
#define ORDER_OF_DC_FILTER_SIZE_IN_PARAMS 4
#define ORDER_OF_AC_FILTER_SIZE_IN_PARAMS 5
// Allocates the per-interval JND arrays and, when a white-noise length is
// given, the signal-node array as well.
// signal_length and signal_mode are accepted but not used here.
extern "C" void InputProfilesArrayInitialization(
    int maxJNDIntervals,
    int wn_length,
    int signal_length,
    int signal_mode,
    int Show_Generated_Input
) {
    cudaJNDHolder.allocateIntervals(maxJNDIntervals);
    if (wn_length <= 0) return; // no white-noise buffer requested
    cudaSignalHolder.allocateSignalNodes(wn_length);
    if (Show_Generated_Input & 8) {
        std::cout << "WN array allocated length: " << wn_length << std::endl;
    }
}
// Releases the input-profile arrays allocated by InputProfilesArrayInitialization.
extern "C" void InputProfilesArrayTermination() {
    cudaSignalHolder.releaseSignalNodes();
    cudaJNDHolder.releaseIntervals();
}
// Calculates IHC -- see description in cochlea_common.h.
// Computes all buffer sizes for the IHC / lambda stage, packs the DC and AC
// filters into the unified host_filter array, allocates GPU memory and (unless
// asMemoryHandlingOnly) uploads per-parameter-set constants and the device
// parameter structs. External interface is unchanged.
extern "C" void IHCNewKernel(
    double *IHC_Damage_Factor,
    float Nerves_Clusters[3 * LAMBDA_COUNT],
    double *dc_filter,
    int order_of_dc_filter,
    double *ac_b_filter,
    double *ac_a_filter,
    bool is_ac_iir_filter,
    int order_of_ac_filter,
    int cochlea_sections,
    int time_blocks,
    double SPLRefVal,
    float *backup_speeds,
    int backup_speeds_length,
    int calcTime,
    int writeTime,
    int allocateTime,
    int intervalTimeNodes, // single time block time nodes
    int max_backup_nodes_len,
    int lambda_offset, // offset of time nodes to compensate for a larger lambda than necessary
    float Lambda_SAT,
    float eta_AC, // IHC AC coupling [V/s/cm]
    float eta_DC, // IHC DC coupling [V/s/cm]
    bool first_time,
    bool first_time_for_param_set,
    bool loadedFromHD,
    bool disable_advanced_memory_handling,
    bool review_memory_handling,
    bool asMemoryHandlingOnly,
    float scaleBMVelocityForLambdaCalculation, // params[params_set_counter].scaleBMVelocityForLambdaCalculation
    bool CalculateJNDOnGPU,
    int maxJNDIntervals,
    int overlapNodes,
    int Decouple_Filter, // filter is decoupled if larger than 0; decoupled output blocks never read input that starts before the output block start
    int Show_Run_Time,
    Log &outer_log
) noexcept(false) {
    hipEvent_t start, stop;
    cudaEventsCreate(start, stop, Show_Run_Time & 4);
    local_CalculateJNDOnGPU = CalculateJNDOnGPU;
    // calculate sizes of input and output arrays (stored in the file-scope size variables)
    input_max_size = allocateTime*cochlea_sections + max_backup_nodes_len;
    int mean_size = maxJNDIntervals*LAMBDA_COUNT*cochlea_sections;
    int fisher_size = maxJNDIntervals*LAMBDA_COUNT;
    int dA_size = maxJNDIntervals;
    input_max_size_in_bytes = input_max_size*sizeof(float);
    double_input_max_size_in_bytes = input_max_size*sizeof(double);
    lambda_float_input_max_size_in_bytes = input_max_size*sizeof(lambdaFloat);
    lambda_max_size = LAMBDA_COUNT*input_max_size;
    lambda_max_size_in_bytes = lambda_max_size*sizeof(float);
    lambda_double_max_size_in_bytes = lambda_max_size*sizeof(lambdaFloat);
    lambda_forced_double_max_size_in_bytes = lambda_max_size*sizeof(JNDFloat);
    // NOTE(review): size_t subtraction -- assumes backup_speeds_length <= calcTime*cochlea_sections, confirm
    matrixSizeOfBackupTime = calcTime*cochlea_sections - backup_speeds_length;
    matrixSizeOfBackupTimeInBytes = matrixSizeOfBackupTime*sizeof(float);
    matrixSizeOfCalcTime = calcTime*cochlea_sections;
    // BUGFIX: was computed from matrixSizeOfBackupTime (copy-paste error), which
    // under-sized the calc-time byte count whenever backup_speeds_length > 0.
    matrixSizeOfCalcTimeInBytes = matrixSizeOfCalcTime*sizeof(float);
    backup_speeds_size_in_bytes = max_backup_nodes_len*sizeof(float);
    matrixSizeOfWriteTime = writeTime*cochlea_sections;
    matrixSizeOfWriteTimeInBytes = matrixSizeOfWriteTime*sizeof(float);
    // cache per-call state for later stages
    local_backup_speeds = backup_speeds;
    local_first_time_for_param_set = first_time_for_param_set;
    localloadedFromHD = loadedFromHD;
    local_backup_speeds_length = backup_speeds_length;
    int size_of_device_params = sizeof(device_params);
    int size_of_vectors_sum_linear_coefficents = sizeof(vectors_sum_linear_coefficents);
    vectors_sum_linear_coefficents host_vectors_sum_linear_coefficents[2];
    // zero the unified filter buffer before packing DC+AC filters into it
    for (int i = 0; i < DEVICE_MAX_FILTER_ORDER; i++) {
        host_filter[i] = 0;
    }
    // pack the filters: [0]=dc order, [1..]=dc coefficients, then ac order followed by ac b coefficients
    host_filter[0] = (double)order_of_dc_filter;
    int ac_filter_b_index = (int)host_filter[0] + 1;
    int ac_filter_a_index = -1; // stays -1 when the AC filter is FIR (no feedback part)
    host_filter[ac_filter_b_index] = (double)order_of_ac_filter;
    memcpy_s(&host_filter[1], sizeof(double)*(DEVICE_MAX_FILTER_ORDER-1), dc_filter, order_of_dc_filter*sizeof(double));
    memcpy_s(&host_filter[ac_filter_b_index + 1], sizeof(double)*(DEVICE_MAX_FILTER_ORDER - order_of_dc_filter - 2), ac_b_filter, order_of_ac_filter*sizeof(double));
    std::cout.precision(5);
    // pre-scale the AC b coefficients so scaleBMVelocityForLambdaCalculation is applied by the filter itself
    for (int ix = 0; ix < order_of_ac_filter; ix++) {
        host_filter[ac_filter_b_index + 1 + ix] = scaleBMVelocityForLambdaCalculation*host_filter[ac_filter_b_index + 1 + ix];
    }
    if (is_ac_iir_filter) {
        // append the AC a (feedback) coefficients, again size-first
        ac_filter_a_index = ac_filter_b_index + order_of_ac_filter + 1;
        host_filter[ac_filter_a_index] = (double)order_of_ac_filter;
        memcpy_s(&host_filter[ac_filter_a_index + 1], sizeof(double)*(DEVICE_MAX_FILTER_ORDER - order_of_dc_filter - order_of_ac_filter - 3), ac_a_filter, order_of_ac_filter*sizeof(double));
    }
    if (review_memory_handling) {
        // BUGFIX: format specifiers corrected -- %zu for size_t values and %d for
        // int values (previously %lu/%ld, undefined behavior on LLP64 platforms).
        printf("Lambda Memory Size %zu KB\n", (lambda_max_size_in_bytes / 1024));
        printf("calcTime %d Nodes\n", calcTime);
        printf("Input (Results BM Speeds) Allocated Memory Size %zu KB\n", (input_max_size_in_bytes / 1024));
        printf("Input (Results BM Speeds) Array Allocated length %zu \n ", input_max_size);
        printf("Allocate time %d \n", allocateTime);
        printf("Backup speeds size %zu KB\n ", (backup_speeds_size_in_bytes / 1024));
    }
    // allocate lambda memory
    cudaLambdaHolderData.allocateLambdaMemory(input_max_size);
    cudaJNDHolder.allocateDeviceStructs();
    cudaEventsStartTimer(start, stop, Show_Run_Time & 4);
    // allocate JND intermediate arrays (Eq. 17-24 in Cochlear Model for Hearing Loss)
    if (CalculateJNDOnGPU) {
        cudaJNDHolder.allocateMeanNodes(mean_size);
        cudaLambdaHolderData.allocateIHCData();
        cudaJNDHolder.allocateIntervals(dA_size);
        cudaJNDHolder.allocateFisherNodes(fisher_size);
    }
    outer_log.timeAtFlag(38, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 4, "Initialize and allocate Memory for Lambda Calculation run"), Show_Run_Time & 4);
    if (!asMemoryHandlingOnly) {
        // copy all IHC parameters affected by the changed profile to the GPU
        cudaEventsStartTimer(start, stop, Show_Run_Time & 4);
        if (first_time_for_param_set) {
            /**
             * define the coefficients for both vector sums:
             * node 0: A_coefficent=scale, B_coefficent=-1 for SHigh = BM_input - AC
             * node 1: A_coefficent=eta_AC, B_coefficent=eta_DC for IHC = eta_AC*AC + eta_DC*DC
             */
            host_vectors_sum_linear_coefficents[0].A_coefficent = scaleBMVelocityForLambdaCalculation;
            host_vectors_sum_linear_coefficents[0].B_coefficent = -1;
            host_vectors_sum_linear_coefficents[0].reverseCoefficents = 0;
            host_vectors_sum_linear_coefficents[1].A_coefficent = eta_AC;
            host_vectors_sum_linear_coefficents[1].B_coefficent = eta_DC;
            host_vectors_sum_linear_coefficents[1].reverseCoefficents = 0;
            // for float coefficients with B below 1, store reciprocals instead
            // (presumably a device-side precision workaround -- TODO confirm)
            if (host_vectors_sum_linear_coefficents[1].B_coefficent < 1 && sizeof(host_vectors_sum_linear_coefficents[1].B_coefficent) == sizeof(float)) {
                host_vectors_sum_linear_coefficents[1].reverseCoefficents = 1;
                host_vectors_sum_linear_coefficents[1].A_coefficent = 1.0 / host_vectors_sum_linear_coefficents[1].A_coefficent;
                host_vectors_sum_linear_coefficents[1].B_coefficent = 1.0 / host_vectors_sum_linear_coefficents[1].B_coefficent;
            }
            gpuAssert(hipMemcpy(cudaJNDHolder.vectors_sums_coefficents, host_vectors_sum_linear_coefficents, 2 * size_of_vectors_sum_linear_coefficents, hipMemcpyHostToDevice));
            // copy IHC damage factor
            gpuAssert(hipMemcpyToSymbol(CUDA_IHC_DAMAGE, IHC_Damage_Factor, SECTIONS*sizeof(double), 0, hipMemcpyHostToDevice));
            // copy nerve cluster parameters
            gpuAssert(hipMemcpyToSymbol(CUDA_Nerves_Clusters, Nerves_Clusters, 3 * LAMBDA_COUNT*sizeof(float), 0, hipMemcpyHostToDevice));
            // copy the packed DC+AC filter array to constant memory
            gpuAssert(hipMemcpyToSymbol(device_time_filter, host_filter, DEVICE_MAX_FILTER_ORDER*sizeof(double), 0, hipMemcpyHostToDevice));
        }
        // populate the device parameter structs: [0] points at the AC filter, [1] at the DC filter
        cudaJNDHolder.host_local_param[0].Decouple_Filter = Decouple_Filter;
        cudaJNDHolder.host_local_param[0].cochlea_sections = cochlea_sections;
        cudaJNDHolder.host_local_param[0].intervalTimeNodes = intervalTimeNodes;
        cudaJNDHolder.host_local_param[0].time_blocks = time_blocks;
        cudaJNDHolder.host_local_param[0].ovelapNodes = overlapNodes;
        cudaJNDHolder.host_local_param[0].lambda_offset = lambda_offset;
        cudaJNDHolder.host_local_param[0].order_of_dc_filter = order_of_dc_filter;
        cudaJNDHolder.host_local_param[0].order_of_ac_filter = order_of_ac_filter;
        cudaJNDHolder.host_local_param[0].lambda_count = LAMBDA_COUNT;
        cudaJNDHolder.host_local_param[0].time_block = calcTime / time_blocks;
        cudaJNDHolder.host_local_param[0].FilterDecoupledMode = Decouple_Filter > 0;
        // necessary adjustment for the dA fix
        cudaJNDHolder.host_local_param[0].reverseSQRTScaleBMVelocityForLambdaCalculation = 1 / sqrtf(scaleBMVelocityForLambdaCalculation);
        cudaJNDHolder.host_local_param[0].Lambda_SAT = Lambda_SAT;
        cudaJNDHolder.host_local_param[0].calcTime = calcTime;
        cudaJNDHolder.host_local_param[0].writeTime = writeTime;
        cudaJNDHolder.host_local_param[0].filter_b_start_index = ac_filter_b_index; // first filter is AC
        cudaJNDHolder.host_local_param[0].filter_a_start_index = ac_filter_a_index; // first filter is AC
        // second struct is a copy of the first, redirected to the DC filter
        memcpy_s(&cudaJNDHolder.host_local_param[1], size_of_device_params, &cudaJNDHolder.host_local_param[0], size_of_device_params);
        cudaJNDHolder.host_local_param[1].filter_b_start_index = 0; // second filter is DC
        cudaJNDHolder.host_local_param[1].filter_a_start_index = -1; // second filter is DC (FIR, no feedback)
        gpuAssert(hipMemcpy(cudaJNDHolder.global_device_params, cudaJNDHolder.host_local_param, 2 * size_of_device_params, hipMemcpyHostToDevice));
        outer_log.timeAtFlag(39, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 4, "Load Parmeters for Lambda Calculation run"), Show_Run_Time & 4);
    }
}
// Allocates the GPU debug buffer (size_in_nodes nodes), timing the allocation
// with the given CUDA events when Show_Run_Time bit value 32 is set.
// deviceProp is accepted but not used here.
extern "C" void allocateBuffer(const int size_in_nodes, int Show_Run_Time, hipEvent_t& start,
                               hipEvent_t& stop,
                               hipDeviceProp_t deviceProp) noexcept(false) {
    const int timing_flag = Show_Run_Time & 32;
    cudaEventsCreate(start, stop, timing_flag);
    std::ostringstream label_stream("");
    label_stream << "Allocate Debug Buffer (" << size_in_nodes << " Nodes)";
    std::string label = label_stream.str();
    cudaEventsStartTimer(start, stop, timing_flag);
    cudaLambdaHolderData.allocateBufferMemory(size_in_nodes);
    cudaEventsStopQueryTimer(start, stop, timing_flag, label);
}
// Releases the GPU debug buffer, timing the release with the given CUDA events
// when Show_Run_Time bit value 32 is set. deviceProp is accepted but not used here.
extern "C" void releaseBuffer( int Show_Run_Time, hipEvent_t& start,
                               hipEvent_t& stop,
                               hipDeviceProp_t deviceProp) noexcept(false) {
    const int timing_flag = Show_Run_Time & 32;
    cudaEventsCreate(start, stop, timing_flag);
    cudaEventsStartTimer(start, stop, timing_flag);
    cudaLambdaHolderData.releaseBufferMemory();
    cudaEventsStopQueryTimer(start, stop, timing_flag, "release Debug Buffer");
}
// Frees all GPU memory owned by the IHC / lambda stage: the lambda buffers plus
// the JND interval, device-struct, Fisher and mean arrays.
// NOTE(review): release order is kept as-is -- confirm whether ReleaseDeviceStructs
// depends on the intervals still being allocated before reordering.
extern "C" void IHCKernel_Free() noexcept(false) {
cudaLambdaHolderData.releaseLambdaMemory();
cudaJNDHolder.releaseIntervals();
cudaJNDHolder.ReleaseDeviceStructs();
cudaJNDHolder.releaseFisherNodes();
cudaJNDHolder.releaseMeanNodes();
}
/**
 * calculate the cochlea section for this thread from the CUDA coordinates of the calling kernel:
 * number of sections per block multiplied by the block number, plus the thread id within the block
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcCochleaSection(blockCoordinates,blocksDimensions,threadCoordinates) (blocksDimensions.x*blockCoordinates.x + threadCoordinates.x)
/**
 * calculate the number of time nodes each thread jumps between consecutive calculated points;
 * block dimension y is the number of threads working on the same section
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcTimeNodesJump(blockCoordinates,blocksDimensions,threadCoordinates) (blocksDimensions.y)
/**
 * calculate the lambda cluster for this thread (taken from blockIdx.y)
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcLambdaBlock(blockCoordinates,blocksDimensions,threadCoordinates) (blockCoordinates.y)
/**
 * calculate the time-node offset from the beginning at which this thread starts;
 * thread dimension y is the thread id among the threads of the same section
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcTimeNodesOffset(blockCoordinates,blocksDimensions,threadCoordinates) (threadCoordinates.y)
/**
 * calculate the per-thread start offset for unified index calculations
 * (section index plus the lambda time offset converted to nodes)
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcStartMainOffset(blockCoordinates,blocksDimensions,threadCoordinates,lambda_offset,cochlea_sections) (calcCochleaSection(blockCoordinates,blocksDimensions,threadCoordinates)+ cochlea_sections*lambda_offset)
/**
 * decoupled block id within a single interval (flattened from grid z and y)
 */
#define intervalDecoupledBlockId(blockCoordinates,blocksDimensions) (blockCoordinates.z+blocksDimensions.z*blockCoordinates.y)
/**
 * number of decoupled blocks per interval for the unified IHC / lambda calculation
 */
#define intervalDecoupledBlocks(blocksDimensions) (blocksDimensions.z*blocksDimensions.y)
// total number of decoupled blocks over all intervals (grid x counts intervals)
#define totalDecoupledBlocks(blocksDimensions) (blocksDimensions.x*intervalDecoupledBlocks(blocksDimensions))
/**
 * globally unique decoupled block id: interval-local id plus the interval's base offset
 */
#define decoupledBlockId(blockCoordinates,blocksDimensions) (intervalDecoupledBlockId(blockCoordinates,blocksDimensions)+blockCoordinates.x*intervalDecoupledBlocks(blocksDimensions))
/**
 * device run of the FIR filter for a single time block of a single section
 * note: offset here is the offset of the starting time node
 * \p X is the global input array
 * \p Y is the global output array
 * \p offset index of the start of the calculation in the output array; it already accounts for the spatial section on the cochlea
 * \p time_length_analysis number of indexes in the output array to calculate
 * \p cochlea_sections number of spatial cochlea sections
 * \p filter_size order of the FIR filter
 * \p filter_index position in the unified device filter array where this filter's coefficients start
 * \p time_node_offset number of time nodes past the start offset at which this thread begins
 * \p time_node_jumps_in_cluster time-node jump between consecutive calculations
 * \p singleBlockLength in decoupled mode lets the filter ignore "tails" at the relevant positions; equals time_length_analysis when not decoupled
 * \p final_regular_division_position boundary of the final (irregular) division -- must be > 1 (used as divisor below); TODO confirm exact meaning against callers
 */
template<typename T1, typename T2> __device__ void DeviceCudaFIRFilter(T1 *X, T2 *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_index, int time_node_offset, int time_node_jumps_in_cluster, int singleBlockLength, int final_regular_division_position) {
int k = 0;
int i = 0;
int current_offset; // progress on the input; consecutive time nodes are cochlea_sections apart (data is untransposed)
int offset_boundary; // number of history taps available at the current time node
double Ycurrent; // accumulate in double regardless of T1/T2
int sgny = 0;
for (k = time_node_offset; k<time_length_analysis; k += time_node_jumps_in_cluster) {
current_offset = offset + k*cochlea_sections; // untransposed: advance by sections per time node, offset time
// sgny selects the effective block length: singleBlockLength before the final
// regular division, time_length_analysis after it.
// NOTE(review): assumes the integer quotient is only ever 0 or 1 -- confirm.
sgny = (k / (final_regular_division_position - 1));
sgny = sgny*time_length_analysis + (1 - sgny)*singleBlockLength;
// limit the filter history to samples inside the current (decoupled) block
offset_boundary = min((k + 1) % sgny, filter_size); // note if singleBlockLength == time_length_analysis
Ycurrent = 0.0f;
for (i = 0; i<offset_boundary; i++) {
Ycurrent = Ycurrent + device_time_filter[i + filter_index] * X[current_offset - i*cochlea_sections]; // untransposed: jump by sections per tap
}
Y[current_offset] = T2(Ycurrent);
}
}
/**
 * device run of the IIR filter for a single section (sequential over time,
 * since each output depends on previous outputs)
 * \p X is the global input array
 * \p Y is the global output array
 * \p offset index of the start of the calculation in the output array; it already accounts for the spatial section on the cochlea
 * \p time_length_analysis number of indexes in the output array to calculate
 * \p IntervalLength length of a decoupled interval; equals time_length_analysis when not decoupled
 * \p final_regular_division_position boundary of the final (irregular) division -- must be > 1 (used as divisor below); TODO confirm exact meaning against callers
 * \p cochlea_sections number of spatial cochlea sections
 * \p filter_size order of the IIR filter
 * \p filter_b_index position in the unified device filter array of the b (feed-forward) coefficients
 * \p filter_a_index position in the unified device filter array of the a (feedback) coefficients
 */
template<class T1, class T2> __device__ void DeviceCudaIIRFilter(T1 *X,T2 *Y, int offset, int time_length_analysis, int IntervalLength, int final_regular_division_position, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index)
{
int k = 0;
int i = 0;
int j;
int offset_boundarya; // available feedback taps (one fewer than feed-forward)
int current_offset; // progress on the input; consecutive time nodes are cochlea_sections apart (data is untransposed)
int offset_boundary; // number of history taps available at the current time node
T2 Ycurrent = T2(0.0);
int sgny = 0;
for (k = 0; k<time_length_analysis; k++) {
current_offset = offset + k*cochlea_sections; // untransposed: advance by sections per time node, offset time
// sgny selects the effective block length: IntervalLength before the final
// regular division, time_length_analysis after it.
// NOTE(review): assumes the integer quotient is only ever 0 or 1 -- confirm.
sgny = (k / (final_regular_division_position - 1));
sgny = sgny*time_length_analysis + (1 - sgny)*IntervalLength;
//offset_boundary = min((k + 1) % ((1-sgny)*IntervalLength + 2*sgny*time_length_analysis), filter_size);
// limit the filter history to samples inside the current (decoupled) block
offset_boundary = min((k + 1) % sgny, filter_size);
offset_boundarya = offset_boundary - 1;
Ycurrent = T2(0.0);
// feed-forward part (b coefficients) on the input
for (i = 0; i<offset_boundary; i++) {
Ycurrent = Ycurrent + device_time_filter[i + filter_b_index] * X[current_offset - i*cochlea_sections]; // untransposed: jump by sections per tap
}
// feedback part (a coefficients) on previously computed outputs
for (i = 0; i<offset_boundarya; i++) {
j = i + 1;
Ycurrent = Ycurrent - device_time_filter[j + filter_a_index] * Y[current_offset - j*cochlea_sections]; // untransposed: jump by sections per tap
}
Y[current_offset] = Ycurrent;
}
}
/**
 * host-side run of the IIR filter for a single section, mirroring the device
 * version but reading coefficients from host_filter and without decoupling
 * \p X is the input array
 * \p Y is the output array
 * \p offset index of the start of the calculation in the output array; it already accounts for the spatial section on the cochlea
 * \p time_length_analysis number of indexes in the output array to calculate
 * \p cochlea_sections number of spatial cochlea sections
 * \p filter_size order of the IIR filter
 * \p filter_b_index position in the unified host filter array of the b (feed-forward) coefficients
 * \p filter_a_index position in the unified host filter array of the a (feedback) coefficients
 */
template<class T1,class T2> __host__ void DeviceCudaIIRFilterHost(T1 *X, T2 *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index) {
    for (int k = 0; k < time_length_analysis; k++) {
        // data is untransposed: consecutive time nodes are cochlea_sections apart
        int pos = offset + k * cochlea_sections;
        // near the start only k+1 samples of history exist
        int b_taps = __tmin(k + 1, filter_size);
        int a_taps = b_taps - 1; // feedback part has one tap fewer (skips the current output)
        double acc = 0.0;        // accumulate in double regardless of T1/T2
        // feed-forward part (b coefficients) on the input
        for (int i = 0; i < b_taps; i++) {
            acc = acc + host_filter[i + filter_b_index] * X[pos - i * cochlea_sections];
        }
        // feedback part (a coefficients) on previously computed outputs
        for (int j = 1; j <= a_taps; j++) {
            acc = acc - host_filter[j + filter_a_index] * Y[pos - j * cochlea_sections];
        }
        Y[pos] = T2(acc);
    }
}
// explicit host instantiations of the IIR filter for the supported input/output type pairs
template __host__ void DeviceCudaIIRFilterHost<float, double>(float *X, double *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index);
template __host__ void DeviceCudaIIRFilterHost<double, double>(double *X, double *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index);
template __host__ void DeviceCudaIIRFilterHost<float, float>(float *X, float *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index);
// FIR filter kernel: covers the entire time line, parallelized over cochlea
// sections (threadIdx.x) and over decoupled time blocks (grid y/z)
template<typename T1,typename T2> __global__ void CudaFIRFilter(T1 *X, T2 *Y, device_params *filter_params)
{
int filter_index = filter_params->filter_b_start_index+1;
int filter_size = (int)(device_time_filter[filter_index-1]+0.1f); // important: each filter's data starts at index 1, index 0 holds its size
//if (threadIdx.x == 0) printf("filter_index=%d,filter_size=%d\n", filter_index, filter_size);
int cochlea_offset_section = threadIdx.x; // one thread per cochlea section
int lambda_offset = filter_params->lambda_offset;
int total_time_nodes = (filter_params->calcTime - lambda_offset);
int time_length_analysis = filter_params->intervalTimeNodes/gridDim.z; // time nodes handled per grid block
int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
int time_node_jumps_in_cluster = 1; // this kernel advances one time node at a time
/*
 * start offset represents the calculated section plus the grid block's time
 * offset and the lambda offset, all converted to node indexes
 */
int grid_block_id = (decoupledBlockId(blockIdx, gridDim) - intervalDecoupledBlockId(blockIdx, gridDim)); // each thread starts from its own adjusted offset in the time block
int offset = cochlea_offset_section + cochlea_sections*(grid_block_id*time_length_analysis + lambda_offset);//calcStartMainOffset(blockIdx,blockDim,threadIdx,lambda_offset,cochlea_sections);
int time_node_offset = intervalDecoupledBlockId(blockIdx,gridDim)*time_length_analysis;//calcTimeNodesOffset(blockIdx,blockDim,threadIdx);
time_length_analysis = time_node_offset + time_length_analysis; // convert length to an exclusive end index for this block
// single-block length: the decoupled interval when decoupling is on, otherwise all time nodes
int calculatedIntervalTimeNodes = filter_params->FilterDecoupledMode ? filter_params->intervalTimeNodes*filter_params->Decouple_Filter : total_time_nodes;
int final_regular_division_position = filter_params->intervalTimeNodes *gridDim.y;
//if (threadIdx.x==0) printf("block[%d].time_nodes=[%d,%d],interval_offset=%d,grid_block_id=%d,final_regular_division_position=%d\n", decoupledBlockId(blockIdx, gridDim), time_node_offset, time_length_analysis,offset, grid_block_id, final_regular_division_position);
DeviceCudaFIRFilter<T1, T2>(X, Y, offset, time_length_analysis, cochlea_sections, filter_size, filter_index, time_node_offset, time_node_jumps_in_cluster, calculatedIntervalTimeNodes, final_regular_division_position);
__syncthreads(); // NOTE(review): no shared memory is used in this kernel, so this barrier appears unnecessary -- confirm before removing
}
// explicit instantiations of the FIR filter kernel
template __global__ void CudaFIRFilter<float, double>(float *X, double *Y, device_params *filter_params);
template __global__ void CudaFIRFilter<double, double>(double *X, double *Y, device_params *filter_params);
// IIR filter kernel: covers the entire time line sequentially per section;
// parallelized only over cochlea sections (threadIdx.x), since each output
// sample depends on previous outputs of the same section
template<class T1,class T2> __global__ void CudaIIRFilter(T1 *X, T2 *Y, device_params *filter_params) {
int filter_b_index = 1 + filter_params->filter_b_start_index;
int filter_a_index = 1 + filter_params->filter_a_start_index;
int filter_size = filter_params->order_of_ac_filter;//(int)(device_time_filter[filter_b_index-1]+0.1); // important: each filter's data starts at index 1, index 0 holds its size
int lambda_offset = filter_params->lambda_offset;
int time_length_analysis = filter_params->calcTime - lambda_offset; // this kernel runs over the entire set
int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
/*
 * start offset is simply the section index; each thread walks its whole
 * section serially over time
 */
// interval length: the decoupled interval when decoupling is on, otherwise all time nodes
int intervalLength = filter_params->FilterDecoupledMode == true ? filter_params->intervalTimeNodes*filter_params->Decouple_Filter : time_length_analysis;
int final_regular_division_position = filter_params->intervalTimeNodes*filter_params->time_blocks;
int startOffset = threadIdx.x;
DeviceCudaIIRFilter<T1, T2>(X, Y, startOffset, time_length_analysis, intervalLength, final_regular_division_position, cochlea_sections, filter_size, filter_b_index, filter_a_index);
__syncthreads(); // NOTE(review): no shared memory is used in this kernel, so this barrier appears unnecessary -- confirm before removing
}
// explicit instantiations of the IIR filter kernel
template __global__ void CudaIIRFilter<float, double>(float *X, double *Y, device_params *filter_params);
template __global__ void CudaIIRFilter<float, float>(float *X, float *Y, device_params *filter_params);
template __global__ void CudaIIRFilter<double, double>(double *X, double *Y, device_params *filter_params);
/**
 * Runs multiple decoupled IIR filters for small inputs.
 * Each block (blockIdx.x) owns one time interval; each thread handles one cochlea section.
 * The last block absorbs the remainder when the intervals do not divide the span evenly.
 */
template<class T1, class T2> __global__ void CudaIIRFilterDecoupled(T1 *X, T2 *Y, device_params *filter_params) {
	const int b_coeff_index = filter_params->filter_b_start_index + 1; // coefficient data starts at index 1
	const int a_coeff_index = filter_params->filter_a_start_index + 1;
	const int filter_order = filter_params->order_of_ac_filter;
	const int sections = filter_params->cochlea_sections;
	const int analysis_length = filter_params->calcTime - filter_params->lambda_offset;
	int interval_length = filter_params->intervalTimeNodes * filter_params->Decouple_Filter;
	const int regular_division_end = filter_params->intervalTimeNodes * filter_params->time_blocks;
	// per-thread start: section index plus this block's interval position (untransposed layout)
	const int start_offset = threadIdx.x + blockIdx.x * interval_length * sections;
	if (blockIdx.x + 1 == gridDim.x) {
		// last block takes whatever time nodes remain
		interval_length += analysis_length - gridDim.x * interval_length;
	}
	DeviceCudaIIRFilter<T1, T2>(X, Y, start_offset, interval_length, interval_length,
		regular_division_end, sections, filter_order, b_coeff_index, a_coeff_index);
	__syncthreads();
}
// cuda iir filter
template __global__ void CudaIIRFilterDecoupled<float, double>(float *X, double *Y, device_params *filter_params);
template __global__ void CudaIIRFilterDecoupled<float, float>(float *X, float *Y, device_params *filter_params);
template __global__ void CudaIIRFilterDecoupled<double, double>(double *X, double *Y, device_params *filter_params);
/**
 * Host-side counterpart of CudaIIRFilter: runs the IIR filter over the full time
 * range, iterating the cochlea sections serially on the CPU.
 */
template<class T1, class T2> __host__ void CudaIIRFilterHost(T1 *X, T2 *Y, device_params *filter_params) {
	const int b_coeff_index = filter_params->filter_b_start_index + 1; // coefficient data starts at index 1
	const int a_coeff_index = filter_params->filter_a_start_index + 1;
	const int filter_order = filter_params->order_of_ac_filter;
	const int analysis_length = filter_params->calcTime - filter_params->lambda_offset;
	const int sections = filter_params->cochlea_sections;
	// one serial pass per cochlea section
	for (int section = 0; section < sections; ++section) {
		DeviceCudaIIRFilterHost<T1, T2>(X, Y, section, analysis_length, sections, filter_order, b_coeff_index, a_coeff_index);
	}
}
template __host__ void CudaIIRFilterHost<float, float>(float *X, float *Y, device_params *filter_params);
template __host__ void CudaIIRFilterHost<float, double>(float *X, double *Y, device_params *filter_params);
template __host__ void CudaIIRFilterHost<double, double>(double *X, double *Y, device_params *filter_params);
/**
 * Element-wise linear combination C = A*coefficentA + B*coefficentB over one time block.
 * \p A is first global input array
 * \p B is second global input array
 * \p C is global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p coefficentA multiplier applied to A
 * \p coefficentB multiplier applied to B
 * \p time_node_offset first time node (relative to offset) handled by this call
 * \p time_node_jumps_in_cluster step between consecutive handled time nodes
 */
template<typename T1, typename T2> __device__ void DeviceCudaVectorSum(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
	for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
		// untransposed layout: consecutive time nodes are cochlea_sections apart
		const int idx = offset + node * cochlea_sections;
		C[idx] = (A[idx] * coefficentA) + (B[idx] * coefficentB);
	}
}
// unified IHC divide of types
template __device__ void DeviceCudaVectorSum<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSum<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Element-wise squared linear combination C = (A*coefficentA + B*coefficentB)^2
 * over one time block of one cochlea section.
 * \p A is first global input array
 * \p B is second global input array
 * \p C is global output array
 * \p offset index of start calculation in output array (already includes the cochlea section)
 * \p time_length_analysis number of indexes in output array to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p coefficentA is coefficent to multiply vector A
 * \p coefficentB is coefficent to multiply vector B
 * \p time_node_offset is number of time nodes from the start offset the algorithm will calculate
 * \p time_node_jumps_in_cluster time nodes jump between each consecutive calculation
 */
template<typename T1, typename T2> __device__ void DeviceCudaVectorSumNSquare(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
	int k;
	T2 midSum;
	int current_offset;
	for (k = time_node_offset; k < time_length_analysis; k += time_node_jumps_in_cluster) {
		current_offset = offset + k*cochlea_sections; // untransposed adding sections multiplication for k, offset time
		// BUGFIX: use double-precision fma instead of fmaf — the coefficients are double (and T2
		// may be double), so fmaf silently truncated every operand to float before the square.
		// This also makes the combination consistent with DeviceCudaVectorSum, which computes in double.
		midSum = T2(fma(double(A[current_offset]), coefficentA, double(B[current_offset]) * coefficentB));
		C[current_offset] = midSum*midSum;
	}
}
// unified IHC divide of types
template __device__ void DeviceCudaVectorSumNSquare<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSumNSquare<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Element-wise reciprocal-weighted sum C = A/coefficentA + B/coefficentB over one time block.
 * \p A is first global input array
 * \p B is second global input array
 * \p C is global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p coefficentA divisor applied to A
 * \p coefficentB divisor applied to B
 * \p time_node_offset first time node (relative to offset) handled by this call
 * \p time_node_jumps_in_cluster step between consecutive handled time nodes
 */
template<typename T1,typename T2> __device__ void DeviceCudaVectorSumDivide(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
	for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
		// untransposed layout: consecutive time nodes are cochlea_sections apart
		const int idx = offset + node * cochlea_sections;
		C[idx] = (A[idx] / coefficentA) + (B[idx] / coefficentB);
	}
}
// same for divide templae
template __device__ void DeviceCudaVectorSumDivide<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSumDivide<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Element-wise squared reciprocal-weighted sum C = (A/coefficentA + B/coefficentB)^2
 * over one time block.
 * \p A is first global input array
 * \p B is second global input array
 * \p C is global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p coefficentA divisor applied to A
 * \p coefficentB divisor applied to B
 * \p time_node_offset first time node (relative to offset) handled by this call
 * \p time_node_jumps_in_cluster step between consecutive handled time nodes
 */
template<typename T1, typename T2> __device__ void DeviceCudaVectorSumDivideNSquare(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
	for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
		const int idx = offset + node * cochlea_sections; // untransposed layout
		const T2 combined = (A[idx] / coefficentA) + (B[idx] / coefficentB);
		C[idx] = combined * combined;
	}
}
// same for divide templae
template __device__ void DeviceCudaVectorSumDivideNSquare<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSumDivideNSquare<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Computes C = A*coefficents_set->A_coefficent + B*coefficents_set->B_coefficent
 * (or the divide variant when reverseCoefficents is set), used for calculating
 * Shigh and the AC+DC summary.
 */
template<typename T1, typename T2> __global__ void CudaVectorsSum(T1 *A, T2 *B, T2 *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set)
{
	const lambdaFloat coeffA = lambdaFloat(coefficents_set->A_coefficent);
	const lambdaFloat coeffB = lambdaFloat(coefficents_set->B_coefficent);
	const int sections = filter_params->cochlea_sections; // number of cochlea space sections
	// each decoupled block covers intervalTimeNodes/gridDim.z consecutive time nodes
	const int block_time_span = filter_params->intervalTimeNodes / gridDim.z;
	// section (thread) + lambda warm-up + this block's time slice, in untransposed layout
	const int start = threadIdx.x
		+ sections * (decoupledBlockId(blockIdx, gridDim) * block_time_span + filter_params->lambda_offset);
	// time_node_offset = 0 and step = 1: each thread walks its whole slice
	if (coefficents_set->reverseCoefficents) {
		DeviceCudaVectorSumDivide(A, B, C, start, block_time_span, sections, coeffA, coeffB, 0, 1);
	}
	else {
		DeviceCudaVectorSum(A, B, C, start, block_time_span, sections, coeffA, coeffB, 0, 1);
	}
	__syncthreads();
}
// cuda vector sum
template __global__ void CudaVectorsSum<float, double>(float *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSum<float, float>(float *A, float *B, float *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSum<double, double>(double *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
/**
 * Computes C = (A*coefficents_set->A_coefficent + B*coefficents_set->B_coefficent)^2
 * (or the divide variant when reverseCoefficents is set), used for calculating
 * Shigh and the AC+DC summary.
 */
template<typename T1, typename T2> __global__ void CudaVectorsSumNSquare(T1 *A, T2 *B, T2 *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set)
{
	const lambdaFloat coeffA = lambdaFloat(coefficents_set->A_coefficent);
	const lambdaFloat coeffB = lambdaFloat(coefficents_set->B_coefficent);
	const int sections = filter_params->cochlea_sections; // number of cochlea space sections
	// each decoupled block covers intervalTimeNodes/gridDim.z consecutive time nodes
	const int block_time_span = filter_params->intervalTimeNodes / gridDim.z;
	// section (thread) + lambda warm-up + this block's time slice, in untransposed layout
	const int start = threadIdx.x
		+ sections * (decoupledBlockId(blockIdx, gridDim) * block_time_span + filter_params->lambda_offset);
	// time_node_offset = 0 and step = 1: each thread walks its whole slice
	if (coefficents_set->reverseCoefficents) {
		DeviceCudaVectorSumDivideNSquare(A, B, C, start, block_time_span, sections, coeffA, coeffB, 0, 1);
	}
	else {
		DeviceCudaVectorSumNSquare(A, B, C, start, block_time_span, sections, coeffA, coeffB, 0, 1);
	}
	__syncthreads();
}
// cuda vector sum
template __global__ void CudaVectorsSumNSquare<float, double>(float *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSumNSquare<float, float>(float *A, float *B, float *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSumNSquare<double, double>(double *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
/**
 * Element-wise square: target = src.*src over the addressed time block.
 * \p src is global input array
 * \p target is global output array
 * \p offset start index (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p time_node_offset first time node (relative to offset) handled by this call
 * \p time_node_jumps_in_cluster step between consecutive handled time nodes
 */
template<class T> __device__ void DeviceCudaSquare(T *src, T *target, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster)
{
	for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
		const int idx = offset + node * cochlea_sections; // untransposed layout
		const T value = src[idx];
		target[idx] = value * value;
	}
}
template __device__ void DeviceCudaSquare<lambdaFloat>(lambdaFloat *src, lambdaFloat *target, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Squares src element-wise into target (Shigh.^2 => dS) with the same
 * consistent index calculation as the other summary kernels.
 */
template<class T> __global__ void CudaSquare(T *src, T *target, device_params *filter_params)
{
	const int sections = filter_params->cochlea_sections; // number of cochlea space sections
	// each decoupled block covers intervalTimeNodes/gridDim.z consecutive time nodes
	const int block_time_span = filter_params->intervalTimeNodes / gridDim.z;
	// section (thread) + lambda warm-up + this block's time slice, in untransposed layout
	const int start = threadIdx.x
		+ sections * (decoupledBlockId(blockIdx, gridDim) * block_time_span + filter_params->lambda_offset);
	// time_node_offset = 0 and step = 1: each thread walks its whole slice
	DeviceCudaSquare<T>(src, target, start, block_time_span, sections, 0, 1);
	__syncthreads();
}
// vector square
template __global__ void CudaSquare<lambdaFloat>(lambdaFloat *src, lambdaFloat *target, device_params *filter_params);
/**
 * IHC log compression over one time block:
 * IHC = log10(max(IHC_Damage_Factor*PRE_IHC, 0) + EPS).
 * \p PRE_IHC is global input array (pre-compression IHC response)
 * \p IHC is global output array
 * \p IHC_Damage_Factor calculated ihc health such that 10^8 is healthy and 0 is completely lost
 * \p offset start index (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p time_node_offset first time node (relative to offset) handled by this call
 * \p time_node_jumps_in_cluster step between consecutive handled time nodes
 */
template<typename T,typename T2> __device__ void DeviceCudaCalcIHC(T *PRE_IHC, T2 *IHC, double IHC_Damage_Factor, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster)
{
	for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
		const int idx = offset + node * cochlea_sections; // untransposed layout
		// clamp at zero before the log so non-positive responses map to log10(EPS)
		IHC[idx] = T2(log10(fmax(double(IHC_Damage_Factor * PRE_IHC[idx]), 0.0) + EPS));
	}
}
/**
 * Fused AC/DC combination + IHC log compression over one time block:
 * IHC = log10(max(IHC_Damage_Factor*(AC*coefficent_AC + DC*coefficent_DC), 0) + EPS).
 * \p AC_response is global AC input array
 * \p DC_response is global DC input array
 * \p IHC is global output array
 * \p IHC_Damage_Factor calculated ihc health such that 10^8 is healthy and 0 is completely lost
 * \p offset index of start calculation (already includes the cochlea section)
 * \p time_length_analysis number of indexes in output array to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p coefficent_AC weight applied to the AC response
 * \p coefficent_DC weight applied to the DC response
 * \p time_node_offset is number of time nodes from the start offset the algorithm will calculate
 * \p time_node_jumps_in_cluster time nodes jump between each consecutive calculation
 */
template<typename T, typename T2> __device__ void DeviceCudaCalcIHCComposite(T *AC_response,T *DC_response, T2 *IHC, double IHC_Damage_Factor, int offset, int time_length_analysis, int cochlea_sections, double coefficent_AC, double coefficent_DC, int time_node_offset, int time_node_jumps_in_cluster)
{
	int k;
	int current_offset;
	double PRE_IHC;
	for (k = time_node_offset; k<time_length_analysis; k += time_node_jumps_in_cluster) {
		current_offset = offset + k*cochlea_sections; // untransposed adding sections multiplication for k, offset time
		// BUGFIX: use double-precision fma instead of fmaf — PRE_IHC and both coefficients are
		// double, but fmaf truncated the whole combination to float before the log10 stage.
		// The divide variant (DeviceCudaCalcIHCCompositeDivide) already computes in double.
		PRE_IHC = fma(double(AC_response[current_offset]), coefficent_AC, double(DC_response[current_offset]) * coefficent_DC);
		IHC[current_offset] = T2(log10(fmax(double(IHC_Damage_Factor*PRE_IHC), 0.0) + EPS));
	}
}
/**
 * Fused AC/DC combination (divide form) + IHC log compression over one time block:
 * IHC = log10(max(IHC_Damage_Factor*(AC/coefficent_AC + DC/coefficent_DC), 0) + EPS).
 * \p AC_response is global AC input array
 * \p DC_response is global DC input array
 * \p IHC is global output array
 * \p IHC_Damage_Factor calculated ihc health such that 10^8 is healthy and 0 is completely lost
 * \p offset start index (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p coefficent_AC divisor applied to the AC response
 * \p coefficent_DC divisor applied to the DC response
 * \p time_node_offset first time node (relative to offset) handled by this call
 * \p time_node_jumps_in_cluster step between consecutive handled time nodes
 */
template<typename T, typename T2> __device__ void DeviceCudaCalcIHCCompositeDivide(T *AC_response, T *DC_response, T2 *IHC, double IHC_Damage_Factor, int offset, int time_length_analysis, int cochlea_sections, double coefficent_AC, double coefficent_DC, int time_node_offset, int time_node_jumps_in_cluster)
{
	for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
		const int idx = offset + node * cochlea_sections; // untransposed layout
		const double pre_ihc = (AC_response[idx] / coefficent_AC) + (DC_response[idx] / coefficent_DC);
		// clamp at zero before the log so non-positive responses map to log10(EPS)
		IHC[idx] = T2(log10(fmax(double(IHC_Damage_Factor * pre_ihc), 0.0) + EPS));
	}
}
/**
 * Applies IHC log compression in place over the lambda-offset time range:
 * IHC = log10(max(PRE_IHC*IHC_Damage, 0) + EPS).
 * Runs after the AC*eta_AC + DC*eta_DC summation stage.
 */
template<typename T, typename T2> __global__ void CudaCalcIHC(T *PRE_IHC, T2 *IHC, device_params *filter_params)
{
	const int section = threadIdx.x; // one thread per cochlea section
	const int sections = filter_params->cochlea_sections;
	// each decoupled block covers intervalTimeNodes/gridDim.z consecutive time nodes
	const int block_time_span = filter_params->intervalTimeNodes / gridDim.z;
	// section + lambda warm-up + this block's time slice, in untransposed layout
	const int start = section
		+ sections * (decoupledBlockId(blockIdx, gridDim) * block_time_span + filter_params->lambda_offset);
	const double damage_factor = CUDA_IHC_DAMAGE[section]; // per-section IHC health
	// time_node_offset = 0 and step = 1: each thread walks its whole slice
	DeviceCudaCalcIHC<T,T2>(PRE_IHC, IHC, damage_factor, start, block_time_span, sections, 0, 1);
	__syncthreads();
}
/**
 * Fused kernel: combines AC/DC responses with their eta coefficients and applies the
 * IHC log compression in one pass. Coefficients come from coefficents_set[1];
 * reverseCoefficents selects the divide form of the combination.
 */
template<typename T, typename T2> __global__ void CudaCalcIHCComposite(T *AC_response, T *DC_response, T2 *IHC, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set)
{
	const int section = threadIdx.x; // one thread per cochlea section
	const int sections = filter_params->cochlea_sections;
	// each decoupled block covers intervalTimeNodes/gridDim.z consecutive time nodes
	const int block_time_span = filter_params->intervalTimeNodes / gridDim.z;
	// section + lambda warm-up + this block's time slice, in untransposed layout
	const int start = section
		+ sections * (decoupledBlockId(blockIdx, gridDim) * block_time_span + filter_params->lambda_offset);
	// NOTE: this kernel reads slot [1] of the coefficient set
	const T eta_AC = T(coefficents_set[1].A_coefficent);
	const T eta_DC = T(coefficents_set[1].B_coefficent);
	const double damage_factor = CUDA_IHC_DAMAGE[section]; // per-section IHC health
	// time_node_offset = 0 and step = 1: each thread walks its whole slice
	if (coefficents_set[1].reverseCoefficents) {
		DeviceCudaCalcIHCCompositeDivide<T, T2>(AC_response, DC_response, IHC, damage_factor, start, block_time_span, sections, double(eta_AC), double(eta_DC), 0, 1);
	}
	else {
		DeviceCudaCalcIHCComposite<T, T2>(AC_response, DC_response, IHC, damage_factor, start, block_time_span, sections, double(eta_AC), double(eta_DC), 0, 1);
	}
	__syncthreads();
}
// calc ihc isolated to templates
template __global__ void CudaCalcIHC<lambdaFloat,float>(lambdaFloat *PRE_IHC, float *IHC, device_params *filter_params);
// calc ihc isolated to templates
template __global__ void CudaCalcIHCComposite<lambdaFloat, float>(lambdaFloat *AC_response, lambdaFloat *DC_response, float *IHC, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
/**
 * Calculates one lambda block from the IHC array over one time range:
 * base = min(Lambda_SAT - spont[lambda_index], max(A[A_index]*IHC, 0)),
 * JND_Lambda = base, Lambda = base + spont[lambda_index].
 * \p IHC is global ihc input array
 * \p Lambda is global output array (with spont rate added back)
 * \p JND_Lambda is global output array (base lambda only)
 * \p cochlea_offset_section start index: cochlea section + lambda block time offset
 * \p lambda_write_offset additional offset into the per-lambda-type output region
 * \p lambda_index index of this lambda block's spont rate in the unified nerves parameters
 * \p A_index index of the A gain value in the unified nerves parameters
 * \p time_length_analysis number of time nodes to calculate
 * \p cochlea_sections stride between consecutive time nodes of one section
 * \p time_node_offset first time node (relative to start) handled by this call
 * \p time_node_jumps_in_cluster step between consecutive handled time nodes
 * \p Lambda_SAT saturation rate cap
 */
template<typename T1,typename T2> __device__ void DeviceCudaCalcLambda(T1 *IHC, float *Lambda, T2 *JND_Lambda, int cochlea_offset_section, int lambda_write_offset, int lambda_index, int A_index, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster, float Lambda_SAT)
{
	for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
		// reads advance in untransposed layout; writes add the lambda-type region offset
		const int read_idx = cochlea_offset_section + node * cochlea_sections;
		const int write_idx = read_idx + lambda_write_offset;
		// clamp the gained IHC response to [0, Lambda_SAT - spont]
		const double base_lambda = fmin(double(Lambda_SAT - CUDA_Nerves_Clusters[lambda_index]),
			fmax(double(CUDA_Nerves_Clusters[A_index]) * IHC[read_idx], 0.0));
		JND_Lambda[write_idx] = base_lambda;
		Lambda[write_idx] = float(base_lambda) + CUDA_Nerves_Clusters[lambda_index];
	}
}
// device calc ihc isolated for templates
template __device__ void DeviceCudaCalcLambda<lambdaFloat,double>(lambdaFloat *IHC, float *Lambda, double *JND_Lambda, int cochlea_offset_section, int lambda_write_offset, int lambda_index, int A_index, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster, float Lambda_SAT);
/*, float *Lambda*/
/**
 * Calculates the lambda blocks from the IHC array by
 * min(Lambda_SAT, max(Aihc[lambda,section]*IHC, spont[lambda])),
 * writing each lambda type into its own region of JND_Lambda (and optionally
 * Lambda_Buffer when save_lambda is set).
 * Section 0 is special-cased: its output is forced to the spont rate
 * (zero_factor/zero_offset below).
 */
template<typename T1,typename T2> __global__ void CudaCalcLambda(T1 *IHC,T2 *Lambda_Buffer, T2 *JND_Lambda, device_params *filter_params,int save_lambda)
{
// one thread per cochlea section
int cochlea_offset_section = threadIdx.x; //
int lambda_offset = filter_params->lambda_offset;
// decoupled blocks are split evenly between the LAMBDA_COUNT lambda types
int intervalsNum = totalDecoupledBlocks(gridDim) / LAMBDA_COUNT;
// which lambda type this block computes, and which time interval within that type
int lambda_index = decoupledBlockId(blockIdx, gridDim) / intervalsNum;
int interval_id = decoupledBlockId(blockIdx, gridDim) - lambda_index*intervalsNum;
int time_length_analysis = filter_params->intervalTimeNodes / gridDim.z; // time nodes per block
int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
/*
 * start offset represents calculated section + lambda offset_in_time*sections
 */
int offset = cochlea_offset_section + cochlea_sections*(interval_id*time_length_analysis + lambda_offset);//calcStartMainOffset(blockIdx,blockDim,threadIdx,lambda_offset,cochlea_sections);
// each thread start from its own adjusted offset in the time block offset
int time_node_offset = 0;//calcTimeNodesOffset(blockIdx,blockDim,threadIdx);
//int A_index = lambda_index + LAMBDA_COUNT;
// each lambda type writes into its own calcTime*sections region of the output
int lambda_write_offset = lambda_index*filter_params->calcTime*cochlea_sections;
// section 0 writes spont only: factor 0 cancels base_lambda, zero_offset supplies spont
double zero_factor = cochlea_offset_section != 0 ? 1.0 : 0.0;
int write_offset;
int k;
int current_offset;
double base_lambda;
double Lambda_SAT = filter_params->Lambda_SAT;
// per-lambda-type, per-section gain applied to the IHC response
double Aihc = double(model_Aihc[lambda_index*SECTIONS + cochlea_offset_section]);
double spont = double(CUDA_Nerves_Clusters[lambda_index]); // spontaneous rate of this lambda type
double zero_offset = cochlea_offset_section != 0 ? 0.0 : spont;
//DeviceCudaCalcLambda(IHC, Lambda, JND_Lambda, cochlea_offset_section, lambda_write_offset, lambda_index, A_index, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster, filter_params->Lambda_SAT);
for (k = time_node_offset; k<time_length_analysis; k++) {
current_offset = offset + k*cochlea_sections; // untransposed adding sections multiplication for k, offset time
write_offset = current_offset + lambda_write_offset;
// clamp gained IHC response between spont rate and saturation
base_lambda = fmin(Lambda_SAT, fmax(Aihc*double(IHC[current_offset]),spont));
JND_Lambda[write_offset] = T2(fma(zero_factor,base_lambda,zero_offset));
if (save_lambda) {
Lambda_Buffer[write_offset] = T2(fma(zero_factor, base_lambda, zero_offset));
}
//if (current_offset>=350000 && current_offset<=350256) printf("Y[%d]==%.3e,time_length_analysis=%d\n",current_offset,Y[current_offset],time_length_analysis);
}
__syncthreads();
}
// explicit instantiation of the lambda kernel: lambdaFloat IHC input, double lambda output
template __global__ void CudaCalcLambda<lambdaFloat, double>(lambdaFloat *IHC, double *Lambda_Buffer, double *JND_Lambda, device_params *filter_params,int save_lambda);
/**
 * Device helper: strided element-wise copy from src to dst (with implicit T1->T2 conversion).
 * Copies the element at offset + time_node*cochlea_sections for every handled time node,
 * starting at time_node_offset and stepping by time_node_jumps_in_cluster.
 */
template<typename T1, typename T2> __device__ void DeviceCopy_Array(volatile T1 *src, volatile T2 *dst, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster) {
	for (int time_node = time_node_offset; time_node < time_length_analysis; time_node += time_node_jumps_in_cluster) {
		const int element_index = offset + time_node * cochlea_sections;
		dst[element_index] = src[element_index];
	}
}
/**
* Equation references inside are to the Yonatan Koral "Efficient Tool" thesis.
* This global CUDA procedure calculates the IHC array from the Basilar Membrane array results using device functions:
* calculate the AC
* calculate the SHigh
* calculate the dS
* calculate the DC
* calculate the IHC
*/
template<typename T> __global__ void CudaUnifiedCalcIHC(
float *BM_internal,
T *cuda_Buffer2,
T *cuda_Buffer3,
T *cuda_Buffer4,
T *cuda_BufferOutput,
device_params *filter_params,
vectors_sum_linear_coefficents *coefficents_set,
int backup_stage) {
// first filter paramets for ac filter from filter params index 0
int filter_index = filter_params->filter_b_start_index + 1;
int filter_size = filter_params->order_of_ac_filter; //(int)(device_time_filter[filter_index]+0.1f); // very important filter data start from index 1 0 index is size....
int cochlea_offset_section = calcCochleaSection(blockIdx, blockDim, threadIdx);
int lambda_offset = filter_params->lambda_offset;
int time_length_analysis = filter_params->calcTime - lambda_offset; // here I run on the entire set
int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
int time_node_jumps_in_cluster = calcTimeNodesJump(blockIdx, blockDim, threadIdx); // block dim y represent num of threads per sections so each thread will jump by this number times the number of sections
int offset = calcStartMainOffset(blockIdx, blockDim, threadIdx, lambda_offset, cochlea_sections);
int time_node_offset = calcTimeNodesOffset(blockIdx, blockDim, threadIdx);
// first sum is bm internal and cuda AC uses coefficents set from index 0
T coefficentA = T(coefficents_set->A_coefficent);
T coefficentB = T(coefficents_set->B_coefficent);
// per-section hearing-loss attenuation applied in the composite IHC stage below
double IHC_Damage_Factor = CUDA_IHC_DAMAGE[cochlea_offset_section];
// calculate unified block to include decoupling, note value is same on both fir filters
int calculatedIntervalTimeNodes = filter_params->FilterDecoupledMode ? filter_params->intervalTimeNodes*filter_params->Decouple_Filter : time_length_analysis;
int final_regular_division_position = filter_params->intervalTimeNodes*filter_params->time_blocks;
// ac will only be calculated if its not IIR filter otherwise it will be calculated seperatly - calculating Eq. 5.1
if (filter_params->filter_a_start_index == -1) {
// first stage calculate the AC (FIR path); the IIR path runs in a separate kernel before this one
DeviceCudaFIRFilter<float, T>(BM_internal, cuda_Buffer2, offset, time_length_analysis, cochlea_sections, filter_size, filter_index, time_node_offset, time_node_jumps_in_cluster, calculatedIntervalTimeNodes, final_regular_division_position);
}
if (backup_stage == 9) { // AC backup
DeviceCopy_Array<JNDFloat, JNDFloat>(cuda_Buffer2, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
__syncthreads();
// calculating the summary of bm internal and ac each thread handles only its own result of ac so synchronization unecessary
// calculating Eq. 5.2
DeviceCudaVectorSumNSquare<float, T>(BM_internal, cuda_Buffer2, cuda_Buffer3, offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
__syncthreads();
if (backup_stage == 12 || backup_stage == 11) { // SHigh backup
DeviceCopy_Array<JNDFloat, JNDFloat>(cuda_Buffer3, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
// updating filter parameters for the dc filter now its from index 1, pre filter synchronization is necessary to ensure DS array is valid
// the DC filter description lives in the second device_params entry
filter_index = filter_params[1].filter_b_start_index+1;
filter_size = filter_params->order_of_dc_filter; // dc filter size
// calculating the DC filter - Eq 5.3
DeviceCudaFIRFilter<T, T>(cuda_Buffer3, cuda_Buffer4, offset, time_length_analysis, cochlea_sections, filter_size, filter_index, time_node_offset, time_node_jumps_in_cluster, calculatedIntervalTimeNodes, final_regular_division_position);
__syncthreads();
if (backup_stage == 10) { // DC backup
DeviceCopy_Array<JNDFloat, JNDFloat>(cuda_Buffer4, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
// now setting the coefficents for pre IHC calculator
coefficentA =T(coefficents_set[1].A_coefficent);
coefficentB =T(coefficents_set[1].B_coefficent);
int reverseCoefficents = coefficents_set[1].reverseCoefficents;
// calculating the AC and DC for pre IHC, AC is already valid and since each thread use only its own results so synchronization is unecessary
// calculating equations 5.4 - 5.5, this was tested with divided parmeters and multiplied parmeters due to previous calculation error on my part (which was fixed), you can use either
if (reverseCoefficents) {
//if (threadIdx.x == 0) printf("DeviceCudaVectorSumDivide(A,B,C,%d,%d,%d,%.2f,%.2f,%d,%d)\n", offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
DeviceCudaCalcIHCCompositeDivide<T, JNDFloat>(cuda_Buffer2, cuda_Buffer4, BM_internal, IHC_Damage_Factor, offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
}
else {
//if (threadIdx.x == 0) printf("DeviceCudaVectorSum(A,B,C,%d,%d,%d,%.2f,%.2f,%d,%d)\n", offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
DeviceCudaCalcIHCComposite<T, JNDFloat>(cuda_Buffer2, cuda_Buffer4, BM_internal, IHC_Damage_Factor, offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
}
if (backup_stage == 14 || backup_stage==13) { // PRE IHC backup
// NOTE(review): BM_internal is float*, yet the explicit template arguments here are
// <JNDFloat, JNDFloat>; this only type-checks if JNDFloat is float — confirm.
DeviceCopy_Array<JNDFloat, JNDFloat>(BM_internal, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
__syncthreads();
}
// possible types for iir on device
// explicit instantiation of the unified IHC kernel for the JNDFloat working type
template __global__ void CudaUnifiedCalcIHC<JNDFloat>(
float *BM_internal,
JNDFloat *cuda_Buffer2,
JNDFloat *cuda_Buffer3,
JNDFloat *cuda_Buffer4,
JNDFloat *cuda_BufferOutput,
device_params *filter_params,
vectors_sum_linear_coefficents *coefficents_set,
int backup_stage);
/**
 * Backup kernel: mirrors a device array into the debug backup buffer.
 * One thread per cochlear section; each decoupled block copies one time interval
 * (interval length = intervalTimeNodes / gridDim.z), shifted by lambda_offset.
 */
template<typename T1, typename T2> __global__ void copyDeviceBackup(T1 *src, T2 *cudaBackupArray, device_params *filter_params) {
	const int section = threadIdx.x;
	const int interval_nodes = filter_params->intervalTimeNodes / gridDim.z;
	const int sections = filter_params->cochlea_sections;
	const int block_id = decoupledBlockId(blockIdx, gridDim);
	// base offset: this section, at this block's interval start plus the lambda time offset
	const int base_offset = section + sections * (block_id * interval_nodes + filter_params->lambda_offset);
	// copy every time node of the interval (start node 0, stride 1)
	DeviceCopy_Array<T1, T2>(src, cudaBackupArray, base_offset, interval_nodes, sections, 0, 1);
}
// explicit instantiations of the backup copy kernel
template __global__ void copyDeviceBackup<float, JNDFloat>(float *src, JNDFloat *cudaBackupArray, device_params *filter_params);
//template __global__ void copyDeviceBackup<JNDFloat, JNDFloat>(JNDFloat *src, JNDFloat *cudaBackupArray, device_params *filter_params);
/**
 * Launches the IIR AC-filter stage (Eq. 5.1 when the AC filter is IIR).
 * Decoupled mode splits time blocks across the grid; otherwise a single block
 * parallelizes over cochlear sections only. Timing is logged at flag 40 when
 * bit 16 of Show_Run_Time is set.
 */
void runIIRKernelByParams(int Show_Run_Time,Log &outer_log) {
	const int profile_iir = Show_Run_Time & 16;
	dim3 iirThreads(SECTIONS, 1, 1);
	hipEvent_t start, stop;
	cudaEventsCreate(start, stop, profile_iir);
	cudaEventsStartTimer(start, stop, profile_iir);
	if (cudaJNDHolder.host_local_param[0].FilterDecoupledMode) {
		// one grid block per decoupled group of time blocks (Decouple_Filter <= 0 means no grouping)
		const int decouple = cudaJNDHolder.host_local_param[0].Decouple_Filter > 0 ? cudaJNDHolder.host_local_param[0].Decouple_Filter : 1;
		dim3 iirGrid(cudaJNDHolder.host_local_param[0].time_blocks / decouple, 1, 1);
		CudaIIRFilterDecoupled<float, lambdaFloat> KERNEL_ARGS2(iirGrid, iirThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params);
	}
	else {
		// IIR is sequential in time: parallelize in space only, single block of SECTIONS threads
		dim3 iirGrid(1, 1, 1);
		CudaIIRFilter<float, lambdaFloat> KERNEL_ARGS2(iirGrid, iirThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params);
	}
	outer_log.timeAtFlag(40, cudaEventsStopQueryTimer(start, stop, profile_iir, "IIR calculation time"), profile_iir);
}
/**
 * Configures launch dimensions for a decoupled run.
 * Grid: x = number of intervals, y = decoupled blocks per interval,
 * z = extra unified-IHC decoupling factor (clamped to at least 1).
 * Threads: one per cochlear section.
 */
void setDecoupledRun(dim3 &filtersGrid, dim3 &filtersThreads, const int& intervals_num, const int&blocks_per_interval, const int& Decouple_Unified_IHC_Factor) {
	const int z_factor = Decouple_Unified_IHC_Factor > 0 ? Decouple_Unified_IHC_Factor : 1;
	filtersGrid = dim3(intervals_num, blocks_per_interval, z_factor);
	filtersThreads = dim3(SECTIONS, 1, 1);
}
// calculating IHC by stages Cochlear Model for Hearing Loss, equations numbers from Yonatan Koral Thesis, Efficent Tool For Cochlea Simulation
// host_backup: receives a debug snapshot when 9 <= backup_stage <= 14; save_lambda: also mirror lambda into Buffer1;
// Decouple_Unified_IHC_Factor <= 0 selects the unified single-kernel path, > 0 the multi-kernel decoupled path
extern "C" void RunIHCKernel(JNDFloat *host_backup, int Show_Run_Time, int save_lambda, int backup_stage,int Decouple_Unified_IHC_Factor,Log &outer_log) {
// if the data loaded from hd its on host and it needs to be first time or its not relevant
hipEvent_t start, stop;
cudaEventsCreate(start, stop, Show_Run_Time & 8);
dim3 filtersGrid(IHC_FILTER_BLOCK, 1, 1);
dim3 filtersThreads(SECTIONS_PER_IHC_FILTER_BLOCK, THREADS_PER_IHC_FILTER_SECTION, 1);
// stride between the per-stage slices inside cuda_JND_Lambda (calcTime nodes x SECTIONS)
int lambda_write_offset = cudaJNDHolder.host_local_param->calcTime*SECTIONS;
// copy from saved speeds the rest of the data
// unfied calculation of the ihc
cudaEventsStartTimer(start, stop, Show_Run_Time & 8);
if (Decouple_Unified_IHC_Factor<=0) {
if (cudaJNDHolder.host_local_param[0].order_of_ac_filter > -1) {
// calculating AC stage if AC filter is IIR, Eq. 5.1
// NOTE(review): CudaUnifiedCalcIHC keys its FIR path on filter_a_start_index == -1 while
// this host check keys the IIR path on order_of_ac_filter > -1 — confirm the two flags stay in sync
runIIRKernelByParams(Show_Run_Time,outer_log);
}
// rest of the IHC process calculated in single kernel - Eq. 5.2 - 5.5
CudaUnifiedCalcIHC<JNDFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaLambdaHolderData.cuda_JND_Lambda + 2*lambda_write_offset, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params, cudaJNDHolder.vectors_sums_coefficents,backup_stage);
} else {
// this is effectively cause the grid to be one large interval if decoupler size is 0
int dfilter = cudaJNDHolder.host_local_param[0].Decouple_Filter > 0 ? cudaJNDHolder.host_local_param[0].Decouple_Filter : cudaJNDHolder.host_local_param[0].time_blocks;
setDecoupledRun(filtersGrid,filtersThreads, cudaJNDHolder.host_local_param[0].time_blocks / dfilter, dfilter, Decouple_Unified_IHC_Factor);
if (cudaJNDHolder.host_local_param[0].order_of_ac_filter == -1) {
// calculating the lambda by multiple kernel currently remain for backward reference
// ac filter run, FIR filter Eq. 5.1
CudaFIRFilter<float, lambdaFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params);
}
else {
// ac a filter make IIR present, Eq. 5.1
runIIRKernelByParams(Show_Run_Time,outer_log);
}
if (backup_stage == 9) { // AC backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
// calcs Eq 5.2 - dS
CudaVectorsSumNSquare<float, lambdaFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaJNDHolder.global_device_params, &cudaJNDHolder.vectors_sums_coefficents[0]);
if (backup_stage == 12 || backup_stage == 11) { // SHigh backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
// caculating Eq 5.3 - DC response
CudaFIRFilter<lambdaFloat, lambdaFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaLambdaHolderData.cuda_JND_Lambda + 2*lambda_write_offset, &cudaJNDHolder.global_device_params[1]);
if (backup_stage == 10) { // DC backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda + 2*lambda_write_offset, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
// calculates Eq 5.4 and 5.5 - Log of Psi IHC
CudaCalcIHCComposite<lambdaFloat, float> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_JND_Lambda + 2 * lambda_write_offset, cudaHolderData.cuda_saved_speeds, cudaJNDHolder.global_device_params, &cudaJNDHolder.vectors_sums_coefficents[1]);
// calc the IHC log
if (backup_stage == 13 || backup_stage == 14) { // IHC backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
}
// pull the requested debug snapshot back to the host
if (backup_stage >= 9 && backup_stage <= 14) {
GeneralKernel_Copy_Results_Template<JNDFloat>(host_backup, cudaLambdaHolderData.cuda_Buffer1, lambda_write_offset);
}
outer_log.timeAtFlag(41, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 8, "IHC calculation time"), Show_Run_Time & 8);
/// calcs the lambda itself
cudaEventsStartTimer(start, stop, Show_Run_Time & 8);
/* cuda_Lambda,*/
int dfilter = cudaJNDHolder.host_local_param[0].Decouple_Filter > 0 ? cudaJNDHolder.host_local_param[0].Decouple_Filter : cudaJNDHolder.host_local_param[0].time_blocks;
setDecoupledRun(filtersGrid, filtersThreads, LAMBDA_COUNT* cudaJNDHolder.host_local_param[0].time_blocks / dfilter, dfilter, Decouple_Unified_IHC_Factor);// changed grid to support all lambda calculations
// Calculates the AN response for all groups of Neurons Eq 5.6 - 5.8
CudaCalcLambda<lambdaFloat, JNDFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_Buffer1, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params, save_lambda);
outer_log.timeAtFlag(42, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 8, "Lambda calculation time"), Show_Run_Time & 8);
}
/**
 * Copies a device float array back to a host buffer.
 * \p target host destination array
 * \p src device source array
 * \p size number of float elements to copy
 * gpuAssert raises on any hipMemcpy failure.
 */
extern "C" void GeneralKernel_Copy_Results(float *target,float *src, size_t size) noexcept(false) {
	gpuAssert(hipMemcpy((void *)target, src, size * sizeof(float), hipMemcpyDeviceToHost));
}
/**
 * Copies a device double array back to a host buffer.
 * \p target host destination array
 * \p src device source array
 * \p size number of double elements to copy
 * gpuAssert raises on any hipMemcpy failure.
 */
extern "C" void GeneralKernel_Copy_Results_Double(double *target, double *src, size_t size) noexcept(false) {
	gpuAssert(hipMemcpy((void *)target, src, size * sizeof(double), hipMemcpyDeviceToHost));
}
/**
 * Copies 'size' elements of the device array src, starting at element 'offset',
 * into the host buffer target. gpuAssert raises on any hipMemcpy failure.
 */
template<class T> void GeneralKernel_Copy_Results_Template(T *target, T *src, size_t size, size_t offset) {
	gpuAssert(hipMemcpy((void *)target, src + offset, size * sizeof(T), hipMemcpyDeviceToHost));
}
// Convenience overload: copy 'size' elements from the start of the device array.
template<class T> void GeneralKernel_Copy_Results_Template(T *target, T *src, size_t size) {
	GeneralKernel_Copy_Results_Template<T>(target, src, size, 0);
}
// Uploads time_nodes*sections elements of the host array cpu_src, beginning at time
// node start_time_node, into the device buffer cuda_target.
template<class T> void ReverseKernel_Copy_Results_Template(T *cpu_src, T *cuda_target, size_t start_time_node, size_t time_nodes, int sections) {
	const size_t byte_count = time_nodes * sections * sizeof(T);
	gpuAssert(hipMemcpy((void *)cuda_target, cpu_src + start_time_node * sections, byte_count, hipMemcpyHostToDevice));
}
/**
 * Device helper: sums 'length' cells of src spaced 'jump_size' apart and writes the total to *dst.
 * \p src input array (reads start at src[0])
 * \p dst pointer to the single output cell
 * \p jump_size stride between accumulated cells
 * \p length number of accumulated cells
 */
__device__ void CalcAccumulation(float *src,float *dst,int jump_size,int length) {
	float total = 0.0f;
	const int end = length * jump_size;
	for (int pos = 0; pos < end; pos += jump_size) {
		total += src[pos];
	}
	*dst = total;
}
/**
 * Device helper: averages 'length' cells of src spaced 'jump_size' apart into *dst.
 * Delegates the summation to CalcAccumulation, then divides by the cell count.
 * \p src input array
 * \p dst pointer to the single output cell
 * \p jump_size stride between accumulated cells
 * \p length number of averaged cells
 */
__device__ void CalcAvg(float *src, float *dst, int jump_size, int length) {
	CalcAccumulation(src, dst, jump_size, length);
	*dst /= float(length);
}
/*
 * Computes dA for each JND interval: the peak absolute input sample inside the
 * interval's active window (head nodes skipped). Runs as a single block whose
 * threads each own one interval.
 */
__global__ void cudaCalculateDA(float *input, device_jnd_params *dA, int JNDIntervalNodes, int JNDIntervalHeadNodes, int JNDIntervalActualNodes, int offset_start) {
	const int first = offset_start + threadIdx.x*JNDIntervalNodes + JNDIntervalHeadNodes;
	const int last = first + JNDIntervalActualNodes;
	float peak_square = 0.0f;
	for (int node = first; node < last; node++) {
		const float sample = input[node];
		const float sample_square = sample * sample;
		if (peak_square < sample_square) peak_square = sample_square;
	}
	// max |x| == sqrt(max x^2)
	dA[threadIdx.x].dA = sqrtf(peak_square);
	__syncthreads();
}
// Part of Eq. 17: time-averages JND_Lambda per (lambda type, interval, section).
// Run as its own kernel (before GlobalCalculateJND) to avoid synchronization issues.
// Grid: block (x,y) = (interval, lambda type); one thread per cochlear section.
__global__ void GlobalCalculateMeanRate(
device_jnd_params *dA,
JNDFloat *JND_Lambda,
JNDFloat *MeanRate,
int lengthOffset,
int calculatedMatrixSize,
int JNDIntervals, // local
int JNDIntervalLength,
int JNDIntervalHeadNodes,
int overlapNodes,
int JND_USE_Spont_Base_Reference_Lambda
){
	const int lambda_type = blockIdx.y;
	const int interval = blockIdx.x;
	const int section = threadIdx.x;
	const int sections = blockDim.x;
	const int flat_interval = lambda_type*JNDIntervals + interval;
	// first averaged time node of this interval for this section (head nodes skipped)
	const int read_base = lambda_type*calculatedMatrixSize + section + sections*(interval*JNDIntervalLength + JNDIntervalHeadNodes);
	const int write_index = flat_interval*sections + section;
	// each thread averages its own section, so no reduction is required;
	// accumulate in double to improve precision
	double running_sum = JND_Lambda[read_base];
	for (int time_step = 1; time_step < lengthOffset; time_step++) {
		running_sum = running_sum + JND_Lambda[read_base + sections*time_step];
	}
	MeanRate[write_index] = JNDFloat(running_sum) / JNDFloat(lengthOffset);
	__syncthreads();
}
// uses the result of GlobalCalculateMeanRate to calculate on GPU eq 17-20)
// Grid: block (x,y) = (interval, lambda type); one thread per cochlear section.
// backup_stage (1..6) mirrors the matching intermediate quantity into Buffer1 for debugging.
__global__ void GlobalCalculateJND(
bool calculate_ai,
bool calculate_rms,
device_jnd_params *dA,
JNDFloat *JND_Lambda,
JNDFloat *MeanRate,
JNDFloat *Buffer1,
double *nIHC,
double Fs,
double scaleBMVelocityForLambdaCalculation,
int writeMatrixSize,
int calculatedMatrixSize,
int overlapNodes,
int JNDIntervalHeadNodes,
int JNDIntervalLength,
int lengthOffset,
int JNDIntervals, // local
int JNDIntervalsFull, // local
int *JND_Calculated_Intervals, // global
int numOFJNDCalculated, // global
int *JND_Refrence_Intervals, // global
int numOFJNDReferences, // global
int handeledIntervalsJND, // already handeled intervals
int *JND_Serial_Intervals_Positions,
int *JND_Interval_To_Reference,
JNDFloat *F_RA, // result for fisher rate not lambda summed, but time and space reduced
JNDFloat *FisherAISum, // result for fisher AI not lambda summed, but time and space reduced
double JND_Delta_Alpha_Length_Factor,
device_params *general_params,
int isdACalced,
int JND_USE_Spont_Base_Reference_Lambda,
int backup_stage
) {
// per-block scratch for the spatial (section) tree reductions; one slot per section
__shared__ JNDFloat shared_acc_rms[SECTIONS];
__shared__ JNDFloat shared_acc_ai[SECTIONS];
int lambda_index = blockIdx.y;
int dAindex = blockIdx.x;
int section_index = threadIdx.x;
int sections = blockDim.x;
int avg_fisher_full_index = lambda_index*JNDIntervals + dAindex;
int mean_rate_offset = avg_fisher_full_index*sections + section_index;
JNDFloat T = JND_Delta_Alpha_Length_Factor / Fs;
JNDFloat Tlength = float(lengthOffset) / Fs;
//JNDFloat lambdaBase = CUDA_Nerves_Clusters[lambda_index];
int globaldAIndex = dAindex + handeledIntervalsJND;
// special control mechanism for mean rate calculation that avrages mean rate (not dMeanRate)
// to ensure that summary is larger enough than reference (so we are not just square negative values that will create artifacts)
if (calculate_rms) {
// test RMS average for debug output
shared_acc_rms[section_index] = MeanRate[mean_rate_offset];
// reducing spatial dimension
// tree reduction over sections; assumes SECTIONS is a power of two — TODO confirm
for (int t_i = (SECTIONS >> 1); t_i >= 1; t_i >>= 1) {
__syncthreads();
if (section_index<t_i) {
shared_acc_rms[section_index] = shared_acc_rms[section_index] + shared_acc_rms[section_index + t_i];
}
}
__syncthreads();
if (section_index == 0 && backup_stage==1) {
Buffer1[avg_fisher_full_index] = shared_acc_rms[section_index] / (JNDFloat(sections));
}
}
__syncthreads();
int globalReferenceInterval = globaldAIndex;
bool isRefrence = numOFJNDReferences > 0;
// this means we have actually refrences to test
if (isRefrence) {
isRefrence = false; // now for the actual test
// linear scan: is this interval itself one of the pure-reference intervals?
for (int index = 0; index < numOFJNDReferences; index++) {
if (JND_Refrence_Intervals[index] == globaldAIndex) {
isRefrence = true;
break;
}
}
}
__syncthreads();
// find for each calculated JND signal+noise block its pure Noise block
if (!isRefrence) {
// assuming everything has reference
globalReferenceInterval = JND_Interval_To_Reference[JND_Serial_Intervals_Positions[globalReferenceInterval]];
}
__syncthreads();
int dAreferenceIndex = globalReferenceInterval - handeledIntervalsJND; // to find local index on tested output
int mean_rate_reference_offset = (avg_fisher_full_index + dAreferenceIndex - dAindex)*sections + section_index;
// dA source: locally computed table when isdACalced, otherwise the precomputed global table
JNDFloat dAvalue = dA[isdACalced*dAindex + (1 - isdACalced)*globaldAIndex].dA;
JNDFloat dMRate = (MeanRate[mean_rate_offset] - MeanRate[mean_rate_reference_offset]) / dAvalue;
if (backup_stage == 2) {
Buffer1[mean_rate_offset] = dMRate;
}
if (calculate_ai) {
int calculate_lambda_offset = lambda_index*calculatedMatrixSize + section_index + sections*(dAindex*JNDIntervalLength + JNDIntervalHeadNodes);
int reference_lambda_offset = lambda_index*calculatedMatrixSize + section_index + sections*(dAreferenceIndex*JNDIntervalLength + JNDIntervalHeadNodes);
JNDFloat preFisherAITimeReducedValue = 0.0;
// time reduction: accumulate the per-node Fisher AI contribution for this section
for (int time_offset = 0; time_offset < lengthOffset; time_offset++) {
JNDFloat refLambda = JND_Lambda[reference_lambda_offset];
JNDFloat calcedLambda = JND_Lambda[calculate_lambda_offset];
JNDFloat dLambdaCalculated = dAvalue > 0 ? (calcedLambda - refLambda) / dAvalue : 0;
if (backup_stage == 3) {
Buffer1[calculate_lambda_offset] = dLambdaCalculated;
}
/*
* calculating pre fisher AI
* from matlab
* fisher AI : Ts*(dL.^2./reshape(RefLamda(j,:,:),Nsec,Time) => into pre fisher AI
* VERY Important Correction: original division from matlab program incorrect:
* JNDFloat preFisherAIValue = refLambda>0 ? (dLambdaCalculated*dLambdaCalculated / refLambda / Fs) : 0;
* since its contradict eq 19 in miriam's article
*/
JNDFloat preFisherAIValue = (dLambdaCalculated*dLambdaCalculated / (Fs*refLambda)) ;
if (backup_stage == 4) {
Buffer1[calculate_lambda_offset] = preFisherAIValue;
}
preFisherAITimeReducedValue += preFisherAIValue;
reference_lambda_offset += sections;
calculate_lambda_offset += sections;
}
// NOTE(review): rsqrt followed by 1/(r*r) just reconstructs the accumulated sum (up to
// rounding) — presumably kept for symmetry with the CRLB form below; confirm intent
preFisherAITimeReducedValue = rsqrt(preFisherAITimeReducedValue);
JNDFloat preFisherAIValue = (T/Tlength)*nIHC[section_index] /(preFisherAITimeReducedValue* preFisherAITimeReducedValue);
if (backup_stage == 5) {
Buffer1[mean_rate_offset] = preFisherAIValue;
}
shared_acc_ai[section_index] = preFisherAIValue;
}
/*
* calculate pre fisher values before summering
* from matlab
* fisher rate : nIHC.*Tmean./RefMeanRate(j,:).*(dMeanRate(j,:).^2) => into pre fisher rate
*/
if (calculate_rms) {
JNDFloat MeanRateReferenced = MeanRate[mean_rate_reference_offset];
// all mean rates are actually multiplied by lambda base, so no needd to multiply by length offset on nominator
JNDFloat CRLB_RAValue = rsqrt(T / MeanRateReferenced*dMRate * dMRate);
if (backup_stage == 6) {
Buffer1[mean_rate_offset] = CRLB_RAValue;
}
shared_acc_rms[section_index] = nIHC[section_index] /(CRLB_RAValue*CRLB_RAValue);
}
__syncthreads();
// reducing spatial dimension for AI/RMS
// (calculate_ai / calculate_rms are kernel arguments, so these branches are uniform across the block)
for (int t_i = (SECTIONS >> 1); t_i >= 1; t_i >>= 1) {
__syncthreads();
if (section_index<t_i) {
if (calculate_ai) shared_acc_ai[section_index] = shared_acc_ai[section_index] + shared_acc_ai[section_index + t_i];
if (calculate_rms) shared_acc_rms[section_index] = shared_acc_rms[section_index] + shared_acc_rms[section_index + t_i];
}
}
__syncthreads();
// calculating fisher number for each block on AI/RMS
if (section_index == 0) {
int lambda_fisher_full_index = lambda_index*JNDIntervalsFull + globaldAIndex;
// scale by the per-type fiber population weight stored after the spont/sat entries
if (calculate_ai) FisherAISum[lambda_fisher_full_index] = shared_acc_ai[0] * CUDA_Nerves_Clusters[2*LAMBDA_COUNT+lambda_index];
if (calculate_rms) F_RA[lambda_fisher_full_index] = shared_acc_rms[0] * CUDA_Nerves_Clusters[2 * LAMBDA_COUNT + lambda_index];
}
__syncthreads();
}
// envelope function for GlobalCalculateJND, see detailed description in cochlea_common.h
// Host envelope: optionally computes dA on device, uploads the interval bookkeeping
// tables, runs the mean-rate kernel (if RMS requested) then the JND kernel, and copies
// the Fisher results (F_RA, FisherAISum) back to the host.
extern "C" void CudaCalculateJND(
bool calculate_ai,
bool calculate_rms,
int mean_size,
int fisher_size,
double SPLRefVal,
double Fs,
double scaleBMVelocityForLambdaCalculation,
double *nIHC,
int *JND_Calculated_Intervals,
int numOFJNDCalculated,
int *JND_Refrence_Intervals,
int numOFJNDReferences,
int handeledIntervalsJND,
int JNDIntervalsFull, // global
int JNDIntervals, // current input # of handeled intervals
int JNDIntervalHeadNodes,
int overlapNodes,
int JNDIntervalNodes,
int lengthOffset, // local not global
int *JND_Serial_Intervals_Positions,
int *JND_Interval_To_Reference,
JNDFloat *F_RA, // result for fisher rate not lambda summed, but time and space reduced
JNDFloat *FisherAISum, // result for fisher AI not lambda summed, but time and space reduced
int writeMatrixSize,
int calculatedMatrixSize,
double JND_Delta_Alpha_Length_Factor,
int JND_USE_Spont_Base_Reference_Lambda,
int Show_Run_Time,
bool calcdA,
bool show_generated_input_params_cuda ,
int backup_stage,// in case of viewing output id of backup stage
Log &outer_log
) noexcept(false) {
std::cout << "Calculating JND on GPU" << std::endl;
hipEvent_t start, stop;
cudaEventsCreate(start, stop, Show_Run_Time & 16);
// calculated dA if not already calculated
if (calcdA) {
// single block, one thread per interval
dim3 filtersGriddA(1, 1, 1);
dim3 filtersThreadsdA(JNDIntervals, 1, 1);
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
cudaCalculateDA KERNEL_ARGS2(filtersGriddA, filtersThreadsdA)(cudaHolderData.cuda_input_samples, cudaJNDHolder.cuda_jnd_params, JNDIntervalNodes, JNDIntervalHeadNodes, lengthOffset, overlapNodes);
outer_log.timeAtFlag(34, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "dA calculation for JND"), Show_Run_Time & 16);
}
// copy paramers to GPU
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
gpuAssert(hipMemcpy(cudaLambdaHolderData.cuda_nIHC, nIHC, SECTIONS*sizeof(double), hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(cudaJNDHolder.cuda_JND_Refrence_Intervals, JND_Refrence_Intervals, numOFJNDReferences*sizeof(int), hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(cudaJNDHolder.cuda_JND_Serial_Intervals_Positions, JND_Serial_Intervals_Positions, JNDIntervalsFull*sizeof(int), hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(cudaJNDHolder.cuda_JND_Interval_To_Reference, JND_Interval_To_Reference, numOFJNDCalculated*sizeof(int), hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(cudaJNDHolder.cuda_JND_Calculated_Intervals, JND_Calculated_Intervals, numOFJNDCalculated*sizeof(int), hipMemcpyHostToDevice));
outer_log.timeAtFlag(35, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "JND Memory preaparations"), Show_Run_Time & 16);
// block (x,y) = (interval, lambda type); one thread per cochlear section
dim3 filtersGrid(JNDIntervals, LAMBDA_COUNT, 1);
dim3 filtersThreads(SECTIONS, 1, 1);
if (show_generated_input_params_cuda) {
std::cout << "lengthOffset = " << lengthOffset << "\n"
<< "overlapNodes = " << overlapNodes << "\n"
<< "JND_Delta_Alpha_Length_Factor = " << JND_Delta_Alpha_Length_Factor << "\n"
<< "calculatedMatrixSize = " << calculatedMatrixSize << "\n"
<< "JNDIntervalHeadNodes = " << JNDIntervalHeadNodes << "\n"
<< "filtersGrid = " << showDIM3(filtersGrid) << "\n"
<< "filtersThreads = " << showDIM3(filtersThreads) << "\n";
}
if (calculate_rms) {
// mean rate of lambda (part of Eq. 17) calculate pre run to ensure device synchronization
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
GlobalCalculateMeanRate KERNEL_ARGS2(filtersGrid, filtersThreads)(
cudaJNDHolder.cuda_jnd_params,
cudaLambdaHolderData.cuda_JND_Lambda,
cudaJNDHolder.cuda_MeanRate,
lengthOffset,
calculatedMatrixSize,
JNDIntervals, // local
JNDIntervalNodes,
JNDIntervalHeadNodes,
overlapNodes,
JND_USE_Spont_Base_Reference_Lambda
);
outer_log.timeAtFlag(36, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "JND Mean Rate array calculation"), Show_Run_Time & 16);
}
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
// after mean rate ready can calculate the rest
GlobalCalculateJND KERNEL_ARGS2(filtersGrid, filtersThreads)(
calculate_ai,
calculate_rms,
cudaJNDHolder.cuda_jnd_params,
cudaLambdaHolderData.cuda_JND_Lambda,
cudaJNDHolder.cuda_MeanRate,
cudaLambdaHolderData.cuda_Buffer1,
cudaLambdaHolderData.cuda_nIHC,
Fs,
scaleBMVelocityForLambdaCalculation,
writeMatrixSize,
calculatedMatrixSize,
overlapNodes,
JNDIntervalHeadNodes,
JNDIntervalNodes,
lengthOffset,
JNDIntervals, // local
JNDIntervalsFull, // local
cudaJNDHolder.cuda_JND_Calculated_Intervals, // global
numOFJNDCalculated, // global
cudaJNDHolder.cuda_JND_Refrence_Intervals, // global
numOFJNDReferences, // global
handeledIntervalsJND, // already handeled intervals
cudaJNDHolder.cuda_JND_Serial_Intervals_Positions,
cudaJNDHolder.cuda_JND_Interval_To_Reference,
cudaJNDHolder.cuda_F_RA,
cudaJNDHolder.cuda_FisherAISum,
JND_Delta_Alpha_Length_Factor,
cudaJNDHolder.global_device_params,
calcdA ? 1 : 0,
JND_USE_Spont_Base_Reference_Lambda,
backup_stage
);
// NOTE(review): no explicit error check after the kernel launch; launch/execution errors
// presumably surface through the blocking hipMemcpy calls below — confirm
gpuAssert(hipMemcpy(F_RA, cudaJNDHolder.cuda_F_RA, fisher_size*sizeof(JNDFloat), hipMemcpyDeviceToHost));
gpuAssert(hipMemcpy(FisherAISum, cudaJNDHolder.cuda_FisherAISum, fisher_size*sizeof(JNDFloat), hipMemcpyDeviceToHost));
outer_log.timeAtFlag(37, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "JND calculation"), Show_Run_Time & 16);
}
/**
 * generating input per profile
 *
 * Builds one input interval per profile row (blockIdx.y) by mixing a scaled
 * signal component (either a cosine synthesized from the profile frequency,
 * or a preloaded Signal buffer) with scaled white noise; both components are
 * DC-corrected and divided by their energy normalization factors.
 *
 * Expected launch: gridDim.y = number of calculated profiles,
 * gridDim.x * blockDim.x = IntervalLength (one thread per interval sample).
 */
__global__ void CudaGenerateInputFromProfile(
    device_jnd_params *input_profiles,
    float *WN,             // white noise array (one interval long)
    float *Signal,         // signal array, read only when signal_mode != 0
    float *input_samples,  // output: generated samples, one interval per blockIdx.y
    double wn_dc,          // noise dc to be decreased
    double wn_energy_normalize_factor, // factor to normalize energy interval
    int signal_mode,       // 0 - synthesize cosine from profile frequency, 1 - use Signal array
    double signal_dc,      // signal dc to be decreased (used only in signal_mode)
    double signal_energy_normalize_factor, // factor to normalize signal energy interval
    int startProfile,      // first profile index handled by this launch
    int calculatedProfiles,// NOTE(review): unused inside this kernel
    int overlapNodes,      // for start offset; NOTE(review): unused inside this kernel
    int IntervalLength,    // number of nodes on actual input
    int JND_Interval_Head, // head length; time axis is negative inside the head
    float Fs               // sample frequency
) {
    int profile_index = startProfile + blockIdx.y;
    int interval_position = blockIdx.x*blockDim.x + threadIdx.x;
    int input_sample_position = blockIdx.y*IntervalLength + interval_position;
    double dA = input_profiles[profile_index].dA;  // signal amplitude from the profile
    double Wn = input_profiles[profile_index].Wn;  // noise amplitude from the profile
    double frequency = 0;
    if (signal_mode == 0) {
        frequency = input_profiles[profile_index].frequency;
    }
    // time is measured from the end of the interval head so the tone is
    // phase-aligned with the start of the interval body
    double time = double(interval_position - JND_Interval_Head) / double(Fs);
    double timeCut = 2 * PI*frequency*time;
    double sum = 0.0;
    if (signal_mode) sum = (dA*(double(Signal[interval_position]) - signal_dc) / signal_energy_normalize_factor);
    else sum = (dA*cos(timeCut) / signal_energy_normalize_factor);
    sum=sum+ (Wn*(double(WN[interval_position]) - wn_dc) / wn_energy_normalize_factor);
    input_samples[input_sample_position] = float(sum);
}
// calculates Hearing aid effect on the signal, done before BM velocity calculation
// Applies a causal FIR filter to each interval independently: each output
// sample is the dot product of the taps with the current and previous samples
// of the *same* interval (the convolution never reaches back into the
// previous interval).
// Expected launch: gridDim.y = one row per interval,
// gridDim.x * blockDim.x = IntervalLength.
__global__ void CudaProcessSignalTroughHearingAID(
    device_jnd_params *input_profiles,   // NOTE(review): unused here
    float *input_samples,                // input samples to load
    float *input_samples_auxivulary,     // output buffer for the filtered signal
    int startProfile,                    // NOTE(review): unused here
    int overlapNodes,                    // for start offset; NOTE(review): unused here
    int IntervalLength,                  // number of nodes on actual input
    int JND_Interval_Head,               // NOTE(review): unused here
    float Fs,
    float *fir_transfer_function,        // FIR taps; tap 0 applies to the current sample
    int fir_transfer_function_length     // number of FIR taps
) {
    int interval_position = blockIdx.x*blockDim.x + threadIdx.x;
    int input_sample_position = blockIdx.y*IntervalLength + interval_position;
    float function_summary = 0.0f;// input_samples[input_sample_position];
    // BUGFIX: was min(interval_position, fir_transfer_function_length), which
    // dropped one available tap at every position and wrote 0 for the first
    // sample of each interval. A causal convolution at position k uses taps
    // 0..min(k, L-1), i.e. min(k+1, L) taps — matching the equivalent window
    // computation in CudaProcessSignalTroughHearingAIDIIR (i - start + 1).
    int backward_positions = min(interval_position + 1, fir_transfer_function_length);
    for (int i = 0; i < backward_positions; i++) {
        function_summary = fmaf(fir_transfer_function[i], input_samples[input_sample_position - i], function_summary);
    }
    input_samples_auxivulary[input_sample_position] = function_summary;
}
// calculates Hearing aid effect on the signal can calculate for IIR filters as well, done before BM velocity calculation
// NOTE(review): despite the name, the coefficients are applied to the *input*
// samples (taps 1..L-1 accumulated on top of the unfiltered current sample),
// not to previously computed outputs, so this is a feed-forward pass — confirm
// this matches the intended IIR formulation.
// One thread processes one whole interval sequentially, which preserves
// sample order should a truly recursive form be substituted later.
__global__ void CudaProcessSignalTroughHearingAIDIIR(
    device_jnd_params *input_profiles,   // NOTE(review): unused here
    float *input_samples,                // input samples to load
    float *input_samples_auxivulary,     // output buffer (for temporary saving)
    int startProfile,                    // NOTE(review): unused here
    int overlapNodes,                    // for start offset; NOTE(review): unused here
    int IntervalLength,                  // number of nodes on actual input
    int JND_Interval_Head,               // NOTE(review): unused here
    float Fs,
    float *iir_transfer_function,        // coefficients; index 0 is implicitly 1 (loop starts at j=1)
    int iir_transfer_function_length
) {
    // one thread <-> one interval
    int interval_index = blockIdx.x*blockDim.x + threadIdx.x;
    int input_sample_position = interval_index*IntervalLength;
    int input_sample_end_position = input_sample_position + IntervalLength;
    for (int i = input_sample_position; i < input_sample_end_position; i++) {
        // number of samples available looking backwards within this interval
        int backward_positions = min(i - input_sample_position+1, iir_transfer_function_length);
        float function_summary = input_samples[i];// input_samples[input_sample_position];
        for (int j = 1; j < backward_positions; j++) {
            function_summary = fmaf(iir_transfer_function[j], input_samples[i-j], function_summary);
        }
        input_samples_auxivulary[i] = function_summary;
    }
}
/* Standard C Function: Greatest Common Divisor */
/* Euclid's algorithm: repeatedly replace (a, b) with (b mod a, a) until a
 * reaches 0, at which point b holds the GCD. gcd(x, 0) == gcd(0, x) == x. */
int
gcd(int a, int b) {
    while (a != 0) {
        int remainder = b % a;
        b = a;
        a = remainder;
    }
    return b;
}
// Cached DC offset and energy normalization factor for the white-noise
// buffer, computed on the first parameter set inside generateInputFromProfile
// and reused on subsequent calls.
double wn_dc;
double wn_energy_normalize_factor;
// Same cached quantities for the signal (or synthesized pure-tone) component.
double signal_dc;
double signal_energy_normalize_factor;
// Applies the selected sigma-normalization policy to an already computed
// energy normalization factor:
//   0 - normalize sigma to 1 (factor left untouched)
//   1 - normalize the sigma summary to 1 (divide by sqrt of the node count)
//   2 - normalize the sigma summary to a given time interval (only when the
//       interval is positive)
//   3 - no energy normalization (factor forced to 1; identical to option 1
//       for a pure tone)
// dc and the expected-value range parameters are part of the shared calling
// convention but are not modified here.
void calculateDCANDNornalizationPostProcess(
    int Normalize_Sigma_Type,
    double Normalize_Energy_To_Given_Interval,
    int start_dc_expected_value_calculation,
    int end_dc_expected_value_calculation,
    int start_dc_normalized_value_calculation,
    int end_dc_normalized_value_calculation,
    float Fs,
    double &dc,
    double &energy_normalize_factor
) {
    switch (Normalize_Sigma_Type) {
    case 1: {
        // average energy normalized: divide by sqrt of the normalized node count
        double node_count = static_cast<double>(end_dc_normalized_value_calculation - start_dc_normalized_value_calculation);
        energy_normalize_factor /= sqrt(node_count);
        break;
    }
    case 2:
        // normalize to a given time interval, expressed in samples as Fs * seconds
        if (Normalize_Energy_To_Given_Interval > 0) {
            energy_normalize_factor /= sqrt(Fs*Normalize_Energy_To_Given_Interval);
        }
        break;
    case 3:
        // energy not normalized
        energy_normalize_factor = 1;
        break;
    default:
        // type 0 (or any other value): leave the factor unchanged
        break;
    }
}
// Computes the DC offset and energy normalization factor for a synthesized
// pure tone: the DC is zero by construction, and the default factor
// normalizes sigma to 1 over the normalized node range before the selected
// sigma-normalization policy is applied on top.
void calculateDCANDNornalizationFactorPureTone(
    int Normalize_Sigma_Type, // policy value; see calculateDCANDNornalizationPostProcess
    double Normalize_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
    double Remove_Generated_DC,// unused for a pure tone (DC is identically zero)
    int start_dc_expected_value_calculation,
    int end_dc_expected_value_calculation,
    int start_dc_normalized_value_calculation,
    int end_dc_normalized_value_calculation,
    float Fs,
    double &dc,
    double &energy_normalize_factor
) {
    // a pure tone carries no DC component
    dc = 0.0;
    // default: sigma normalized to 1 over the normalized node range
    int normalized_nodes = end_dc_normalized_value_calculation - start_dc_normalized_value_calculation;
    energy_normalize_factor = sqrt(static_cast<double>(normalized_nodes));
    // apply the selected sigma-normalization policy on top of the default
    calculateDCANDNornalizationPostProcess(
        Normalize_Sigma_Type,
        Normalize_Energy_To_Given_Interval,
        start_dc_expected_value_calculation,
        end_dc_expected_value_calculation,
        start_dc_normalized_value_calculation,
        end_dc_normalized_value_calculation,
        Fs,
        dc,
        energy_normalize_factor
    );
    //printf("dc=%.4e,energy_normalize_factor=%.4e\n", dc, energy_normalize_factor);
}
// Computes the DC offset (scaled mean) and the energy normalization factor
// of a sample buffer:
//   dc     = Remove_Generated_DC * mean(Source over the expected-value range),
//            so Remove_Generated_DC acts as an on/off (or scaling) switch;
//   factor = sqrt(sum of squared DC-corrected samples over the normalized
//            range), then adjusted by the selected sigma-normalization policy
//            in calculateDCANDNornalizationPostProcess.
void calculateDCANDNornalizationFactor(
    float *Source,
    int Normalize_Sigma_Type, // policy value; see calculateDCANDNornalizationPostProcess
    double Normalize_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
    double Remove_Generated_DC,//if 1 removes the 0 frequency value from noise
    int start_dc_expected_value_calculation,
    int end_dc_expected_value_calculation,
    int start_dc_normalized_value_calculation,
    int end_dc_normalized_value_calculation,
    float Fs,
    double &dc,
    double &energy_normalize_factor
) {
    // scaled mean of the raw samples over the expected-value range
    dc = Source[start_dc_expected_value_calculation];
    for (int idx = start_dc_expected_value_calculation + 1; idx < end_dc_expected_value_calculation; idx++) {
        dc = dc + Source[idx];
    }
    dc = Remove_Generated_DC * dc / static_cast<double>(end_dc_expected_value_calculation - start_dc_expected_value_calculation);
    // sum of squared DC-corrected samples over the normalized range.
    // BUGFIX: the first term previously squared the raw sample without
    // subtracting dc, inconsistent with every subsequent term of the sum
    // (observable whenever Remove_Generated_DC != 0).
    double first_deviation = static_cast<double>(Source[start_dc_normalized_value_calculation]) - dc;
    energy_normalize_factor = first_deviation * first_deviation;
    for (int idx = start_dc_normalized_value_calculation + 1; idx < end_dc_normalized_value_calculation; idx++) {
        energy_normalize_factor = energy_normalize_factor + ((Source[idx] - dc) * (Source[idx] - dc));
    }
    energy_normalize_factor = sqrt(energy_normalize_factor);
    // apply the selected sigma-normalization policy on top of the raw factor
    calculateDCANDNornalizationPostProcess(
        Normalize_Sigma_Type,
        Normalize_Energy_To_Given_Interval,
        start_dc_expected_value_calculation,
        end_dc_expected_value_calculation,
        start_dc_normalized_value_calculation,
        end_dc_normalized_value_calculation,
        Fs,
        dc,
        energy_normalize_factor
    );
    //printf("dc=%.4e,energy_normalize_factor=%.4e\n", dc, energy_normalize_factor);
}
// Computes per-block error tolerances and uploads them to the device.
// For each calculated profile, an m1/sp tolerance and a throw tolerance are
// derived from the base parameters — optionally scaled by
// 10^(factor * dBSPLSignal) when Relative_Error_Parameters > 0 — and
// replicated across the profile's Blocks_Per_Interval block slots.
// NOTE(review): is_first_time_for_parameters_set is currently unused here.
// NOTE(review): there is no bounds check — callers must guarantee
// Blocks_Per_Interval * calculatedProfiles <= MAX_NUMBER_OF_BLOCKS, or the
// host arrays overflow; confirm against all call sites.
extern "C" void setupToleranceProfile(
    device_jnd_params *profiles,
    bool is_first_time_for_parameters_set, // for fixing arguments just one time
    float Max_M1_SP_Error_Parameter,
    float Max_Tolerance_Parameter,
    int Relative_Error_Parameters,
    float M1_SP_Fix_Factor,
    float Tolerance_Fix_Factor,
    int Blocks_Per_Interval,
    int from_profile_index,
    int calculatedProfiles
) noexcept(false) {
    float host_model_max_m1_sp_tolerance[MAX_NUMBER_OF_BLOCKS];
    float host_max_throw_tolerance[MAX_NUMBER_OF_BLOCKS];
    for (int i = 0; i < calculatedProfiles; i++) {
        int globalProfile = from_profile_index + i;
        float m1_sp_fix_factor = 1.0f;
        float throw_tolerance_factor = 1.0f;
        if (Relative_Error_Parameters > 0) {
            // relative mode: tolerances scale exponentially with the profile's signal level
            m1_sp_fix_factor = powf(10.0f, M1_SP_Fix_Factor*static_cast<float>(profiles[globalProfile].dBSPLSignal));
            throw_tolerance_factor = powf(10.0f, Tolerance_Fix_Factor*static_cast<float>(profiles[globalProfile].dBSPLSignal));
        }
        // replicate the profile's tolerances over all of its block slots
        for (int j = 0; j < Blocks_Per_Interval; j++) {
            int model_index = Blocks_Per_Interval*i + j;
            host_model_max_m1_sp_tolerance[model_index] = Max_M1_SP_Error_Parameter*m1_sp_fix_factor;
            host_max_throw_tolerance[model_index] = Max_Tolerance_Parameter*throw_tolerance_factor;
        }
    }
    // full-size upload (MAX_NUMBER_OF_BLOCKS entries) even when fewer slots were written
    gpuAssert(hipMemcpy(cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance, host_model_max_m1_sp_tolerance, MAX_NUMBER_OF_BLOCKS*sizeof(float), hipMemcpyHostToDevice));
    gpuAssert(hipMemcpy(cudaHolderGeneratedData.generated_model_throw_tolerance, host_max_throw_tolerance, MAX_NUMBER_OF_BLOCKS*sizeof(float), hipMemcpyHostToDevice));
}
// Uploads the JND profile array into the device-side parameter buffer.
// profilesLoaded acts as a guard so a profile set already resident on the
// device is not copied again.
extern "C" void uploadProfiles(
    device_jnd_params *profiles,
    int numOfProfiles,
    bool profilesLoaded // upload profiles array only if false
) noexcept(false) {
    if (!profilesLoaded) {
        gpuAssert(hipMemcpy(cudaJNDHolder.cuda_jnd_params, profiles, numOfProfiles*sizeof(device_jnd_params), hipMemcpyHostToDevice));
    }
}
// Generates device input intervals for a batch of JND profiles:
//  1. uploads profiles / noise / signal buffers on first use,
//  2. (first parameter set only) computes and caches DC and energy
//     normalization factors in the module globals,
//  3. uploads per-block tolerance profiles,
//  4. launches CudaGenerateInputFromProfile to mix signal + noise per profile,
//  5. optionally filters the result through the hearing-aid FIR and/or IIR
//     transfer functions (skipped when both are the identity),
//  6. optionally copies the generated input back to the host for inspection.
// NOTE(review): the events created here are assumed to be destroyed inside
// cudaEventsStopQueryTimer — confirm, otherwise events leak per call.
extern "C" void generateInputFromProfile(
    device_jnd_params *profiles,
    float *WN, // white noise array, single interval length white noise array, expected power level (linear of 1)
    int wn_length, // max length of white noise
    float *Signal, //signal array, single interval length white noise array, expected power level (linear of 1)
    int signal_length, // max length of signal noise
    int signal_mode, // 0 - is for normal frequencies, 1 - is for signal array
    int Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
    int Normalize_Sigma_Type_Signal,
    double Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
    double Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
    int start_dc_expected_value_calculation,
    int end_dc_expected_value_calculation,
    int start_dc_normalized_value_calculation,
    int end_dc_normalized_value_calculation,
    int numOfProfiles,
    bool profilesLoaded, // upload profiles array only if false
    int from_profile_index,
    int calculatedProfiles,
    int overlapNodes, // for start offset
    int IntervalLength, //number of nodes on actual input
    int JND_Interval_Head,
    int JND_Interval_Actual_Length,
    float Fs, // sample frequency
    int Show_Generated_Input, // show generated input from file
    float *target_input, // if Show_Generated_Input is true it will copy here the result with nodes fix per position
    bool Show_Generated_Configuration, // % for debugging shw profiles of created input
    bool is_first_time_for_parameters_set, // for fixing arguments just one time
    float Max_M1_SP_Error_Parameter,
    float Max_Tolerance_Parameter,
    int Relative_Error_Parameters,
    float M1_SP_Fix_Factor,
    float Tolerance_Fix_Factor,
    int Blocks_Per_Interval,
    int Show_Run_Time,
    float *fir_transfer_function,
    int fir_transfer_function_length,
    float *iir_transfer_function,
    int iir_transfer_function_length,
    Log &outer_log
) noexcept(false) {
    hipEvent_t start, stop;
    cudaEventsCreate(start, stop, Show_Run_Time & 32);
    cudaEventsStartTimer(start, stop, Show_Run_Time & 32);
    // one-time upload of profiles, noise and (optionally) the signal buffer
    if (!profilesLoaded) {
        gpuAssert(hipMemcpy(cudaJNDHolder.cuda_jnd_params, profiles, numOfProfiles*sizeof(device_jnd_params), hipMemcpyHostToDevice));
        gpuAssert(hipMemcpy(cudaSignalHolder.cuda_WN, WN, wn_length*sizeof(float), hipMemcpyHostToDevice));
        if (signal_mode) {
            gpuAssert(hipMemcpy(cudaSignalHolder.cuda_Signal, Signal, signal_length*sizeof(float), hipMemcpyHostToDevice));
        }
    }
    // an end bound of 0 means "use the whole interval"
    if (end_dc_normalized_value_calculation == 0) end_dc_normalized_value_calculation = IntervalLength;
    if (end_dc_expected_value_calculation == 0) end_dc_expected_value_calculation = IntervalLength;
    if (is_first_time_for_parameters_set) {
        // cache the noise DC / normalization factor in the module globals
        calculateDCANDNornalizationFactor(
            WN,
            Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
            Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
            Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
            start_dc_expected_value_calculation,
            end_dc_expected_value_calculation,
            start_dc_normalized_value_calculation,
            end_dc_normalized_value_calculation,
            Fs,
            wn_dc,
            wn_energy_normalize_factor
        );
        if (signal_mode) {
            calculateDCANDNornalizationFactor(
                Signal,
                Normalize_Sigma_Type_Signal, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
                Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
                Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
                start_dc_expected_value_calculation,
                end_dc_expected_value_calculation,
                start_dc_normalized_value_calculation,
                end_dc_normalized_value_calculation,
                Fs,
                signal_dc,
                signal_energy_normalize_factor
            );
        }
        else {
            // normalize pure tones to 1
            calculateDCANDNornalizationFactorPureTone(
                Normalize_Sigma_Type_Signal, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
                Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
                Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
                start_dc_expected_value_calculation,
                end_dc_expected_value_calculation,
                start_dc_normalized_value_calculation,
                end_dc_normalized_value_calculation,
                Fs,
                signal_dc,
                signal_energy_normalize_factor
            );
        }
    }
    // launch shape: Blocks_Per_Interval blocks per interval; re-balance below
    // if that would exceed the 1024 threads-per-block hardware limit
    int threadsPerBlock = IntervalLength / Blocks_Per_Interval;
    int blocksOnxDim = Blocks_Per_Interval;
    if (threadsPerBlock > 1024) {
        // find a divisor of threadsPerBlock close to its square root
        int threads_number = static_cast<int>(ceilf(sqrtf(float(threadsPerBlock))));
        while (gcd(threads_number, threadsPerBlock) != threads_number) threads_number++;
        if (threads_number > 1024) {
            threads_number = threadsPerBlock / threads_number;
        }
        blocksOnxDim = Blocks_Per_Interval*threadsPerBlock / threads_number;
        threadsPerBlock = threads_number;
    }
    setupToleranceProfile(
        profiles,
        is_first_time_for_parameters_set, // for fixing arguments just one time
        Max_M1_SP_Error_Parameter,
        Max_Tolerance_Parameter,
        Relative_Error_Parameters,
        M1_SP_Fix_Factor,
        Tolerance_Fix_Factor,
        Blocks_Per_Interval,
        from_profile_index,
        calculatedProfiles
    );
    dim3 filtersGrid(blocksOnxDim, calculatedProfiles, 1);
    dim3 filtersThreads(threadsPerBlock, 1, 1);
    if (Show_Generated_Configuration) {
        std::cout << std::boolalpha << "Show Generated Input: " << Show_Generated_Input << std::endl;
        std::cout << "filtersGrid" << showDIM3(filtersGrid) << std::endl;
        std::cout << "filtersThreads" << showDIM3(filtersThreads) << std::endl;
        std::cout << "IntervalLength = " << IntervalLength << std::endl;
        std::cout << "overlapNodes = " << overlapNodes << std::endl;
        std::cout << "calculatedProfiles = " << calculatedProfiles << std::endl;
        std::cout << "from_profile_index = " << from_profile_index << std::endl;
        std::cout << "Fs = " << Fs << std::endl;
        std::cout << "Normalize_Sigma_Type = " << Normalize_Sigma_Type << std::endl;
        std::cout << "WN(DC) = " << wn_dc << std::endl;
        std::cout << "WN(Normal_Factor) = " << wn_energy_normalize_factor << std::endl;
    }
    CudaGenerateInputFromProfile KERNEL_ARGS2(filtersGrid, filtersThreads)(
        cudaJNDHolder.cuda_jnd_params,
        cudaSignalHolder.cuda_WN, // white noise array
        cudaSignalHolder.cuda_Signal,
        cudaHolderData.cuda_input_samples,
        wn_dc,
        wn_energy_normalize_factor, // factor to normalize energy interval
        signal_mode,
        signal_dc,
        signal_energy_normalize_factor,
        from_profile_index,
        calculatedProfiles,
        overlapNodes, // for start offset
        IntervalLength, //number of nodes on actual input
        JND_Interval_Head,
        Fs
    );
    // run the hearing-aid path only when either transfer function is not the identity
    if (fir_transfer_function_length > 1 || (fir_transfer_function_length > 0 && fir_transfer_function[0] != 1) || iir_transfer_function_length > 1 || (iir_transfer_function_length> 0 && iir_transfer_function[0] != 1) ) {
        // processing hear the transfer function
        float *cuda_transfer_function;
        float *cuda_input_samples_auxivulary;
        int processed_input_length = static_cast<int>(filtersGrid.x*filtersGrid.y*filtersGrid.z*filtersThreads.x*filtersThreads.y*filtersThreads.z);
        // single scratch buffer sized for the longer of the two coefficient sets
        gpuAssert(hipMalloc((void **)&cuda_transfer_function, max(fir_transfer_function_length,iir_transfer_function_length)*sizeof(float)));
        gpuAssert(hipMemcpy(cuda_transfer_function, fir_transfer_function, fir_transfer_function_length*sizeof(float), hipMemcpyHostToDevice));
        gpuAssert(hipMalloc((void **)&cuda_input_samples_auxivulary, processed_input_length*sizeof(float)));
        CudaProcessSignalTroughHearingAID KERNEL_ARGS2(filtersGrid, filtersThreads)(
            cudaJNDHolder.cuda_jnd_params,
            cudaHolderData.cuda_input_samples, // input samples to load
            cuda_input_samples_auxivulary, // for temporary saving
            from_profile_index,
            overlapNodes, // for start offset
            IntervalLength, //number of nodes on actual input
            JND_Interval_Head,
            Fs,
            cuda_transfer_function,
            fir_transfer_function_length
        );
        if (iir_transfer_function_length > 1 || (iir_transfer_function_length > 0 && iir_transfer_function[0] != 1) ) {
            // reverse use of buffer/ input to avoid copying completely
            int threadsIIR = calculatedProfiles;
            int blocksIIR = 1;
            gpuAssert(hipMemcpy(cuda_transfer_function, iir_transfer_function, iir_transfer_function_length*sizeof(float), hipMemcpyHostToDevice));
            // one thread per interval; factor the count into blocks x threads
            int threads_number_iir = static_cast<int>(ceilf(sqrtf(float(threadsIIR))));
            while (gcd(threads_number_iir, threadsIIR) != threads_number_iir) threads_number_iir++;
            blocksIIR = threadsIIR / threads_number_iir;
            threadsIIR = threads_number_iir;
            dim3 filtersGridIIR(blocksIIR, 1, 1);
            dim3 filtersThreadsIIR(threadsIIR, 1, 1);
            CudaProcessSignalTroughHearingAIDIIR KERNEL_ARGS2(filtersGridIIR, filtersThreadsIIR)(
                cudaJNDHolder.cuda_jnd_params,
                cuda_input_samples_auxivulary, // input samples to load
                cudaHolderData.cuda_input_samples, // for temporary saving
                from_profile_index,
                overlapNodes, // for start offset
                IntervalLength, //number of nodes on actual input
                JND_Interval_Head,
                Fs,
                cuda_transfer_function,
                iir_transfer_function_length
            );
        } else {
            gpuAssert(hipMemcpy(cudaHolderData.cuda_input_samples, cuda_input_samples_auxivulary, processed_input_length*sizeof(float), hipMemcpyDeviceToDevice));
        }
        gpuAssert(hipFree(cuda_transfer_function));
        gpuAssert(hipFree(cuda_input_samples_auxivulary));
    }
    // BUGFIX(readability): was "Show_Generated_Input & 1 > 0", which parses as
    // "Show_Generated_Input & (1 > 0)" because relational > binds tighter than
    // bitwise &; parenthesized to state the intended bit test explicitly
    // (the evaluated result happens to be identical).
    if ((Show_Generated_Input & 1) > 0) {
        gpuAssert(hipMemcpy(target_input, cudaHolderData.cuda_input_samples, calculatedProfiles*IntervalLength*sizeof(float), hipMemcpyDeviceToHost));
    }
    outer_log.timeAtFlag(43, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 32, "Input Generation"), Show_Run_Time & 32);
}
// fixed lambda values for output
// Subtracts the per-lambda-row offset (CUDA_Nerves_Clusters, indexed by
// blockIdx.y) from every lambda node, clamping the result at 0; when
// cuda_buffer_update is set, also records into cuda_Buffer the original value
// clamped from below by the same offset.
// Expected launch: gridDim.y selects the lambda array, and
// gridDim.x * blockDim.x covers the node index within one array.
template<typename T> __global__ void CUDAFIXJND_Lambda(
    volatile T *cuda_Lambda,
    volatile T *cuda_Buffer,
    int cuda_buffer_update) {
    int ind = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*blockIdx.y;
    float fix_spikes = CUDA_Nerves_Clusters[blockIdx.y];
    if (cuda_buffer_update) {
        cuda_Buffer[ind] = T(fmaxf(fix_spikes, float(cuda_Lambda[ind])));
    }
    cuda_Lambda[ind] = T(fmaxf(0.0f, float(cuda_Lambda[ind]) - fix_spikes));
    // NOTE(review): this trailing barrier has no visible effect here (no shared
    // memory is used and the kernel ends immediately after); likely removable.
    __syncthreads();
}
// explicit instantiations for the two lambda precisions used by the model
template __global__ void CUDAFIXJND_Lambda<float>(volatile float *cuda_Lambda,
    volatile float *cuda_Buffer,
    int cuda_buffer_update);
template __global__ void CUDAFIXJND_Lambda<double>(volatile double *cuda_Lambda,
    volatile double *cuda_Buffer,
    int cuda_buffer_update);
// Launches CUDAFIXJND_Lambda over the full lambda matrix (one grid row per
// lambda array, one block per time node, one thread per section) and logs the
// kernel time under flag 44. When cuda_buffer_update != 0 the kernel also
// snapshots the pre-fix lambda values into cuda_buffer.
template<class T> extern void updateCUDALambdaArray(T *lambda_array,T* cuda_buffer, size_t calc_time_nodes, int sections,int Show_Run_Time,int Show_Device_Data,int cuda_buffer_update,Log &outer_log) {
    dim3 grid(calc_time_nodes, LAMBDA_COUNT, 1);
    dim3 thrds(sections, 1, 1);
    if (Show_Device_Data & 16) {
        std::cout << "CUDAFIXJND_Lambda<<<" << showDIM3(grid) << "," << showDIM3(thrds) << " >>>(lambda_array);" << std::endl;
    }
    hipEvent_t start, stop;
    cudaEventsCreate(start, stop, Show_Run_Time & 32);
    cudaEventsStartTimer(start, stop, Show_Run_Time & 32);
    // use the portability macro for the launch syntax, consistent with every
    // other kernel launch in this file (was a raw <<< >>> launch)
    CUDAFIXJND_Lambda<T> KERNEL_ARGS2(grid, thrds)(lambda_array, cuda_buffer, cuda_buffer_update);
    // surface launch-configuration errors immediately; kernel launches do not
    // return a status on their own
    gpuAssert(hipGetLastError());
    outer_log.timeAtFlag(44, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 32, "Fix Lambda"), Show_Run_Time & 32);
}
| ba455ebfa2728b9bc5c78eca72b0fbea1ef9be4b.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <stdexcept>
#include <vector>
#include <string>
#include <cmath>
#define __CUDA_INTERNAL_COMPILATION__
#include <math_functions.h>
#undef __CUDA_INTERNAL_COMPILATION__
//#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
#include <device_functions.h>
#include <helper_functions.h>
#include <device_double_functions.h>
#include <helper_cuda.h>
#include <cuda.h>
typedef float2 Complex;
#include "const.h"
#include "cochlea_common.h"
#include "cochlea.cuh"
//#include <thrust\device_vector.h>
//#include <thrust\host_vector.h>
//#include <thrust\fill.h>
#ifdef CUDA_MEX_PROJECT
#include <mex.h>
#endif
// nvcc does not seem to like variadic macros, so we have to define
// one for each kernel parameter list:
#ifdef __CUDACC__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
#ifndef gpuAssert
#define gpuAssert( condition ) { if( (condition) != 0 ) { printf( "\n FAILURE %s in %s, line %d\n", cudaGetErrorString(condition), __FILE__, __LINE__ ); throw std::runtime_error("GPU Failure aborts..."); } }
#endif
// Holder for device buffers carrying per-block generated model data
// (tolerances, power levels and sample indexes). Allocation is idempotent:
// generated_sections doubles as the "is allocated" flag and slot count.
struct cudaHolderGeneratedData {
    float *generated_model_max_m1_sp_tolerance;   // per-block m1/sp error tolerance
    float *generated_model_throw_tolerance;       // per-block throw tolerance
    float *generated_calculated_power_array; // in case of loaded from input boundary calculation write max power in dBSPL
    int *generated_model_out_sample_index;        // per-block output sample index
    int *generated_model_end_sample_index;        // per-block end sample index
    int generated_sections = 0;                   // 0 = not allocated, otherwise number of block slots
    // Allocates all per-block device arrays (MAX_NUMBER_OF_BLOCKS slots each).
    // Safe to call repeatedly; only the first call after release allocates.
    void allocateGenerated() {
        if (generated_sections == 0) {
            generated_sections = MAX_NUMBER_OF_BLOCKS;
            int blocks_pointers_size = generated_sections * sizeof(float);
            int blocks_pointers_size_int = generated_sections * sizeof(int);
            // generated array for blocks thresholds
            gpuAssert(cudaMalloc((void **)&generated_calculated_power_array, blocks_pointers_size));
            gpuAssert(cudaMalloc((void **)&generated_model_max_m1_sp_tolerance, blocks_pointers_size));
            gpuAssert(cudaMalloc((void **)&generated_model_throw_tolerance, blocks_pointers_size));
            gpuAssert(cudaMalloc((void **)&generated_model_out_sample_index, blocks_pointers_size_int));
            gpuAssert(cudaMalloc((void **)&generated_model_end_sample_index, blocks_pointers_size_int));
        }
    }
    // Frees all per-block device arrays, nulls the pointers and resets the
    // allocation flag. Safe to call repeatedly.
    void releaseGenerated() {
        if (generated_sections > 0) {
            generated_sections = 0;
            // generated array for blocks thresholds
            gpuAssert(cudaFree(generated_calculated_power_array));
            generated_calculated_power_array = NULL;
            gpuAssert(cudaFree(generated_model_max_m1_sp_tolerance));
            generated_model_max_m1_sp_tolerance = NULL;
            gpuAssert(cudaFree(generated_model_throw_tolerance));
            generated_model_throw_tolerance = NULL;
            gpuAssert(cudaFree(generated_model_end_sample_index));
            generated_model_end_sample_index = NULL;
            gpuAssert(cudaFree(generated_model_out_sample_index));
            generated_model_out_sample_index = NULL;
        }
    }
} cudaHolderGeneratedData;
// Caches the Aihc table in the device symbol model_Aihc and tracks whether it
// has been uploaded, so the SECTIONS x LAMBDA_COUNT table is copied at most
// once per enable cycle.
struct cudaModelAihc {
    int aihc_loaded = 0;  // 1 once model_Aihc holds the current table
    // nonzero when the Aihc table is already resident on the device
    int is_loaded() { return aihc_loaded; }
    // Uploads Aihc to the device symbol unless it is already loaded.
    void loadAihc(float *Aihc) {
        if (!is_loaded()) {
            gpuAssert(cudaMemcpyToSymbol(model_Aihc, Aihc, SECTIONS*LAMBDA_COUNT * sizeof(float),0,cudaMemcpyHostToDevice));
            aihc_loaded = 1;
        }
    }
    // Forces the next loadAihc() call to re-upload the table.
    void enableLoadAihc() {
        aihc_loaded = 0;
    }
} cudaModelAihc;
struct cudaHolderData {
int cochlear_parametrs_initialized = 0;
int cochlea_sections;
float *cuda_input_samples;
float *cuda_saved_speeds;
float *cuda_Rd;
float *cuda_Sd;
float *cuda_Qd;
float *cuda_Yd;
float *cuda_gammad;
float *converge_speed;
float *converge_speed_blocks;
float *cuda_massd;
float *cuda_Md;
float *cuda_Ud;
float *cuda_Ld;
float *cuda_S_ohcd;
float *cuda_S_tmd;
float *cuda_R_tmd;
int *time_filter_params;
int last_saved_nodes_per_time_block_for_cuda;
int *cuda_Failed_Converged_Time_Node;
int *cuda_Failed_Converged_Blocks;
float *cuda_Converged_Time_Node;
float *cuda_Converged_Blocks;
float *cuda_convergence_jacoby_loops_per_iteration;
float *cuda_convergence_jacoby_loops_per_iteration_blocks;
size_t numBlocks_data = 0;
size_t inputBufferNodes_data = 0;
size_t resultBufferNodes_data = 0;
void allocateCochlearData(const int& Sections) {
if (cochlear_parametrs_initialized == 0) {
cochlea_sections = Sections;
cochlear_parametrs_initialized = 1;
int cochlear_allocated = cochlea_sections * sizeof(float);
// cuda Rd,Sd,Qd,Yd
gpuAssert(cudaMalloc((void **)&cuda_Rd, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_Sd, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_Qd, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_Yd, cochlear_allocated));
// cuda S_ohcd,S_tmd,gammad,R_tmd
gpuAssert(cudaMalloc((void **)&cuda_S_ohcd, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_S_tmd, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_gammad, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_R_tmd, cochlear_allocated));
// cuda massd,Md,Ud,Ld
gpuAssert(cudaMalloc((void **)&cuda_massd, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_Md, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_Ud, cochlear_allocated));
gpuAssert(cudaMalloc((void **)&cuda_Ld, cochlear_allocated));
}
}
void releaseCochlearData() {
if (cochlear_parametrs_initialized == 1) {
// cuda Rd,Sd,Qd,Yd
gpuAssert(cudaFree(cuda_Rd));
cuda_Rd = NULL;
gpuAssert(cudaFree(cuda_Sd));
cuda_Sd = NULL;
gpuAssert(cudaFree(cuda_Qd));
cuda_Qd = NULL;
gpuAssert(cudaFree(cuda_Yd));
cuda_Yd = NULL;
// cuda S_ohcd,S_tmd,gammad,R_tmd
gpuAssert(cudaFree(cuda_S_ohcd));
cuda_S_ohcd = NULL;
gpuAssert(cudaFree(cuda_S_tmd));
cuda_S_tmd = NULL;
gpuAssert(cudaFree(cuda_gammad));
cuda_gammad = NULL;
gpuAssert(cudaFree(cuda_R_tmd));
cuda_R_tmd = NULL;
// cuda massd,Md,Ud,Ld
gpuAssert(cudaFree(cuda_massd));
cuda_massd = NULL;
gpuAssert(cudaFree(cuda_Md));
cuda_Md = NULL;
gpuAssert(cudaFree(cuda_Ud));
cuda_Ud = NULL;
gpuAssert(cudaFree(cuda_Ld));
cuda_Ld = NULL;
cochlear_parametrs_initialized = 0;
}
}
void loadCochlearData(float *S_ohc, float *S_tm, float *gamma, float *R_tm, float *mass, float *M, float *U, float *L, float *R, float *S,float *Q ) {
if (cochlear_parametrs_initialized == 1) {
int cochlea_allocated = cochlea_sections * sizeof(float);
gpuAssert(cudaMemcpy(cuda_S_ohcd, S_ohc, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_S_tmd, S_tm, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_gammad, gamma, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_R_tmd, R_tm, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_massd, mass, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_Md, M, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_Ud, U, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_Ld, L, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_Rd, R, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_Sd, S, cochlea_allocated, cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cuda_Qd, Q, cochlea_allocated, cudaMemcpyHostToDevice));
}
}
int isInputMemorySufficent(size_t test_nodes) { return inputBufferNodes_data >= test_nodes; }
int isInputMemoryAllocated() { return isInputMemorySufficent(1); }
int isOutputMemorySufficent(size_t test_nodes) { return resultBufferNodes_data >= test_nodes; }
int isOutputMemoryAllocated() { return isOutputMemorySufficent(1); }
int isBlocksMemorySufficent(size_t test_nodes) { return numBlocks_data >= test_nodes; }
int isBlocksMemoryAllocated() { return isBlocksMemorySufficent(1); }
void allocateOHCIOData(const size_t& inputBufferNodes,const size_t& resultBufferNodes) {
if (!isInputMemorySufficent(inputBufferNodes)) {
releaseInputData();
size_t tsizep = inputBufferNodes * sizeof(float);
size_t isizep = inputBufferNodes * sizeof(int);
inputBufferNodes_data = inputBufferNodes;
// input samples and results
gpuAssert(cudaMalloc((void **)&cuda_input_samples, tsizep));
gpuAssert(cudaMalloc((void **)&cuda_Failed_Converged_Time_Node, isizep));
gpuAssert(cudaMalloc((void **)&cuda_Converged_Time_Node, tsizep));
gpuAssert(cudaMalloc((void **)&cuda_convergence_jacoby_loops_per_iteration, tsizep));
gpuAssert(cudaMalloc((void **)&converge_speed, tsizep));
}
if (!isOutputMemorySufficent(resultBufferNodes)) {
releaseOutputData();
size_t ssizep = resultBufferNodes * sizeof(float);
resultBufferNodes_data = resultBufferNodes;
printf("allocated %d bytes for BM velocity\n", ssizep);
gpuAssert(cudaMalloc((void **)&cuda_saved_speeds, ssizep));
}
}
// Frees every per-input-node device buffer and resets the tracked capacity.
void releaseInputData() {
	if (!isInputMemoryAllocated()) return;
	inputBufferNodes_data = 0;
	gpuAssert(cudaFree(cuda_input_samples));
	gpuAssert(cudaFree(cuda_Failed_Converged_Time_Node));
	gpuAssert(cudaFree(cuda_Converged_Time_Node));
	gpuAssert(cudaFree(cuda_convergence_jacoby_loops_per_iteration));
	gpuAssert(cudaFree(converge_speed));
	// clear the pointers so stale handles are never reused
	cuda_input_samples = NULL;
	cuda_Failed_Converged_Time_Node = NULL;
	cuda_Converged_Time_Node = NULL;
	cuda_convergence_jacoby_loops_per_iteration = NULL;
	converge_speed = NULL;
}
// Frees the BM-velocity results buffer and resets its tracked capacity.
void releaseOutputData() {
	if (!isOutputMemoryAllocated()) return;
	resultBufferNodes_data = 0;
	gpuAssert(cudaFree(cuda_saved_speeds));
	cuda_saved_speeds = NULL;
}
// Releases both the input-side buffers and the results buffer in one call.
void releaseOHCIOData() {
	releaseInputData();
	releaseOutputData();
}
// Ensures the per-block convergence bookkeeping arrays can hold `nodes` blocks:
// one failed-flag int plus three float diagnostics per block. Grow-only.
void allocateBlocksConverganceArray(size_t nodes) {
	if (isBlocksMemorySufficent(nodes)) return;
	releaseBlocksConverganceArray();
	numBlocks_data = nodes;
	size_t int_bytes = numBlocks_data * sizeof(int);
	size_t float_bytes = numBlocks_data * sizeof(float);
	gpuAssert(cudaMalloc((void **)&cuda_Failed_Converged_Blocks, int_bytes));
	gpuAssert(cudaMalloc((void **)&converge_speed_blocks, float_bytes));
	gpuAssert(cudaMalloc((void **)&cuda_Converged_Blocks, float_bytes));
	gpuAssert(cudaMalloc((void **)&cuda_convergence_jacoby_loops_per_iteration_blocks, float_bytes));
}
// Frees the per-block convergence arrays and resets the tracked block count.
void releaseBlocksConverganceArray() {
	if (!isBlocksMemoryAllocated()) return;
	gpuAssert(cudaFree(cuda_Failed_Converged_Blocks));
	gpuAssert(cudaFree(converge_speed_blocks));
	gpuAssert(cudaFree(cuda_Converged_Blocks));
	gpuAssert(cudaFree(cuda_convergence_jacoby_loops_per_iteration_blocks));
	cuda_Failed_Converged_Blocks = NULL;
	converge_speed_blocks = NULL;
	cuda_Converged_Blocks = NULL;
	cuda_convergence_jacoby_loops_per_iteration_blocks = NULL;
	numBlocks_data = 0;
}
} cudaHolderData;
// Holder for device-side IHC (inner-hair-cell) data and lambda-rate buffers.
// Allocation is lazy and monotonic: buffers grow on demand via the allocate*
// methods (defined below the struct) and shrink only on explicit release.
struct cudaLambdaHolderData {
// cuda IHC constant size data
double *cuda_nIHC = NULL;
int cochlea_sections = SECTIONS;
int allocatedIHCDataVar = 0; // flag: 1 once cuda_nIHC has been cudaMalloc'ed
inline int isIHCDataAllocated() { return allocatedIHCDataVar; }
void allocateIHCData();
void releaseIHCData();
size_t allocatedLambdaNodes = 0; // to know if data need, allocated or reallocated
// capacity check: lambda buffer must hold LAMBDA_COUNT buffers of test_nodes each
inline size_t isLambdaMemorySufficent(size_t test_nodes) { return allocatedLambdaNodes >= LAMBDA_COUNT*test_nodes; }
inline size_t isLambdaMemoryAllocated() { return isLambdaMemorySufficent(1); }
size_t allocatedBufferNodes = 0; // to know if data need, allocated or reallocated
inline int isBufferMemorySufficent(size_t test_nodes) { return allocatedBufferNodes >= test_nodes; }
inline int isBufferMemoryAllocated() { return isBufferMemorySufficent(1); }
JNDFloat *cuda_JND_Lambda = NULL; // will use as buffer too for memory conversions purposes
JNDFloat *cuda_Buffer1 = NULL;
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
void allocateLambdaMemory(size_t nodes);
void releaseLambdaMemory();
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
void allocateBufferMemory(size_t nodes);
void releaseBufferMemory();
} cudaLambdaHolderData;
// Frees the device nIHC array if allocated and clears the allocation flag.
void cudaLambdaHolderData::releaseIHCData() {
	if (!isIHCDataAllocated()) return;
	gpuAssert(cudaFree(cuda_nIHC));
	cuda_nIHC = NULL;
	allocatedIHCDataVar = 0;
}
// Allocates the device nIHC array (one double per cochlea section), once.
void cudaLambdaHolderData::allocateIHCData() {
	if (isIHCDataAllocated()) return;
	gpuAssert(cudaMalloc((void **)&cuda_nIHC, cochlea_sections * sizeof(double)));
	allocatedIHCDataVar = 1;
}
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
// Ensures the lambda buffer can hold LAMBDA_COUNT * nodes JNDFloat values.
// Grow-only: existing sufficient capacity is kept as-is.
void cudaLambdaHolderData::allocateLambdaMemory(size_t nodes) {
	if (!isLambdaMemorySufficent(nodes)) {
		releaseLambdaMemory();
		allocatedLambdaNodes = nodes*LAMBDA_COUNT; // now its sufficent
		size_t lambda_memory_size_in_bytes = allocatedLambdaNodes * sizeof(JNDFloat);
		// %zu is the portable format for size_t (%lu is wrong on LLP64 targets)
		printf("allocated %zu bytes for lambda nodes\n", lambda_memory_size_in_bytes);
		gpuAssert(cudaMalloc((void **)&cuda_JND_Lambda, lambda_memory_size_in_bytes));
	}
}
// Frees the lambda buffer and resets the tracked node capacity.
void cudaLambdaHolderData::releaseLambdaMemory() {
	if (!isLambdaMemoryAllocated()) return;
	gpuAssert(cudaFree(cuda_JND_Lambda));
	cuda_JND_Lambda = NULL;
	allocatedLambdaNodes = 0;
}
// nodes count for single buffer, for full lambda count multiple by LAMBDA constant
// Ensures the general-purpose device buffer can hold `nodes` JNDFloat values.
// Grow-only: existing sufficient capacity is kept as-is.
void cudaLambdaHolderData::allocateBufferMemory(size_t nodes) {
	if (!isBufferMemorySufficent(nodes)) {
		releaseBufferMemory();
		allocatedBufferNodes = nodes; // now its sufficent
		size_t lambda_memory_size_in_bytes = allocatedBufferNodes * sizeof(JNDFloat);
		// %zu is the portable format for size_t (%lu is wrong on LLP64 targets)
		printf("allocated %zu bytes for buffer nodes\n", lambda_memory_size_in_bytes);
		gpuAssert(cudaMalloc((void **)&cuda_Buffer1, lambda_memory_size_in_bytes));
	}
}
// Frees the general-purpose buffer and resets the tracked node capacity.
void cudaLambdaHolderData::releaseBufferMemory() {
	if (!isBufferMemoryAllocated()) return;
	gpuAssert(cudaFree(cuda_Buffer1));
	cuda_Buffer1 = NULL;
	allocatedBufferNodes = 0;
}
// Accessor: device-side scratch buffer owned by cudaLambdaHolderData (may be NULL).
extern "C" JNDFloat *getCudaBuffer() {
	JNDFloat *buffer = cudaLambdaHolderData.cuda_Buffer1;
	return buffer;
}
// Accessor: device-side JND lambda buffer (may be NULL if not allocated).
extern "C" JNDFloat *getCudaLambda() {
	JNDFloat *lambda_ptr = cudaLambdaHolderData.cuda_JND_Lambda;
	return lambda_ptr;
}
// Accessor: per-time-node convergence-failure flags on the device.
extern "C" int *getCudaFailedTimeNodes() {
	int *failed_nodes = cudaHolderData.cuda_Failed_Converged_Time_Node;
	return failed_nodes;
}
// Accessor: per-block convergence-failure flags on the device.
extern "C" int *getCudaFailedBlocks() {
	int *failed_blocks = cudaHolderData.cuda_Failed_Converged_Blocks;
	return failed_blocks;
}
// Accessor: per-time-node convergence values on the device.
extern "C" float *getCudaConvergedTimeNodes() {
	float *converged_nodes = cudaHolderData.cuda_Converged_Time_Node;
	return converged_nodes;
}
// Accessor: per-time-node Jacobi-loop counts on the device.
extern "C" float *getCudaConvergedJacobyLoopsPerIteration() {
	float *loops = cudaHolderData.cuda_convergence_jacoby_loops_per_iteration;
	return loops;
}
// Accessor: per-block Jacobi-loop counts on the device.
extern "C" float *getCudaConvergedJacobyLoopsPerIterationBlocks() {
	float *block_loops = cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks;
	return block_loops;
}
// Accessor: per-block convergence values on the device.
extern "C" float *getCudaConvergedBlocks() {
	float *converged_blocks = cudaHolderData.cuda_Converged_Blocks;
	return converged_blocks;
}
// Uploads the Aihc table by delegating to the cudaModelAihc holder; may throw
// (noexcept(false)) if the underlying CUDA operations fail.
extern "C" void loadAihc(float *Aihc) noexcept(false) {
cudaModelAihc.loadAihc(Aihc);
}
// Enables Aihc loading by delegating to the cudaModelAihc holder; may throw
// (noexcept(false)) if the underlying CUDA operations fail.
extern "C" void enableloadAihc() noexcept(false) {
cudaModelAihc.enableLoadAihc();
}
// Copies `nodes` per-node convergence speeds from the device into the host
// array `convergence_times`; throws on CUDA failure via gpuAssert.
extern "C" void extractConvergenceTimes(float *convergence_times, size_t nodes) {
	size_t bytes = nodes * sizeof(float);
	gpuAssert(cudaMemcpy(convergence_times, cudaHolderData.converge_speed, bytes, cudaMemcpyDeviceToHost));
}
int *host_params_time_filter,*host_time_filters;
// Holder for device-side JND (just-noticeable-difference) computation state:
// per-interval parameter arrays, fisher-information buffers and mean-rate
// nodes. All allocations are lazy and monotonic (grow-only until released).
struct cudaJNDHolder {
	//JNDFloat *cuda_dLambda = NULL;
	JNDFloat *cuda_MeanRate = NULL;
	/**
	* pointer to parameters array of structures on the host
	*/
	device_params *host_local_param;
	/**
	* pointer to parameters array of structures on global device memory
	*/
	device_params *global_device_params;
	vectors_sum_linear_coefficents *vectors_sums_coefficents;
	int cuda_device_jnd_structs_allocated = 0; // flag: 1 once structs allocated
	int isDeviceStructsAllocated() { return cuda_device_jnd_structs_allocated; }
	// Frees the device parameter structs and the host mirror array.
	void ReleaseDeviceStructs() {
		if (isDeviceStructsAllocated()) {
			cuda_device_jnd_structs_allocated = 0;
			delete[](host_local_param);
			gpuAssert(cudaFree(global_device_params));
			global_device_params = NULL;
			gpuAssert(cudaFree(vectors_sums_coefficents));
			vectors_sums_coefficents = NULL;
		}
	}
	// Allocates two device_params sets on the device plus a host mirror array,
	// and two vectors_sum_linear_coefficents on the device. One-shot.
	void allocateDeviceStructs() {
		if (!isDeviceStructsAllocated()) {
			gpuAssert(cudaMalloc((void **)&vectors_sums_coefficents, 2 * sizeof(vectors_sum_linear_coefficents)));
			gpuAssert(cudaMalloc((void **)&global_device_params, 2 * sizeof(device_params))); // just one set filter includes its own size
			host_local_param = new device_params[2];
			cuda_device_jnd_structs_allocated = 1;
		}
	}
	size_t cuda_jnd_intervals_num = 0;
	int isSufficentIntervalAllocated(size_t nodes) { return cuda_jnd_intervals_num >= nodes; }
	int isIntervalsAllocated() { return isSufficentIntervalAllocated(1); }
	device_jnd_params *cuda_jnd_params = NULL;
	int *cuda_JND_Serial_Intervals_Positions = NULL;
	int *cuda_JND_Interval_To_Reference = NULL;
	int *cuda_JND_Calculated_Intervals = NULL;
	int *cuda_JND_Refrence_Intervals = NULL;
	// Frees all per-interval arrays and resets the interval count.
	void releaseIntervals() {
		if (isIntervalsAllocated()) {
			// %zu is the correct format for size_t (was %d - undefined behavior)
			printf("clearing %zu params\n", cuda_jnd_intervals_num);
			gpuAssert(cudaFree(cuda_JND_Serial_Intervals_Positions));
			gpuAssert(cudaFree(cuda_JND_Interval_To_Reference));
			gpuAssert(cudaFree(cuda_JND_Calculated_Intervals));
			gpuAssert(cudaFree(cuda_JND_Refrence_Intervals));
			gpuAssert(cudaFree(cuda_jnd_params));
			cuda_JND_Serial_Intervals_Positions = NULL;
			cuda_JND_Interval_To_Reference = NULL;
			cuda_JND_Calculated_Intervals = NULL;
			cuda_JND_Refrence_Intervals = NULL;
			cuda_jnd_params = NULL;
			cuda_jnd_intervals_num = 0;
		}
	}
	// Ensures the per-interval arrays can hold intervals_num entries. Grow-only.
	void allocateIntervals(int intervals_num) {
		if (!isSufficentIntervalAllocated(intervals_num)) {
			releaseIntervals();
			cuda_jnd_intervals_num = intervals_num;
			// NOTE(review): the four int arrays below are sized with
			// sizeof(device_jnd_params) rather than sizeof(int) - this over-allocates
			// (harmless but wasteful); preserved as-is pending confirmation.
			size_t jndRefrencesSizeInBytes = cuda_jnd_intervals_num * sizeof(device_jnd_params);
			size_t dA_size_in_bytes = cuda_jnd_intervals_num * sizeof(device_jnd_params);
			gpuAssert(cudaMalloc((void **)&cuda_jnd_params, dA_size_in_bytes));
			gpuAssert(cudaMalloc((void **)&cuda_JND_Serial_Intervals_Positions, jndRefrencesSizeInBytes));
			gpuAssert(cudaMalloc((void **)&cuda_JND_Interval_To_Reference, jndRefrencesSizeInBytes));
			gpuAssert(cudaMalloc((void **)&cuda_JND_Calculated_Intervals, jndRefrencesSizeInBytes));
			gpuAssert(cudaMalloc((void **)&cuda_JND_Refrence_Intervals, jndRefrencesSizeInBytes));
		}
	}
	JNDFloat *cuda_FisherAISum = NULL;
	JNDFloat *cuda_F_RA = NULL;
	size_t cuda_fisher_size = 0;
	int isSufficentFisherNodesAllocated(size_t nodes) { return cuda_fisher_size >= nodes; }
	int isFisherNodesAllocated() { return isSufficentFisherNodesAllocated(1); }
	void allocateFisherNodes(size_t nodes);
	void releaseFisherNodes();
	size_t cuda_mean_nodes = 0;
	int isSufficentMeanNodesAllocated(size_t nodes) { return cuda_mean_nodes >= nodes; }
	int isMeanNodesAllocated() { return isSufficentMeanNodesAllocated(1); }
	void allocateMeanNodes(size_t nodes);
	void releaseMeanNodes();
} cudaJNDHolder;
// Frees the fisher-information device buffers and resets the node count.
void cudaJNDHolder::releaseFisherNodes() {
	if (isFisherNodesAllocated()) {
		// %zu is the correct format for size_t (was %d - undefined behavior)
		printf("clearing %zu fisher nodes\n", cuda_fisher_size);
		gpuAssert(cudaFree(cuda_FisherAISum));
		gpuAssert(cudaFree(cuda_F_RA));
		cuda_FisherAISum = NULL;
		cuda_F_RA = NULL;
		cuda_fisher_size = 0;
	}
}
// Ensures the two fisher buffers (AI sum and F_RA) can hold `nodes` JNDFloats.
void cudaJNDHolder::allocateFisherNodes(size_t nodes) {
	if (isSufficentFisherNodesAllocated(nodes)) return;
	releaseFisherNodes();
	cuda_fisher_size = nodes;
	size_t bytes = cuda_fisher_size * sizeof(JNDFloat);
	gpuAssert(cudaMalloc((void **)&cuda_FisherAISum, bytes));
	gpuAssert(cudaMalloc((void **)&cuda_F_RA, bytes));
}
// Frees the mean-rate device buffer and resets the node count.
void cudaJNDHolder::releaseMeanNodes() {
	if (isMeanNodesAllocated()) {
		// %zu is the correct format for size_t (was %d - undefined behavior)
		printf("clearing %zu mean nodes\n", cuda_mean_nodes);
		gpuAssert(cudaFree(cuda_MeanRate));
		cuda_MeanRate = NULL;
		cuda_mean_nodes = 0;
	}
}
// Ensures the mean-rate buffer can hold `nodes` JNDFloat values. Grow-only.
void cudaJNDHolder::allocateMeanNodes(size_t nodes) {
	if (isSufficentMeanNodesAllocated(nodes)) return;
	releaseMeanNodes();
	cuda_mean_nodes = nodes;
	size_t bytes = cuda_mean_nodes * sizeof(JNDFloat);
	gpuAssert(cudaMalloc((void **)&cuda_MeanRate, bytes));
}
// Accessor: device-side mean-rate buffer (may be NULL if not allocated).
extern "C" JNDFloat *getCudaMeanRate() {
	JNDFloat *mean_rate = cudaJNDHolder.cuda_MeanRate;
	return mean_rate;
}
// Holder for the generated-input device buffers: white noise and the signal.
// Grow-only allocation, mirroring the other holders in this file.
struct cudaSignalHolder {
	size_t cuda_signal_nodes = 0;
	float *cuda_WN = NULL; // white noise for input generation
	float *cuda_Signal = NULL;
	int isSufficentSignalNodesAllocated(size_t nodes) { return cuda_signal_nodes >= nodes; }
	int isSignalNodesAllocated() { return isSufficentSignalNodesAllocated(1); }
	// Ensures both buffers can hold `nodes` floats; reallocates when growing.
	void allocateSignalNodes(int nodes) {
		if (!isSufficentSignalNodesAllocated(nodes)) {
			releaseSignalNodes();
			cuda_signal_nodes = nodes;
			size_t wn_length_bytes = cuda_signal_nodes * sizeof(float);
			gpuAssert(cudaMalloc((void **)&cuda_WN, wn_length_bytes));
			gpuAssert(cudaMalloc((void **)&cuda_Signal, wn_length_bytes));
		}
	}
	// Frees both device buffers and resets the counter.
	void releaseSignalNodes() {
		if (isSignalNodesAllocated()) {
			// BUG FIX: previously only the node counter was reset, leaking
			// cuda_WN/cuda_Signal on every growth reallocation. Free and clear
			// them here, matching the other release* methods in this file.
			gpuAssert(cudaFree(cuda_WN));
			gpuAssert(cudaFree(cuda_Signal));
			cuda_WN = NULL;
			cuda_Signal = NULL;
			cuda_signal_nodes = 0;
		}
	}
} cudaSignalHolder;
/****
* cochlea cu global variables
*
*
**/
//float *deviceBackupSpeeds; // save backup speeds from previous ihc run on the device
float *BM_host;
// Debug-only error check (active when CUDA_ERROR_CHECK is defined): first
// surfaces any pending launch error, then synchronizes the device to flush
// asynchronous kernel-execution errors. Throws std::runtime_error on failure.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
	// Pre-sync check: catches launch-configuration errors already recorded.
	cudaError_t status = cudaGetLastError();
	if (status != cudaSuccess)
	{
		printf("cudaCheckError() failed at %s:%i : %s\n",
			file, line, cudaGetErrorString(status));
		throw std::runtime_error("Cuda check pre device synchronize failed");
	}
	// Post-sync check: expensive (blocks the host), so debug builds only.
	status = cudaDeviceSynchronize();
	if (status != cudaSuccess)
	{
		printf("cudaCheckError() with sync failed at %s:%i : %s\n",
			file, line, cudaGetErrorString(status));
		throw std::runtime_error("Cuda check post device synchronize failed");
	}
#endif
	return;
}
// Reads (and thereby clears) any pending CUDA error, logging it without
// throwing - used where a failure is expected/ignorable.
inline void __cudaClearError( const char *file, const int line )
{
	cudaError_t status = cudaGetLastError();
	if (status != cudaSuccess)
	{
		printf("cudaCheckError() failed at %s:%i : %s\n",
			file, line, cudaGetErrorString(status));
	}
	return;
}
// Formats a dim3 as "(x, y, z)" for log output.
std::string showDIM3(dim3 d3) {
	std::stringstream ss;
	// fix: separator before the z component was "," while the others were ", "
	ss << "(" << d3.x << ", " << d3.y << ", " << d3.z << ")";
	return ss.str();
}
// Creates the start/stop timing events when `condition` is non-zero;
// a zero condition leaves both events untouched.
extern "C" void cudaEventsCreate(cudaEvent_t& start, cudaEvent_t& stop, int condition) noexcept(false) {
	if (!condition) return;
	gpuAssert(cudaEventCreate(&start));
	gpuAssert(cudaEventCreate(&stop));
}
// Records the start event when `condition` is non-zero. `stop` is not used
// here; it is kept for signature symmetry with the other event helpers.
extern "C" void cudaEventsStartTimer(cudaEvent_t& start, cudaEvent_t& stop, int condition) noexcept(false) {
	if (!condition) return;
	gpuAssert(cudaEventRecord(start));
}
// When bit 4 (value 16) of `flags` is set, prints current free/total GPU
// memory in megabytes, prefixed by `prefix`.
extern "C" void viewGPUStatus(int flags, const std::string& prefix) noexcept(false) {
	if (!(flags & 16)) return;
	size_t free_memory = 0;
	size_t total_memory = 0;
	gpuAssert(cudaMemGetInfo(&free_memory, &total_memory));
	int free_mb = static_cast<int>(free_memory / (1024 * 1024));
	int total_mb = static_cast<int>(total_memory / (1024 * 1024));
	printf("%s : GPU Memory, Free(%d MB) / Total(%d MB)\n", prefix.c_str(), free_mb, total_mb);
}
// Records the stop event when `condition` is non-zero. `start` is not used
// here; it is kept for signature symmetry with the other event helpers.
extern "C" void cudaEventsStopTimer(cudaEvent_t& start, cudaEvent_t& stop, int condition) noexcept(false) {
	if (!condition) return;
	gpuAssert(cudaEventRecord(stop));
}
// Accessor: per-block convergence-speed buffer on the device.
extern "C" float *getCudaConvergeSpeedBlocks() {
	float *speed_blocks = cudaHolderData.converge_speed_blocks;
	return speed_blocks;
}
// Records the stop event, then (when enabled) waits for it and returns the
// elapsed milliseconds between start and stop, printing them with `prefix`.
// Returns 0 when `condition` is zero.
extern "C" float cudaEventsStopQueryTimer(cudaEvent_t& start, cudaEvent_t& stop, int condition, const std::string& prefix) noexcept(false) {
	cudaEventsStopTimer(start, stop, condition);
	float elapsed_ms = 0.0f;
	if (condition) {
		gpuAssert(cudaEventSynchronize(stop));
		gpuAssert(cudaEventElapsedTime(&elapsed_ms, start, stop));
		printf("%s : %.2f (msec) \n", prefix.c_str(), elapsed_ms);
	}
	return elapsed_ms;
}
// Explicit template instantiations: force the compiler to emit the copy/update
// helpers for the concrete types used by callers in other translation units.
template void GeneralKernel_Copy_Results_Template<double>(double *target, double *src, size_t size);
template void GeneralKernel_Copy_Results_Template<float>(float *target, float *src, size_t size);
template void GeneralKernel_Copy_Results_Template<int>(int *target, int *src, size_t size);
template void GeneralKernel_Copy_Results_Template<double>(double *target, double *src, size_t size, size_t offset);
template void GeneralKernel_Copy_Results_Template<float>(float *target, float *src, size_t size, size_t offset);
template void GeneralKernel_Copy_Results_Template<int>(int *target, int *src, size_t size, size_t offset);
template void ReverseKernel_Copy_Results_Template<float>(float *cpu_src, float *cuda_target, size_t start_time_node, size_t time_nodes, int sections);
template void ReverseKernel_Copy_Results_Template<double>(double *cpu_src, double *cuda_target, size_t start_time_node, size_t time_nodes, int sections);
template void updateCUDALambdaArray<float>(float *lambda_array, float *cuda_buffer, size_t calc_time_nodes, int sections, int Show_Run_Time, int Show_Device_Data, int cuda_buffer_update, Log &outer_log);
template void updateCUDALambdaArray<double>(double *lambda_array,double *cuda_buffer, size_t calc_time_nodes, int sections, int Show_Run_Time, int Show_Device_Data, int cuda_buffer_update,Log &outer_log);
// Prepares device state for a BM/OHC run: allocates (grow-only) the generated
// data arrays, the input/result buffers and the cochlear model vectors, then
// uploads the cochlear parameter arrays to the device. Timed with CUDA events
// when Show_Run_Time has bit value 2 set; elapsed time is sent to outer_log.
// NOTE(review): num_frequencies, dbA and lambdaBufferNodes are not used in
// this function body - kept for interface stability.
extern "C" void BMOHCKernel_Init(
	float *gamma,
	float *mass,
	float *M,
	float *U,
	float *L,
	float *R,
	float *S,
	float *Q,
	float *S_ohc,
	float *S_tm,
	float *R_tm,
	float num_frequencies,
	float dbA,
	size_t inputBufferNodes,
	size_t resultBufferNodes,
	size_t lambdaBufferNodes,
	bool first_time,
	int Show_Run_Time,
	int Show_Device_Data,
	Log &outer_log
){
	cudaEvent_t start, stop;
	size_t ssizep = resultBufferNodes*sizeof(float);
	cudaEventsCreate(start, stop, Show_Run_Time & 2);
	cudaEventsStartTimer(start, stop, Show_Run_Time & 2);
	if ( first_time ) {
		if (Show_Device_Data & 8) {
			// Fixed: size_t values were printed with %d (undefined behavior) and
			// the message misspelled "speeds".
			// NOTE(review): ssizep/256 as a node count looks odd (nodes would be
			// ssizep/sizeof(float)); value preserved as-is pending confirmation.
			printf("Saved speeds allocated size = (%zu KB), %zu Nodes\n", (ssizep / 1024), (ssizep / 256));
		}
	} // end of first time memory allocations
	cudaHolderGeneratedData.allocateGenerated();
	cudaHolderData.allocateOHCIOData(inputBufferNodes, resultBufferNodes);
	cudaHolderData.allocateCochlearData(SECTIONS);
	cudaHolderData.loadCochlearData(S_ohc, S_tm, gamma, R_tm, mass, M, U, L, R, S, Q);
	outer_log.timeAtFlag(0,cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 2,"Initialize and allocate Memory for BM run"), Show_Run_Time & 2);
}
// Blocks the host until all previously queued device work has completed;
// throws (via gpuAssert) if an asynchronous error is reported.
extern "C" void BMOHCKernel_Wait_Threads() noexcept(false)
{
	gpuAssert(cudaDeviceSynchronize());
}
// Downloads `resultNodes` saved BM speeds from the device buffer, starting
// `offset` nodes in, into the host array `target`.
extern "C" void BMOHCKernel_Copy_Results(float *target, size_t resultNodes, size_t offset) noexcept(false) {
	float *device_src = cudaHolderData.cuda_saved_speeds + offset;
	size_t byte_count = resultNodes * sizeof(float);
	gpuAssert(cudaMemcpy((void *)(target), device_src, byte_count, cudaMemcpyDeviceToHost));
}
// Uploads `size` floats from host array `src` into the device saved-speeds buffer.
extern "C" void ReverseKernel_Copy_Results(float *src, size_t size) noexcept(false) {
	size_t byte_count = size * sizeof(float);
	gpuAssert(cudaMemcpy((void *)cudaHolderData.cuda_saved_speeds, src, byte_count, cudaMemcpyHostToDevice));
}
// Downloads `lambdaNodes` JNDFloat values from the device lambda buffer,
// starting `offset` nodes in, into the host array `target`.
// NOTE: offset is int here (vs size_t elsewhere) - preserved for ABI stability.
extern "C" void BMOHCKernel_Copy_Lambda(JNDFloat *target, size_t lambdaNodes, int offset) noexcept(false) {
	JNDFloat *device_src = cudaLambdaHolderData.cuda_Buffer1 + offset;
	size_t byte_count = lambdaNodes * sizeof(JNDFloat);
	gpuAssert(cudaMemcpy((void *)(target), device_src, byte_count, cudaMemcpyDeviceToHost));
}
// Releases all device memory owned by the BM/OHC holders (cochlear data,
// generated data, IO buffers and per-block convergence arrays).
extern "C" void BMOHCKernel_Free(
) noexcept(false) {
	cudaHolderData.releaseCochlearData();
	cudaHolderData.releaseOHCIOData();
	cudaHolderData.releaseBlocksConverganceArray();
	cudaHolderGeneratedData.releaseGenerated();
}
// Non-adjusted variant (relative-error parameters treated as 0): each thread
// writes the fixed thresholds - model_constants[23] (Max_M1_SP_Error_Parameter)
// and model_constants[24] (Max_Tolerance_Parameter) - for its block slot.
__global__ void CudaCalculateThresholdBoundariesForNonGeneratedInputSimple(
	float *m1_sp_maximum,
	float *tolerance_maximum
) {
	tolerance_maximum[threadIdx.x] = model_constants[24];
	m1_sp_maximum[threadIdx.x] = model_constants[23];
	__syncthreads();
}
// calculate threshold boundaries based on read input from file
// Launch shape: one CUDA block per time block; blockDim.x threads, required to
// be a power of two <= 1024 (the host enforces this in
// calculateBoundariesForNonGeneratedInput before launching).
// Each block reduces the absolute values of the first blockDim.x samples of
// its input segment to a maximum in shared memory; thread 0 then scales the
// result by model_constants[34] (1/SPLRefVal, set on the host) and stores it
// in power_calculated_array[blockIdx.x].
// NOTE(review): m1_sp_maximum and tolerance_maximum are accepted but never
// written here - the host derives those thresholds from the power array.
__global__ void CudaCalculateThresholdBoundariesForNonGeneratedInput(
float *input_samples,
float *m1_sp_maximum,
float *tolerance_maximum,
float *power_calculated_array
) {
__shared__ float blockMaximum[1024];
//__shared__ float loader[1024];
// model_constants_integers[8] = samples per time block; segment start offset
int start_input = model_constants_integers[8] * blockIdx.x;
int bdim = blockDim.x;
float load_data = input_samples[start_input + threadIdx.x];
load_data = abs(load_data);
blockMaximum[threadIdx.x] = load_data;
// shared-memory tree reduction: barrier before each halving step so the
// previous round's writes are visible to all threads (assumes bdim power of 2)
for (int t_i = (bdim >> 1); t_i >= 1; t_i >>= 1) {
__syncthreads();
if (threadIdx.x < t_i ) {
blockMaximum[threadIdx.x] = fmax(blockMaximum[threadIdx.x], blockMaximum[threadIdx.x + t_i]);
}
}
__syncthreads();
if (threadIdx.x ==0) {
// calculate thresholds
// first calculate power relative to SPLRef
float power_calculated = model_constants[34] * blockMaximum[0];
power_calculated_array[blockIdx.x] = power_calculated;
}
__syncthreads();
}
// calculate threshold boundaries based on read input from file
// Blocked variant: indexes by threadIdx.x only, so each thread serially scans
// one whole input segment [block_size * t, gen_model_end_sample_index[t]) and
// writes the segment maximum scaled by rSPLRefVal to power_calculated_array[t].
// NOTE(review): unlike the reduction variant above, samples are NOT passed
// through abs() before fmax - confirm the input here is non-negative.
// NOTE(review): the M1/tolerance factor parameters are accepted but unused.
__global__ void CudaCalculateThresholdBoundariesForNonGeneratedInputBlocked(
volatile float *input_samples,
volatile float *power_calculated_array,
int block_size,
float rSPLRefVal,
float M1_SP_Fix_Factor,
float Tolerance_Fix_Factor,
float Max_M1_SP_Error_Parameter,
float Max_Tolerance_Parameter,
int *gen_model_end_sample_index
) {
//__shared__ float loader[1024];
int start_input = block_size * threadIdx.x;
int end_output = gen_model_end_sample_index[threadIdx.x];
//int end_output = (model_constants_integers[8]+1) * threadIdx.x;
// determine max output for boundaries conditions
float load_data = 0.0f;
load_data = input_samples[start_input];
for (int t_i = start_input; t_i <end_output; t_i++) {
load_data = fmax(load_data, input_samples[t_i]);
}
// calculate thresholds
// first calculate power relative to SPLRef
//float power_calculated = rSPLRefVal * load_data;
power_calculated_array[threadIdx.x] = rSPLRefVal * load_data;
__syncthreads();
}
// Computes per-block convergence thresholds (m1-sp error and tolerance) for a
// BM run whose input was read from file rather than generated on-device.
// With Relative_Error_Parameters == 0 a trivial kernel broadcasts the fixed
// parameter values to the device arrays; otherwise a per-block reduction
// kernel measures input power and host code derives block-specific thresholds.
// NOTE(review): in the relative-error branch the derived
// host_generated_m1_sp_array / host_generated_tolerance_array values are only
// printed, never copied back to the device threshold arrays - confirm whether
// an upload is missing here or performed by the caller.
extern "C" void calculateBoundariesForNonGeneratedInput(
int Relative_Error_Parameters,
int max_block_length,
int Show_Calculated_Power,
float M1_SP_Fix_Factor,
float Tolerance_Fix_Factor,
float Max_M1_SP_Error_Parameter,
float Max_Tolerance_Parameter,
float rSPLRefVal,
int block_size,
dim3 inputBlockDivision
) noexcept(false) {
dim3 singleton(1, 1, 1);
if (Relative_Error_Parameters == 0) {
printf("Calculating simple configuration for %d blocks\n", inputBlockDivision.x);
CudaCalculateThresholdBoundariesForNonGeneratedInputSimple KERNEL_ARGS2(singleton, inputBlockDivision)(cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance, cudaHolderGeneratedData.generated_model_throw_tolerance);
} else {
float host_generated_m1_sp_array[MAX_NUMBER_OF_BLOCKS];
float host_generated_tolerance_array[MAX_NUMBER_OF_BLOCKS];
float host_generated_calculated_power_array[MAX_NUMBER_OF_BLOCKS];
// reduction kernel requires a power-of-two thread count <= 1024
int threadsNumber = min(max_block_length, 1024);
if (threadsNumber < 1024) {
threadsNumber = static_cast<int>(powf(2.0f, floor(log2f(static_cast<float>(threadsNumber)))));
}
dim3 threadsDivision(threadsNumber, 1, 1);
printf("Calculating alternate complex configuration size of %d\n", inputBlockDivision.x); // " blocks divided to threads at " << showDIM3(threadsDivision) << std::endl;
CudaCalculateThresholdBoundariesForNonGeneratedInput KERNEL_ARGS2(inputBlockDivision, threadsDivision)(cudaHolderData.cuda_input_samples, cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance, cudaHolderGeneratedData.generated_model_throw_tolerance, cudaHolderGeneratedData.generated_calculated_power_array);
gpuAssert(cudaMemcpy(host_generated_calculated_power_array, cudaHolderGeneratedData.generated_calculated_power_array, static_cast<int>(inputBlockDivision.x)*sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < static_cast<int>(inputBlockDivision.x); i++) {
// convert the measured peak to dB (guard against non-positive peaks)
float power_calculated = host_generated_calculated_power_array[i] > 0.0f ? 20 * log10f(0.1f*host_generated_calculated_power_array[i]) : 0.0f;
host_generated_m1_sp_array[i] = Max_M1_SP_Error_Parameter*powf(10.0f, M1_SP_Fix_Factor * power_calculated);
host_generated_tolerance_array[i] = Max_Tolerance_Parameter*powf(10.0f, Tolerance_Fix_Factor * power_calculated);
if (Show_Calculated_Power&1) {
printf("generated_calculated_power_array[%d]=%.3e\n",i, power_calculated);
printf("generated_m1_sp_array[%d]=%.3e\n", i, host_generated_m1_sp_array[i]);
printf("generated_tolerance_array[%d]=%.3e\n", i, host_generated_tolerance_array[i]);
}
}
}
}
/////////////////////////////////////////////////
// Prints the theoretical SM occupancy achieved by launching `MyKernel` with
// `blockSize` threads per block (assuming zero dynamic shared memory).
void cudaOccupancyIndicator(int blockSize, const void *MyKernel, cudaDeviceProp &deviceProp) {
	int maxActiveBlocks = 0;
	gpuAssert(cudaDeviceSynchronize());
	gpuAssert(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, MyKernel, blockSize, 0));
	// warps resident per SM over the SM's warp capacity (integer divisions
	// intentionally match the original computation)
	int residentWarps = maxActiveBlocks * blockSize / deviceProp.warpSize;
	int warpCapacity = deviceProp.maxThreadsPerMultiProcessor / deviceProp.warpSize;
	float occupancy = (float)residentWarps / (float)warpCapacity;
	printf("Launched blocks of size %d. Theoretical occupancy: %f,maxActive Blocks: %d\n",
		blockSize, occupancy, maxActiveBlocks);
}
extern "C" void BMOHCNewKernel(
float *input_samples,
bool override_input_samples, // true if input generated, will not upload
float w_ohc,
float time_step,
float time_step_out,
float delta_x,
float alpha_r,
int enable_psi,
int enable_OW,
int base_index,
float Ts,
float _ohc_alpha_l,
float _ohc_alpha_s,
float model_Gme,
float model_a0,
float model_a1,
float model_a2,
float sigma_ow,
float eta_1,
float eta_2,
float *tres,
int Time_Blocks,
int samplesBufferLengthP1,
int overlap_nodes_for_block,
long overlapTimeMicroSec,
int show_transient, // always 1 and will be ignored than
float cuda_max_time_step,
float cuda_min_time_step,
int Decouple_Filter,
float Max_M1_SP_Error_Parameter,
float Max_Tolerance_Parameter,
int Relative_Error_Parameters,
int calculate_boundary_conditions, // if true will calculate max tolerance and max m1 sp error from input, should be used if input is not generated within the program
float M1_SP_Fix_Factor,
float Tolerance_Fix_Factor,
float SPLREfVal,
int Show_Calculated_Power,
int Show_Device_Data,
int Show_Run_Time,
int JACOBBY_Loops_Fast, // number of jcoby loops to perform on fast approximation
int JACOBBY_Loops_Slow, // number of jcoby loops to perform on slow approximation
int Cuda_Outern_Loops, // max control loops
int Run_Fast_BM_Calculation, // will run BM calculation with relaxed memory requirements
int BMOHC_Kernel_Configuration,
cudaEvent_t& start,
cudaEvent_t& stop,
cudaDeviceProp deviceProp,
Log &outer_log
) noexcept(false) {
cudaEventsCreate(start, stop, Show_Run_Time & 1);
dim3 threads(FIRST_STAGE_WIDTH, 1);
dim3 grid(Time_Blocks/*FIRST_STAGE_BLOCKS*/, 1);
cudaHolderData.allocateBlocksConverganceArray(Time_Blocks);
//last_saved_nodes_per_time_block_for_cuda = last_saved_nodes_per_block; // this setup for later copy data, fix indexes
int tsizep = (samplesBufferLengthP1)*sizeof(float);
//std::cout << "inputing " << samplesBufferLengthP1 << " nodes\n"<<tsizep<<" Bytes\n";
// allocate memory on device
// copy data to device
// TODO - allocate & memcopy only neccesary data according to enable_ow and enable_psi
//int numBlocks = grid.x; // Occupancy in terms of active blocks
//int activeWarps;
//int maxWarps;
std::string run_mode_name;
if (Run_Fast_BM_Calculation == 3 ) {
run_mode_name = "fast no self analysis";
} else if (Run_Fast_BM_Calculation == 2) {
run_mode_name = "Impercise";
} else if (Run_Fast_BM_Calculation == 1) {
run_mode_name = "Fast";
} else {
run_mode_name = "Precise";
}
/*
if (Show_Device_Data & 2) {
if (Run_Fast_BM_Calculation) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, BMOHC_FAST_kernel, threads.x, 0);
} else {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, BMOHC_NEW_kernel, threads.x, 0);
}
activeWarps = deviceProp.multiProcessorCount* numBlocks * threads.x / deviceProp.warpSize;
maxWarps = deviceProp.multiProcessorCount*deviceProp.maxThreadsPerMultiProcessor / deviceProp.warpSize;
std::cout << "activeWarps: " << activeWarps << std::endl
<< "maxWarps: " << maxWarps << std::endl
<< "GPU Blocks processing capability of BM calculations ("<<run_mode_name<<") is : " << (deviceProp.multiProcessorCount*numBlocks) << std::endl
<< "current num blocks: " << grid.x << std::endl
<< "Occupancy: " << (double)activeWarps / maxWarps * 100 << "%" << std::endl;
}
*/
if (!override_input_samples) {
gpuAssert(cudaMemcpy(cudaHolderData.cuda_input_samples, input_samples, tsizep, cudaMemcpyHostToDevice));
}
// load GPU parmeters
float host_model_constants[MODEL_FLOATS_CONSTANTS_SIZE];
int host_model_constants_integers[MODEL_INTEGERS_CONSTANTS_SIZE];
int host_model_constants_longs[MODEL_LONGS_CONSTANTS_SIZE];
float base_time = 0;
host_model_constants[0] = Ts;
host_model_constants[1] = static_cast<float>(1.0/static_cast<double>(Ts)); // Fs
host_model_constants[2] = _ohc_alpha_l;
host_model_constants[3] = _ohc_alpha_s;
host_model_constants[4] = -1.0f*_ohc_alpha_l;
host_model_constants[5] = -1.0f*_ohc_alpha_s;
host_model_constants[6] = static_cast<float>(static_cast<double>(_ohc_alpha_l) / static_cast<double>(_ohc_alpha_s));
host_model_constants[7] = static_cast<float>(1.0/static_cast<double>(sigma_ow));
host_model_constants[8] = delta_x;
host_model_constants[9] = delta_x*delta_x; // dx_pow2
host_model_constants[10] = model_a0;
host_model_constants[11] = model_a1;
host_model_constants[12] = model_a2;
host_model_constants[13] = eta_1;
host_model_constants[14] = eta_2;
host_model_constants[15] = model_Gme;
host_model_constants[16] = enable_OW*model_Gme;
host_model_constants[17] = (1 - enable_OW)*model_Gme;
host_model_constants[18] = -w_ohc; // negated, so all negatives in GPU become positive
host_model_constants[19] = time_step;
host_model_constants[20] = time_step_out;
//std::cout << "host_model_constants[20] = time_step_out = " << time_step_out << std::endl;
host_model_constants[21] = cuda_min_time_step;
host_model_constants[22] = cuda_max_time_step;
host_model_constants[23] = Max_M1_SP_Error_Parameter;
host_model_constants[24] = Max_Tolerance_Parameter;
//printf("model_constants[25] = alpha_r=%.2e\n", alpha_r);
host_model_constants[25] = alpha_r;
host_model_constants[26] = host_model_constants[8] * host_model_constants[10];
host_model_constants[31] = -model_a1;
host_model_constants[32] = -model_a2;
host_model_constants[33] = SPLREfVal;
host_model_constants[34] = 1.0f/SPLREfVal;
host_model_constants[35] = M1_SP_Fix_Factor;
host_model_constants[36] = Tolerance_Fix_Factor;
// integer constants
host_model_constants_integers[0] = Decouple_Filter;
//std::cout << "Decouple_Filter = " << Decouple_Filter << std::endl;
host_model_constants_integers[1] = enable_OW;
host_model_constants_integers[2] = enable_psi;
host_model_constants_integers[3] = Time_Blocks;
host_model_constants_integers[4] = samplesBufferLengthP1;
host_model_constants_integers[5] = overlap_nodes_for_block;
host_model_constants_integers[6] = show_transient;
host_model_constants_integers[7] = Relative_Error_Parameters;
host_model_constants_integers[8] = (samplesBufferLengthP1 - overlap_nodes_for_block) / (Time_Blocks + 1);
host_model_constants_integers[9] = JACOBBY_Loops_Fast;
host_model_constants_integers[10] = JACOBBY_Loops_Slow;
host_model_constants_integers[11] = Cuda_Outern_Loops;
int host_model_out_sample_index[MAX_NUMBER_OF_BLOCKS];
int host_model_end_sample_index[MAX_NUMBER_OF_BLOCKS];
int max_block_length = 0;
// calculate division of of timed blocks, which node each cuda block starts procerssing the input
for (int bindex = 0; bindex < static_cast<int>(grid.x); bindex++) {
int transient_offset = (bindex > 0 && (host_model_constants_integers[0] == 0 || (host_model_constants_integers[0] != 1 && bindex%host_model_constants_integers[0] != 0)));
int preDecoupled = host_model_constants_integers[0]>0 && (host_model_constants_integers[0] == 1 || ((bindex + 1) % host_model_constants_integers[0] == 0)) && bindex != grid.x - 1;
int postDecoupled = host_model_constants_integers[0] > 0 && (bindex % host_model_constants_integers[0] == 0);
int input_sample = bindex*host_model_constants_integers[8];
// calculates out_sample
host_model_out_sample_index[bindex] = input_sample + std::max((1 - transient_offset - postDecoupled),0)*host_model_constants_integers[5]; // use as start output
// calculate end output as constant for convience
int block_length = host_model_constants_integers[8] + (1 - preDecoupled)*host_model_constants_integers[5];
max_block_length = max(block_length, max_block_length);
host_model_end_sample_index[bindex] = input_sample + block_length;
if (bindex > 0) {
host_model_out_sample_index[bindex] = max(host_model_out_sample_index[bindex], host_model_end_sample_index[bindex-1]);
}
if (Show_Calculated_Power & 2) {
std::cout << "Block[" << bindex << "] ={'start_input'=" << input_sample << ",'start_output'=" << host_model_out_sample_index[bindex] << ",'end_output'=" << host_model_end_sample_index[bindex] << "}" << std::endl;
}
if (Show_Calculated_Power & 4) {
std::cout << "Block[" << bindex << "] ={'preDecoupled'=" << preDecoupled << ",'transient_offset'=" << transient_offset << ",'block_length'=" << block_length << "}" << std::endl;
}
}
// long constants
host_model_constants_longs[0] = overlapTimeMicroSec;
// prepare symbols: upload the host-side constant tables into device __constant__ memory
// (model_constants*, set via cudaMemcpyToSymbol) and the per-block output windows into
// device global buffers (cudaMemcpy, host-to-device)
gpuAssert(cudaMemcpyToSymbol(model_constants, host_model_constants, MODEL_FLOATS_CONSTANTS_SIZE*sizeof(float)));
gpuAssert(cudaMemcpyToSymbol(model_constants_integers, host_model_constants_integers, MODEL_INTEGERS_CONSTANTS_SIZE*sizeof(int)));
gpuAssert(cudaMemcpy(cudaHolderGeneratedData.generated_model_out_sample_index, host_model_out_sample_index, MAX_NUMBER_OF_BLOCKS*sizeof(int),cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cudaHolderGeneratedData.generated_model_end_sample_index, host_model_end_sample_index, MAX_NUMBER_OF_BLOCKS*sizeof(int), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpyToSymbol(model_constants_longs, host_model_constants_longs, MODEL_LONGS_CONSTANTS_SIZE*sizeof(long)));
// calculate convergence criteria
if (calculate_boundary_conditions) {
calculateBoundariesForNonGeneratedInput(
Relative_Error_Parameters,
max_block_length,
Show_Calculated_Power,
M1_SP_Fix_Factor,
Tolerance_Fix_Factor,
Max_M1_SP_Error_Parameter,
Max_Tolerance_Parameter,
host_model_constants[34], // NOTE(review): meaning of float constant [34] not visible here — confirm at its assignment
host_model_constants_integers[8], // per-block input stride (see partition loop above)
grid // tell us block partition of current run
);
}
/**
void *params[] = { cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
&w_ohc,
&time_step,
&time_step_out,
&delta_x,
&alpha_r,
&enable_psi,
&enable_OW,
&base_time,
&Ts,
&_ohc_alpha_l,
&_ohc_alpha_s,
&model_Gme,
&model_a0,
&model_a1,
&model_a2,
&sigma_ow,
&eta_1,
&eta_2,
&samplesBufferLengthP1,
&overlap_nodes_for_block,
&cuda_min_time_step,
&cuda_max_time_step };
cuLaunchKernel(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B, threads.x, threads.y, threads.z, grid.x, grid.y, grid.z, 0, NULL, params, NULL);
*/
// choose and execute kernel version (dispatched below on Run_Fast_BM_Calculation)
printf("prefered BMOHC_Kernel_Configuration: %d\n", BMOHC_Kernel_Configuration);
// start CUDA event timing around the launch; Show_Run_Time & 1 presumably enables the
// timing report — confirm in cudaEventsStartTimer
cudaEventsStartTimer(start, stop, Show_Run_Time & 1);
if (Run_Fast_BM_Calculation == 18) {
cudaFuncSetCacheConfig(BMOHC_Triple_Aggragation_FAST_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_Triple_Aggragation_FAST_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_Triple_Aggragation_FAST_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_Triple_Aggragation_FAST_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 17) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_triple_aggregations, deviceProp);
} else if (Run_Fast_BM_Calculation == 16) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized_advanced_aggregations, deviceProp);
} else if (Run_Fast_BM_Calculation == 15) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B_Optimized, deviceProp);
} else if (Run_Fast_BM_Calculation == 7) {
cudaFuncSetCacheConfig(BMOHC_OLD_2017_01_13_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_OLD_2017_01_13_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_OLD_2017_01_13_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
static_cast<int>(1000000*Ts*overlap_nodes_for_block),
1,
cuda_max_time_step,
cuda_min_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_OLD_2017_01_13_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 8) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_2B, deviceProp);
} else if (Run_Fast_BM_Calculation == 9) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_3B, deviceProp);
} else if (Run_Fast_BM_Calculation == 10) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_4B, deviceProp);
} else if (Run_Fast_BM_Calculation == 14) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_8B, deviceProp);
} else if (Run_Fast_BM_Calculation == 13) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_7B, deviceProp);
} else if (Run_Fast_BM_Calculation == 12) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_6B, deviceProp);
} else if (Run_Fast_BM_Calculation == 11) {
cudaFuncSetCacheConfig(BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_Pre_fmaf_No_Constants_kernel_5B, deviceProp);
} else if (Run_Fast_BM_Calculation == 6) {
cudaFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
w_ohc,
time_step,
time_step_out,
delta_x,
alpha_r,
enable_psi,
enable_OW,
base_time,
Ts,
_ohc_alpha_l,
_ohc_alpha_s,
model_Gme,
model_a0,
model_a1,
model_a2,
sigma_ow,
eta_1,
eta_2,
samplesBufferLengthP1,
overlap_nodes_for_block,
cuda_min_time_step,
cuda_max_time_step
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Constants_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 5) {
cudaFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_No_Or_Sync_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 4) {
cudaFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_Pre_fmaf_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 3) {
cudaFuncSetCacheConfig(BMOHC_FAST_NO_SelfAnalysis_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_NO_SelfAnalysis_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_NO_SelfAnalysis_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
//cudaHolderData.cuda_Failed_Converged_Time_Node,
//cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index
//cudaHolderData.converge_speed,
//cudaHolderData.converge_speed_blocks,
//cudaHolderData.cuda_Converged_Time_Node,
//cudaHolderData.cuda_Converged_Blocks,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
//cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_NO_SelfAnalysis_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 2) {
cudaFuncSetCacheConfig(BMOHC_IMPERCISE_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
if (Show_Calculated_Power & 16) {
printf("BMOHC_IMPERCISE_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_IMPERCISE_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_IMPERCISE_kernel, deviceProp);
} else if (Run_Fast_BM_Calculation == 1) {
//if (deviceProp.major < 5) { // correct only for Kepler architecture
cudaFuncSetCacheConfig(BMOHC_FAST_kernel, static_cast<cudaFuncCache>(BMOHC_Kernel_Configuration));
//}
if (Show_Calculated_Power & 16) {
printf("BMOHC_FAST_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_FAST_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
//cuda_saved_speeds_buffer,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_FAST_kernel, deviceProp);
} else {
if (Show_Calculated_Power & 16) {
printf("BMOHC_NEW_kernel << <%s,%s>>>(...);\n", showDIM3(grid).c_str(), showDIM3(threads).c_str());
}
BMOHC_NEW_kernel KERNEL_ARGS2(grid, threads)(
cudaHolderData.cuda_input_samples,
cudaHolderData.cuda_saved_speeds,
cudaHolderData.cuda_Failed_Converged_Time_Node,
cudaHolderData.cuda_Failed_Converged_Blocks,
cudaHolderData.cuda_massd,
cudaHolderData.cuda_Md,
cudaHolderData.cuda_Ud,
cudaHolderData.cuda_Ld,
cudaHolderData.cuda_Rd,
cudaHolderData.cuda_Sd,
cudaHolderData.cuda_Qd,
cudaHolderData.cuda_gammad,
cudaHolderData.cuda_S_ohcd,
cudaHolderData.cuda_S_tmd,
cudaHolderData.cuda_R_tmd,
cudaHolderGeneratedData.generated_model_throw_tolerance,
cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance,
cudaHolderGeneratedData.generated_model_out_sample_index,
cudaHolderGeneratedData.generated_model_end_sample_index,
cudaHolderData.converge_speed,
cudaHolderData.converge_speed_blocks,
cudaHolderData.cuda_Converged_Time_Node,
cudaHolderData.cuda_Converged_Blocks,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration,
cudaHolderData.cuda_convergence_jacoby_loops_per_iteration_blocks
);
if (Show_Device_Data & 32) cudaOccupancyIndicator(threads.x, BMOHC_NEW_kernel, deviceProp);
}
std::ostringstream oss("");
oss << "BM (" << run_mode_name << ") run time";
outer_log.timeAtFlag(33, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 1, oss.str()), Show_Run_Time & 1);
// copy results to host
//std::cout << "passed kernel...\n";
cutilCheckMsg("OHCBM_kernel<<<>>> execution failed\n");
}
/**
 * Allocates device memory according to the requested memory-handling mode.
 *
 * \p ptr                   out parameter receiving the device/managed pointer
 * \p bytes_num             allocation size in bytes
 * \p disable_advanced_mode when true, use plain device memory (cudaMalloc);
 *                          otherwise use unified/managed memory (cudaMallocManaged)
 * Errors from the CUDA runtime are routed through gpuAssert, which may throw.
 */
extern "C" void cudaMallocatingByMode(void **ptr, size_t bytes_num, bool disable_advanced_mode) noexcept(false) {
    if (disable_advanced_mode) {
        gpuAssert(cudaMalloc(ptr, bytes_num));
        return;
    }
    gpuAssert(cudaMallocManaged(ptr, bytes_num));
}
/* IHC combined variables: module-level state set by IHCNewKernel and consumed by later stages */
size_t input_max_size;                         // nodes in the BM-speeds input/result array (allocateTime*sections + backup nodes)
size_t input_max_size_in_bytes;                // input_max_size in bytes, float element
size_t double_input_max_size_in_bytes;         // input_max_size in bytes, double element
size_t lambda_float_input_max_size_in_bytes;   // input_max_size in bytes, lambdaFloat element
size_t lambda_max_size;                        // LAMBDA_COUNT * input_max_size nodes
size_t lambda_max_size_in_bytes;               // lambda_max_size in bytes, float element
size_t lambda_double_max_size_in_bytes;        // lambda_max_size in bytes, lambdaFloat element
size_t lambda_forced_double_max_size_in_bytes; // lambda_max_size in bytes, JNDFloat element
size_t backup_speeds_size_in_bytes;            // max_backup_nodes_len in bytes, float element
size_t matrixSizeOfBackupTime; // node position where the backup array is taken from (calcTime*sections - backup length)
size_t matrixSizeOfBackupTimeInBytes; // matrixSizeOfBackupTime in bytes, float element
size_t matrixSizeOfCalcTime; // calcTime * cochlea_sections nodes
size_t matrixSizeOfCalcTimeInBytes; // matrixSizeOfCalcTime in bytes, float element
size_t matrixSizeOfWriteTime; // writeTime * cochlea_sections nodes
size_t matrixSizeOfWriteTimeInBytes; // matrixSizeOfWriteTime in bytes, float element
bool localloadedFromHD;              // cached copy of IHCNewKernel's loadedFromHD argument
bool local_first_time_for_param_set; // cached copy of IHCNewKernel's first_time_for_param_set argument
bool local_CalculateJNDOnGPU;        // cached copy of IHCNewKernel's CalculateJNDOnGPU argument
float *local_backup_speeds;          // host pointer to the backup speeds buffer, kept for later use
size_t local_backup_speeds_length;   // length of local_backup_speeds in nodes
/**
 * Unified AC/DC time filter in device constant memory.
 * Layout (filled by IHCNewKernel): slot 0 holds the DC filter order, the DC
 * coefficients follow; the AC filter size and its b (and optionally a)
 * coefficients come after that.
 */
__constant__ double device_time_filter[DEVICE_MAX_FILTER_ORDER];
/**
 * IHC damage vector, one entry per cochlea section
 * (nerves converted to 0 to 10^8).
 */
__constant__ double CUDA_IHC_DAMAGE[SECTIONS];
/**
 * Nerve-cluster parameters: A, W and spont per lambda cluster
 * (nerves converted to 0 to 10^8).
 */
__constant__ float CUDA_Nerves_Clusters[3*LAMBDA_COUNT];
/**
 * Host-side staging copy of the unified AC/DC filter; mirrors the layout of
 * device_time_filter and is also read directly by the host filter routines.
 */
double host_filter[DEVICE_MAX_FILTER_ORDER];
/*end of ihc combined variables */
// Positions of the DC/AC filter-size entries inside the parameter set.
#define ORDER_OF_DC_FILTER_SIZE_IN_PARAMS 4
#define ORDER_OF_AC_FILTER_SIZE_IN_PARAMS 5
/**
 * Allocates the GPU input-profile arrays: the JND interval buffers and,
 * when a white-noise signal is present, the WN signal node buffer.
 *
 * \p maxJNDIntervals      number of JND intervals to reserve
 * \p wn_length            white-noise buffer length in nodes; skipped when <= 0
 * \p signal_length        currently unused here
 * \p signal_mode          currently unused here
 * \p Show_Generated_Input verbosity bitmask; bit 3 logs the WN allocation
 */
extern "C" void InputProfilesArrayInitialization(
    int maxJNDIntervals,
    int wn_length,
    int signal_length,
    int signal_mode,
    int Show_Generated_Input
) {
    cudaJNDHolder.allocateIntervals(maxJNDIntervals);
    // The white-noise buffer is only needed for a non-empty WN signal.
    if (wn_length <= 0) return;
    cudaSignalHolder.allocateSignalNodes(wn_length);
    if (Show_Generated_Input & 8) {
        std::cout << "WN array allocated length: " << wn_length << std::endl;
    }
}
// Releases the GPU input-profile arrays allocated by
// InputProfilesArrayInitialization (JND intervals and WN signal nodes).
extern "C" void InputProfilesArrayTermination() {
    cudaJNDHolder.releaseIntervals();
    cudaSignalHolder.releaseSignalNodes();
}
// Calculates IHC setup: sizes, allocations and parameter upload for the lambda
// calculation stage. See description in cochlea_common.h.
// Fixes in this revision:
//  * matrixSizeOfCalcTimeInBytes was computed from matrixSizeOfBackupTime
//    (copy-paste error); it now uses matrixSizeOfCalcTime.
//  * printf format specifiers corrected (%d for int, %zu for size_t).
extern "C" void IHCNewKernel(
    double *IHC_Damage_Factor,
    float Nerves_Clusters[3 * LAMBDA_COUNT],
    double *dc_filter,
    int order_of_dc_filter,
    double *ac_b_filter,
    double *ac_a_filter,
    bool is_ac_iir_filter,
    int order_of_ac_filter,
    int cochlea_sections,
    int time_blocks,
    double SPLRefVal,
    float *backup_speeds,
    int backup_speeds_length,
    int calcTime,
    int writeTime,
    int allocateTime,
    int intervalTimeNodes, // single time block time nodes
    int max_backup_nodes_len,
    int lambda_offset, // offset of time nodes to compensate for a larger lambda than necessary
    float Lambda_SAT,
    float eta_AC, // IHC AC coupling [V/s/cm]
    float eta_DC, // IHC DC coupling [V/s/cm]
    bool first_time,
    bool first_time_for_param_set,
    bool loadedFromHD,
    bool disable_advanced_memory_handling,
    bool review_memory_handling,
    bool asMemoryHandlingOnly,
    float scaleBMVelocityForLambdaCalculation, // params[params_set_counter].scaleBMVelocityForLambdaCalculation
    bool CalculateJNDOnGPU,
    int maxJNDIntervals,
    int overlapNodes,
    int Decouple_Filter, // filter is decoupled if this parameter is larger than 0; if decoupled, output blocks do not use input with a time start before the output block start
    int Show_Run_Time,
    Log &outer_log
) noexcept(false) {
    cudaEvent_t start, stop;
    cudaEventsCreate(start, stop, Show_Run_Time & 4);
    local_CalculateJNDOnGPU = CalculateJNDOnGPU;
    // Calculate sizes of input and output arrays.
    input_max_size = allocateTime*cochlea_sections + max_backup_nodes_len;
    int mean_size = maxJNDIntervals*LAMBDA_COUNT*cochlea_sections;
    int fisher_size = maxJNDIntervals*LAMBDA_COUNT;
    int dA_size = maxJNDIntervals;
    input_max_size_in_bytes = input_max_size*sizeof(float);
    double_input_max_size_in_bytes = input_max_size*sizeof(double);
    lambda_float_input_max_size_in_bytes = input_max_size*sizeof(lambdaFloat);
    lambda_max_size = LAMBDA_COUNT*input_max_size;
    //std::cout << "allocated " << lambda_max_size << " nodes for calculation\n";
    lambda_max_size_in_bytes = lambda_max_size*sizeof(float);
    lambda_double_max_size_in_bytes = lambda_max_size*sizeof(lambdaFloat);
    lambda_forced_double_max_size_in_bytes = lambda_max_size*sizeof(JNDFloat);
    matrixSizeOfBackupTime = calcTime*cochlea_sections - backup_speeds_length;
    matrixSizeOfBackupTimeInBytes = matrixSizeOfBackupTime*sizeof(float);
    matrixSizeOfCalcTime = calcTime*cochlea_sections;
    // BUGFIX: was matrixSizeOfBackupTime*sizeof(float), which under-counted the
    // calc-time byte size by backup_speeds_length floats.
    matrixSizeOfCalcTimeInBytes = matrixSizeOfCalcTime*sizeof(float);
    backup_speeds_size_in_bytes = max_backup_nodes_len*sizeof(float);
    matrixSizeOfWriteTime = writeTime*cochlea_sections;
    matrixSizeOfWriteTimeInBytes = matrixSizeOfWriteTime*sizeof(float);
    // Cache caller flags/pointers for later stages.
    local_backup_speeds = backup_speeds;
    local_first_time_for_param_set = first_time_for_param_set;
    localloadedFromHD = loadedFromHD;
    local_backup_speeds_length = backup_speeds_length;
    int size_of_device_params = sizeof(device_params);
    int size_of_vectors_sum_linear_coefficents = sizeof(vectors_sum_linear_coefficents);
    vectors_sum_linear_coefficents host_vectors_sum_linear_coefficents[2];
    for ( int i=0;i<DEVICE_MAX_FILTER_ORDER;i++){
        host_filter[i] = 0;
    }
    // Build the unified AC/DC filter image on the host and later copy it to the GPU.
    // Layout: [0]=DC order, [1..]=DC coefficients, then AC order, AC b coefficients,
    // and optionally AC a coefficients for the IIR case.
    host_filter[0] = (double)order_of_dc_filter;
    int ac_filter_b_index = (int)host_filter[0]+1;
    int ac_filter_a_index = -1; // -1 marks "no a coefficients" (FIR AC filter)
    host_filter[ac_filter_b_index] = (double)order_of_ac_filter;
    memcpy_s(&host_filter[1],sizeof(double)*(DEVICE_MAX_FILTER_ORDER-1),dc_filter,order_of_dc_filter*sizeof(double));
    memcpy_s(&host_filter[ac_filter_b_index + 1], sizeof(double)*(DEVICE_MAX_FILTER_ORDER - order_of_dc_filter - 2), ac_b_filter, order_of_ac_filter*sizeof(double));
    std::cout.precision(5);
    // Scale the AC b coefficients so scaleBMVelocityForLambdaCalculation is applied
    // automatically inside the filter instead of on the signal.
    for (int ix = 0; ix < order_of_ac_filter; ix++) {
        host_filter[ac_filter_b_index + 1 + ix] = scaleBMVelocityForLambdaCalculation*host_filter[ac_filter_b_index + 1 + ix];
        //std::cout << "host_filter[" << (ac_filter_b_index + ix + 1) << "] = " << host_filter[ac_filter_b_index + 1 + ix] << "\n";
    }
    if ( is_ac_iir_filter) {
        // IIR AC filter: append the a coefficients after the b block.
        ac_filter_a_index = ac_filter_b_index + order_of_ac_filter + 1;
        host_filter[ac_filter_a_index] = (double)order_of_ac_filter;
        memcpy_s(&host_filter[ac_filter_a_index + 1], sizeof(double)*(DEVICE_MAX_FILTER_ORDER - order_of_dc_filter - order_of_ac_filter - 3), ac_a_filter, order_of_ac_filter*sizeof(double));
        /*for (int ix = 0; ix < order_of_ac_filter; ix++) {
            std::cout << "host_filter[" << (ac_filter_a_index + ix + 1) << "] = " << host_filter[ac_filter_a_index + 1 + ix] << "\n";
        }*/
    }
    if (review_memory_handling) {
        // Format specifiers fixed: size_t -> %zu, int -> %d.
        printf("Lambda Memory Size %zu KB\n", (lambda_max_size_in_bytes / 1024));
        printf("calcTime %d Nodes\n", calcTime);
        printf("Input (Results BM Speeds) Allocated Memory Size %zu KB\n", (input_max_size_in_bytes / 1024));
        printf("Input (Results BM Speeds) Array Allocated length %zu \n ", input_max_size);
        printf("Allocate time %d \n", allocateTime);
        printf("Backup speeds size %zu KB\n ",(backup_speeds_size_in_bytes / 1024));
    }
    // Allocate lambda memory and device parameter structs.
    cudaLambdaHolderData.allocateLambdaMemory(input_max_size);
    cudaJNDHolder.allocateDeviceStructs();
    cudaEventsStartTimer(start, stop, Show_Run_Time & 4);
    // Allocate JND intermediate arrays (Eq. 17-24 in Cochlear Model for Hearing Loss).
    if (CalculateJNDOnGPU) {
        cudaJNDHolder.allocateMeanNodes(mean_size);
        cudaLambdaHolderData.allocateIHCData();
        cudaJNDHolder.allocateIntervals(dA_size);
        cudaJNDHolder.allocateFisherNodes(fisher_size);
    }
    outer_log.timeAtFlag(38, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 4, "Initialize and allocate Memory for Lambda Calculation run"), Show_Run_Time & 4);
    if (!asMemoryHandlingOnly) {
        // Copy all IHC parameters that changed with the profile to the GPU.
        cudaEventsStartTimer(start, stop, Show_Run_Time & 4);
        if (first_time_for_param_set) {
            /**
             * Define the coefficients for both vector sums:
             * node 0: A_coefficent=scale, B_coefficent=-1 for SHigh = BM_input - AC
             * node 1: A_coefficent=eta_AC, B_coefficent=eta_DC for IHC = eta_AC*AC + eta_DC*DC
             */
            host_vectors_sum_linear_coefficents[0].A_coefficent = scaleBMVelocityForLambdaCalculation;
            host_vectors_sum_linear_coefficents[0].B_coefficent = -1;
            host_vectors_sum_linear_coefficents[0].reverseCoefficents = 0;
            host_vectors_sum_linear_coefficents[1].A_coefficent = eta_AC;// (eta_AC*scaleBMVelocityForLambdaCalculation);
            host_vectors_sum_linear_coefficents[1].B_coefficent = eta_DC;// (eta_DC*scaleBMVelocityForLambdaCalculation*scaleBMVelocityForLambdaCalculation);
            host_vectors_sum_linear_coefficents[1].reverseCoefficents = 0;
            // For single-precision coefficients, store reciprocals when B < 1
            // (presumably a precision aid on the device side — kept as-is).
            if (host_vectors_sum_linear_coefficents[1].B_coefficent < 1 && sizeof(host_vectors_sum_linear_coefficents[1].B_coefficent) == sizeof(float)) {
                host_vectors_sum_linear_coefficents[1].reverseCoefficents = 1;
                host_vectors_sum_linear_coefficents[1].A_coefficent = 1.0 / host_vectors_sum_linear_coefficents[1].A_coefficent;
                host_vectors_sum_linear_coefficents[1].B_coefficent = 1.0 / host_vectors_sum_linear_coefficents[1].B_coefficent;
            }
            gpuAssert(cudaMemcpy(cudaJNDHolder.vectors_sums_coefficents, host_vectors_sum_linear_coefficents, 2 * size_of_vectors_sum_linear_coefficents, cudaMemcpyHostToDevice));
            // Copy IHC damage factors to constant memory.
            gpuAssert(cudaMemcpyToSymbol(CUDA_IHC_DAMAGE, IHC_Damage_Factor, SECTIONS*sizeof(double), 0, cudaMemcpyHostToDevice));
            // Copy nerve-cluster parameters to constant memory.
            gpuAssert(cudaMemcpyToSymbol(CUDA_Nerves_Clusters, Nerves_Clusters, 3 * LAMBDA_COUNT*sizeof(float), 0, cudaMemcpyHostToDevice));
            // Copy the unified AC/DC filter image to constant memory.
            gpuAssert(cudaMemcpyToSymbol(device_time_filter, host_filter, DEVICE_MAX_FILTER_ORDER*sizeof(double), 0, cudaMemcpyHostToDevice));
        }
        // Fill the two device parameter sets: [0] is used with the AC filter,
        // [1] is a copy pointing at the DC filter coefficients.
        cudaJNDHolder.host_local_param[0].Decouple_Filter = Decouple_Filter;
        cudaJNDHolder.host_local_param[0].cochlea_sections = cochlea_sections;
        cudaJNDHolder.host_local_param[0].intervalTimeNodes = intervalTimeNodes;
        cudaJNDHolder.host_local_param[0].time_blocks = time_blocks;
        cudaJNDHolder.host_local_param[0].ovelapNodes = overlapNodes;
        cudaJNDHolder.host_local_param[0].lambda_offset = lambda_offset;
        cudaJNDHolder.host_local_param[0].order_of_dc_filter = order_of_dc_filter;
        cudaJNDHolder.host_local_param[0].order_of_ac_filter = order_of_ac_filter;
        cudaJNDHolder.host_local_param[0].lambda_count = LAMBDA_COUNT;
        cudaJNDHolder.host_local_param[0].time_block = calcTime / time_blocks;
        cudaJNDHolder.host_local_param[0].FilterDecoupledMode = Decouple_Filter>0;
        cudaJNDHolder.host_local_param[0].reverseSQRTScaleBMVelocityForLambdaCalculation = 1 / sqrtf(scaleBMVelocityForLambdaCalculation); // necessary adjustment for dA fix
        cudaJNDHolder.host_local_param[0].Lambda_SAT = Lambda_SAT;
        cudaJNDHolder.host_local_param[0].calcTime = calcTime;
        cudaJNDHolder.host_local_param[0].writeTime = writeTime;
        cudaJNDHolder.host_local_param[0].filter_b_start_index = ac_filter_b_index; // first filter is ac
        cudaJNDHolder.host_local_param[0].filter_a_start_index = ac_filter_a_index; // first filter is ac
        memcpy_s(&cudaJNDHolder.host_local_param[1], size_of_device_params, &cudaJNDHolder.host_local_param[0], size_of_device_params);
        cudaJNDHolder.host_local_param[1].filter_b_start_index = 0; // second filter is dc
        cudaJNDHolder.host_local_param[1].filter_a_start_index = -1; // second filter is dc (FIR)
        gpuAssert(cudaMemcpy(cudaJNDHolder.global_device_params, cudaJNDHolder.host_local_param, 2 * size_of_device_params, cudaMemcpyHostToDevice));
        outer_log.timeAtFlag(39, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 4, "Load Parmeters for Lambda Calculation run"), Show_Run_Time & 4);
    }
}
/**
 * Allocates the GPU debug buffer, timing the allocation when bit 5 (32)
 * of Show_Run_Time is set.
 *
 * \p size_in_nodes buffer length in nodes
 * \p start,stop    timing events, created here for the caller
 * \p deviceProp    unused here; kept for interface compatibility
 */
extern "C" void allocateBuffer(const int size_in_nodes, int Show_Run_Time, cudaEvent_t& start,
    cudaEvent_t& stop,
    cudaDeviceProp deviceProp) noexcept(false) {
    cudaEventsCreate(start, stop, Show_Run_Time & 32);
    // Human-readable tag for the timing report.
    std::ostringstream label;
    label << "Allocate Debug Buffer (" << size_in_nodes << " Nodes)";
    cudaEventsStartTimer(start, stop, Show_Run_Time & 32);
    cudaLambdaHolderData.allocateBufferMemory(size_in_nodes);
    cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 32, label.str());
}
// Releases the GPU debug buffer allocated by allocateBuffer, timing the
// release when bit 5 (32) of Show_Run_Time is set. deviceProp is unused here.
extern "C" void releaseBuffer( int Show_Run_Time, cudaEvent_t& start,
    cudaEvent_t& stop,
    cudaDeviceProp deviceProp) noexcept(false) {
    cudaEventsCreate(start, stop, Show_Run_Time & 32);
    cudaEventsStartTimer(start, stop, Show_Run_Time & 32);
    cudaLambdaHolderData.releaseBufferMemory();
    cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 32, "release Debug Buffer");
}
// Frees everything IHCNewKernel allocated: lambda buffers, JND intervals,
// device parameter structs, and the Fisher/mean node arrays.
// NOTE(review): Fisher/mean nodes are released unconditionally even though
// they are only allocated when CalculateJNDOnGPU was set — presumably the
// release calls are safe no-ops in that case; confirm in the holder classes.
extern "C" void IHCKernel_Free() noexcept(false) {
    cudaLambdaHolderData.releaseLambdaMemory();
    cudaJNDHolder.releaseIntervals();
    cudaJNDHolder.ReleaseDeviceStructs();
    cudaJNDHolder.releaseFisherNodes();
    cudaJNDHolder.releaseMeanNodes();
}
/**
 * Calculates the cochlea section this thread handles from the CUDA launch
 * coordinates: sections per block times block number, plus the thread id
 * within the block.
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcCochleaSection(blockCoordinates,blocksDimensions,threadCoordinates) (blocksDimensions.x*blockCoordinates.x + threadCoordinates.x)
/**
 * Number of time nodes each thread jumps between consecutive calculated points.
 * Block dimension y is the number of threads working on the same section.
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcTimeNodesJump(blockCoordinates,blocksDimensions,threadCoordinates) (blocksDimensions.y)
/**
 * Lambda cluster handled by this thread (taken from blockIdx.y).
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcLambdaBlock(blockCoordinates,blocksDimensions,threadCoordinates) (blockCoordinates.y)
/**
 * Time-node offset from the beginning for this thread to work on.
 * Thread dimension y is the thread id among those working on the same section.
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcTimeNodesOffset(blockCoordinates,blocksDimensions,threadCoordinates) (threadCoordinates.y)
/**
 * Start offset per thread for unified index calculations:
 * section index plus the lambda offset scaled to section-major storage.
 * blockCoordinates=blockIdx
 * blocksDimensions=blockDim
 * threadCoordinates=threadIdx
 */
#define calcStartMainOffset(blockCoordinates,blocksDimensions,threadCoordinates,lambda_offset,cochlea_sections) (calcCochleaSection(blockCoordinates,blocksDimensions,threadCoordinates)+ cochlea_sections*lambda_offset)
/**
 * Decoupled block id within a single interval.
 */
#define intervalDecoupledBlockId(blockCoordinates,blocksDimensions) (blockCoordinates.z+blocksDimensions.z*blockCoordinates.y)
/**
 * Number of decoupled blocks per interval for the unified IHC/lambda calculation.
 */
#define intervalDecoupledBlocks(blocksDimensions) (blocksDimensions.z*blocksDimensions.y)
#define totalDecoupledBlocks(blocksDimensions) (blocksDimensions.x*intervalDecoupledBlocks(blocksDimensions))
/**
 * Global decoupled block id: interval-local id plus interval index times blocks per interval.
 */
#define decoupledBlockId(blockCoordinates,blocksDimensions) (intervalDecoupledBlockId(blockCoordinates,blocksDimensions)+blockCoordinates.x*intervalDecoupledBlocks(blocksDimensions))
/**
 * Device run of the FIR filter for a single thread's share of one time block.
 * Note: offset already includes the spatial cochlea section.
 * \p X is the global input array
 * \p Y is the global output array
 * \p offset index of the start of calculation in the output array (section already included)
 * \p time_length_analysis number of indexes in the output array to calculate
 * \p cochlea_sections number of spatial cochlea sections
 * \p filter_size order of the FIR filter
 * \p filter_index position in the unified device filter array where this filter's coefficients start
 * \p time_node_offset number of time nodes from the start offset at which this thread begins
 * \p time_node_jumps_in_cluster time-node stride between consecutive calculations
 * \p singleBlockLength in decoupled mode lets the filter restart at block boundaries
 *    (equals time_length_analysis when not decoupled)
 * \p final_regular_division_position first time node of the last, possibly irregular, division
 */
template<typename T1, typename T2> __device__ void DeviceCudaFIRFilter(T1 *X, T2 *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_index, int time_node_offset, int time_node_jumps_in_cluster, int singleBlockLength, int final_regular_division_position) {
    int k = 0;
    int i = 0;
    int current_offset; // progress on the input; data is section-major so we jump by sections per time node
    int offset_boundary;
    double Ycurrent; // accumulate in double for precision
    int sgny = 0;
    for (k = time_node_offset; k<time_length_analysis; k += time_node_jumps_in_cluster) {
        current_offset = offset + k*cochlea_sections; // untransposed: add sections multiplication for time node k
        // sgny is 0 while k < final_regular_division_position-1 and 1 afterwards
        // (integer division), selecting singleBlockLength or time_length_analysis
        // as the wrap length below.
        sgny = (k / (final_regular_division_position - 1));
        sgny = sgny*time_length_analysis + (1 - sgny)*singleBlockLength;
        // Shorten the tap window near a (decoupled) block start; note this is a
        // no-op distinction when singleBlockLength == time_length_analysis.
        offset_boundary = min((k + 1) % sgny, filter_size);
        Ycurrent = 0.0f;
        for (i = 0; i<offset_boundary; i++) {
            Ycurrent = Ycurrent + device_time_filter[i + filter_index] * X[current_offset - i*cochlea_sections]; // untransposed: step back by whole sections per tap
        }
        Y[current_offset] = T2(Ycurrent);
    }
}
/**
 * Device run of the IIR filter for a single section (sequential in time,
 * since each output depends on previously written outputs).
 * \p X is the global input array
 * \p Y is the global output array
 * \p offset index of the start of calculation in the output array (section already included)
 * \p time_length_analysis number of indexes in the output array to calculate
 * \p IntervalLength length of one decoupled interval (equals time_length_analysis when not decoupled)
 * \p final_regular_division_position first time node of the last, possibly irregular, division
 * \p cochlea_sections number of spatial cochlea sections
 * \p filter_size order of the filter
 * \p filter_b_index position in the unified device filter array of the b (feed-forward) coefficients
 * \p filter_a_index position in the unified device filter array of the a (feedback) coefficients
 */
template<class T1, class T2> __device__ void DeviceCudaIIRFilter(T1 *X,T2 *Y, int offset, int time_length_analysis, int IntervalLength, int final_regular_division_position, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index)
{
    int k = 0;
    int i = 0;
    int j;
    int offset_boundarya; // feedback tap count (one less than the feed-forward count)
    int current_offset; // progress on the input; data is section-major so we jump by sections per time node
    int offset_boundary;
    T2 Ycurrent = T2(0.0);
    int sgny = 0;
    for (k = 0; k<time_length_analysis; k++) {
        current_offset = offset + k*cochlea_sections; // untransposed: add sections multiplication for time node k
        // sgny is 0 while k < final_regular_division_position-1 and 1 afterwards
        // (integer division), selecting IntervalLength or time_length_analysis
        // as the wrap length below so the filter restarts at interval boundaries.
        sgny = (k / (final_regular_division_position - 1));
        sgny = sgny*time_length_analysis + (1 - sgny)*IntervalLength;
        //offset_boundary = min((k + 1) % ((1-sgny)*IntervalLength + 2*sgny*time_length_analysis), filter_size);
        offset_boundary = min((k + 1) % sgny, filter_size);
        offset_boundarya = offset_boundary - 1;
        Ycurrent = T2(0.0);
        // Feed-forward (b) taps against the input history.
        for (i = 0; i<offset_boundary; i++) {
            //Ycurrent = fmaf( device_time_filter[i + 1 + filter_b_index], X[current_offset - i*cochlea_sections], Ycurrent);
            Ycurrent = Ycurrent + device_time_filter[i + filter_b_index] * X[current_offset - i*cochlea_sections]; // untransposed: step back by whole sections per tap
        }
        // Feedback (a) taps against previously written outputs.
        for (i = 0; i<offset_boundarya; i++) {
            j = i + 1;
            //Ycurrent = fmaf(-1 * device_time_filter[j + 1 + filter_a_index], Y[current_offset - j*cochlea_sections], Ycurrent);
            Ycurrent = Ycurrent - device_time_filter[j + filter_a_index] * Y[current_offset - j*cochlea_sections]; // untransposed: step back by whole sections per tap
        }
        Y[current_offset] = Ycurrent;
    }
}
/**
 * Host-side reference run of the IIR filter for a single section, reading
 * coefficients from the host_filter staging array (same layout as the
 * device constant filter).
 * \p X is the input array
 * \p Y is the output array
 * \p offset index of the start of calculation in the output array (section already included)
 * \p time_length_analysis number of indexes in the output array to calculate
 * \p cochlea_sections number of spatial cochlea sections
 * \p filter_size order of the filter
 * \p filter_b_index position in host_filter of the b (feed-forward) coefficients
 * \p filter_a_index position in host_filter of the a (feedback) coefficients
 */
template<class T1,class T2> __host__ void DeviceCudaIIRFilterHost(T1 *X, T2 *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index) {
    // Time must be walked sequentially: each output feeds back into later ones.
    for (int t = 0; t < time_length_analysis; t++) {
        const int pos = offset + t * cochlea_sections; // section-major storage: one time step = one section stride
        // Until enough history has accumulated, use a shortened tap window.
        const int taps_b = __tmin(t + 1, filter_size);
        const int taps_a = taps_b - 1;
        double acc = 0.0; // accumulate in double for precision
        // Feed-forward (b) taps against the input history.
        for (int i = 0; i < taps_b; i++) {
            acc = acc + host_filter[i + filter_b_index] * X[pos - i * cochlea_sections];
        }
        // Feedback (a) taps against previously written outputs.
        for (int j = 1; j <= taps_a; j++) {
            acc = acc - host_filter[j + filter_a_index] * Y[pos - j * cochlea_sections];
        }
        Y[pos] = T2(acc);
    }
}
// Explicit instantiations of the host IIR filter for the supported type pairs.
template __host__ void DeviceCudaIIRFilterHost<float, double>(float *X, double *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index);
template __host__ void DeviceCudaIIRFilterHost<double, double>(double *X, double *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index);
template __host__ void DeviceCudaIIRFilterHost<float, float>(float *X, float *Y, int offset, int time_length_analysis, int cochlea_sections, int filter_size, int filter_b_index, int filter_a_index);
// FIR filter kernel: runs the entire time line on the relevant sections.
// Each thread handles one cochlea section (threadIdx.x); the grid's y/z
// dimensions split the time line into decoupled sub-blocks.
template<typename T1,typename T2> __global__ void CudaFIRFilter(T1 *X, T2 *Y, device_params *filter_params)
{
    int filter_index = filter_params->filter_b_start_index+1;
    int filter_size = (int)(device_time_filter[filter_index-1]+0.1f); // filter data starts at index 1; index 0 stores the size (+0.1f guards the double->int truncation)
    //if (threadIdx.x == 0) printf("filter_index=%d,filter_size=%d\n", filter_index, filter_size);
    int cochlea_offset_section = threadIdx.x; // one thread per cochlea section
    int lambda_offset = filter_params->lambda_offset;
    int total_time_nodes = (filter_params->calcTime - lambda_offset);
    int time_length_analysis = filter_params->intervalTimeNodes/gridDim.z; // time nodes per decoupled sub-block
    int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
    int time_node_jumps_in_cluster = 1;
    /*
     * Start offset = calculated section + (sub-block start + lambda offset) in
     * time, scaled to section-major storage.
     */
    int grid_block_id = (decoupledBlockId(blockIdx, gridDim) - intervalDecoupledBlockId(blockIdx, gridDim)); // each thread starts from its own adjusted offset in the time block
    int offset = cochlea_offset_section + cochlea_sections*(grid_block_id*time_length_analysis + lambda_offset);//calcStartMainOffset(blockIdx,blockDim,threadIdx,lambda_offset,cochlea_sections);
    int time_node_offset = intervalDecoupledBlockId(blockIdx,gridDim)*time_length_analysis;//calcTimeNodesOffset(blockIdx,blockDim,threadIdx);
    time_length_analysis = time_node_offset + time_length_analysis; // end position for this thread's loop
    // In decoupled mode the filter restarts every Decouple_Filter intervals;
    // otherwise it runs over the whole calculated time line.
    int calculatedIntervalTimeNodes = filter_params->FilterDecoupledMode ? filter_params->intervalTimeNodes*filter_params->Decouple_Filter : total_time_nodes;
    int final_regular_division_position = filter_params->intervalTimeNodes *gridDim.y;
    //if (threadIdx.x==0) printf("block[%d].time_nodes=[%d,%d],interval_offset=%d,grid_block_id=%d,final_regular_division_position=%d\n", decoupledBlockId(blockIdx, gridDim), time_node_offset, time_length_analysis,offset, grid_block_id, final_regular_division_position);
    DeviceCudaFIRFilter<T1, T2>(X, Y, offset, time_length_analysis, cochlea_sections, filter_size, filter_index, time_node_offset, time_node_jumps_in_cluster, calculatedIntervalTimeNodes, final_regular_division_position);
    __syncthreads();
}
// Explicit instantiations of the FIR filter kernel.
template __global__ void CudaFIRFilter<float, double>(float *X, double *Y, device_params *filter_params);
template __global__ void CudaFIRFilter<double, double>(double *X, double *Y, device_params *filter_params);
// IIR filter kernel: runs the entire time line; parallelized only across
// sections (one thread per section) because the IIR recurrence is sequential
// in time.
template<class T1,class T2> __global__ void CudaIIRFilter(T1 *X, T2 *Y, device_params *filter_params) {
    int filter_b_index = 1 + filter_params->filter_b_start_index;
    int filter_a_index = 1 + filter_params->filter_a_start_index;
    int filter_size = filter_params->order_of_ac_filter;//(int)(device_time_filter[filter_b_index-1]+0.1); // filter data starts at index 1; index 0 stores the size
    int lambda_offset = filter_params->lambda_offset;
    int time_length_analysis = filter_params->calcTime - lambda_offset; // run on the entire set
    int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
    /*
     * Start offset is just the section index; the device routine walks the
     * whole time line for that section.
     */
    // In decoupled mode the filter restarts every Decouple_Filter intervals;
    // otherwise it runs over the whole calculated time line.
    int intervalLength = filter_params->FilterDecoupledMode == true ? filter_params->intervalTimeNodes*filter_params->Decouple_Filter : time_length_analysis;
    int final_regular_division_position = filter_params->intervalTimeNodes*filter_params->time_blocks;
    int startOffset = threadIdx.x;
    DeviceCudaIIRFilter<T1, T2>(X, Y, startOffset, time_length_analysis, intervalLength, final_regular_division_position, cochlea_sections, filter_size, filter_b_index, filter_a_index);
    __syncthreads();
}
// Explicit instantiations of the IIR filter kernel.
template __global__ void CudaIIRFilter<float, double>(float *X, double *Y, device_params *filter_params);
template __global__ void CudaIIRFilter<float, float>(float *X, float *Y, device_params *filter_params);
template __global__ void CudaIIRFilter<double, double>(double *X, double *Y, device_params *filter_params);
/**
 * Decoupled IIR filter kernel: runs multiple independent IIR filters for small
 * inputs. Each grid block (blockIdx.x) processes one decoupled interval; each
 * thread within a block handles one cochlea section.
 */
template<class T1, class T2> __global__ void CudaIIRFilterDecoupled(T1 *X, T2 *Y, device_params *filter_params) {
    int filter_b_index = 1 + filter_params->filter_b_start_index;
    int filter_a_index = 1 + filter_params->filter_a_start_index;
    int filter_size = filter_params->order_of_ac_filter;//(int)(device_time_filter[filter_b_index-1]+0.1); // filter data starts at index 1; index 0 stores the size
    int lambda_offset = filter_params->lambda_offset;
    int time_length_analysis = filter_params->calcTime - lambda_offset; // total time nodes across all intervals
    int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
    /*
     * Start offset = section index + this block's interval start in time,
     * scaled to section-major storage.
     */
    int intervalLength = filter_params->intervalTimeNodes*filter_params->Decouple_Filter;
    int final_regular_division_position = filter_params->intervalTimeNodes*filter_params->time_blocks;
    int startOffset = threadIdx.x + blockIdx.x*intervalLength*cochlea_sections;
    //printf("startOffset: %d,threadIdx.x=%d,blockIdx.x=%d,intervalLength=%d\n", startOffset, threadIdx.x, blockIdx.x, intervalLength);
    // The last interval absorbs the remainder when the time line does not
    // divide evenly into gridDim.x intervals.
    if (blockIdx.x+1==gridDim.x) {
        intervalLength += (time_length_analysis - gridDim.x*intervalLength);
    }
    time_length_analysis = intervalLength;
    DeviceCudaIIRFilter<T1, T2>(X, Y, startOffset, time_length_analysis, intervalLength, final_regular_division_position, cochlea_sections, filter_size, filter_b_index, filter_a_index);
    __syncthreads();
}
// Explicit instantiations of the decoupled IIR filter kernel.
template __global__ void CudaIIRFilterDecoupled<float, double>(float *X, double *Y, device_params *filter_params);
template __global__ void CudaIIRFilterDecoupled<float, float>(float *X, float *Y, device_params *filter_params);
template __global__ void CudaIIRFilterDecoupled<double, double>(double *X, double *Y, device_params *filter_params);
// this version runs the entire time series on the host; can be parallelized by sections
template<class T1, class T2> __host__ void CudaIIRFilterHost(T1 *X, T2 *Y, device_params *filter_params) {
    // Filter coefficient arrays store their size at index 0; real data starts at index 1.
    const int b_coeff_index = filter_params->filter_b_start_index + 1;
    const int a_coeff_index = filter_params->filter_a_start_index + 1;
    const int filter_order = filter_params->order_of_ac_filter;
    const int time_nodes = filter_params->calcTime - filter_params->lambda_offset; // full run over the time axis
    const int sections = filter_params->cochlea_sections; // number of cochlea space sections
    // Filter every cochlea section sequentially over the entire time span.
    for (int section = 0; section < sections; section++) {
        DeviceCudaIIRFilterHost<T1, T2>(X, Y, section, time_nodes, sections, filter_order, b_coeff_index, a_coeff_index);
    }
}
template __host__ void CudaIIRFilterHost<float, float>(float *X, float *Y, device_params *filter_params);
template __host__ void CudaIIRFilterHost<float, double>(float *X, double *Y, device_params *filter_params);
template __host__ void CudaIIRFilterHost<double, double>(double *X, double *Y, device_params *filter_params);
/**
 * Device helper: per-node linear combination C[i] = A[i]*coefficentA + B[i]*coefficentB.
 * \p A first global input array
 * \p B second global input array
 * \p C global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p coefficentA multiplier applied to A
 * \p coefficentB multiplier applied to B
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T1, typename T2> __device__ void DeviceCudaVectorSum(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        // untransposed layout: consecutive time nodes are cochlea_sections apart
        const int idx = offset + node * cochlea_sections;
        C[idx] = A[idx] * coefficentA + B[idx] * coefficentB;
    }
}
// unified IHC divide of types
template __device__ void DeviceCudaVectorSum<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSum<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Device helper: per-node linear combination followed by squaring,
 * C[i] = (A[i]*coefficentA + B[i]*coefficentB)^2.
 * \p A first global input array
 * \p B second global input array
 * \p C global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p coefficentA multiplier applied to A
 * \p coefficentB multiplier applied to B
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T1, typename T2> __device__ void DeviceCudaVectorSumNSquare(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
    int k;
    T2 midSum;
    int current_offset;
    for (k = time_node_offset; k < time_length_analysis; k += time_node_jumps_in_cluster) {
        current_offset = offset + k * cochlea_sections; // untransposed adding sections multiplication for k, offset time
        // BUGFIX: use double-precision fma (not fmaf) — the coefficients are double and T2 may be
        // double (instantiated <float,double>), so fmaf silently truncated the whole fused
        // multiply-add to float precision, unlike the non-square DeviceCudaVectorSum variant.
        midSum = T2(fma(double(A[current_offset]), coefficentA, double(B[current_offset]) * coefficentB));
        C[current_offset] = midSum * midSum;
    }
}
// unified IHC divide of types
template __device__ void DeviceCudaVectorSumNSquare<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSumNSquare<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Device helper: per-node combination with divided coefficients,
 * C[i] = A[i]/coefficentA + B[i]/coefficentB.
 * \p A first global input array
 * \p B second global input array
 * \p C global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p coefficentA divisor applied to A
 * \p coefficentB divisor applied to B
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T1,typename T2> __device__ void DeviceCudaVectorSumDivide(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        // untransposed layout: consecutive time nodes are cochlea_sections apart
        const int idx = offset + node * cochlea_sections;
        C[idx] = A[idx] / coefficentA + B[idx] / coefficentB;
    }
}
// same for divide template
template __device__ void DeviceCudaVectorSumDivide<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSumDivide<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Device helper: per-node combination with divided coefficients, then squared,
 * C[i] = (A[i]/coefficentA + B[i]/coefficentB)^2.
 * \p A first global input array
 * \p B second global input array
 * \p C global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p coefficentA divisor applied to A
 * \p coefficentB divisor applied to B
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T1, typename T2> __device__ void DeviceCudaVectorSumDivideNSquare(T1 *A, T2 *B, T2 *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster) {
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        // untransposed layout: consecutive time nodes are cochlea_sections apart
        const int idx = offset + node * cochlea_sections;
        // narrow the double-precision sum to T2 before squaring (matches output precision)
        const T2 combined = (A[idx] / coefficentA) + (B[idx] / coefficentB);
        C[idx] = combined * combined;
    }
}
// same for divide template
template __device__ void DeviceCudaVectorSumDivideNSquare<float, double>(float *A, double *B, double *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
template __device__ void DeviceCudaVectorSumDivideNSquare<float, float>(float *A, float *B, float *C, int offset, int time_length_analysis, int cochlea_sections, double coefficentA, double coefficentB, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Kernel: C = A*coefficents_set->A_coefficent + B*coefficents_set->B_coefficent
 * (or the divided form A/cA + B/cB when reverseCoefficents is set).
 * Used for the SHigh and AC+DC summaries; indexing is consistent with the other
 * decoupled kernels: threadIdx.x selects the cochlea section, the decoupled block
 * id selects the time interval.
 */
template<typename T1, typename T2> __global__ void CudaVectorsSum(T1 *A, T2 *B, T2 *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set)
{
    // coefficients pass through lambdaFloat before the (double) device-helper parameters
    const lambdaFloat cA = lambdaFloat(coefficents_set->A_coefficent);
    const lambdaFloat cB = lambdaFloat(coefficents_set->B_coefficent);
    const int section = threadIdx.x; // one thread per cochlea section
    const int sections = filter_params->cochlea_sections; // number of cochlea space sections
    const int interval_nodes = filter_params->intervalTimeNodes / gridDim.z; // time nodes per decoupled block
    // start index: this section, offset by the block's time interval plus the lambda offset
    const int start = section + sections * (decoupledBlockId(blockIdx, gridDim) * interval_nodes + filter_params->lambda_offset);
    if (coefficents_set->reverseCoefficents) {
        DeviceCudaVectorSumDivide(A, B, C, start, interval_nodes, sections, cA, cB, 0, 1);
    }
    else {
        DeviceCudaVectorSum(A, B, C, start, interval_nodes, sections, cA, cB, 0, 1);
    }
    __syncthreads();
}
// cuda vector sum
template __global__ void CudaVectorsSum<float, double>(float *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSum<float, float>(float *A, float *B, float *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSum<double, double>(double *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
/**
 * Kernel: C = (A*coefficents_set->A_coefficent + B*coefficents_set->B_coefficent)^2
 * (or the divided form (A/cA + B/cB)^2 when reverseCoefficents is set).
 * Same launch layout as CudaVectorsSum: threadIdx.x selects the cochlea section,
 * the decoupled block id selects the time interval.
 */
template<typename T1, typename T2> __global__ void CudaVectorsSumNSquare(T1 *A, T2 *B, T2 *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set)
{
    // coefficients pass through lambdaFloat before the (double) device-helper parameters
    const lambdaFloat cA = lambdaFloat(coefficents_set->A_coefficent);
    const lambdaFloat cB = lambdaFloat(coefficents_set->B_coefficent);
    const int section = threadIdx.x; // one thread per cochlea section
    const int sections = filter_params->cochlea_sections; // number of cochlea space sections
    const int interval_nodes = filter_params->intervalTimeNodes / gridDim.z; // time nodes per decoupled block
    // start index: this section, offset by the block's time interval plus the lambda offset
    const int start = section + sections * (decoupledBlockId(blockIdx, gridDim) * interval_nodes + filter_params->lambda_offset);
    if (coefficents_set->reverseCoefficents) {
        DeviceCudaVectorSumDivideNSquare(A, B, C, start, interval_nodes, sections, cA, cB, 0, 1);
    }
    else {
        DeviceCudaVectorSumNSquare(A, B, C, start, interval_nodes, sections, cA, cB, 0, 1);
    }
    __syncthreads();
}
// cuda vector sum
template __global__ void CudaVectorsSumNSquare<float, double>(float *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSumNSquare<float, float>(float *A, float *B, float *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
template __global__ void CudaVectorsSumNSquare<double, double>(double *A, double *B, double *C, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
/**
 * Device helper: element-wise square, target[i] = src[i]^2.
 * \p src global input array
 * \p target global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<class T> __device__ void DeviceCudaSquare(T *src, T *target, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster)
{
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        // untransposed layout: consecutive time nodes are cochlea_sections apart
        const int idx = offset + node * cochlea_sections;
        target[idx] = src[idx] * src[idx];
    }
}
template __device__ void DeviceCudaSquare<lambdaFloat>(lambdaFloat *src, lambdaFloat *target, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster);
/**
 * Kernel: target = src .* src (used for SHigh.^2 => dS).
 * Launch layout matches the other decoupled kernels: threadIdx.x selects the
 * cochlea section, the decoupled block id selects the time interval.
 */
template<class T> __global__ void CudaSquare(T *src, T *target, device_params *filter_params)
{
    const int section = threadIdx.x; // one thread per cochlea section
    const int sections = filter_params->cochlea_sections; // number of cochlea space sections
    const int interval_nodes = filter_params->intervalTimeNodes / gridDim.z; // time nodes per decoupled block
    // start index: this section, offset by the block's time interval plus the lambda offset
    const int start = section + sections * (decoupledBlockId(blockIdx, gridDim) * interval_nodes + filter_params->lambda_offset);
    DeviceCudaSquare<T>(src, target, start, interval_nodes, sections, 0, 1);
    __syncthreads();
}
// vector square
template __global__ void CudaSquare<lambdaFloat>(lambdaFloat *src, lambdaFloat *target, device_params *filter_params);
/**
 * Device helper: IHC[i] = log10(max(IHC_Damage_Factor * PRE_IHC[i], 0) + EPS).
 * \p PRE_IHC global input array (pre-nonlinearity IHC response)
 * \p IHC global output array
 * \p IHC_Damage_Factor per-section IHC health factor (10^8 healthy, 0 fully lost)
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T,typename T2> __device__ void DeviceCudaCalcIHC(T *PRE_IHC, T2 *IHC, double IHC_Damage_Factor, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster)
{
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        // untransposed layout: consecutive time nodes are cochlea_sections apart
        const int idx = offset + node * cochlea_sections;
        // clamp at zero before the log; EPS keeps the argument strictly positive
        IHC[idx] = T2(log10(fmax(double(IHC_Damage_Factor * PRE_IHC[idx]), 0.0) + EPS));
    }
}
/**
 * Device helper: composite IHC,
 * IHC[i] = log10(max(IHC_Damage_Factor * (AC[i]*cAC + DC[i]*cDC), 0) + EPS).
 * \p AC_response global AC input array
 * \p DC_response global DC input array
 * \p IHC global output array
 * \p IHC_Damage_Factor per-section IHC health factor (10^8 healthy, 0 fully lost)
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p coefficent_AC multiplier applied to the AC response
 * \p coefficent_DC multiplier applied to the DC response
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T, typename T2> __device__ void DeviceCudaCalcIHCComposite(T *AC_response,T *DC_response, T2 *IHC, double IHC_Damage_Factor, int offset, int time_length_analysis, int cochlea_sections, double coefficent_AC, double coefficent_DC, int time_node_offset, int time_node_jumps_in_cluster)
{
    int k;
    int current_offset;
    double PRE_IHC;
    for (k = time_node_offset; k < time_length_analysis; k += time_node_jumps_in_cluster) {
        current_offset = offset + k*cochlea_sections; // untransposed adding sections multiplication for k, offset time
        // BUGFIX: use double-precision fma (not fmaf) — both coefficients and PRE_IHC are double,
        // so fmaf silently truncated the combination to float precision before the log10,
        // unlike the Divide variant below which already computes in double.
        PRE_IHC = fma(double(AC_response[current_offset]), coefficent_AC, double(DC_response[current_offset]) * coefficent_DC);
        IHC[current_offset] = T2(log10(fmax(double(IHC_Damage_Factor*PRE_IHC), 0.0) + EPS));
    }
}
/**
 * Device helper: composite IHC with divided coefficients,
 * IHC[i] = log10(max(IHC_Damage_Factor * (AC[i]/cAC + DC[i]/cDC), 0) + EPS).
 * \p AC_response global AC input array
 * \p DC_response global DC input array
 * \p IHC global output array
 * \p IHC_Damage_Factor per-section IHC health factor (10^8 healthy, 0 fully lost)
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p coefficent_AC divisor applied to the AC response
 * \p coefficent_DC divisor applied to the DC response
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T, typename T2> __device__ void DeviceCudaCalcIHCCompositeDivide(T *AC_response, T *DC_response, T2 *IHC, double IHC_Damage_Factor, int offset, int time_length_analysis, int cochlea_sections, double coefficent_AC, double coefficent_DC, int time_node_offset, int time_node_jumps_in_cluster)
{
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        // untransposed layout: consecutive time nodes are cochlea_sections apart
        const int idx = offset + node * cochlea_sections;
        const double pre_ihc = (AC_response[idx] / coefficent_AC) + (DC_response[idx] / coefficent_DC);
        // clamp at zero before the log; EPS keeps the argument strictly positive
        IHC[idx] = T2(log10(fmax(double(IHC_Damage_Factor * pre_ihc), 0.0) + EPS));
    }
}
/**
 * Kernel: IHC = log10(max(PRE_IHC * IHC_Damage, 0) + EPS), element-wise.
 * Runs after the AC*eta_AC + DC*eta_DC summary.
 * threadIdx.x selects the cochlea section (also indexes the per-section
 * CUDA_IHC_DAMAGE factor); the decoupled block id selects the time interval.
 */
template<typename T, typename T2> __global__ void CudaCalcIHC(T *PRE_IHC, T2 *IHC, device_params *filter_params)
{
    const int section = threadIdx.x; // one thread per cochlea section
    const int sections = filter_params->cochlea_sections; // number of cochlea space sections
    const int interval_nodes = filter_params->intervalTimeNodes / gridDim.z; // time nodes per decoupled block
    // start index: this section, offset by the block's time interval plus the lambda offset
    const int start = section + sections * (decoupledBlockId(blockIdx, gridDim) * interval_nodes + filter_params->lambda_offset);
    const double damage = CUDA_IHC_DAMAGE[section]; // per-section IHC health factor
    DeviceCudaCalcIHC<T, T2>(PRE_IHC, IHC, damage, start, interval_nodes, sections, 0, 1);
    __syncthreads();
}
/**
 * Kernel: composite IHC — combines the AC and DC responses with the coefficient
 * pair from coefficents_set[1], then applies
 * IHC = log10(max(damage * (AC*cAC + DC*cDC), 0) + EPS)
 * (divide form AC/cAC + DC/cDC when reverseCoefficents is set).
 * threadIdx.x selects the cochlea section; the decoupled block id selects the time interval.
 */
template<typename T, typename T2> __global__ void CudaCalcIHCComposite(T *AC_response, T *DC_response, T2 *IHC, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set)
{
    const int section = threadIdx.x; // one thread per cochlea section
    const int sections = filter_params->cochlea_sections; // number of cochlea space sections
    const int interval_nodes = filter_params->intervalTimeNodes / gridDim.z; // time nodes per decoupled block
    // start index: this section, offset by the block's time interval plus the lambda offset
    const int start = section + sections * (decoupledBlockId(blockIdx, gridDim) * interval_nodes + filter_params->lambda_offset);
    // NOTE: the coefficients are narrowed through T before widening back to double,
    // matching the precision the original pipeline applied at this stage
    const T cAC = T(coefficents_set[1].A_coefficent);
    const T cDC = T(coefficents_set[1].B_coefficent);
    const double damage = CUDA_IHC_DAMAGE[section]; // per-section IHC health factor
    if (coefficents_set[1].reverseCoefficents) {
        DeviceCudaCalcIHCCompositeDivide<T, T2>(AC_response, DC_response, IHC, damage, start, interval_nodes, sections, double(cAC), double(cDC), 0, 1);
    }
    else {
        DeviceCudaCalcIHCComposite<T, T2>(AC_response, DC_response, IHC, damage, start, interval_nodes, sections, double(cAC), double(cDC), 0, 1);
    }
    __syncthreads();
}
// calc ihc isolated to templates
template __global__ void CudaCalcIHC<lambdaFloat,float>(lambdaFloat *PRE_IHC, float *IHC, device_params *filter_params);
// calc ihc isolated to templates
template __global__ void CudaCalcIHCComposite<lambdaFloat, float>(lambdaFloat *AC_response, lambdaFloat *DC_response, float *IHC, device_params *filter_params, vectors_sum_linear_coefficents *coefficents_set);
/**
 * Device helper for a single time block of the lambda (firing-rate) calculation.
 * Per node: base = min(Lambda_SAT - spont, max(A * IHC, 0)); JND_Lambda gets base,
 * Lambda gets base + spont (so Lambda is clamped to [spont, Lambda_SAT]).
 * \p IHC global input array
 * \p Lambda global output array (spont-rate-shifted)
 * \p JND_Lambda global output array (baseline-removed, for JND analysis)
 * \p cochlea_offset_section start index: spatial section + lambda-block time offset
 * \p lambda_write_offset extra offset selecting the lambda block in the output arrays
 * \p lambda_index spont-rate index into the unified nerves parameters
 * \p A_index gain index into the unified nerves parameters
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 * \p Lambda_SAT saturation firing rate
 */
template<typename T1,typename T2> __device__ void DeviceCudaCalcLambda(T1 *IHC, float *Lambda, T2 *JND_Lambda, int cochlea_offset_section, int lambda_write_offset, int lambda_index, int A_index, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster, float Lambda_SAT)
{
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        // input index advances by one section-stride per time node (untransposed layout)
        const int read_idx = cochlea_offset_section + node * cochlea_sections;
        const int write_idx = read_idx + lambda_write_offset; // outputs shifted into this lambda block
        const double base_lambda = fmin(double(Lambda_SAT - CUDA_Nerves_Clusters[lambda_index]), fmax(double(CUDA_Nerves_Clusters[A_index]) * IHC[read_idx], 0.0));
        JND_Lambda[write_idx] = base_lambda;
        Lambda[write_idx] = float(base_lambda) + CUDA_Nerves_Clusters[lambda_index];
    }
}
// device calc ihc isolated for templates
template __device__ void DeviceCudaCalcLambda<lambdaFloat,double>(lambdaFloat *IHC, float *Lambda, double *JND_Lambda, int cochlea_offset_section, int lambda_write_offset, int lambda_index, int A_index, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster, float Lambda_SAT);
/*, float *Lambda*/
/**
 * Kernel: calculates the lambda (firing-rate) blocks from the IHC array.
 * Per node: base = min(Lambda_SAT, max(Aihc * IHC, spont)) — clamped between the
 * spontaneous rate and the saturation rate; section 0 is pinned to the spontaneous rate.
 * Launch layout: threadIdx.x = cochlea section; the decoupled block id encodes both
 * the lambda type (lambda_index) and the time interval (interval_id).
 * Always writes JND_Lambda; mirrors into Lambda_Buffer only when save_lambda is non-zero.
 */
template<typename T1,typename T2> __global__ void CudaCalcLambda(T1 *IHC,T2 *Lambda_Buffer, T2 *JND_Lambda, device_params *filter_params,int save_lambda)
{
// in this procedure lambda offset is ignored
int cochlea_offset_section = threadIdx.x; // one thread per cochlea section
int lambda_offset = filter_params->lambda_offset;
// the decoupled blocks are split evenly between the LAMBDA_COUNT lambda types:
// lambda_index selects the type, interval_id the time interval within that type
int intervalsNum = totalDecoupledBlocks(gridDim) / LAMBDA_COUNT;
int lambda_index = decoupledBlockId(blockIdx, gridDim) / intervalsNum;
int interval_id = decoupledBlockId(blockIdx, gridDim) - lambda_index*intervalsNum;
int time_length_analysis = filter_params->intervalTimeNodes / gridDim.z; // time nodes per decoupled block
int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
/*
* start offset represents calculated section + lambda offset_in_time*sections
*/
int offset = cochlea_offset_section + cochlea_sections*(interval_id*time_length_analysis + lambda_offset);//calcStartMainOffset(blockIdx,blockDim,threadIdx,lambda_offset,cochlea_sections);
// each thread start from its own adjusted offset in the time block offset
int time_node_offset = 0;//calcTimeNodesOffset(blockIdx,blockDim,threadIdx);
//int A_index = lambda_index + LAMBDA_COUNT;
// outputs of the different lambda types are stacked calcTime*sections apart
int lambda_write_offset = lambda_index*filter_params->calcTime*cochlea_sections;
// section 0 is forced to the spontaneous rate: zero_factor kills base_lambda there
// and zero_offset substitutes spont (see the fma below)
double zero_factor = cochlea_offset_section != 0 ? 1.0 : 0.0;
int write_offset;
int k;
int current_offset;
double base_lambda;
double Lambda_SAT = filter_params->Lambda_SAT;
// per-(lambda type, section) gain and per-type spontaneous rate from constant memory
double Aihc = double(model_Aihc[lambda_index*SECTIONS + cochlea_offset_section]);
double spont = double(CUDA_Nerves_Clusters[lambda_index]);
double zero_offset = cochlea_offset_section != 0 ? 0.0 : spont;
//DeviceCudaCalcLambda(IHC, Lambda, JND_Lambda, cochlea_offset_section, lambda_write_offset, lambda_index, A_index, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster, filter_params->Lambda_SAT);
for (k = time_node_offset; k<time_length_analysis; k++) {
current_offset = offset + k*cochlea_sections; // untransposed adding sections multiplication for k, offset time
write_offset = current_offset + lambda_write_offset;
// clamp the driven rate between the spontaneous rate and saturation
base_lambda = fmin(Lambda_SAT, fmax(Aihc*double(IHC[current_offset]),spont));
// fma(zero_factor, base, zero_offset): base_lambda for sections > 0, spont for section 0
JND_Lambda[write_offset] = T2(fma(zero_factor,base_lambda,zero_offset));
if (save_lambda) {
Lambda_Buffer[write_offset] = T2(fma(zero_factor, base_lambda, zero_offset));
}
//if (current_offset >= 350000 && current_offset <= 350256) printf("Y[%d]==%.3e,time_length_analysis=%d\n",current_offset,Y[current_offset],time_length_analysis);
}
__syncthreads();
}
// calc lambdas types of decalarations
template __global__ void CudaCalcLambda<lambdaFloat, double>(lambdaFloat *IHC, double *Lambda_Buffer, double *JND_Lambda, device_params *filter_params,int save_lambda);
/**
 * Device helper: strided element-wise copy src -> dst with an implicit T1 -> T2
 * conversion per element; volatile qualifiers force actual memory traffic.
 * \p src global input array
 * \p dst global output array
 * \p offset start index in the arrays (already includes the cochlea section)
 * \p time_length_analysis number of time nodes to process
 * \p cochlea_sections stride between consecutive time nodes (untransposed layout)
 * \p time_node_offset first time node this thread handles
 * \p time_node_jumps_in_cluster time-node stride between consecutive iterations
 */
template<typename T1, typename T2> __device__ void DeviceCopy_Array(volatile T1 *src, volatile T2 *dst, int offset, int time_length_analysis, int cochlea_sections, int time_node_offset, int time_node_jumps_in_cluster) {
    for (int node = time_node_offset; node < time_length_analysis; node += time_node_jumps_in_cluster) {
        const int idx = offset + node * cochlea_sections;
        dst[idx] = src[idx];
    }
}
/**
 * equations inside reference Yonatan Koral's Efficient Tool Thesis
 * this global cuda procedure calculates the IHC array from the Basilar Membrane array results using device functions:
 * calculate the AC
 * calculate the SHigh
 * calculate the dS
 * calculate the DC
 * calculate the IHC
 */
template<typename T> __global__ void CudaUnifiedCalcIHC(
float *BM_internal,
T *cuda_Buffer2,
T *cuda_Buffer3,
T *cuda_Buffer4,
T *cuda_BufferOutput,
device_params *filter_params,
vectors_sum_linear_coefficents *coefficents_set,
int backup_stage) {
// Pipeline (Eq. 5.1 - 5.5): AC filter -> S_high (sum & square) -> DC filter -> IHC composite.
// backup_stage values in 9..14 mirror the matching intermediate array into cuda_BufferOutput for debugging.
// NOTE(review): BM_internal is both the input (BM velocities) and, after the composite stage, the pre-IHC output.
// first filter parameters are for the AC filter, taken from filter_params index 0
int filter_index = filter_params->filter_b_start_index + 1;
int filter_size = filter_params->order_of_ac_filter; //(int)(device_time_filter[filter_index]+0.1f); // very important filter data start from index 1 0 index is size....
int cochlea_offset_section = calcCochleaSection(blockIdx, blockDim, threadIdx);
int lambda_offset = filter_params->lambda_offset;
int time_length_analysis = filter_params->calcTime - lambda_offset; // here I run on the entire set
int cochlea_sections = filter_params->cochlea_sections; // number of cochlea space sections
int time_node_jumps_in_cluster = calcTimeNodesJump(blockIdx, blockDim, threadIdx); // blockDim.y is the number of threads per section, so each thread jumps by this count times the number of sections
int offset = calcStartMainOffset(blockIdx, blockDim, threadIdx, lambda_offset, cochlea_sections);
int time_node_offset = calcTimeNodesOffset(blockIdx, blockDim, threadIdx);
// first sum combines BM_internal and the AC result, using coefficients set index 0
T coefficentA = T(coefficents_set->A_coefficent);
T coefficentB = T(coefficents_set->B_coefficent);
double IHC_Damage_Factor = CUDA_IHC_DAMAGE[cochlea_offset_section];
// calculate unified block length including decoupling; the value is the same for both FIR filters
int calculatedIntervalTimeNodes = filter_params->FilterDecoupledMode ? filter_params->intervalTimeNodes*filter_params->Decouple_Filter : time_length_analysis;
int final_regular_division_position = filter_params->intervalTimeNodes*filter_params->time_blocks;
// AC is calculated here only when the AC filter is FIR (filter_a_start_index == -1);
// an IIR AC filter is run separately before this kernel - Eq. 5.1
if (filter_params->filter_a_start_index == -1) {
// first stage calculate the AC
DeviceCudaFIRFilter<float, T>(BM_internal, cuda_Buffer2, offset, time_length_analysis, cochlea_sections, filter_size, filter_index, time_node_offset, time_node_jumps_in_cluster, calculatedIntervalTimeNodes, final_regular_division_position);
}
if (backup_stage == 9) { // AC backup
DeviceCopy_Array<JNDFloat, JNDFloat>(cuda_Buffer2, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
__syncthreads();
// summing BM internal and AC; each thread handles only its own AC results so further synchronization is unnecessary
// calculating Eq. 5.2
DeviceCudaVectorSumNSquare<float, T>(BM_internal, cuda_Buffer2, cuda_Buffer3, offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
__syncthreads();
if (backup_stage == 12 || backup_stage == 11) { // SHigh backup
DeviceCopy_Array<JNDFloat, JNDFloat>(cuda_Buffer3, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
// switching filter parameters to the DC filter (filter_params index 1); the pre-filter __syncthreads above ensures the dS array is valid
filter_index = filter_params[1].filter_b_start_index+1;
filter_size = filter_params->order_of_dc_filter; // dc filter size
// calculating the DC filter - Eq 5.3
DeviceCudaFIRFilter<T, T>(cuda_Buffer3, cuda_Buffer4, offset, time_length_analysis, cochlea_sections, filter_size, filter_index, time_node_offset, time_node_jumps_in_cluster, calculatedIntervalTimeNodes, final_regular_division_position);
__syncthreads();
if (backup_stage == 10) { // DC backup
DeviceCopy_Array<JNDFloat, JNDFloat>(cuda_Buffer4, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
// now setting the coefficients for the pre-IHC calculation (coefficients set index 1)
coefficentA =T(coefficents_set[1].A_coefficent);
coefficentB =T(coefficents_set[1].B_coefficent);
int reverseCoefficents = coefficents_set[1].reverseCoefficents;
// combining the AC and DC for pre-IHC; AC is already valid and each thread uses only its own results, so synchronization is unnecessary
// calculating equations 5.4 - 5.5; this was tested with divided and with multiplied parameters due to a previous calculation error (since fixed) - either form works
if (reverseCoefficents) {
//if (threadIdx.x == 0) printf("DeviceCudaVectorSumDivide(A,B,C,%d,%d,%d,%.2f,%.2f,%d,%d)\n", offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
DeviceCudaCalcIHCCompositeDivide<T, JNDFloat>(cuda_Buffer2, cuda_Buffer4, BM_internal, IHC_Damage_Factor, offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
}
else {
//if (threadIdx.x == 0) printf("DeviceCudaVectorSum(A,B,C,%d,%d,%d,%.2f,%.2f,%d,%d)\n", offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
DeviceCudaCalcIHCComposite<T, JNDFloat>(cuda_Buffer2, cuda_Buffer4, BM_internal, IHC_Damage_Factor, offset, time_length_analysis, cochlea_sections, coefficentA, coefficentB, time_node_offset, time_node_jumps_in_cluster);
}
if (backup_stage == 14 || backup_stage==13) { // PRE IHC backup
DeviceCopy_Array<JNDFloat, JNDFloat>(BM_internal, cuda_BufferOutput, offset, time_length_analysis, cochlea_sections, time_node_offset, time_node_jumps_in_cluster);
}
__syncthreads();
}
// possible types for iir on device
// specializing calculation of IHC
// explicit instantiation of the unified IHC kernel for the JNDFloat precision used by the host launcher (RunIHCKernel)
template __global__ void CudaUnifiedCalcIHC<JNDFloat>(
float *BM_internal,
JNDFloat *cuda_Buffer2,
JNDFloat *cuda_Buffer3,
JNDFloat *cuda_Buffer4,
JNDFloat *cuda_BufferOutput,
device_params *filter_params,
vectors_sum_linear_coefficents *coefficents_set,
int backup_stage);
/**
 * Copies one decoupled time block of src into cudaBackupArray on the device.
 * Layout: one thread per cochlea section (threadIdx.x); each grid block covers
 * one slice of a decoupled interval, with gridDim.z subdividing the interval.
 */
template<typename T1, typename T2> __global__ void copyDeviceBackup(T1 *src, T2 *cudaBackupArray, device_params *filter_params) {
	const int sections = filter_params->cochlea_sections;
	// each block handles intervalTimeNodes / gridDim.z consecutive time nodes
	const int nodes_per_block = filter_params->intervalTimeNodes / gridDim.z;
	// first time node of this block, shifted by the global lambda offset
	const int block_time_start = decoupledBlockId(blockIdx, gridDim) * nodes_per_block + filter_params->lambda_offset;
	// linear start offset: this thread's section at the block's first time node
	const int start_offset = threadIdx.x + sections * block_time_start;
	// each thread walks its own section one time node at a time (offset 0, stride 1)
	DeviceCopy_Array<T1, T2>(src, cudaBackupArray, start_offset, nodes_per_block, sections, 0, 1);
}
// explicit instantiation used by the RunIHCKernel backup paths (float source -> JNDFloat backup buffer)
template __global__ void copyDeviceBackup<float, JNDFloat>(float *src, JNDFloat *cudaBackupArray, device_params *filter_params);
//template __global__ void copyDeviceBackup<JNDFloat, JNDFloat>(JNDFloat *src, JNDFloat *cudaBackupArray, device_params *filter_params);
/**
 * Launches the IIR AC-filter kernel (Eq. 5.1) over the saved BM speeds.
 * Parallelizes across cochlea sections only; time is walked inside the kernel.
 * Elapsed time is recorded at log flag 40 when (Show_Run_Time & 16) is set.
 */
void runIIRKernelByParams(int Show_Run_Time,Log &outer_log) {
	const int timing_flag = Show_Run_Time & 16;
	dim3 iirThreads(SECTIONS, 1, 1);
	cudaEvent_t start, stop;
	cudaEventsCreate(start, stop, timing_flag);
	cudaEventsStartTimer(start, stop, timing_flag);
	if (cudaJNDHolder.host_local_param[0].FilterDecoupledMode) {
		// decoupled run: one grid block per filter interval (at least one block)
		int decouple_size = cudaJNDHolder.host_local_param[0].Decouple_Filter;
		if (decouple_size <= 0) decouple_size = 1;
		dim3 iirGrid(cudaJNDHolder.host_local_param[0].time_blocks / decouple_size, 1, 1);
		CudaIIRFilterDecoupled<float, lambdaFloat> KERNEL_ARGS2(iirGrid, iirThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params);
	} else {
		// non-decoupled IIR: parallel in space only, so a single block with SECTIONS threads
		dim3 iirGrid(1, 1, 1);
		CudaIIRFilter<float, lambdaFloat> KERNEL_ARGS2(iirGrid, iirThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params);
	}
	outer_log.timeAtFlag(40, cudaEventsStopQueryTimer(start, stop, timing_flag, "IIR calculation time"), timing_flag);
}
/**
 * Configures launch dimensions for a decoupled kernel run:
 * grid = (number of intervals, blocks per interval, unified-IHC decouple factor clamped to >= 1),
 * threads = (SECTIONS, 1, 1).
 */
void setDecoupledRun(dim3 &filtersGrid, dim3 &filtersThreads, const int& intervals_num, const int&blocks_per_interval, const int& Decouple_Unified_IHC_Factor) {
	const int z_factor = Decouple_Unified_IHC_Factor > 0 ? Decouple_Unified_IHC_Factor : 1;
	filtersGrid = dim3(intervals_num, blocks_per_interval, z_factor);
	filtersThreads = dim3(SECTIONS, 1, 1);
}
// calculating IHC by stages Cochlear Model for Hearing Loss, equations numbers from Yonatan Koral Thesis, Efficent Tool For Cochlea Simulation
extern "C" void RunIHCKernel(JNDFloat *host_backup, int Show_Run_Time, int save_lambda, int backup_stage,int Decouple_Unified_IHC_Factor,Log &outer_log) {
// Host driver for the IHC stage (Eq. 5.1 - 5.5) and the subsequent lambda stage (Eq. 5.6 - 5.8).
// Decouple_Unified_IHC_Factor <= 0 selects the unified single-kernel path; otherwise the staged multi-kernel path runs.
// backup_stage in 9..14 copies the matching intermediate device array back into host_backup.
// if the data loaded from hd its on host and it needs to be first time or its not relevant
cudaEvent_t start, stop;
cudaEventsCreate(start, stop, Show_Run_Time & 8);
dim3 filtersGrid(IHC_FILTER_BLOCK, 1, 1);
dim3 filtersThreads(SECTIONS_PER_IHC_FILTER_BLOCK, THREADS_PER_IHC_FILTER_SECTION, 1);
// one lambda matrix is calcTime*SECTIONS elements; buffers 2/3 live at multiples of this offset
int lambda_write_offset = cudaJNDHolder.host_local_param->calcTime*SECTIONS;
// copy from saved speeds the rest of the data
// unfied calculation of the ihc
cudaEventsStartTimer(start, stop, Show_Run_Time & 8);
if (Decouple_Unified_IHC_Factor<=0) {
if (cudaJNDHolder.host_local_param[0].order_of_ac_filter > -1) {
// calculating AC stage if AC filter is IIR, Eq. 5.1
runIIRKernelByParams(Show_Run_Time,outer_log);
}
// rest of the IHC process calculated in single kernel - Eq. 5.2 - 5.5
CudaUnifiedCalcIHC<JNDFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaLambdaHolderData.cuda_JND_Lambda + 2*lambda_write_offset, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params, cudaJNDHolder.vectors_sums_coefficents,backup_stage);
} else {
// this is effectively cause the grid to be one large interval if decoupler size is 0
int dfilter = cudaJNDHolder.host_local_param[0].Decouple_Filter > 0 ? cudaJNDHolder.host_local_param[0].Decouple_Filter : cudaJNDHolder.host_local_param[0].time_blocks;
setDecoupledRun(filtersGrid,filtersThreads, cudaJNDHolder.host_local_param[0].time_blocks / dfilter, dfilter, Decouple_Unified_IHC_Factor);
if (cudaJNDHolder.host_local_param[0].order_of_ac_filter == -1) {
// calculating the lambda by multiple kernel currently remain for backward reference
// ac filter run, FIR filter Eq. 5.1
CudaFIRFilter<float, lambdaFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params);
}
else {
// ac a filter make IIR present, Eq. 5.1
runIIRKernelByParams(Show_Run_Time,outer_log);
}
if (backup_stage == 9) { // AC backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
// calcs Eq 5.2 - dS
CudaVectorsSumNSquare<float, lambdaFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaJNDHolder.global_device_params, &cudaJNDHolder.vectors_sums_coefficents[0]);
if (backup_stage == 12 || backup_stage == 11) { // SHigh backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
// caculating Eq 5.3 - DC response
CudaFIRFilter<lambdaFloat, lambdaFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda + lambda_write_offset, cudaLambdaHolderData.cuda_JND_Lambda + 2*lambda_write_offset, &cudaJNDHolder.global_device_params[1]);
if (backup_stage == 10) { // DC backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda + 2*lambda_write_offset, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
// calculates Eq 5.4 and 5.5 - Log of Psi IHC
CudaCalcIHCComposite<lambdaFloat, float> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaLambdaHolderData.cuda_JND_Lambda, cudaLambdaHolderData.cuda_JND_Lambda + 2 * lambda_write_offset, cudaHolderData.cuda_saved_speeds, cudaJNDHolder.global_device_params, &cudaJNDHolder.vectors_sums_coefficents[1]);
// calc the IHC log
if (backup_stage == 13 || backup_stage == 14) { // IHC backup
copyDeviceBackup<JNDFloat, JNDFloat>KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_Buffer1, cudaJNDHolder.global_device_params);
}
}
// copy the requested backup stage back to the host (stages 9..14 fill cuda_Buffer1 above)
if (backup_stage >= 9 && backup_stage <= 14) {
GeneralKernel_Copy_Results_Template<JNDFloat>(host_backup, cudaLambdaHolderData.cuda_Buffer1, lambda_write_offset);
}
outer_log.timeAtFlag(41, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 8, "IHC calculation time"), Show_Run_Time & 8);
/// calcs the lambda itself
cudaEventsStartTimer(start, stop, Show_Run_Time & 8);
/* cuda_Lambda,*/
int dfilter = cudaJNDHolder.host_local_param[0].Decouple_Filter > 0 ? cudaJNDHolder.host_local_param[0].Decouple_Filter : cudaJNDHolder.host_local_param[0].time_blocks;
setDecoupledRun(filtersGrid, filtersThreads, LAMBDA_COUNT* cudaJNDHolder.host_local_param[0].time_blocks / dfilter, dfilter, Decouple_Unified_IHC_Factor);// changed grid to support all lambda calculations
// Calculates the AN response for all groups of Neurons Eq 5.6 - 5.8
CudaCalcLambda<lambdaFloat, JNDFloat> KERNEL_ARGS2(filtersGrid, filtersThreads)(cudaHolderData.cuda_saved_speeds, cudaLambdaHolderData.cuda_Buffer1, cudaLambdaHolderData.cuda_JND_Lambda, cudaJNDHolder.global_device_params, save_lambda);
outer_log.timeAtFlag(42, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 8, "Lambda calculation time"), Show_Run_Time & 8);
}
/**
* src is a __global__ array on the CUDA device,
* target is an array on the host,
* size is the array length (number of elements).
* The function copies from src to target using cudaMemcpy.
*/
/**
 * Copies `size` floats from the device array `src` into the host array `target`.
 * Errors from cudaMemcpy are routed through gpuAssert.
 */
extern "C" void GeneralKernel_Copy_Results(float *target,float *src, size_t size) noexcept(false) {
	const size_t byte_count = size * sizeof(float);
	gpuAssert(cudaMemcpy(static_cast<void *>(target), src, byte_count, cudaMemcpyDeviceToHost));
}
/**
* src is a __global__ array on the CUDA device,
* target is an array on the host,
* size is the array length (number of elements).
* The function copies from src to target using cudaMemcpy.
*/
/**
 * Copies `size` doubles from the device array `src` into the host array `target`.
 * Errors from cudaMemcpy are routed through gpuAssert.
 */
extern "C" void GeneralKernel_Copy_Results_Double(double *target, double *src, size_t size) noexcept(false) {
	const size_t byte_count = size * sizeof(double);
	gpuAssert(cudaMemcpy(static_cast<void *>(target), src, byte_count, cudaMemcpyDeviceToHost));
}
/**
* src is a __global__ array on the CUDA device,
* target is an array on the host,
* size is the array length (number of elements).
* The function copies from src to target using cudaMemcpy.
*/
/**
 * Copies `size` elements of type T from the device array `src`, starting at
 * element `offset`, into the host array `target`.
 */
template<class T> void GeneralKernel_Copy_Results_Template(T *target, T *src, size_t size, size_t offset) {
	const T *device_start = src + offset;
	gpuAssert(cudaMemcpy(static_cast<void *>(target), device_start, size * sizeof(T), cudaMemcpyDeviceToHost));
}
/**
* Convenience overload: copies `size` elements from the start of device array `src` (offset 0) to host `target`.
*/
template<class T> void GeneralKernel_Copy_Results_Template(T *target, T *src, size_t size) {
GeneralKernel_Copy_Results_Template<T>(target, src, size, 0);
}
/**
 * Uploads a window of host samples to the device: time nodes
 * [start_time_node, start_time_node + time_nodes) across `sections` sections.
 */
template<class T> void ReverseKernel_Copy_Results_Template(T *cpu_src, T *cuda_target, size_t start_time_node, size_t time_nodes, int sections) {
	const T *host_window = &cpu_src[start_time_node * sections];
	const size_t byte_count = time_nodes * sections * sizeof(T);
	gpuAssert(cudaMemcpy((void *)cuda_target, host_window, byte_count, cudaMemcpyHostToDevice));
}
/**
* device helper that accumulates `length` cells spaced `jump_size` apart
* \p src is the input array to be summed
* \p dst is a pointer to the target result
* \p jump_size is the interval between two accumulated cells
* \p length is the number of accumulated cells
*/
__device__ void CalcAccumulation(float *src,float *dst,int jump_size,int length) {
	// Sum src[0], src[jump_size], ..., src[(length-1)*jump_size] in order.
	float total = 0.0f;
	for (int step = 0; step < length; ++step) {
		total += src[step * jump_size];
	}
	*dst = total;
}
/**
* device helper that averages `length` cells spaced `jump_size` apart
* \p src is the input array to be averaged
* \p dst is a pointer to the target result
* \p jump_size is the interval between two accumulated cells
* \p length is the number of accumulated cells
*/
__device__ void CalcAvg(float *src, float *dst, int jump_size, int length) {
	// Accumulate first, then divide the stored sum by the element count.
	CalcAccumulation(src, dst, jump_size, length);
	*dst /= float(length);
}
/*
* calculates dA from input will run in single block with #threads as number of intervals
*/
/*
 * Calculates dA per interval: the peak absolute sample amplitude (sqrt of the
 * maximum squared sample) inside each interval's analysis window.
 * Launch contract: a single block whose threadIdx.x enumerates the intervals
 * (see the launch in CudaCalculateJND, grid = (1,1,1), threads = JNDIntervals).
 */
__global__ void cudaCalculateDA(float *input, device_jnd_params *dA, int JNDIntervalNodes, int JNDIntervalHeadNodes, int JNDIntervalActualNodes, int offset_start) {
	float acc = 0.0f;
	// window: skip the interval head, then scan JNDIntervalActualNodes samples
	int start_index = offset_start + threadIdx.x*JNDIntervalNodes + JNDIntervalHeadNodes;
	int end_index = start_index + JNDIntervalActualNodes;
	for (int index = start_index; index < end_index; index++) {
		float current = input[index];
		current = current*current;
		if (acc < current) acc = current;
	}
	// each thread writes only its own slot, and nothing is read back within the
	// kernel, so the redundant trailing __syncthreads() was removed
	dA[threadIdx.x].dA = sqrtf(acc);
}
// this function calculates average of lambda (part of Eq.17) due to synchronization issues
// this function calculates the average of lambda (part of Eq. 17); run as a separate kernel due to synchronization issues
// Launch layout (see CudaCalculateJND): grid = (JNDIntervals, LAMBDA_COUNT), threads = (SECTIONS).
__global__ void GlobalCalculateMeanRate(
device_jnd_params *dA,
JNDFloat *JND_Lambda,
JNDFloat *MeanRate,
int lengthOffset,
int calculatedMatrixSize,
int JNDIntervals, // local
int JNDIntervalLength,
int JNDIntervalHeadNodes,
int overlapNodes,
int JND_USE_Spont_Base_Reference_Lambda
){
int lambda_index = blockIdx.y;
int dAindex = blockIdx.x;
int section_index = threadIdx.x;
int sections = blockDim.x;
int avg_fisher_full_index = lambda_index*JNDIntervals + dAindex;
// start of this (lambda, interval, section) time series inside JND_Lambda, past the interval head
int block_offset = lambda_index*calculatedMatrixSize + section_index + sections*(dAindex*JNDIntervalLength + JNDIntervalHeadNodes);
int mean_rate_offset = avg_fisher_full_index*sections + section_index;
// accumulate in double to reduce rounding error over long time series
double meanRateAccumulator = JND_Lambda[block_offset];
// averaging work per thread, no need for loops over sections
// will calculate manually to improve precision
for (int time_offset = 1; time_offset < lengthOffset; time_offset++) {
//meanRateDiff = JND_Lambda[block_offset + sections*time_offset];// -lambdaBase;
meanRateAccumulator = meanRateAccumulator + JND_Lambda[block_offset + sections*time_offset];
}
MeanRate[mean_rate_offset] = JNDFloat(meanRateAccumulator) / JNDFloat(lengthOffset);
__syncthreads();
}
// uses the result of GlobalCalculateMeanRate to calculate on GPU eq 17-20)
// Uses the result of GlobalCalculateMeanRate to calculate Eq. 17-20 on the GPU:
// per (lambda group, interval) it derives dMeanRate / dLambda against the interval's
// pure-noise reference, builds the Fisher rate (F_RA) and Fisher AI sums, and
// optionally mirrors intermediate values into Buffer1 per backup_stage (1..6).
// Launch layout: grid = (JNDIntervals, LAMBDA_COUNT), threads = (SECTIONS);
// the shared-memory reductions assume blockDim.x == SECTIONS and SECTIONS is a power of two.
__global__ void GlobalCalculateJND(
bool calculate_ai,
bool calculate_rms,
device_jnd_params *dA,
JNDFloat *JND_Lambda,
JNDFloat *MeanRate,
JNDFloat *Buffer1,
double *nIHC,
double Fs,
double scaleBMVelocityForLambdaCalculation,
int writeMatrixSize,
int calculatedMatrixSize,
int overlapNodes,
int JNDIntervalHeadNodes,
int JNDIntervalLength,
int lengthOffset,
int JNDIntervals, // local
int JNDIntervalsFull, // local
int *JND_Calculated_Intervals, // global
int numOFJNDCalculated, // global
int *JND_Refrence_Intervals, // global
int numOFJNDReferences, // global
int handeledIntervalsJND, // already handeled intervals
int *JND_Serial_Intervals_Positions,
int *JND_Interval_To_Reference,
JNDFloat *F_RA, // result for fisher rate not lambda summed, but time and space reduced
JNDFloat *FisherAISum, // result for fisher AI not lambda summed, but time and space reduced
double JND_Delta_Alpha_Length_Factor,
device_params *general_params,
int isdACalced,
int JND_USE_Spont_Base_Reference_Lambda,
int backup_stage
) {
// per-block scratch for the spatial (section) reductions
__shared__ JNDFloat shared_acc_rms[SECTIONS];
__shared__ JNDFloat shared_acc_ai[SECTIONS];
int lambda_index = blockIdx.y;
int dAindex = blockIdx.x;
int section_index = threadIdx.x;
int sections = blockDim.x;
int avg_fisher_full_index = lambda_index*JNDIntervals + dAindex;
int mean_rate_offset = avg_fisher_full_index*sections + section_index;
JNDFloat T = JND_Delta_Alpha_Length_Factor / Fs;
JNDFloat Tlength = float(lengthOffset) / Fs;
//JNDFloat lambdaBase = CUDA_Nerves_Clusters[lambda_index];
int globaldAIndex = dAindex + handeledIntervalsJND;
// special control mechanism for mean rate calculation that averages mean rate (not dMeanRate)
// to ensure that the summary is large enough relative to the reference (so we are not just squaring negative values that would create artifacts)
if (calculate_rms) {
// test RMS average for debug output
shared_acc_rms[section_index] = MeanRate[mean_rate_offset];
// reducing spatial dimension (tree reduction; assumes SECTIONS is a power of two)
for (int t_i = (SECTIONS >> 1); t_i >= 1; t_i >>= 1) {
__syncthreads();
if (section_index<t_i) {
shared_acc_rms[section_index] = shared_acc_rms[section_index] + shared_acc_rms[section_index + t_i];
}
}
__syncthreads();
if (section_index == 0 && backup_stage==1) {
Buffer1[avg_fisher_full_index] = shared_acc_rms[section_index] / (JNDFloat(sections));
}
}
__syncthreads();
int globalReferenceInterval = globaldAIndex;
bool isRefrence = numOFJNDReferences > 0;
// this means we have actual references to test
if (isRefrence) {
isRefrence = false; // now for the actual test
for (int index = 0; index < numOFJNDReferences; index++) {
if (JND_Refrence_Intervals[index] == globaldAIndex) {
isRefrence = true;
break;
}
}
}
__syncthreads();
// find for each calculated JND signal+noise block its pure-noise block
if (!isRefrence) {
// assuming everything has reference
globalReferenceInterval = JND_Interval_To_Reference[JND_Serial_Intervals_Positions[globalReferenceInterval]];
}
__syncthreads();
int dAreferenceIndex = globalReferenceInterval - handeledIntervalsJND; // to find local index on tested output
int mean_rate_reference_offset = (avg_fisher_full_index + dAreferenceIndex - dAindex)*sections + section_index;
// dA comes from the local kernel result when it was just calculated (isdACalced), otherwise from the global profile table
JNDFloat dAvalue = dA[isdACalced*dAindex + (1 - isdACalced)*globaldAIndex].dA;
JNDFloat dMRate = (MeanRate[mean_rate_offset] - MeanRate[mean_rate_reference_offset]) / dAvalue;
if (backup_stage == 2) {
Buffer1[mean_rate_offset] = dMRate;
}
if (calculate_ai) {
int calculate_lambda_offset = lambda_index*calculatedMatrixSize + section_index + sections*(dAindex*JNDIntervalLength + JNDIntervalHeadNodes);
int reference_lambda_offset = lambda_index*calculatedMatrixSize + section_index + sections*(dAreferenceIndex*JNDIntervalLength + JNDIntervalHeadNodes);
JNDFloat preFisherAITimeReducedValue = 0.0;
for (int time_offset = 0; time_offset < lengthOffset; time_offset++) {
JNDFloat refLambda = JND_Lambda[reference_lambda_offset];
JNDFloat calcedLambda = JND_Lambda[calculate_lambda_offset];
JNDFloat dLambdaCalculated = dAvalue > 0 ? (calcedLambda - refLambda) / dAvalue : 0;
if (backup_stage == 3) {
Buffer1[calculate_lambda_offset] = dLambdaCalculated;
}
/*
* calculating pre fisher AI
* from matlab
* fisher AI : Ts*(dL.^2./reshape(RefLamda(j,:,:),Nsec,Time) => into pre fisher AI
* VERY Important Correction: original division from matlab program incorrect:
* JNDFloat preFisherAIValue = refLambda>0 ? (dLambdaCalculated*dLambdaCalculated / refLambda / Fs) : 0;
* since its contradict eq 19 in miriam's article
*/
JNDFloat preFisherAIValue = (dLambdaCalculated*dLambdaCalculated / (Fs*refLambda)) ;
if (backup_stage == 4) {
Buffer1[calculate_lambda_offset] = preFisherAIValue;
}
preFisherAITimeReducedValue += preFisherAIValue;
reference_lambda_offset += sections;
calculate_lambda_offset += sections;
}
// NOTE(review): rsqrt followed by dividing by its square multiplies back by the
// time-reduced sum; net effect is (T/Tlength)*nIHC*sum - confirm this roundabout
// form is intentional (possibly a numerical/precision choice)
preFisherAITimeReducedValue = rsqrt(preFisherAITimeReducedValue);
JNDFloat preFisherAIValue = (T/Tlength)*nIHC[section_index] /(preFisherAITimeReducedValue* preFisherAITimeReducedValue);
if (backup_stage == 5) {
Buffer1[mean_rate_offset] = preFisherAIValue;
}
shared_acc_ai[section_index] = preFisherAIValue;
}
/*
* calculate pre fisher values before summing
* from matlab
* fisher rate : nIHC.*Tmean./RefMeanRate(j,:).*(dMeanRate(j,:).^2) => into pre fisher rate
*/
if (calculate_rms) {
JNDFloat MeanRateReferenced = MeanRate[mean_rate_reference_offset];
// all mean rates are actually multiplied by lambda base, so no need to multiply by length offset on the nominator
JNDFloat CRLB_RAValue = rsqrt(T / MeanRateReferenced*dMRate * dMRate);
if (backup_stage == 6) {
Buffer1[mean_rate_offset] = CRLB_RAValue;
}
shared_acc_rms[section_index] = nIHC[section_index] /(CRLB_RAValue*CRLB_RAValue);
}
__syncthreads();
// reducing spatial dimension for AI/RMS (tree reduction; barrier is outside the divergent branch)
for (int t_i = (SECTIONS >> 1); t_i >= 1; t_i >>= 1) {
__syncthreads();
if (section_index<t_i) {
if (calculate_ai) shared_acc_ai[section_index] = shared_acc_ai[section_index] + shared_acc_ai[section_index + t_i];
if (calculate_rms) shared_acc_rms[section_index] = shared_acc_rms[section_index] + shared_acc_rms[section_index + t_i];
}
}
__syncthreads();
// thread 0 publishes the block's Fisher number for AI/RMS, weighted by the nerve-cluster factor
if (section_index == 0) {
int lambda_fisher_full_index = lambda_index*JNDIntervalsFull + globaldAIndex;
if (calculate_ai) FisherAISum[lambda_fisher_full_index] = shared_acc_ai[0] * CUDA_Nerves_Clusters[2*LAMBDA_COUNT+lambda_index];
if (calculate_rms) F_RA[lambda_fisher_full_index] = shared_acc_rms[0] * CUDA_Nerves_Clusters[2 * LAMBDA_COUNT + lambda_index];
}
__syncthreads();
}
// envelope function for GlobalCalculateJND, see detailed description in cochlea_common.h
extern "C" void CudaCalculateJND(
bool calculate_ai,
bool calculate_rms,
int mean_size,
int fisher_size,
double SPLRefVal,
double Fs,
double scaleBMVelocityForLambdaCalculation,
double *nIHC,
int *JND_Calculated_Intervals,
int numOFJNDCalculated,
int *JND_Refrence_Intervals,
int numOFJNDReferences,
int handeledIntervalsJND,
int JNDIntervalsFull, // global
int JNDIntervals, // current input # of handeled intervals
int JNDIntervalHeadNodes,
int overlapNodes,
int JNDIntervalNodes,
int lengthOffset, // local not global
int *JND_Serial_Intervals_Positions,
int *JND_Interval_To_Reference,
JNDFloat *F_RA, // result for fisher rate not lambda summed, but time and space reduced
JNDFloat *FisherAISum, // result for fisher AI not lambda summed, but time and space reduced
int writeMatrixSize,
int calculatedMatrixSize,
double JND_Delta_Alpha_Length_Factor,
int JND_USE_Spont_Base_Reference_Lambda,
int Show_Run_Time,
bool calcdA,
bool show_generated_input_params_cuda ,
int backup_stage,// in case of viewing output id of backup stage
Log &outer_log
) noexcept(false) {
// Host envelope: optionally computes dA on device, uploads index/reference tables,
// runs GlobalCalculateMeanRate (when RMS requested) then GlobalCalculateJND,
// and copies the Fisher rate / Fisher AI results back to the host.
std::cout << "Calculating JND on GPU" << std::endl;
cudaEvent_t start, stop;
cudaEventsCreate(start, stop, Show_Run_Time & 16);
// calculate dA if not already calculated
if (calcdA) {
// single block, one thread per interval (matches cudaCalculateDA's launch contract)
dim3 filtersGriddA(1, 1, 1);
dim3 filtersThreadsdA(JNDIntervals, 1, 1);
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
cudaCalculateDA KERNEL_ARGS2(filtersGriddA, filtersThreadsdA)(cudaHolderData.cuda_input_samples, cudaJNDHolder.cuda_jnd_params, JNDIntervalNodes, JNDIntervalHeadNodes, lengthOffset, overlapNodes);
outer_log.timeAtFlag(34, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "dA calculation for JND"), Show_Run_Time & 16);
}
// copy parameters/index tables to GPU
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
gpuAssert(cudaMemcpy(cudaLambdaHolderData.cuda_nIHC, nIHC, SECTIONS*sizeof(double), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cudaJNDHolder.cuda_JND_Refrence_Intervals, JND_Refrence_Intervals, numOFJNDReferences*sizeof(int), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cudaJNDHolder.cuda_JND_Serial_Intervals_Positions, JND_Serial_Intervals_Positions, JNDIntervalsFull*sizeof(int), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cudaJNDHolder.cuda_JND_Interval_To_Reference, JND_Interval_To_Reference, numOFJNDCalculated*sizeof(int), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(cudaJNDHolder.cuda_JND_Calculated_Intervals, JND_Calculated_Intervals, numOFJNDCalculated*sizeof(int), cudaMemcpyHostToDevice));
outer_log.timeAtFlag(35, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "JND Memory preaparations"), Show_Run_Time & 16);
// grid: one block per (interval, lambda group); one thread per cochlea section
dim3 filtersGrid(JNDIntervals, LAMBDA_COUNT, 1);
dim3 filtersThreads(SECTIONS, 1, 1);
if (show_generated_input_params_cuda) {
std::cout << "lengthOffset = " << lengthOffset << "\n"
<< "overlapNodes = " << overlapNodes << "\n"
<< "JND_Delta_Alpha_Length_Factor = " << JND_Delta_Alpha_Length_Factor << "\n"
<< "calculatedMatrixSize = " << calculatedMatrixSize << "\n"
<< "JNDIntervalHeadNodes = " << JNDIntervalHeadNodes << "\n"
<< "filtersGrid = " << showDIM3(filtersGrid) << "\n"
<< "filtersThreads = " << showDIM3(filtersThreads) << "\n";
}
if (calculate_rms) {
// mean rate of lambda (part of Eq. 17), calculated in a pre-run kernel to ensure device synchronization
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
GlobalCalculateMeanRate KERNEL_ARGS2(filtersGrid, filtersThreads)(
cudaJNDHolder.cuda_jnd_params,
cudaLambdaHolderData.cuda_JND_Lambda,
cudaJNDHolder.cuda_MeanRate,
lengthOffset,
calculatedMatrixSize,
JNDIntervals, // local
JNDIntervalNodes,
JNDIntervalHeadNodes,
overlapNodes,
JND_USE_Spont_Base_Reference_Lambda
);
outer_log.timeAtFlag(36, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "JND Mean Rate array calculation"), Show_Run_Time & 16);
}
cudaEventsStartTimer(start, stop, Show_Run_Time & 16);
// after mean rate is ready, the rest can be calculated
GlobalCalculateJND KERNEL_ARGS2(filtersGrid, filtersThreads)(
calculate_ai,
calculate_rms,
cudaJNDHolder.cuda_jnd_params,
cudaLambdaHolderData.cuda_JND_Lambda,
cudaJNDHolder.cuda_MeanRate,
cudaLambdaHolderData.cuda_Buffer1,
cudaLambdaHolderData.cuda_nIHC,
Fs,
scaleBMVelocityForLambdaCalculation,
writeMatrixSize,
calculatedMatrixSize,
overlapNodes,
JNDIntervalHeadNodes,
JNDIntervalNodes,
lengthOffset,
JNDIntervals, // local
JNDIntervalsFull, // local
cudaJNDHolder.cuda_JND_Calculated_Intervals, // global
numOFJNDCalculated, // global
cudaJNDHolder.cuda_JND_Refrence_Intervals, // global
numOFJNDReferences, // global
handeledIntervalsJND, // already handeled intervals
cudaJNDHolder.cuda_JND_Serial_Intervals_Positions,
cudaJNDHolder.cuda_JND_Interval_To_Reference,
cudaJNDHolder.cuda_F_RA,
cudaJNDHolder.cuda_FisherAISum,
JND_Delta_Alpha_Length_Factor,
cudaJNDHolder.global_device_params,
calcdA ? 1 : 0,
JND_USE_Spont_Base_Reference_Lambda,
backup_stage
);
// blocking copies double as synchronization with the kernel above
gpuAssert(cudaMemcpy(F_RA, cudaJNDHolder.cuda_F_RA, fisher_size*sizeof(JNDFloat), cudaMemcpyDeviceToHost));
gpuAssert(cudaMemcpy(FisherAISum, cudaJNDHolder.cuda_FisherAISum, fisher_size*sizeof(JNDFloat), cudaMemcpyDeviceToHost));
outer_log.timeAtFlag(37, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 16, "JND calculation"), Show_Run_Time & 16);
}
/**
* generating input per profile
*/
// Generates one input sample per thread from a JND profile: scaled signal
// (pure tone when signal_mode == 0, sampled Signal buffer otherwise) plus
// normalized white noise. blockIdx.y selects the profile/interval,
// blockIdx.x*blockDim.x + threadIdx.x the time node within the interval.
__global__ void CudaGenerateInputFromProfile(
device_jnd_params *input_profiles,
float *WN, // white noise array
float *Signal,
float *input_samples, // input samples to load
double wn_dc, // noise dc to be decreased
double wn_energy_normalize_factor, // factor to normalize energy interval
int signal_mode,
double signal_dc, // signal dc to be decreased
double signal_energy_normalize_factor, // factor to normalize energy interval
int startProfile,
int calculatedProfiles,
int overlapNodes, // for start offset
int IntervalLength, //number of nodes on actual input
int JND_Interval_Head,
float Fs
) {
int profile_index = startProfile + blockIdx.y;
int interval_position = blockIdx.x*blockDim.x + threadIdx.x;
int input_sample_position = blockIdx.y*IntervalLength + interval_position;
double dA = input_profiles[profile_index].dA; // signal amplitude for this profile
double Wn = input_profiles[profile_index].Wn; // noise amplitude for this profile
double frequency = 0;
if (signal_mode == 0) {
frequency = input_profiles[profile_index].frequency;
}
// time axis starts at the end of the interval head (negative inside the head)
double time = double(interval_position - JND_Interval_Head) / double(Fs);
double timeCut = 2 * PI*frequency*time;
double sum = 0.0;
// signal term: sampled buffer (dc-removed, energy-normalized) or pure cosine tone
if (signal_mode) sum = (dA*(double(Signal[interval_position]) - signal_dc) / signal_energy_normalize_factor);
else sum = (dA*cos(timeCut) / signal_energy_normalize_factor);
// additive white-noise term, dc-removed and energy-normalized
sum=sum+ (Wn*(double(WN[interval_position]) - wn_dc) / wn_energy_normalize_factor);
input_samples[input_sample_position] = float(sum);
}
// calculates Hearing aid effect on the signal, done before BM velocity calculation
/*
 * Applies the hearing-aid FIR transfer function to the input signal, one thread
 * per output sample: y[n] = sum_{i=0}^{min(n+1, L)-1} h[i] * x[n - i],
 * truncated at the interval start so no sample before node 0 of the interval
 * (blockIdx.y) is read. Results go to the auxiliary buffer (out-of-place so
 * other threads' reads of input_samples are not disturbed).
 */
__global__ void CudaProcessSignalTroughHearingAID(
	device_jnd_params *input_profiles,
	float *input_samples, // input samples to load
	float *input_samples_auxivulary, // for temporary saving
	int startProfile,
	int overlapNodes, // for start offset
	int IntervalLength, //number of nodes on actual input
	int JND_Interval_Head,
	float Fs,
	float *fir_transfer_function,
	int fir_transfer_function_length
	) {
	int interval_position = blockIdx.x*blockDim.x + threadIdx.x;
	int input_sample_position = blockIdx.y*IntervalLength + interval_position;
	float function_summary = 0.0f;// input_samples[input_sample_position];
	// BUG FIX: was min(interval_position, L), which dropped the h[0]*x[n] tap at
	// every position and produced 0 for the interval's first sample; the IIR
	// variant below uses the equivalent corrected "+1" bound.
	int backward_positions = min(interval_position + 1, fir_transfer_function_length);
	for (int i = 0; i < backward_positions; i++) {
		function_summary = fmaf(fir_transfer_function[i], input_samples[input_sample_position - i], function_summary);
	}
	input_samples_auxivulary[input_sample_position] = function_summary;
}
// calculates Hearing aid effect on the signal can calculate for IIR filters as well, done before BM velocity calculation
// Applies a second transfer-function pass to the signal, done before BM
// velocity calculation. One thread owns one whole interval and walks it
// serially, so each output can depend on arbitrarily many earlier samples.
// NOTE(review): despite the IIR name, the loop reads past *input* samples,
// not past outputs, so as written this is still feed-forward — confirm.
__global__ void CudaProcessSignalTroughHearingAIDIIR(
	device_jnd_params *input_profiles,
	float *input_samples, // input samples to load
	float *input_samples_auxivulary, // for temporary saving
	int startProfile,
	int overlapNodes, // for start offset
	int IntervalLength, //number of nodes on actual input
	int JND_Interval_Head,
	float Fs,
	float *iir_transfer_function,
	int iir_transfer_function_length
	) {
	const int interval_index = blockIdx.x*blockDim.x + threadIdx.x;
	const int first = interval_index*IntervalLength;
	const int last = first + IntervalLength;
	for (int n = first; n < last; ++n) {
		// tap 0 is implicit: the accumulator starts from the current sample
		const int taps = min(n - first + 1, iir_transfer_function_length);
		float acc = input_samples[n];
		for (int j = 1; j < taps; ++j) {
			acc = fmaf(iir_transfer_function[j], input_samples[n - j], acc);
		}
		input_samples_auxivulary[n] = acc;
	}
}
/* Standard C Function: Greatest Common Divisor */
/* Greatest common divisor via iterative Euclid. gcd(0, b) == b, gcd(a, 0) == a. */
int
gcd(int a, int b) {
	while (a != 0) {
		int r = b % a;
		b = a;
		a = r;
	}
	return b;
}
// Cached DC offset and energy-normalization divisor for the white-noise
// buffer; filled by calculateDCANDNornalizationFactor on the first call
// and reused across subsequent intervals.
double wn_dc;
double wn_energy_normalize_factor;
// Same cached statistics for the (optional) external signal buffer.
double signal_dc;
double signal_energy_normalize_factor;
// Adjusts an already-computed energy-normalization divisor according to the
// requested sigma-normalization mode. Modes:
//   0 - leave the divisor as-is (sigma normalized to 1 by the caller);
//   1 - divide by sqrt(sample count) so the *average* energy is normalized;
//   2 - divide by sqrt(Fs * interval) when a positive target interval is given;
//   3 - disable normalization entirely (divisor forced to 1).
// dc and the expected-value window parameters are accepted for interface
// symmetry with the factor-calculation functions but are not used here.
void calculateDCANDNornalizationPostProcess(
	int Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
	double Normalize_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
	int start_dc_expected_value_calculation,
	int end_dc_expected_value_calculation,
	int start_dc_normalized_value_calculation,
	int end_dc_normalized_value_calculation,
	float Fs,
	double &dc,
	double &energy_normalize_factor
	) {
	switch (Normalize_Sigma_Type) {
	case 1:
		// normalize the average energy: divide by sqrt(#samples)
		energy_normalize_factor /= sqrt(static_cast<double>(end_dc_normalized_value_calculation - start_dc_normalized_value_calculation));
		break;
	case 2:
		// normalize to a caller-supplied time interval (seconds)
		if (Normalize_Energy_To_Given_Interval > 0) {
			energy_normalize_factor /= sqrt(Fs*Normalize_Energy_To_Given_Interval);
		}
		break;
	case 3:
		// energy not normalized; identical to option 1 for a pure tone
		energy_normalize_factor = 1;
		break;
	default:
		break;
	}
}
// Derives the DC offset and energy-normalization divisor for an analytically
// generated pure tone: the tone has no DC component and unit sigma, so the
// divisor starts at sqrt(#samples in the normalized window) before the shared
// post-processing applies the Normalize_Sigma_Type adjustment.
void calculateDCANDNornalizationFactorPureTone(
	int Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
	double Normalize_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
	double Remove_Generated_DC,//if 1 removes the 0 frequency value from noise
	int start_dc_expected_value_calculation,
	int end_dc_expected_value_calculation,
	int start_dc_normalized_value_calculation,
	int end_dc_normalized_value_calculation,
	float Fs,
	double &dc,
	double &energy_normalize_factor
	) {
	const int normalized_count = end_dc_normalized_value_calculation - start_dc_normalized_value_calculation;
	dc = 0.0; // a pure tone carries no DC
	energy_normalize_factor = sqrt(static_cast<double>(normalized_count));
	calculateDCANDNornalizationPostProcess(
		Normalize_Sigma_Type,
		Normalize_Energy_To_Given_Interval,
		start_dc_expected_value_calculation,
		end_dc_expected_value_calculation,
		start_dc_normalized_value_calculation,
		end_dc_normalized_value_calculation,
		Fs,
		dc,
		energy_normalize_factor
		);
	//printf("dc=%.4e,energy_normalize_factor=%.4e\n", dc, energy_normalize_factor);
}
// Computes the DC offset and energy-normalization divisor of a sampled
// source buffer (white noise or external signal):
//   dc     = mean over [start_dc_expected.., end_dc_expected..), scaled by
//            Remove_Generated_DC (0 keeps the DC, 1 removes it);
//   factor = sqrt( sum (Source[i] - dc)^2 ) over the normalized window,
// then adjusted per Normalize_Sigma_Type by the shared post-processing.
void calculateDCANDNornalizationFactor(
	float *Source,
	int Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
	double Normalize_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
	double Remove_Generated_DC,//if 1 removes the 0 frequency value from noise
	int start_dc_expected_value_calculation,
	int end_dc_expected_value_calculation,
	int start_dc_normalized_value_calculation,
	int end_dc_normalized_value_calculation,
	float Fs,
	double &dc,
	double &energy_normalize_factor
	) {
	// mean of the expected-value window (accumulated in double)
	dc = 0.0;
	for (int idx = start_dc_expected_value_calculation; idx < end_dc_expected_value_calculation; idx++) {
		dc = dc + Source[idx];
	}
	dc = Remove_Generated_DC * dc / static_cast<double>(end_dc_expected_value_calculation - start_dc_expected_value_calculation);
	// DC-centered energy of the normalized window.
	// Fix: the accumulator was previously seeded with Source[start]^2 without
	// subtracting dc, weighting the first sample inconsistently with every
	// other term of the sum.
	energy_normalize_factor = 0.0;
	for (int idx = start_dc_normalized_value_calculation; idx < end_dc_normalized_value_calculation; idx++) {
		const double centered = Source[idx] - dc;
		energy_normalize_factor = energy_normalize_factor + centered * centered;
	}
	energy_normalize_factor = sqrt(energy_normalize_factor);
	calculateDCANDNornalizationPostProcess(
		Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
		Normalize_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
		start_dc_expected_value_calculation,
		end_dc_expected_value_calculation,
		start_dc_normalized_value_calculation,
		end_dc_normalized_value_calculation,
		Fs,
		dc,
		energy_normalize_factor
	);
	//printf("dc=%.4e,energy_normalize_factor=%.4e\n", dc, energy_normalize_factor);
}
// Builds the per-block M1-SP and throw tolerance tables for the calculated
// profiles on the host, then uploads both tables to the device in one copy
// each. When Relative_Error_Parameters > 0 the tolerances are scaled by
// 10^(factor * dBSPL) of the profile's signal level.
// NOTE(review): assumes Blocks_Per_Interval * calculatedProfiles <=
// MAX_NUMBER_OF_BLOCKS; entries past that product are copied uninitialized —
// confirm callers guarantee the bound. is_first_time_for_parameters_set is
// currently unused.
extern "C" void setupToleranceProfile(
	device_jnd_params *profiles,
	bool is_first_time_for_parameters_set, // for fixing arguments just one time
	float Max_M1_SP_Error_Parameter,
	float Max_Tolerance_Parameter,
	int Relative_Error_Parameters,
	float M1_SP_Fix_Factor,
	float Tolerance_Fix_Factor,
	int Blocks_Per_Interval,
	int from_profile_index,
	int calculatedProfiles
	) noexcept(false) {
	float host_model_max_m1_sp_tolerance[MAX_NUMBER_OF_BLOCKS];
	float host_max_throw_tolerance[MAX_NUMBER_OF_BLOCKS];
	for (int profile = 0; profile < calculatedProfiles; profile++) {
		const int global_profile = from_profile_index + profile;
		float m1_scale = 1.0f;
		float throw_scale = 1.0f;
		if (Relative_Error_Parameters > 0) {
			const float dbspl = static_cast<float>(profiles[global_profile].dBSPLSignal);
			m1_scale = powf(10.0f, M1_SP_Fix_Factor*dbspl);
			throw_scale = powf(10.0f, Tolerance_Fix_Factor*dbspl);
		}
		const int base = Blocks_Per_Interval*profile;
		for (int block = 0; block < Blocks_Per_Interval; block++) {
			host_model_max_m1_sp_tolerance[base + block] = Max_M1_SP_Error_Parameter*m1_scale;
			host_max_throw_tolerance[base + block] = Max_Tolerance_Parameter*throw_scale;
		}
	}
	gpuAssert(cudaMemcpy(cudaHolderGeneratedData.generated_model_max_m1_sp_tolerance, host_model_max_m1_sp_tolerance, MAX_NUMBER_OF_BLOCKS*sizeof(float), cudaMemcpyHostToDevice));
	gpuAssert(cudaMemcpy(cudaHolderGeneratedData.generated_model_throw_tolerance, host_max_throw_tolerance, MAX_NUMBER_OF_BLOCKS*sizeof(float), cudaMemcpyHostToDevice));
}
// Uploads the JND profile array to the device. No-op when the device-side
// copy is already current (profilesLoaded == true).
extern "C" void uploadProfiles(
	device_jnd_params *profiles,
	int numOfProfiles,
	bool profilesLoaded // upload profiles array only if false
	) noexcept(false) {
	if (profilesLoaded) {
		return; // device copy already up to date
	}
	gpuAssert(cudaMemcpy(cudaJNDHolder.cuda_jnd_params, profiles, numOfProfiles*sizeof(device_jnd_params), cudaMemcpyHostToDevice));
}
// Builds the device-side input sample buffer for a batch of JND profiles:
// uploads profiles/noise/signal on first use, derives DC and energy
// normalization factors (cached in the file-level globals), launches the
// input-generation kernel, and optionally runs FIR/IIR hearing-aid filtering
// over the generated input. Timing is recorded at log flag 43.
extern "C" void generateInputFromProfile(
	device_jnd_params *profiles,
	float *WN, // white noise array, single interval length white noise array, expected power level (linear of 1)
	int wn_length, // max length of white noise
	float *Signal, //signal array, single interval length white noise array, expected power level (linear of 1)
	int signal_length, // max length of signal noise
	int signal_mode, // 0 - is for normal frequencies, 1 - is for signal array
	int Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
	int Normalize_Sigma_Type_Signal,
	double Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
	double Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
	int start_dc_expected_value_calculation,
	int end_dc_expected_value_calculation,
	int start_dc_normalized_value_calculation,
	int end_dc_normalized_value_calculation,
	int numOfProfiles,
	bool profilesLoaded, // upload profiles array only if false
	int from_profile_index,
	int calculatedProfiles,
	int overlapNodes, // for start offset
	int IntervalLength, //number of nodes on actual input
	int JND_Interval_Head,
	int JND_Interval_Actual_Length,
	float Fs, // sample frequency
	int Show_Generated_Input, // show generated input from file
	float *target_input, // if Show_Generated_Input is true it will copy here the result with nodes fix per position
	bool Show_Generated_Configuration, // % for debugging show profiles of created input
	bool is_first_time_for_parameters_set, // for fixing arguments just one time
	float Max_M1_SP_Error_Parameter,
	float Max_Tolerance_Parameter,
	int Relative_Error_Parameters,
	float M1_SP_Fix_Factor,
	float Tolerance_Fix_Factor,
	int Blocks_Per_Interval,
	int Show_Run_Time,
	float *fir_transfer_function,
	int fir_transfer_function_length,
	float *iir_transfer_function,
	int iir_transfer_function_length,
	Log &outer_log
	) noexcept(false) {
	cudaEvent_t start, stop;
	cudaEventsCreate(start, stop, Show_Run_Time & 32);
	cudaEventsStartTimer(start, stop, Show_Run_Time & 32);
	// one-time upload of profiles, white noise and (optionally) the signal
	if (!profilesLoaded) {
		gpuAssert(cudaMemcpy(cudaJNDHolder.cuda_jnd_params, profiles, numOfProfiles*sizeof(device_jnd_params), cudaMemcpyHostToDevice));
		gpuAssert(cudaMemcpy(cudaSignalHolder.cuda_WN, WN, wn_length*sizeof(float), cudaMemcpyHostToDevice));
		if (signal_mode) {
			gpuAssert(cudaMemcpy(cudaSignalHolder.cuda_Signal, Signal, signal_length*sizeof(float), cudaMemcpyHostToDevice));
		}
	}
	// zero means "use the whole interval" for both calculation windows
	if (end_dc_normalized_value_calculation == 0) end_dc_normalized_value_calculation = IntervalLength;
	if (end_dc_expected_value_calculation == 0) end_dc_expected_value_calculation = IntervalLength;
	// cache DC / normalization factors in the file-level globals on first use
	if (is_first_time_for_parameters_set) {
		calculateDCANDNornalizationFactor(
			WN,
			Normalize_Sigma_Type, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
			Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
			Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
			start_dc_expected_value_calculation,
			end_dc_expected_value_calculation,
			start_dc_normalized_value_calculation,
			end_dc_normalized_value_calculation,
			Fs,
			wn_dc,
			wn_energy_normalize_factor
		);
		if (signal_mode) {
			calculateDCANDNornalizationFactor(
				Signal,
				Normalize_Sigma_Type_Signal, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
				Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
				Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
				start_dc_expected_value_calculation,
				end_dc_expected_value_calculation,
				start_dc_normalized_value_calculation,
				end_dc_normalized_value_calculation,
				Fs,
				signal_dc,
				signal_energy_normalize_factor
			);
		}
		else {
			// normalize pure tones to 1
			calculateDCANDNornalizationFactorPureTone(
				Normalize_Sigma_Type_Signal, // 0 - normalize sigma to 1, 1 - normalize sigma summary to 1, 2 - normalize sigma summary to given time interval at Normalize_Noise_Energy_To_Given_Interval
				Normalize_Noise_Energy_To_Given_Interval,// if noise generated normalize energy to given signal
				Remove_Generated_Noise_DC,//if 1 removes the 0 frequency value from noise
				start_dc_expected_value_calculation,
				end_dc_expected_value_calculation,
				start_dc_normalized_value_calculation,
				end_dc_normalized_value_calculation,
				Fs,
				signal_dc,
				signal_energy_normalize_factor
			);
		}
	}
	// choose a legal launch shape: cap threads per block at 1024 while keeping
	// threadsPerBlock * blocksOnxDim == IntervalLength * Blocks_Per_Interval
	int threadsPerBlock = IntervalLength / Blocks_Per_Interval;
	int blocksOnxDim = Blocks_Per_Interval;
	if (threadsPerBlock > 1024) {
		// find the smallest divisor of threadsPerBlock >= its square root
		int threads_number = static_cast<int>(ceilf(sqrtf(float(threadsPerBlock))));
		while (gcd(threads_number, threadsPerBlock) != threads_number) threads_number++;
		if (threads_number > 1024) {
			threads_number = threadsPerBlock / threads_number;
		}
		blocksOnxDim = Blocks_Per_Interval*threadsPerBlock / threads_number;
		threadsPerBlock = threads_number;
	}
	setupToleranceProfile(
		profiles,
		is_first_time_for_parameters_set, // for fixing arguments just one time
		Max_M1_SP_Error_Parameter,
		Max_Tolerance_Parameter,
		Relative_Error_Parameters,
		M1_SP_Fix_Factor,
		Tolerance_Fix_Factor,
		Blocks_Per_Interval,
		from_profile_index,
		calculatedProfiles
	);
	// grid: x spans samples within an interval, y spans the profiles batch
	dim3 filtersGrid(blocksOnxDim, calculatedProfiles, 1);
	dim3 filtersThreads(threadsPerBlock, 1, 1);
	if (Show_Generated_Configuration) {
		std::cout << std::boolalpha << "Show Generated Input: " << Show_Generated_Input << std::endl;
		std::cout << "filtersGrid" << showDIM3(filtersGrid) << std::endl;
		std::cout << "filtersThreads" << showDIM3(filtersThreads) << std::endl;
		std::cout << "IntervalLength = " << IntervalLength << std::endl;
		std::cout << "overlapNodes = " << overlapNodes << std::endl;
		std::cout << "calculatedProfiles = " << calculatedProfiles << std::endl;
		std::cout << "from_profile_index = " << from_profile_index << std::endl;
		std::cout << "Fs = " << Fs << std::endl;
		std::cout << "Normalize_Sigma_Type = " << Normalize_Sigma_Type << std::endl;
		std::cout << "WN(DC) = " << wn_dc << std::endl;
		std::cout << "WN(Normal_Factor) = " << wn_energy_normalize_factor << std::endl;
	}
	CudaGenerateInputFromProfile KERNEL_ARGS2(filtersGrid, filtersThreads)(
		cudaJNDHolder.cuda_jnd_params,
		cudaSignalHolder.cuda_WN, // white noise array
		cudaSignalHolder.cuda_Signal,
		cudaHolderData.cuda_input_samples,
		wn_dc,
		wn_energy_normalize_factor, // factor to normalize energy interval
		signal_mode,
		signal_dc,
		signal_energy_normalize_factor,
		from_profile_index,
		calculatedProfiles,
		overlapNodes, // for start offset
		IntervalLength, //number of nodes on actual input
		JND_Interval_Head,
		Fs
	);
	// run hearing-aid filtering only when a non-identity transfer function
	// (length > 1, or a single tap != 1) was supplied for FIR or IIR
	if (fir_transfer_function_length > 1 || (fir_transfer_function_length > 0 && fir_transfer_function[0] != 1) || iir_transfer_function_length > 1 || (iir_transfer_function_length> 0 && iir_transfer_function[0] != 1) ) {
		// processing hear the transfer function
		// NOTE(review): these temporaries leak if gpuAssert throws below —
		// confirm whether callers treat such errors as fatal.
		float *cuda_transfer_function;
		float *cuda_input_samples_auxivulary;
		int processed_input_length = static_cast<int>(filtersGrid.x*filtersGrid.y*filtersGrid.z*filtersThreads.x*filtersThreads.y*filtersThreads.z);
		// one buffer sized for the larger of the two tap arrays, reused for both passes
		gpuAssert(cudaMalloc((void **)&cuda_transfer_function, max(fir_transfer_function_length,iir_transfer_function_length)*sizeof(float)));
		gpuAssert(cudaMemcpy(cuda_transfer_function, fir_transfer_function, fir_transfer_function_length*sizeof(float), cudaMemcpyHostToDevice));
		gpuAssert(cudaMalloc((void **)&cuda_input_samples_auxivulary, processed_input_length*sizeof(float)));
		CudaProcessSignalTroughHearingAID KERNEL_ARGS2(filtersGrid, filtersThreads)(
			cudaJNDHolder.cuda_jnd_params,
			cudaHolderData.cuda_input_samples, // input samples to load
			cuda_input_samples_auxivulary, // for temporary saving
			from_profile_index,
			overlapNodes, // for start offset
			IntervalLength, //number of nodes on actual input
			JND_Interval_Head,
			Fs,
			cuda_transfer_function,
			fir_transfer_function_length
		);
		if (iir_transfer_function_length > 1 || (iir_transfer_function_length > 0 && iir_transfer_function[0] != 1) ) {
			// reverse use of buffer/ input to avoid copying completely
			int threadsIIR = calculatedProfiles;
			int blocksIIR = 1;
			gpuAssert(cudaMemcpy(cuda_transfer_function, iir_transfer_function, iir_transfer_function_length*sizeof(float), cudaMemcpyHostToDevice));
			// factor calculatedProfiles into blocks x threads near its square root
			int threads_number_iir = static_cast<int>(ceilf(sqrtf(float(threadsIIR))));
			while (gcd(threads_number_iir, threadsIIR) != threads_number_iir) threads_number_iir++;
			blocksIIR = threadsIIR / threads_number_iir;
			threadsIIR = threads_number_iir;
			dim3 filtersGridIIR(blocksIIR, 1, 1);
			dim3 filtersThreadsIIR(threadsIIR, 1, 1);
			CudaProcessSignalTroughHearingAIDIIR KERNEL_ARGS2(filtersGridIIR, filtersThreadsIIR)(
				cudaJNDHolder.cuda_jnd_params,
				cuda_input_samples_auxivulary, // input samples to load
				cudaHolderData.cuda_input_samples, // for temporary saving
				from_profile_index,
				overlapNodes, // for start offset
				IntervalLength, //number of nodes on actual input
				JND_Interval_Head,
				Fs,
				cuda_transfer_function,
				iir_transfer_function_length
			);
		} else {
			// FIR only: move the filtered samples back into the main buffer
			gpuAssert(cudaMemcpy(cudaHolderData.cuda_input_samples, cuda_input_samples_auxivulary, processed_input_length*sizeof(float), cudaMemcpyDeviceToDevice));
		}
		gpuAssert(cudaFree(cuda_transfer_function));
		gpuAssert(cudaFree(cuda_input_samples_auxivulary));
	}
	// bit 0 of Show_Generated_Input requests a host copy of the result.
	// NOTE(review): `Show_Generated_Input & 1 > 0` parses as
	// `Show_Generated_Input & (1 > 0)`, which happens to equal the intended
	// `(Show_Generated_Input & 1) > 0` here.
	if (Show_Generated_Input & 1 > 0) {
		gpuAssert(cudaMemcpy(target_input, cudaHolderData.cuda_input_samples, calculatedProfiles*IntervalLength*sizeof(float), cudaMemcpyDeviceToHost));
	}
	outer_log.timeAtFlag(43, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 32, "Input Generation"), Show_Run_Time & 32);
}
// fixed lambda values for output
// Subtracts each cluster's spontaneous-rate floor (CUDA_Nerves_Clusters,
// indexed by blockIdx.y) from the lambda array, clamping at zero; when
// cuda_buffer_update is set, first snapshots the floored values into
// cuda_Buffer.
template<typename T> __global__ void CUDAFIXJND_Lambda(
	volatile T *cuda_Lambda,
	volatile T *cuda_Buffer,
	int cuda_buffer_update) {
	// flat index over (threads, x-blocks, y-blocks = lambda kind)
	const int ind = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*blockIdx.y;
	const float rate_floor = CUDA_Nerves_Clusters[blockIdx.y];
	if (cuda_buffer_update) {
		cuda_Buffer[ind] = T(fmaxf(rate_floor, float(cuda_Lambda[ind])));
	}
	cuda_Lambda[ind] = T(fmaxf(0.0f, float(cuda_Lambda[ind]) - rate_floor));
	// NOTE(review): barrier at kernel end synchronizes nothing observable
	__syncthreads();
}
// Explicit instantiations so the kernel is emitted for both precisions used
// by the host-side dispatcher (updateCUDALambdaArray).
template __global__ void CUDAFIXJND_Lambda<float>(volatile float *cuda_Lambda,
	volatile float *cuda_Buffer,
	int cuda_buffer_update);
template __global__ void CUDAFIXJND_Lambda<double>(volatile double *cuda_Lambda,
	volatile double *cuda_Buffer,
	int cuda_buffer_update);
// Launches CUDAFIXJND_Lambda over (calc_time_nodes x LAMBDA_COUNT) blocks of
// `sections` threads each. Optionally prints the launch configuration
// (Show_Device_Data bit 4), times the launch (Show_Run_Time bit 5) and
// records the elapsed time at log flag 44.
template<class T> extern void updateCUDALambdaArray(T *lambda_array,T* cuda_buffer, size_t calc_time_nodes, int sections,int Show_Run_Time,int Show_Device_Data,int cuda_buffer_update,Log &outer_log) {
	const dim3 fix_grid(calc_time_nodes, LAMBDA_COUNT, 1);
	const dim3 fix_threads(sections, 1, 1);
	if (Show_Device_Data & 16) {
		std::cout << "CUDAFIXJND_Lambda<<<" << showDIM3(fix_grid) << "," << showDIM3(fix_threads) << " >>>(lambda_array);" << std::endl;
	}
	cudaEvent_t start, stop;
	cudaEventsCreate(start, stop, Show_Run_Time & 32);
	cudaEventsStartTimer(start, stop, Show_Run_Time & 32);
	CUDAFIXJND_Lambda<T> << <fix_grid, fix_threads >> >(lambda_array, cuda_buffer, cuda_buffer_update);
	outer_log.timeAtFlag(44, cudaEventsStopQueryTimer(start, stop, Show_Run_Time & 32, "Fix Lambda"), Show_Run_Time & 32);
}
|
890fc4528885a2c53f4da97f0f7bcdc17309064f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits>
#include "internal.h"
#include <pcl/gpu/utils/safe_call.hpp>
#include <pcl/gpu/utils/texture_binder.hpp>
#include "npp.h"
#include <stdio.h>
namespace pcl
{
  namespace device
  {
    // 1-D colormap texture: label value -> RGBA color.
    texture<uchar4, hipTextureType1D, hipReadModeElementType> cmapTex;

    // Paints every label pixel with its colormap entry.
    __global__ void colorKernel(const PtrStepSz<unsigned char> labels, PtrStep<uchar4> output)
    {
      const int col = threadIdx.x + blockIdx.x * blockDim.x;
      const int row = threadIdx.y + blockIdx.y * blockDim.y;

      if (col >= labels.cols || row >= labels.rows)
        return;

      const int label = labels.ptr(row)[col];
      output.ptr(row)[col] = tex1Dfetch(cmapTex, label);
    }

    // Blends label colors over the RGBA frame: every pixel takes its
    // colormap color except label 8, which keeps the original camera color.
    // NOTE(review): original comment says label 8 is "RHip but should be
    // background" — confirm the label id against the people-label enum.
    __global__ void mixedColorKernel(const PtrStepSz<unsigned char> labels, PtrStepSz<uchar4> rgba, PtrStep<uchar4> output)
    {
      const int col = threadIdx.x + blockIdx.x * blockDim.x;
      const int row = threadIdx.y + blockIdx.y * blockDim.y;

      if (col >= labels.cols || row >= labels.rows)
        return;

      const int label = labels.ptr(row)[col];
      uchar4 color = rgba.ptr(row)[col];
      if (label != 8)
        color = tex1Dfetch(cmapTex, label);
      output.ptr(row)[col] = color;
    }
  }
}
void pcl::device::colorLMap(const Labels& labels, const DeviceArray<uchar4>& map, Image& rgba)
{
cmapTex.addressMode[0] = hipAddressModeClamp;
TextureBinder binder(map, cmapTex);
dim3 block(32, 8);
dim3 grid( divUp(labels.cols(), block.x), divUp(labels.rows(), block.y) );
hipLaunchKernelGGL(( colorKernel), dim3(grid), dim3(block) , 0, 0, labels, rgba );
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
void pcl::device::mixedColorMap(const Labels& labels, const DeviceArray<uchar4>& map, const Image& rgba, Image& output)
{
cmapTex.addressMode[0] = hipAddressModeClamp;
TextureBinder binder(map, cmapTex);
dim3 block(32, 8);
dim3 grid(divUp(labels.cols(), block.x), divUp(labels.rows(), block.y));
hipLaunchKernelGGL(( mixedColorKernel), dim3(grid), dim3(block), 0, 0, labels, rgba, output);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// TODO implement getError string for NPP and move this to the same place with cudaSafeCall
// nppSafeCall: routes an NPP status code through ___nppSafeCall with file and
// line information; where the compiler provides __func__ (GCC), the enclosing
// function name is forwarded as well.
#if defined(__GNUC__)
#define nppSafeCall(expr) pcl::gpu::___nppSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__HIPCC__) || defined(__MSVC__) */
#define nppSafeCall(expr) pcl::gpu::___nppSafeCall(expr, __FILE__, __LINE__)
#endif
namespace pcl
{
  namespace gpu
  {
    // Forwards a negative NPP status code to the project error handler;
    // non-negative codes (success and warnings) pass through silently.
    void ___nppSafeCall(int err_code, const char *file, const int line, const char *func = "")
    {
      if (err_code >= 0)
        return;

      char buf[4096];
      sprintf(buf, "NppErrorCode = %d", err_code);
      error(buf, file, line, func);
    }
  }
}
// Zero-fills the whole mask using NPP.
void pcl::device::setZero(Mask& mask)
{
  NppiSize sz;
  sz.height = mask.rows();
  sz.width = mask.cols();
  nppSafeCall( nppiSet_8u_C1R( 0, mask, (int)mask.step(), sz) );
}
// Uploads an all-ones (255) KSIZE_X x KSIZE_Y rectangular structuring
// element; no-op when the kernel buffer is already the right size.
void pcl::device::Dilatation::prepareRect5x5Kernel(DeviceArray<unsigned char>& kernel)
{
  const size_t total = KSIZE_X * KSIZE_Y;
  if (kernel.size() == total)
    return;

  std::vector<unsigned char> host(total, (unsigned char)255);
  kernel.upload(host);
}
// Morphological dilation of src into dst with an NPP rectangular kernel.
// dst is zeroed first; the processed ROI is inset by the anchor so the
// structuring element never reads outside the image.
// NOTE(review): the ROI uses cols - KSIZE_X / rows - KSIZE_Y (not +1), which
// leaves a one-pixel-wider untouched border than strictly required — confirm
// against nppiDilate_8u_C1R's ROI/anchor convention.
void pcl::device::Dilatation::invoke(const Mask& src, const Kernel& kernel, Mask& dst)
{
  dst.create(src.rows(), src.cols());
  setZero(dst);  // border pixels outside the ROI stay zero

  NppiSize sz;
  sz.width = src.cols() - KSIZE_X;
  sz.height = src.rows() - KSIZE_Y;

  NppiSize ksz;
  ksz.width = KSIZE_X;
  ksz.height = KSIZE_Y;

  NppiPoint anchor;
  anchor.x = ANCH_X;
  anchor.y = ANCH_Y;

  // This one uses Nvidia performance primitives
  nppSafeCall( nppiDilate_8u_C1R(src.ptr(ANCH_Y) + ANCH_X, (int)src.step(),
      dst.ptr(ANCH_Y) + ANCH_X, (int)dst.step(), sz, kernel, ksz, anchor) );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
  namespace device
  {
    // Keeps depth where inv_mask is non-zero; masked-out pixels are set to
    // USHRT_MAX so downstream stages ignore them.
    __global__ void fgDepthKernel(const PtrStepSz<unsigned short> depth1, const PtrStep<unsigned char> inv_mask, PtrStep<unsigned short> depth2)
    {
      const int x = blockIdx.x * blockDim.x + threadIdx.x;
      const int y = blockIdx.y * blockDim.y + threadIdx.y;

      if (x >= depth1.cols || y >= depth1.rows)
        return;

      const unsigned short kept = depth1.ptr(y)[x];
      depth2.ptr(y)[x] = inv_mask.ptr(y)[x] ? kept : std::numeric_limits<unsigned short>::max();
    }
  }
}
void pcl::device::prepareForeGroundDepth(const Depth& depth1, Mask& inverse_mask, Depth& depth2)
{
int cols = depth1.cols();
int rows = depth1.rows();
depth2.create(rows, cols);
dim3 block(32, 8);
dim3 grid( divUp(cols, block.x), divUp(rows, block.y) );
hipLaunchKernelGGL(( fgDepthKernel), dim3(grid), dim3(block) , 0, 0, depth1, inverse_mask, depth2 );
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// compute hue functionality
namespace pcl
{
  namespace device
  {
    // Computes the HSV hue (degrees, [0, 360)) of a packed 0xAABBGGRR color.
    // Returns -1 for black (v == 0).
    // NOTE(review): non-black achromatic pixels (r == g == b) still yield
    // NaN (0 * inf) — confirm downstream consumers tolerate that.
    __device__ __host__ __forceinline__ float computeHueFunc (int rgba)
    {
      int r = (rgba      ) & 0xFF;
      int g = (rgba >>  8) & 0xFF;
      int b = (rgba >> 16) & 0xFF;

      int v = max (r, max (g, b));

      if (v == 0)
        return -1;

      // Standard RGB->HSV: only the channel difference is scaled by
      // 1/(max - min); the sector offsets 2 and 4 are added afterwards.
      // Fix: the offsets were previously inside the parentheses and got
      // multiplied by div_inv too, skewing green/blue-dominant hues.
      float div_inv = 1.f / (v - min (r, min (g, b)) );

      float h;
      if (r == v)
          h =     (g - b) * div_inv;
      else if (g == v)
          h = 2 + (b - r) * div_inv;
      else
          h = 4 + (r - g) * div_inv;

      h *= 60;
      if (h < 0)
          h += 360;

      return h;
    }
  }
}
// Host-callable wrapper around the shared host/device hue routine.
float pcl::device::computeHue(int rgba)
{
  const float hue = computeHueFunc(rgba);
  return hue;
}
namespace pcl
{
  namespace device
  {
    // Per-pixel hue; pixels with no depth reading (d == 0) become NaN.
    __global__ void computeHueKernel(const PtrStepSz<int> rgba, const PtrStep<unsigned short> depth, PtrStep<float> hue)
    {
      const int x = blockIdx.x * blockDim.x + threadIdx.x;
      const int y = blockIdx.y * blockDim.y + threadIdx.y;

      if (x >= rgba.cols || y >= rgba.rows)
        return;

      if (depth.ptr(y)[x] == 0)
        hue.ptr(y)[x] = std::numeric_limits<float>::quiet_NaN();
      else
        hue.ptr(y)[x] = computeHueFunc(rgba.ptr(y)[x]);
    }
  }
}
void pcl::device::computeHueWithNans(const Image& rgba, const Depth& depth, HueImage& hue)
{
hue.create(rgba.rows(), rgba.cols());
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(rgba.cols(), block.x);
grid.y = divUp(rgba.rows(), block.y);
hipLaunchKernelGGL(( computeHueKernel), dim3(grid), dim3(block), 0, 0, rgba, depth, hue);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
namespace pcl
{
  namespace device
  {
    // Back-projects a depth map (millimeters) into a metric point cloud via
    // the pinhole intrinsics; the w component is left NaN.
    // NOTE(review): zero-depth pixels produce (0,0,0), not NaN points —
    // unlike computeHueKernel's invalid-depth handling; confirm intended.
    // (Kernel name keeps the historical "Kenrel" typo for ABI stability.)
    __global__ void reprojectDepthKenrel(const PtrStepSz<unsigned short> depth, const Intr intr, PtrStep<float4> cloud)
    {
      const int x = blockIdx.x * blockDim.x + threadIdx.x;
      const int y = blockIdx.y * blockDim.y + threadIdx.y;

      if (x >= depth.cols || y >= depth.rows)
        return;

      const float z = depth.ptr(y)[x] * 0.001f; // mm -> meters

      float4 p;
      p.x = z * (x - intr.cx) / intr.fx;
      p.y = z * (y - intr.cy) / intr.fy;
      p.z = z;
      p.w = std::numeric_limits<float>::quiet_NaN();

      cloud.ptr(y)[x] = p;
    }
  }
}
void pcl::device::computeCloud(const Depth& depth, const Intr& intr, Cloud& cloud)
{
cloud.create(depth.rows(), depth.cols());
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(depth.cols(), block.x);
grid.y = divUp(depth.rows(), block.y);
hipLaunchKernelGGL(( reprojectDepthKenrel), dim3(grid), dim3(block), 0, 0, depth, intr, cloud);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
| 890fc4528885a2c53f4da97f0f7bcdc17309064f.cu | #include <limits>
#include "internal.h"
#include <pcl/gpu/utils/safe_call.hpp>
#include <pcl/gpu/utils/texture_binder.hpp>
#include "npp.h"
#include <stdio.h>
namespace pcl
{
  namespace device
  {
    // 1-D colormap texture: label value -> RGBA color.
    texture<uchar4, cudaTextureType1D, cudaReadModeElementType> cmapTex;

    // Paints every label pixel with its colormap entry.
    __global__ void colorKernel(const PtrStepSz<unsigned char> labels, PtrStep<uchar4> output)
    {
      const int col = threadIdx.x + blockIdx.x * blockDim.x;
      const int row = threadIdx.y + blockIdx.y * blockDim.y;

      if (col >= labels.cols || row >= labels.rows)
        return;

      const int label = labels.ptr(row)[col];
      output.ptr(row)[col] = tex1Dfetch(cmapTex, label);
    }

    // Blends label colors over the RGBA frame: every pixel takes its
    // colormap color except label 8, which keeps the original camera color.
    // NOTE(review): original comment says label 8 is "RHip but should be
    // background" — confirm the label id against the people-label enum.
    __global__ void mixedColorKernel(const PtrStepSz<unsigned char> labels, PtrStepSz<uchar4> rgba, PtrStep<uchar4> output)
    {
      const int col = threadIdx.x + blockIdx.x * blockDim.x;
      const int row = threadIdx.y + blockIdx.y * blockDim.y;

      if (col >= labels.cols || row >= labels.rows)
        return;

      const int label = labels.ptr(row)[col];
      uchar4 color = rgba.ptr(row)[col];
      if (label != 8)
        color = tex1Dfetch(cmapTex, label);
      output.ptr(row)[col] = color;
    }
  }
}
// Colorizes a label image through the cmapTex colormap texture.
// Launch: 32x8 thread blocks tiling the label image.
void pcl::device::colorLMap(const Labels& labels, const DeviceArray<uchar4>& map, Image& rgba)
{
  // clamp colormap lookups at the table ends
  cmapTex.addressMode[0] = cudaAddressModeClamp;
  TextureBinder binder(map, cmapTex);

  const dim3 block(32, 8);
  const dim3 grid(divUp(labels.cols(), block.x), divUp(labels.rows(), block.y));

  colorKernel<<< grid, block >>>( labels, rgba );
  cudaSafeCall( cudaGetLastError() );
  cudaSafeCall( cudaDeviceSynchronize() );
}
// Overlays label colors onto the RGBA frame (see mixedColorKernel).
void pcl::device::mixedColorMap(const Labels& labels, const DeviceArray<uchar4>& map, const Image& rgba, Image& output)
{
  // clamp colormap lookups at the table ends
  cmapTex.addressMode[0] = cudaAddressModeClamp;
  TextureBinder binder(map, cmapTex);

  const dim3 block(32, 8);
  const dim3 grid(divUp(labels.cols(), block.x), divUp(labels.rows(), block.y));

  mixedColorKernel<<<grid, block>>>(labels, rgba, output);
  cudaSafeCall( cudaGetLastError() );
  cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// TODO implement getError string for NPP and move this to the same place with cudaSafeCall
// nppSafeCall: routes an NPP status code through ___nppSafeCall with file and
// line information; where the compiler provides __func__ (GCC), the enclosing
// function name is forwarded as well.
#if defined(__GNUC__)
#define nppSafeCall(expr) pcl::gpu::___nppSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define nppSafeCall(expr) pcl::gpu::___nppSafeCall(expr, __FILE__, __LINE__)
#endif
namespace pcl
{
  namespace gpu
  {
    // Forwards a negative NPP status code to the project error handler;
    // non-negative codes (success and warnings) pass through silently.
    void ___nppSafeCall(int err_code, const char *file, const int line, const char *func = "")
    {
      if (err_code >= 0)
        return;

      char buf[4096];
      sprintf(buf, "NppErrorCode = %d", err_code);
      error(buf, file, line, func);
    }
  }
}
// Zero-fills the whole mask using NPP.
void pcl::device::setZero(Mask& mask)
{
  NppiSize sz;
  sz.height = mask.rows();
  sz.width = mask.cols();
  nppSafeCall( nppiSet_8u_C1R( 0, mask, (int)mask.step(), sz) );
}
// Uploads an all-ones (255) KSIZE_X x KSIZE_Y rectangular structuring
// element; no-op when the kernel buffer is already the right size.
void pcl::device::Dilatation::prepareRect5x5Kernel(DeviceArray<unsigned char>& kernel)
{
  const size_t total = KSIZE_X * KSIZE_Y;
  if (kernel.size() == total)
    return;

  std::vector<unsigned char> host(total, (unsigned char)255);
  kernel.upload(host);
}
// Morphological dilation of src into dst with an NPP rectangular kernel.
// dst is zeroed first; the processed ROI is inset by the anchor so the
// structuring element never reads outside the image.
// NOTE(review): the ROI uses cols - KSIZE_X / rows - KSIZE_Y (not +1), which
// leaves a one-pixel-wider untouched border than strictly required — confirm
// against nppiDilate_8u_C1R's ROI/anchor convention.
void pcl::device::Dilatation::invoke(const Mask& src, const Kernel& kernel, Mask& dst)
{
  dst.create(src.rows(), src.cols());
  setZero(dst);  // border pixels outside the ROI stay zero

  NppiSize sz;
  sz.width = src.cols() - KSIZE_X;
  sz.height = src.rows() - KSIZE_Y;

  NppiSize ksz;
  ksz.width = KSIZE_X;
  ksz.height = KSIZE_Y;

  NppiPoint anchor;
  anchor.x = ANCH_X;
  anchor.y = ANCH_Y;

  // This one uses Nvidia performance primitives
  nppSafeCall( nppiDilate_8u_C1R(src.ptr(ANCH_Y) + ANCH_X, (int)src.step(),
      dst.ptr(ANCH_Y) + ANCH_X, (int)dst.step(), sz, kernel, ksz, anchor) );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
  namespace device
  {
    // Keeps depth where inv_mask is non-zero; masked-out pixels are set to
    // USHRT_MAX so downstream stages ignore them.
    __global__ void fgDepthKernel(const PtrStepSz<unsigned short> depth1, const PtrStep<unsigned char> inv_mask, PtrStep<unsigned short> depth2)
    {
      const int x = blockIdx.x * blockDim.x + threadIdx.x;
      const int y = blockIdx.y * blockDim.y + threadIdx.y;

      if (x >= depth1.cols || y >= depth1.rows)
        return;

      const unsigned short kept = depth1.ptr(y)[x];
      depth2.ptr(y)[x] = inv_mask.ptr(y)[x] ? kept : std::numeric_limits<unsigned short>::max();
    }
  }
}
// Produces a foreground-only depth map: pixels outside inverse_mask are
// pushed to USHRT_MAX (see fgDepthKernel).
void pcl::device::prepareForeGroundDepth(const Depth& depth1, Mask& inverse_mask, Depth& depth2)
{
  const int cols = depth1.cols();
  const int rows = depth1.rows();

  depth2.create(rows, cols);

  const dim3 block(32, 8);
  const dim3 grid(divUp(cols, block.x), divUp(rows, block.y));

  fgDepthKernel<<< grid, block >>>( depth1, inverse_mask, depth2 );
  cudaSafeCall( cudaGetLastError() );
  cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// compute hue functionality
namespace pcl
{
  namespace device
  {
    // Computes the HSV hue (degrees, [0, 360)) of a packed 0xAABBGGRR color.
    // Returns -1 for black (v == 0).
    // NOTE(review): non-black achromatic pixels (r == g == b) still yield
    // NaN (0 * inf) — confirm downstream consumers tolerate that.
    __device__ __host__ __forceinline__ float computeHueFunc (int rgba)
    {
      int r = (rgba      ) & 0xFF;
      int g = (rgba >>  8) & 0xFF;
      int b = (rgba >> 16) & 0xFF;

      int v = max (r, max (g, b));

      if (v == 0)
        return -1;

      // Standard RGB->HSV: only the channel difference is scaled by
      // 1/(max - min); the sector offsets 2 and 4 are added afterwards.
      // Fix: the offsets were previously inside the parentheses and got
      // multiplied by div_inv too, skewing green/blue-dominant hues.
      float div_inv = 1.f / (v - min (r, min (g, b)) );

      float h;
      if (r == v)
          h =     (g - b) * div_inv;
      else if (g == v)
          h = 2 + (b - r) * div_inv;
      else
          h = 4 + (r - g) * div_inv;

      h *= 60;
      if (h < 0)
          h += 360;

      return h;
    }
  }
}
// Host-callable wrapper around the shared host/device hue routine.
float pcl::device::computeHue(int rgba)
{
  const float hue = computeHueFunc(rgba);
  return hue;
}
namespace pcl
{
  namespace device
  {
    // Per-pixel hue; pixels with no depth reading (d == 0) become NaN.
    __global__ void computeHueKernel(const PtrStepSz<int> rgba, const PtrStep<unsigned short> depth, PtrStep<float> hue)
    {
      const int x = blockIdx.x * blockDim.x + threadIdx.x;
      const int y = blockIdx.y * blockDim.y + threadIdx.y;

      if (x >= rgba.cols || y >= rgba.rows)
        return;

      if (depth.ptr(y)[x] == 0)
        hue.ptr(y)[x] = std::numeric_limits<float>::quiet_NaN();
      else
        hue.ptr(y)[x] = computeHueFunc(rgba.ptr(y)[x]);
    }
  }
}
// Builds a hue image from the RGBA frame, marking depth-less pixels NaN.
void pcl::device::computeHueWithNans(const Image& rgba, const Depth& depth, HueImage& hue)
{
  hue.create(rgba.rows(), rgba.cols());

  const dim3 block(32, 8);
  const dim3 grid(divUp(rgba.cols(), block.x), divUp(rgba.rows(), block.y));

  computeHueKernel<<<grid, block>>>(rgba, depth, hue);
  cudaSafeCall( cudaGetLastError() );
  cudaSafeCall( cudaDeviceSynchronize() );
}
namespace pcl
{
namespace device
{
// Back-projects a depth map (millimetres) into a 3-D point cloud using the
// pinhole intrinsics `intr`; one thread per pixel.
// NOTE(review): "Kenrel" is a typo for "Kernel", kept because computeCloud()
// below launches it under this name.
__global__ void reprojectDepthKenrel(const PtrStepSz<unsigned short> depth, const Intr intr, PtrStep<float4> cloud)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
{
// p starts fully NaN, but x/y/z are unconditionally overwritten below, so
// only the w component actually stays NaN.
constexpr float qnan = std::numeric_limits<float>::quiet_NaN();
float4 p = make_float4(qnan, qnan, qnan, qnan);
int d = depth.ptr(y)[x];
float z = d * 0.001f; // mm -> meters
// NOTE(review): when d == 0 this yields the point (0,0,0) rather than a NaN
// point — confirm intent; computeHueKernel above treats d == 0 as invalid.
p.x = z * (x - intr.cx) / intr.fx;
p.y = z * (y - intr.cy) / intr.fy;
p.z = z;
cloud.ptr(y)[x] = p;
}
}
}
}
/** \brief Converts a millimetre depth map into a metric 3-D point cloud.
  *
  * Allocates \a cloud to match \a depth, launches reprojectDepthKenrel
  * (pinhole back-projection with intrinsics \a intr) and blocks until done.
  */
void pcl::device::computeCloud(const Depth& depth, const Intr& intr, Cloud& cloud)
{
  const int width  = depth.cols();
  const int height = depth.rows();
  cloud.create(height, width);

  const dim3 block(32, 8);
  const dim3 grid(divUp(width, block.x), divUp(height, block.y));

  reprojectDepthKenrel<<<grid, block>>>(depth, intr, cloud);
  cudaSafeCall(cudaGetLastError());
  cudaSafeCall(cudaDeviceSynchronize());
}
|
6358940f8507e13dce12d937d03a7aafa9dc646f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SPkernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Launch-configuration sweep benchmark for SPkernel: for each requested matrix
// size it tries 20 block shapes, warms up, then times 1000 back-to-back
// launches and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// NOTE(review): argv[1] is dereferenced without an argc check, and values > 7
// read past the end of matrices_.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int k = 1;
int m = 2;
int n = XSIZE*YSIZE;
// NOTE(review): hipMalloc takes a size in BYTES; XSIZE*YSIZE bytes holds only
// n/4 floats (or ints) while the kernel is handed n — likely should be
// n*sizeof(float) / n*sizeof(int). Confirm against SPkernel.cu.
// NOTE(review): these allocations are never hipFree'd (leaked every iteration).
float *searchPoints = NULL;
hipMalloc(&searchPoints, XSIZE*YSIZE);
float *referencePoints = NULL;
hipMalloc(&referencePoints, XSIZE*YSIZE);
int *indices = NULL;
hipMalloc(&indices, XSIZE*YSIZE);
// Round the domain up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) is a no-op that forces lazy context creation before timing.
hipFree(0);hipLaunchKernelGGL((
SPkernel), dim3(gridBlock),dim3(threadBlock), 0, 0, k,m,n,searchPoints,referencePoints,indices);
hipDeviceSynchronize();
// Warm-up launches (untimed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
SPkernel), dim3(gridBlock),dim3(threadBlock), 0, 0, k,m,n,searchPoints,referencePoints,indices);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
SPkernel), dim3(gridBlock),dim3(threadBlock), 0, 0, k,m,n,searchPoints,referencePoints,indices);
}
// NOTE(review): no synchronize before `end`, so this measures enqueue
// throughput rather than kernel completion — confirm intent.
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6358940f8507e13dce12d937d03a7aafa9dc646f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SPkernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Launch-configuration sweep benchmark for SPkernel: for each requested matrix
 * size it tries 20 block shapes, warms the kernel up, then times 1000
 * back-to-back launches and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 */
int main(int argc, char **argv) {
	cudaSetDevice(0);
	// Robustness fix: the original dereferenced argv[1] without checking argc.
	if (argc < 2) {
		fprintf(stderr, "usage: %s <matrix_count (1..7)>\n", argv[0]);
		return 1;
	}
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	// Robustness fix: matrices_ has exactly 7 entries; clamp to avoid OOB reads.
	if (matrix_len > 7) matrix_len = 7;
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			int k = 1;
			int m = 2;
			int n = XSIZE * YSIZE;  // element count handed to the kernel
			// Bug fix: cudaMalloc takes a size in BYTES. The original passed
			// XSIZE*YSIZE, i.e. room for only n/4 floats (or ints) while the
			// kernel is told there are n elements.
			float *searchPoints = NULL;
			cudaMalloc(&searchPoints, (size_t)n * sizeof(float));
			float *referencePoints = NULL;
			cudaMalloc(&referencePoints, (size_t)n * sizeof(float));
			int *indices = NULL;
			cudaMalloc(&indices, (size_t)n * sizeof(int));
			// Round the domain up to a multiple of the block shape.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) iXSIZE++;
			while (iYSIZE % BLOCKY != 0) iYSIZE++;
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);  // no-op free: forces lazy context creation before timing
			SPkernel<<<gridBlock, threadBlock>>>(k, m, n, searchPoints, referencePoints, indices);
			cudaDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {  // warm-up
				SPkernel<<<gridBlock, threadBlock>>>(k, m, n, searchPoints, referencePoints, indices);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				SPkernel<<<gridBlock, threadBlock>>>(k, m, n, searchPoints, referencePoints, indices);
			}
			// NOTE(review): no synchronize before `end`; this measures enqueue
			// throughput, not kernel completion — confirm before adding a sync.
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
			// Leak fix: the original never released the per-iteration buffers.
			cudaFree(searchPoints);
			cudaFree(referencePoints);
			cudaFree(indices);
		}
	}
	return 0;
}
2f7252dc51b3d70b65001ffd0d8f57143624e829.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2017 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file psroi_pooling.cu
* \brief psroi pooling operator
* \author Yi Li, Tairui Chen, Guodong Zhang, Jifeng Dai
*/
#include "./psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define PSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
// Position-sensitive ROI average pooling, one thread per output element.
// Output layout is (n, ctop, ph, pw); each row of bottom_rois is
// [batch_index, x1, y1, x2, y2] in input-image coordinates, scaled to the
// feature map by spatial_scale. mapping_channel records, per output element,
// the input channel that was pooled (consumed by the backward pass).
template <typename DType>
__global__ void PSROIPoolForwardKernel(
  const int count,
  const DType* bottom_data,
  const DType spatial_scale,  // FIX: removed a stray "f" token after this parameter that made the declaration ill-formed
  const int channels,
  const int height, const int width,
  const int pooled_height, const int pooled_width,
  const DType* bottom_rois,
  const int output_dim,
  const int group_size,
  DType* top_data,
  DType* mapping_channel) {
  CUDA_KERNEL_LOOP(index, count) {
    // Decompose the flat index into (n, ctop, ph, pw).
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling
    const DType* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
    DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
    DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
    DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
    // Force too small ROIs to be 1x1 (0.1 avoids zero-sized bins)
    DType roi_width = max(roi_end_w - roi_start_w, 0.1);
    DType roi_height = max(roi_end_h - roi_start_h, 0.1);
    // Bin extent at the bottom (input) resolution
    DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
    DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
    int hstart = floor(static_cast<DType>(ph) * bin_size_h + roi_start_h);
    int wstart = floor(static_cast<DType>(pw) * bin_size_w + roi_start_w);
    int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h);
    int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w);
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart, 0), height);
    hend = min(max(hend, 0), height);
    wstart = min(max(wstart, 0), width);
    wend = min(max(wend, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Position-sensitive channel: each (gh, gw) cell of the group grid pools
    // from its own dedicated input channel.
    int gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
    int gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
    gw = min(max(gw, 0), group_size - 1);
    gh = min(max(gh, 0), group_size - 1);
    int c = (ctop * group_size + gh) * group_size + gw;
    const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
    DType out_sum = 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        out_sum += offset_bottom_data[h * width + w];
      }
    }
    DType bin_area = (hend - hstart) * (wend - wstart);
    top_data[index] = is_empty ? (DType)0. : out_sum / bin_area;
    mapping_channel[index] = c;  // remembered for the backward pass
  }
}
// Host launcher for the forward pass: derives tensor geometry from mshadow
// shapes and launches one thread per output element on the tensor's stream.
// NOTE(review): the `<< < ... >> >` spelling is CUDA triple-chevron launch
// syntax that hipify left unconverted — confirm it compiles under ROCm.
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
// LOG(INFO) << "PSROIPoolForward";
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *top_data = out.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr);
PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
// Backward pass: scatters each output gradient uniformly over the input bin
// it averaged, into the channel recorded by the forward pass. atomicAdd is
// required because bins of overlapping ROIs touch the same input cells.
// NOTE(review): assumes bottom_diff was zero-initialised by the caller
// (gradients only accumulate here) — confirm.
template <typename DType>
__global__ void PSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const DType* mapping_channel,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
DType* bottom_diff,
const DType* bottom_rois) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom (mirrors the forward pass bin geometry)
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom: channel chosen by the forward pass for this element
int c = mapping_channel[index];
DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
DType bin_area = (hend - hstart)*(wend - wstart);
DType diff_val = is_empty ? (DType)0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
// mxnet_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
// Host launcher for the backward pass; geometry is derived from the gradient
// tensors and the kernel runs on in_grad's stream.
// NOTE(review): same unconverted CUDA `<< < ... >> >` launch syntax as the
// forward launcher — confirm ROCm compatibility.
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
// LOG(INFO) << "PSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *bottom_diff = in_grad.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, mapping_channel_ptr, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, output_dim_, bottom_diff, bottom_rois);
PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
} // namespace cuda
// Thin dispatch shim in namespace mshadow: forwards to the implementation in
// namespace cuda above.
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolForward(out, data, bbox, mapping_channel, spatial_scale, output_dim_, group_size_);
}
// Thin dispatch shim in namespace mshadow: forwards to the implementation in
// namespace cuda above.
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, mapping_channel, spatial_scale, output_dim_);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory: instantiates PSROIPoolingOp for the requested dtype
// (real types only, via MSHADOW_REAL_TYPE_SWITCH).
template<>
Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new PSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 2f7252dc51b3d70b65001ffd0d8f57143624e829.cu | /*!
* Copyright (c) 2017 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file psroi_pooling.cu
* \brief psroi pooling operator
* \author Yi Li, Tairui Chen, Guodong Zhang, Jifeng Dai
*/
#include "./psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define PSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
// Position-sensitive ROI average pooling, one thread per output element.
// Output layout is (n, ctop, ph, pw); each row of bottom_rois is
// [batch_index, x1, y1, x2, y2] in input-image coordinates, scaled to the
// feature map by spatial_scale. mapping_channel records, per output element,
// the input channel that was pooled (consumed by the backward pass).
template <typename DType>
__global__ void PSROIPoolForwardKernel(
  const int count,
  const DType* bottom_data,
  const DType spatial_scale,  // FIX: removed a stray "f" token after this parameter that made the declaration ill-formed
  const int channels,
  const int height, const int width,
  const int pooled_height, const int pooled_width,
  const DType* bottom_rois,
  const int output_dim,
  const int group_size,
  DType* top_data,
  DType* mapping_channel) {
  CUDA_KERNEL_LOOP(index, count) {
    // Decompose the flat index into (n, ctop, ph, pw).
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling
    const DType* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
    DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
    DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
    DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
    // Force too small ROIs to be 1x1 (0.1 avoids zero-sized bins)
    DType roi_width = max(roi_end_w - roi_start_w, 0.1);
    DType roi_height = max(roi_end_h - roi_start_h, 0.1);
    // Bin extent at the bottom (input) resolution
    DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
    DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
    int hstart = floor(static_cast<DType>(ph) * bin_size_h + roi_start_h);
    int wstart = floor(static_cast<DType>(pw) * bin_size_w + roi_start_w);
    int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h);
    int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w);
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart, 0), height);
    hend = min(max(hend, 0), height);
    wstart = min(max(wstart, 0), width);
    wend = min(max(wend, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Position-sensitive channel: each (gh, gw) cell of the group grid pools
    // from its own dedicated input channel.
    int gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
    int gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
    gw = min(max(gw, 0), group_size - 1);
    gh = min(max(gh, 0), group_size - 1);
    int c = (ctop * group_size + gh) * group_size + gw;
    const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
    DType out_sum = 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        out_sum += offset_bottom_data[h * width + w];
      }
    }
    DType bin_area = (hend - hstart) * (wend - wstart);
    top_data[index] = is_empty ? (DType)0. : out_sum / bin_area;
    mapping_channel[index] = c;  // remembered for the backward pass
  }
}
// Host launcher for the forward pass: derives tensor geometry from mshadow
// shapes and launches one thread per output element on the tensor's stream.
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
// LOG(INFO) << "PSROIPoolForward";
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *top_data = out.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr);
PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
// Backward pass: scatters each output gradient uniformly over the input bin
// it averaged, into the channel recorded by the forward pass. atomicAdd is
// required because bins of overlapping ROIs touch the same input cells.
// NOTE(review): assumes bottom_diff was zero-initialised by the caller
// (gradients only accumulate here) — confirm.
template <typename DType>
__global__ void PSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const DType* mapping_channel,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
DType* bottom_diff,
const DType* bottom_rois) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom (mirrors the forward pass bin geometry)
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom: channel chosen by the forward pass for this element
int c = mapping_channel[index];
DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
DType bin_area = (hend - hstart)*(wend - wstart);
DType diff_val = is_empty ? (DType)0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
// mxnet_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
// Host launcher for the backward pass; geometry is derived from the gradient
// tensors and the kernel runs on in_grad's stream.
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
// LOG(INFO) << "PSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *bottom_diff = in_grad.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, mapping_channel_ptr, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, output_dim_, bottom_diff, bottom_rois);
PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace cuda
// Thin dispatch shim in namespace mshadow: forwards to the implementation in
// namespace cuda above.
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolForward(out, data, bbox, mapping_channel, spatial_scale, output_dim_, group_size_);
}
// Thin dispatch shim in namespace mshadow: forwards to the implementation in
// namespace cuda above.
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, mapping_channel, spatial_scale, output_dim_);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory: instantiates PSROIPoolingOp for the requested dtype
// (real types only, via MSHADOW_REAL_TYPE_SWITCH).
template<>
Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new PSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
d7b17ae98f7e3ceaa7774b795f086326965d7e2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Somatorio.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Launch-configuration sweep benchmark for Somatorio: for each requested
// matrix size it tries 20 block shapes, warms up, then times 1000
// back-to-back launches and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// NOTE(review): argv[1] is dereferenced without an argc check, and values > 7
// read past the end of matrices_.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): hipMalloc takes a size in BYTES; XSIZE*YSIZE bytes holds only
// a quarter of that many floats — likely should be XSIZE*YSIZE*sizeof(float).
// These allocations are also never hipFree'd (leaked every iteration).
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *results = NULL;
hipMalloc(&results, XSIZE*YSIZE);
long int n = 1;
// Round the domain up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) is a no-op that forces lazy context creation before timing.
hipFree(0);hipLaunchKernelGGL((
Somatorio), dim3(gridBlock),dim3(threadBlock), 0, 0, input,results,n);
hipDeviceSynchronize();
// Warm-up launches (untimed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Somatorio), dim3(gridBlock),dim3(threadBlock), 0, 0, input,results,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Somatorio), dim3(gridBlock),dim3(threadBlock), 0, 0, input,results,n);
}
// NOTE(review): no synchronize before `end`, so this measures enqueue
// throughput rather than kernel completion — confirm intent.
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d7b17ae98f7e3ceaa7774b795f086326965d7e2c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Somatorio.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Launch-configuration sweep benchmark for Somatorio: for each requested
 * matrix size it tries 20 block shapes, warms the kernel up, then times 1000
 * back-to-back launches and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 */
int main(int argc, char **argv) {
	cudaSetDevice(0);
	// Robustness fix: the original dereferenced argv[1] without checking argc.
	if (argc < 2) {
		fprintf(stderr, "usage: %s <matrix_count (1..7)>\n", argv[0]);
		return 1;
	}
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	// Robustness fix: matrices_ has exactly 7 entries; clamp to avoid OOB reads.
	if (matrix_len > 7) matrix_len = 7;
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			// Bug fix: cudaMalloc takes a size in BYTES. The original passed
			// XSIZE*YSIZE, i.e. room for only a quarter of that many floats.
			float *input = NULL;
			cudaMalloc(&input, (size_t)XSIZE * YSIZE * sizeof(float));
			float *results = NULL;
			cudaMalloc(&results, (size_t)XSIZE * YSIZE * sizeof(float));
			long int n = 1;
			// Round the domain up to a multiple of the block shape.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) iXSIZE++;
			while (iYSIZE % BLOCKY != 0) iYSIZE++;
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);  // no-op free: forces lazy context creation before timing
			Somatorio<<<gridBlock, threadBlock>>>(input, results, n);
			cudaDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {  // warm-up
				Somatorio<<<gridBlock, threadBlock>>>(input, results, n);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				Somatorio<<<gridBlock, threadBlock>>>(input, results, n);
			}
			// NOTE(review): no synchronize before `end`; this measures enqueue
			// throughput, not kernel completion — confirm before adding a sync.
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
			// Leak fix: the original never released the per-iteration buffers.
			cudaFree(input);
			cudaFree(results);
		}
	}
	return 0;
}
109e4b49a6514c09cf603a68234b9feb4b84e17f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <cuml/ensemble/randomforest.hpp>
#include <random/rng.cuh>
namespace ML {
using namespace MLCommon;
// Parameter set for one random-forest accuracy test case.
struct RFInputs {
int n_rows_train;        // number of training samples (1 feature each)
int n_rows_test;         // number of test samples
uint64_t seed;           // RNG seed for synthetic data generation
int n_reps;              // how many train/test repetitions to run
float pct_zero_class;    // Bernoulli probability used to label the data
float min_expected_acc;  // test fails if accuracy drops to this or below
};
// Parameterised gtest fixture: trains a depth-1, single-tree random forest on
// random 1-D data with Bernoulli labels and checks the classification
// accuracy stays above the per-case threshold.
template <typename T>
class RFClassifierAccuracyTest : public ::testing::TestWithParam<RFInputs> {
protected:
// Allocates the stream, cuML handle and all device buffers for one case.
void SetUp() override {
params = ::testing::TestWithParam<RFInputs>::GetParam();
rng.reset(new Random::Rng(params.seed));
CUDA_CHECK(hipStreamCreate(&stream));
handle.reset(new cumlHandle(1));
handle->setStream(stream);
auto allocator = handle->getDeviceAllocator();
setRFParams();
// One feature per row, so X buffers hold n_rows elements each.
X_train = (T *)allocator->allocate(params.n_rows_train * sizeof(T), stream);
y_train =
(int *)allocator->allocate(params.n_rows_train * sizeof(int), stream);
X_test = (T *)allocator->allocate(params.n_rows_test * sizeof(T), stream);
y_test =
(int *)allocator->allocate(params.n_rows_test * sizeof(int), stream);
y_pred =
(int *)allocator->allocate(params.n_rows_test * sizeof(int), stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
// Releases buffers through the handle's allocator, then the stream/handle.
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
auto allocator = handle->getDeviceAllocator();
allocator->deallocate(X_train, params.n_rows_train * sizeof(T), stream);
allocator->deallocate(y_train, params.n_rows_train * sizeof(int), stream);
allocator->deallocate(X_test, params.n_rows_test * sizeof(T), stream);
allocator->deallocate(y_test, params.n_rows_test * sizeof(int), stream);
allocator->deallocate(y_pred, params.n_rows_test * sizeof(int), stream);
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
handle.reset();
rng.reset();
}
// Repeats data generation + train + test n_reps times; fails on the first
// repetition whose accuracy is not strictly above the expected minimum.
void runTest() {
for (int i = 0; i < params.n_reps; ++i) {
loadData(X_train, y_train, params.n_rows_train, 1);
loadData(X_test, y_test, params.n_rows_test, 1);
CUDA_CHECK(hipStreamSynchronize(stream));
auto accuracy = runTrainAndTest();
ASSERT_GT(accuracy, params.min_expected_acc) << " @repetition=" << i;
}
}
private:
// Configures a deliberately tiny forest: 1 tree, max_depth 1, 16 bins,
// global-quantile splits.
void setRFParams() {
DecisionTree::DecisionTreeParams tree_params;
auto algo = SPLIT_ALGO::GLOBAL_QUANTILE;
auto sc = CRITERION::CRITERION_END;
set_tree_params(tree_params, 1, /* max_depth */
-1, /* max_leaves */
1.0, /* max_features */
16, /* n_bins */
algo, /* split_algo */
2, /* min_rows_per_node */
0.f, /* min_impurity_decrease */
false, /* bootstrap_features */
sc, /* split_criterion */
false, /* quantile_per_tree */
false /* shuffle_features */
);
set_all_rf_params(rfp, 1, /* n_trees */
true, /* bootstrap */
1.0, /* rows_sample */
-1, /* seed */
1, /* n_streams */
tree_params);
}
// Fills X with uniform [-1, 1) values and y with Bernoulli(pct_zero_class)
// labels on the test's stream.
void loadData(T *X, int *y, int nrows, int ncols) {
rng->uniform(X, nrows * ncols, T(-1.0), T(1.0), stream);
rng->bernoulli<float, int>(y, nrows, params.pct_zero_class, stream);
}
// Trains a fresh forest, predicts on the test split and returns accuracy.
// NOTE(review): `forest` leaks if fit/predict/score throws — confirm whether
// a smart pointer is acceptable here.
float runTrainAndTest() {
auto *forest = new RandomForestMetaData<T, int>;
forest->trees = nullptr;
auto &h = *(handle.get());
fit(h, forest, X_train, params.n_rows_train, 1, y_train, 2, rfp);
CUDA_CHECK(hipStreamSynchronize(stream));
predict(h, forest, X_test, params.n_rows_test, 1, y_pred);
auto metrics = score(h, forest, y_test, params.n_rows_test, y_pred);
delete forest;
return metrics.accuracy;
}
RFInputs params;                       // current test-case parameters
RF_params rfp;                         // forest hyper-parameters (setRFParams)
std::shared_ptr<cumlHandle> handle;    // cuML handle owning the allocator
hipStream_t stream;                    // stream all work is issued on
T *X_train, *X_test;                   // device feature buffers
int *y_train, *y_test, *y_pred;        // device label/prediction buffers
std::shared_ptr<Random::Rng> rng;      // seeded generator for synthetic data
};
// Test cases; field order matches RFInputs:
// {n_rows_train, n_rows_test, seed, n_reps, pct_zero_class, min_expected_acc}.
const std::vector<RFInputs> inputs = {
{800, 200, 12345ULL, 40, 0.5f, 0.4f}, {800, 200, 12345ULL, 40, 0.8f, 0.7f},
{800, 200, 67890ULL, 40, 0.5f, 0.4f}, {800, 200, 67890ULL, 40, 0.8f, 0.7f},
{1000, 250, 67890ULL, 40, 0.9f, 0.8f}, {1000, 250, 67890ULL, 40, 0.1f, 0.8f},
};
#define DEFINE_TEST(clz, name, testName, params) \
typedef clz name; \
TEST_P(name, Test) { runTest(); } \
INSTANTIATE_TEST_CASE_P(testName, name, ::testing::ValuesIn(params))
DEFINE_TEST(RFClassifierAccuracyTest<float>, ClsTestF, RFAccuracy, inputs);
DEFINE_TEST(RFClassifierAccuracyTest<double>, ClsTestD, RFAccuracy, inputs);
#undef DEFINE_TEST
} // end namespace ML
| 109e4b49a6514c09cf603a68234b9feb4b84e17f.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <cuml/ensemble/randomforest.hpp>
#include <random/rng.cuh>
namespace ML {
using namespace MLCommon;
struct RFInputs {
int n_rows_train;
int n_rows_test;
uint64_t seed;
int n_reps;
float pct_zero_class;
float min_expected_acc;
};
template <typename T>
class RFClassifierAccuracyTest : public ::testing::TestWithParam<RFInputs> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<RFInputs>::GetParam();
rng.reset(new Random::Rng(params.seed));
CUDA_CHECK(cudaStreamCreate(&stream));
handle.reset(new cumlHandle(1));
handle->setStream(stream);
auto allocator = handle->getDeviceAllocator();
setRFParams();
X_train = (T *)allocator->allocate(params.n_rows_train * sizeof(T), stream);
y_train =
(int *)allocator->allocate(params.n_rows_train * sizeof(int), stream);
X_test = (T *)allocator->allocate(params.n_rows_test * sizeof(T), stream);
y_test =
(int *)allocator->allocate(params.n_rows_test * sizeof(int), stream);
y_pred =
(int *)allocator->allocate(params.n_rows_test * sizeof(int), stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
auto allocator = handle->getDeviceAllocator();
allocator->deallocate(X_train, params.n_rows_train * sizeof(T), stream);
allocator->deallocate(y_train, params.n_rows_train * sizeof(int), stream);
allocator->deallocate(X_test, params.n_rows_test * sizeof(T), stream);
allocator->deallocate(y_test, params.n_rows_test * sizeof(int), stream);
allocator->deallocate(y_pred, params.n_rows_test * sizeof(int), stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
handle.reset();
rng.reset();
}
void runTest() {
for (int i = 0; i < params.n_reps; ++i) {
loadData(X_train, y_train, params.n_rows_train, 1);
loadData(X_test, y_test, params.n_rows_test, 1);
CUDA_CHECK(cudaStreamSynchronize(stream));
auto accuracy = runTrainAndTest();
ASSERT_GT(accuracy, params.min_expected_acc) << " @repetition=" << i;
}
}
private:
void setRFParams() {
DecisionTree::DecisionTreeParams tree_params;
auto algo = SPLIT_ALGO::GLOBAL_QUANTILE;
auto sc = CRITERION::CRITERION_END;
set_tree_params(tree_params, 1, /* max_depth */
-1, /* max_leaves */
1.0, /* max_features */
16, /* n_bins */
algo, /* split_algo */
2, /* min_rows_per_node */
0.f, /* min_impurity_decrease */
false, /* bootstrap_features */
sc, /* split_criterion */
false, /* quantile_per_tree */
false /* shuffle_features */
);
set_all_rf_params(rfp, 1, /* n_trees */
true, /* bootstrap */
1.0, /* rows_sample */
-1, /* seed */
1, /* n_streams */
tree_params);
}
void loadData(T *X, int *y, int nrows, int ncols) {
rng->uniform(X, nrows * ncols, T(-1.0), T(1.0), stream);
rng->bernoulli<float, int>(y, nrows, params.pct_zero_class, stream);
}
float runTrainAndTest() {
auto *forest = new RandomForestMetaData<T, int>;
forest->trees = nullptr;
auto &h = *(handle.get());
fit(h, forest, X_train, params.n_rows_train, 1, y_train, 2, rfp);
CUDA_CHECK(cudaStreamSynchronize(stream));
predict(h, forest, X_test, params.n_rows_test, 1, y_pred);
auto metrics = score(h, forest, y_test, params.n_rows_test, y_pred);
delete forest;
return metrics.accuracy;
}
RFInputs params;
RF_params rfp;
std::shared_ptr<cumlHandle> handle;
cudaStream_t stream;
T *X_train, *X_test;
int *y_train, *y_test, *y_pred;
std::shared_ptr<Random::Rng> rng;
};
const std::vector<RFInputs> inputs = {
{800, 200, 12345ULL, 40, 0.5f, 0.4f}, {800, 200, 12345ULL, 40, 0.8f, 0.7f},
{800, 200, 67890ULL, 40, 0.5f, 0.4f}, {800, 200, 67890ULL, 40, 0.8f, 0.7f},
{1000, 250, 67890ULL, 40, 0.9f, 0.8f}, {1000, 250, 67890ULL, 40, 0.1f, 0.8f},
};
#define DEFINE_TEST(clz, name, testName, params) \
typedef clz name; \
TEST_P(name, Test) { runTest(); } \
INSTANTIATE_TEST_CASE_P(testName, name, ::testing::ValuesIn(params))
DEFINE_TEST(RFClassifierAccuracyTest<float>, ClsTestF, RFAccuracy, inputs);
DEFINE_TEST(RFClassifierAccuracyTest<double>, ClsTestD, RFAccuracy, inputs);
#undef DEFINE_TEST
} // end namespace ML
|
6631785b85504282239060d49c836fed64187aa1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_floor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_floor), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_floor), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_floor), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6631785b85504282239060d49c836fed64187aa1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_floor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_floor<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_floor<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_floor<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a0f2ec0cbf6ca05263661a89cd81f7035820c9c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "lab3_cuda.h"
#include <iostream>
#include <cmath>
#include <malloc.h>
#include <fstream>
#include <bits/stdc++.h>
#include <hip/hip_runtime.h>
#define pb push_back
using namespace std;
#define TOLERANCE 0.001
#define JACOBI_UPDATE_TOLERANCE 0.001
#define FILENAME1 "testcase_1000_300"
#define FILENAME2 "iris_stndardized"
#define samples 150
#define features 4
#define BLOCK_SIZE 16
double **S; //Symmetric matrix (input)
double *e; //eigenvalues
double **E; //eigenvectors
int *ind;
bool *changed;
int state;
int N;
void read_file(char* filename, int num_samples, int num_features, double** A) {
ifstream ifile;
ifile.open(filename, ios::in);
double tmp;
for (int i=0; i<num_samples; i++) {
for (int j=0; j<num_features; j++){
ifile >> tmp;
A[i][j] = tmp;
}
}
ifile.close();
}
double** mat_transpose(double** A, int Am, int An) {
double **B;
B = (double**)malloc(__SIZEOF_POINTER__*An);
for (int i=0; i<An; i++)
B[i] = (double*)malloc(__SIZEOF_DOUBLE__*Am);
for (int i=0; i<Am; i++){
for (int j=0; j<An; j++){
B[j][i] = A[i][j];
}
}
return B;
}
double** mat_mul(double** A, int Am, int An,
double** B, int Bm, int Bn){
double **C;
C = (double**)malloc(__SIZEOF_POINTER__*Am);
for (int i=0; i<Am; i++)
C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i][j] = 0;
for (int k=0; k<An; k++){
C[i][j] += A[i][k] * B[k][j];
}
}
}
return C;
}
// dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
int maxind(int k) {
int m = k+1;
for (int i = k+2; i < N; i++){
if (fabs(S[k][i]) > fabs(S[k][m])){
m = i;
}
}
return m;
}
void update(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) e[k] = 0;
if (changed[k] && (ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) {
changed[k] = false;
state = state - 1;
}
else if ((! changed[k]) && (ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) {
changed[k] = true;
state = state + 1;
}
}
void rotate(int k, int l, int i, int j, double c, double s,
bool eigenvectors){
double** mat1;
double** mat2;
double** mat3;
mat1 = (double**)malloc(__SIZEOF_POINTER__*2);
mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[0][0] = c; mat1[0][1] = -s;
mat1[1][0] = s; mat1[1][1] = c;
mat2 = (double**)malloc(__SIZEOF_POINTER__*2);
mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1);
mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1);
if (eigenvectors){
mat2[0][0] = E[i][k];
mat2[1][0] = E[i][l];
}
else {
mat2[0][0] = S[k][l];
mat2[1][0] = S[i][j];
}
mat3 = mat_mul(mat1, 2, 2, mat2, 2, 1);
if (eigenvectors){
E[i][k] = mat3[0][0];
E[i][l] = mat3[1][0];
}
else{
S[k][l] = mat3[0][0];
S[i][j] = mat3[1][0];
}
free(mat1[0]);
free(mat1[1]);
free(mat1);
free(mat2[0]);
free(mat2[1]);
free(mat2);
free(mat3[0]);
free(mat3[1]);
free(mat3);
}
void print_matrix(double** A, int Am, int An) {
cout << "[";
for (int i=0; i<Am; i++){
if (i>0)
cout<<" ";
cout<<"[";
for (int j=0; j<An-1; j++){
cout << A[i][j] << ", ";
}
if (i < Am-1)
cout << A[i][An-1] << "]" << endl;
}
cout << A[Am-1][An-1] << "]]" << endl;
}
void print_vector(double* A, int An) {
cout << "[";
for(int i=0; i<An-1; i++)
cout << A[i] << ",";
cout << A[An-1] << "]" << endl;
}
void init_jacobi() {
E = (double**)malloc(__SIZEOF_POINTER__*N);
for (int i=0; i<N; i++){
E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N);
for (int j=0; j<N; j++){
E[i][j] = 0;
}
E[i][i] = 1;
}
state = N;
e = (double*)malloc(__SIZEOF_DOUBLE__*N);
ind = (int*)malloc(__SIZEOF_INT__*N);
changed = (bool*)malloc(sizeof(bool)*N);
for (int k=0; k<N; k++){
ind[k] = maxind(k);
e[k] = S[k][k];
changed[k] = true;
}
}
void Jacobi(double **input_matrix, int n,
double **eigenvalues, double ***eigenvectors) {
N = n;
S = input_matrix;
init_jacobi();
while(state != 0){
int m = 0;
for (int k=1; k<N-1; k++){
if (fabs(S[k][ind[k]]) > fabs(S[m][ind[m]])){
m = k;
}
}
int k = m;
int l = ind[m];
double p = S[k][l];
double y = (e[l] - e[k]) / 2.0;
double d = fabs(y) + sqrt(p*p + y*y);
double r = sqrt(p*p + d*d);
double c = d / r;
double s = p / r;
double t = (p*p) / d;
if (y < 0.0) { s = -s; t = -t; }
S[k][l] = 0.0;
update(k, -t);
update(l, t);
for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); }
for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); }
for (int i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); }
for (int i=0; i<N; i++){
rotate(k, l, i, i, c, s, true);
}
ind[k] = maxind(k);
ind[l] = maxind(l);
}
*eigenvalues = e;
*eigenvectors = E;
}
// int main(){
// double **D, **D_T;
// double **prod, *eigenvalues, **eigenvectors;
// D = (double**)malloc(sizeof(double*)*samples);
// for (int i=0; i<samples; i++)
// D[i] = (double*)malloc(sizeof(double)*features);
// read_file((char*)FILENAME1, samples, features, D);
// D_T = mat_transpose(D, samples, features);
// prod = mat_mul(D_T, features, samples, D, samples, features);
// Jacobi(prod, features, &eigenvalues, &eigenvectors);
// cout << "\neigenvalues:" << endl;
// print_vector(eigenvalues, features);
// cout << "\neigenvectors:" << endl;
// print_matrix(eigenvectors, features, features);
// return 0;
// }
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
// void SVD_and_PCA (int M,
void SVD_and_PCA (int M,
int N,
double* D,
double** U,
double** SIGMA,
double** V_T,
double** D_HAT,
int *K,
int retention) {
// write your code here
double **d;
double **d_t;
double **product, *eigenvalues, **eigenvectors;
// double **v;
d = (double**)malloc(sizeof(double*)*M);
for (int i=0; i<M; i++)
d[i] = (double*)malloc(sizeof(double)*N);
for(int i=0;i<M;i++){
for(int j=0;j<N;j++) d[i][j] = D[i*N+j];
}
d_t = mat_transpose(d, M, N);
product = mat_mul(d_t, N, M, d, M, N);
float computation_time1;
hipEvent_t start1, stop1;
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipEventRecord(start1);
Jacobi(product, N, &eigenvalues, &eigenvectors);
hipEventRecord(stop1);
hipEventSynchronize(stop1);
hipEventElapsedTime(&computation_time1, start1, stop1);
printf("Time taken for Jacobi: %f\n", computation_time1);
// for(int i=0;i<N;i++) printf("%f\n", eigenvalues[i]);
// vector<double> eigenvals;
// for(int i=0; i<N; i++) eigenvals.pb(eigenvalues[i]);
vector<pair<double, int> > eigenv_index;
for(int i=0; i<N; i++){
eigenv_index.pb(make_pair(eigenvalues[i],i));
}
// for(int i=0; i<N; i++){
// // eigenv_index[i] = {eigenvalues[i],i};
// eigenv_index[i] = make_pair(eigenvalues[i],i);
// // eigenv_index[i].first = eigenvalues[i];
// // eigenv_index[i].second = i;
// // eigenv_index[i] = make_pair(eigenvalues[i],i);
// cout << eigenvalues[i] << ' ' << eigenv_index[i].first << ' ' << i << ' ' << eigenv_index[i].second << endl;
// // printf("%f %d\n", eigenv_index[i].first, eigenv_index[i].second);
// }
// for(int i=0;i<N;i++) printf("%f %d\n", eigenv_index[i].first, eigenv_index[i].second);
sort(eigenv_index.begin(), eigenv_index.end());
// for(int i=0;i<N;i++) printf("%f\n", eigenv_index[i].first);
int e = eigenv_index.size()-1;
for(int i=0;i<N;i++){
// printf("%f\n", sqrt(eigenv_index[e].first));
(*SIGMA)[i] = sqrt(eigenv_index[e].first);
e--;
}
// for(int i=0;i<N;i++) printf("%f\n", (*SIGMA)[i]);
double **u = (double**)malloc(sizeof(double*)*N);
for (int i=0; i<N; i++)
u[i] = (double*)malloc(sizeof(double)*N);
e = eigenv_index.size()-1;
for(int j=0;j<N;j++){
int index = eigenv_index[e].second;
for(int i=0;i<N;i++){
u[i][j] = eigenvectors[i][index];
}
e--;
}
for(int j=0;j<N;j++){
for(int i=0;i<N;i++){
(*U)[i*N+j] = u[i][j];
}
}
// for(int j=0;j<N;j++){
// for(int i=0;i<N;i++){
// printf("%f ", (*U)[i*N+j]);
// }
// printf("\n");
// }
// size N*M
double **sigma_invT = (double**)malloc(sizeof(double*)*N);
for (int i=0; i<N; i++)
sigma_invT[i] = (double*)malloc(sizeof(double)*M);
for(int i=0; i<N; i++){
for(int j=0; j<M; j++) sigma_invT[i][j]=0;
}
e = eigenv_index.size()-1;
for(int i=0; i<N;i++){
if(eigenv_index[e].first<1e-5){
sigma_invT[i][i]= 0;
}
else{
sigma_invT[i][i]= 1/sqrt(eigenv_index[e].first);
}
e--;
}
double **temp = mat_mul(d, M, N, u, N, N);
double **v = mat_mul(temp, M, N, sigma_invT, N, M);
double **v_t = mat_transpose(v, M, M);
// for(int i=0; i<M; i++){
// for(int j=0; j<M; j++) printf("%f ", v_t[i][j]);
// printf("\n");
// }
for(int i=0; i<M; i++){
for(int j=0; j<M; j++) (*V_T)[i*M+j] = v_t[i][j];
}
// for(int i=0; i<M; i++){
// for(int j=0; j<M; j++) printf("%f ", V_T[i][j]);
// printf("\n");
// }
double num=0;
double sigmasqsum=0;
for(int i=0; i<N; i++){
sigmasqsum += (*SIGMA)[i]*(*SIGMA)[i];
}
// printf("\n%f\n", sigmasqsum);
int k=0;
for(k=0; k<N; k++){
num += ((*SIGMA)[k]*(*SIGMA)[k])/sigmasqsum;
if(num >= retention/100.0){
break;
}
}
*K = k+1;
// double **newU;
// double **newU = (double**)malloc(sizeof(double*)*N*(k+1));
double **newU = (double**)malloc(sizeof(double*)*N);
for (int i=0; i<N; i++)
newU[i] = (double*)malloc(sizeof(double)*(k+1));
for(int i=0; i<N; i++){
for(int j=0;j<k+1;j++){
newU[i][j] = (u)[i][j];
}
}
// for(int i=0; i<N; i++){
// for(int j=0; j<(k+1); j++) printf("%f ", newU[i][j]);
// printf("\n");
// }
double **d_hat = (double**)malloc(sizeof(double*)*M);
for (int i=0; i<(k+1); i++)
d_hat[i] = (double*)malloc(sizeof(double)*(k+1));
d_hat = mat_mul(d, M, N, newU, N, (k+1));
*D_HAT = (double*) malloc(sizeof(double) * M*(k+1));
for(int i=0; i<M; i++){
for(int j=0;j<k+1;j++){
(*D_HAT)[i*(k+1)+j] = d_hat[i][j];
}
}
}
| a0f2ec0cbf6ca05263661a89cd81f7035820c9c9.cu | #include "lab3_cuda.h"
#include <iostream>
#include <cmath>
#include <malloc.h>
#include <fstream>
#include <bits/stdc++.h>
#include <cuda.h>
#define pb push_back
using namespace std;
#define TOLERANCE 0.001
#define JACOBI_UPDATE_TOLERANCE 0.001
#define FILENAME1 "testcase_1000_300"
#define FILENAME2 "iris_stndardized"
#define samples 150
#define features 4
#define BLOCK_SIZE 16
double **S; //Symmetric matrix (input)
double *e; //eigenvalues
double **E; //eigenvectors
int *ind;
bool *changed;
int state;
int N;
void read_file(char* filename, int num_samples, int num_features, double** A) {
ifstream ifile;
ifile.open(filename, ios::in);
double tmp;
for (int i=0; i<num_samples; i++) {
for (int j=0; j<num_features; j++){
ifile >> tmp;
A[i][j] = tmp;
}
}
ifile.close();
}
double** mat_transpose(double** A, int Am, int An) {
double **B;
B = (double**)malloc(__SIZEOF_POINTER__*An);
for (int i=0; i<An; i++)
B[i] = (double*)malloc(__SIZEOF_DOUBLE__*Am);
for (int i=0; i<Am; i++){
for (int j=0; j<An; j++){
B[j][i] = A[i][j];
}
}
return B;
}
double** mat_mul(double** A, int Am, int An,
double** B, int Bm, int Bn){
double **C;
C = (double**)malloc(__SIZEOF_POINTER__*Am);
for (int i=0; i<Am; i++)
C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i][j] = 0;
for (int k=0; k<An; k++){
C[i][j] += A[i][k] * B[k][j];
}
}
}
return C;
}
// dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
int maxind(int k) {
int m = k+1;
for (int i = k+2; i < N; i++){
if (fabs(S[k][i]) > fabs(S[k][m])){
m = i;
}
}
return m;
}
void update(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) e[k] = 0;
if (changed[k] && (ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) {
changed[k] = false;
state = state - 1;
}
else if ((! changed[k]) && (ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) {
changed[k] = true;
state = state + 1;
}
}
void rotate(int k, int l, int i, int j, double c, double s,
bool eigenvectors){
double** mat1;
double** mat2;
double** mat3;
mat1 = (double**)malloc(__SIZEOF_POINTER__*2);
mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[0][0] = c; mat1[0][1] = -s;
mat1[1][0] = s; mat1[1][1] = c;
mat2 = (double**)malloc(__SIZEOF_POINTER__*2);
mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1);
mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1);
if (eigenvectors){
mat2[0][0] = E[i][k];
mat2[1][0] = E[i][l];
}
else {
mat2[0][0] = S[k][l];
mat2[1][0] = S[i][j];
}
mat3 = mat_mul(mat1, 2, 2, mat2, 2, 1);
if (eigenvectors){
E[i][k] = mat3[0][0];
E[i][l] = mat3[1][0];
}
else{
S[k][l] = mat3[0][0];
S[i][j] = mat3[1][0];
}
free(mat1[0]);
free(mat1[1]);
free(mat1);
free(mat2[0]);
free(mat2[1]);
free(mat2);
free(mat3[0]);
free(mat3[1]);
free(mat3);
}
void print_matrix(double** A, int Am, int An) {
cout << "[";
for (int i=0; i<Am; i++){
if (i>0)
cout<<" ";
cout<<"[";
for (int j=0; j<An-1; j++){
cout << A[i][j] << ", ";
}
if (i < Am-1)
cout << A[i][An-1] << "]" << endl;
}
cout << A[Am-1][An-1] << "]]" << endl;
}
void print_vector(double* A, int An) {
cout << "[";
for(int i=0; i<An-1; i++)
cout << A[i] << ",";
cout << A[An-1] << "]" << endl;
}
void init_jacobi() {
E = (double**)malloc(__SIZEOF_POINTER__*N);
for (int i=0; i<N; i++){
E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N);
for (int j=0; j<N; j++){
E[i][j] = 0;
}
E[i][i] = 1;
}
state = N;
e = (double*)malloc(__SIZEOF_DOUBLE__*N);
ind = (int*)malloc(__SIZEOF_INT__*N);
changed = (bool*)malloc(sizeof(bool)*N);
for (int k=0; k<N; k++){
ind[k] = maxind(k);
e[k] = S[k][k];
changed[k] = true;
}
}
void Jacobi(double **input_matrix, int n,
double **eigenvalues, double ***eigenvectors) {
N = n;
S = input_matrix;
init_jacobi();
while(state != 0){
int m = 0;
for (int k=1; k<N-1; k++){
if (fabs(S[k][ind[k]]) > fabs(S[m][ind[m]])){
m = k;
}
}
int k = m;
int l = ind[m];
double p = S[k][l];
double y = (e[l] - e[k]) / 2.0;
double d = fabs(y) + sqrt(p*p + y*y);
double r = sqrt(p*p + d*d);
double c = d / r;
double s = p / r;
double t = (p*p) / d;
if (y < 0.0) { s = -s; t = -t; }
S[k][l] = 0.0;
update(k, -t);
update(l, t);
for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); }
for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); }
for (int i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); }
for (int i=0; i<N; i++){
rotate(k, l, i, i, c, s, true);
}
ind[k] = maxind(k);
ind[l] = maxind(l);
}
*eigenvalues = e;
*eigenvectors = E;
}
// int main(){
// double **D, **D_T;
// double **prod, *eigenvalues, **eigenvectors;
// D = (double**)malloc(sizeof(double*)*samples);
// for (int i=0; i<samples; i++)
// D[i] = (double*)malloc(sizeof(double)*features);
// read_file((char*)FILENAME1, samples, features, D);
// D_T = mat_transpose(D, samples, features);
// prod = mat_mul(D_T, features, samples, D, samples, features);
// Jacobi(prod, features, &eigenvalues, &eigenvectors);
// cout << "\neigenvalues:" << endl;
// print_vector(eigenvalues, features);
// cout << "\neigenvectors:" << endl;
// print_matrix(eigenvectors, features, features);
// return 0;
// }
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
// void SVD_and_PCA (int M,
void SVD_and_PCA (int M,
int N,
double* D,
double** U,
double** SIGMA,
double** V_T,
double** D_HAT,
int *K,
int retention) {
// write your code here
double **d;
double **d_t;
double **product, *eigenvalues, **eigenvectors;
// double **v;
d = (double**)malloc(sizeof(double*)*M);
for (int i=0; i<M; i++)
d[i] = (double*)malloc(sizeof(double)*N);
for(int i=0;i<M;i++){
for(int j=0;j<N;j++) d[i][j] = D[i*N+j];
}
d_t = mat_transpose(d, M, N);
product = mat_mul(d_t, N, M, d, M, N);
float computation_time1;
cudaEvent_t start1, stop1;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventRecord(start1);
Jacobi(product, N, &eigenvalues, &eigenvectors);
cudaEventRecord(stop1);
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&computation_time1, start1, stop1);
printf("Time taken for Jacobi: %f\n", computation_time1);
// for(int i=0;i<N;i++) printf("%f\n", eigenvalues[i]);
// vector<double> eigenvals;
// for(int i=0; i<N; i++) eigenvals.pb(eigenvalues[i]);
vector<pair<double, int> > eigenv_index;
for(int i=0; i<N; i++){
eigenv_index.pb(make_pair(eigenvalues[i],i));
}
// for(int i=0; i<N; i++){
// // eigenv_index[i] = {eigenvalues[i],i};
// eigenv_index[i] = make_pair(eigenvalues[i],i);
// // eigenv_index[i].first = eigenvalues[i];
// // eigenv_index[i].second = i;
// // eigenv_index[i] = make_pair(eigenvalues[i],i);
// cout << eigenvalues[i] << ' ' << eigenv_index[i].first << ' ' << i << ' ' << eigenv_index[i].second << endl;
// // printf("%f %d\n", eigenv_index[i].first, eigenv_index[i].second);
// }
// for(int i=0;i<N;i++) printf("%f %d\n", eigenv_index[i].first, eigenv_index[i].second);
sort(eigenv_index.begin(), eigenv_index.end());
// for(int i=0;i<N;i++) printf("%f\n", eigenv_index[i].first);
int e = eigenv_index.size()-1;
for(int i=0;i<N;i++){
// printf("%f\n", sqrt(eigenv_index[e].first));
(*SIGMA)[i] = sqrt(eigenv_index[e].first);
e--;
}
// for(int i=0;i<N;i++) printf("%f\n", (*SIGMA)[i]);
double **u = (double**)malloc(sizeof(double*)*N);
for (int i=0; i<N; i++)
u[i] = (double*)malloc(sizeof(double)*N);
e = eigenv_index.size()-1;
for(int j=0;j<N;j++){
int index = eigenv_index[e].second;
for(int i=0;i<N;i++){
u[i][j] = eigenvectors[i][index];
}
e--;
}
for(int j=0;j<N;j++){
for(int i=0;i<N;i++){
(*U)[i*N+j] = u[i][j];
}
}
// for(int j=0;j<N;j++){
// for(int i=0;i<N;i++){
// printf("%f ", (*U)[i*N+j]);
// }
// printf("\n");
// }
// size N*M
double **sigma_invT = (double**)malloc(sizeof(double*)*N);
for (int i=0; i<N; i++)
sigma_invT[i] = (double*)malloc(sizeof(double)*M);
for(int i=0; i<N; i++){
for(int j=0; j<M; j++) sigma_invT[i][j]=0;
}
e = eigenv_index.size()-1;
for(int i=0; i<N;i++){
if(eigenv_index[e].first<1e-5){
sigma_invT[i][i]= 0;
}
else{
sigma_invT[i][i]= 1/sqrt(eigenv_index[e].first);
}
e--;
}
double **temp = mat_mul(d, M, N, u, N, N);
double **v = mat_mul(temp, M, N, sigma_invT, N, M);
double **v_t = mat_transpose(v, M, M);
// for(int i=0; i<M; i++){
// for(int j=0; j<M; j++) printf("%f ", v_t[i][j]);
// printf("\n");
// }
for(int i=0; i<M; i++){
for(int j=0; j<M; j++) (*V_T)[i*M+j] = v_t[i][j];
}
// for(int i=0; i<M; i++){
// for(int j=0; j<M; j++) printf("%f ", V_T[i][j]);
// printf("\n");
// }
double num=0;
double sigmasqsum=0;
for(int i=0; i<N; i++){
sigmasqsum += (*SIGMA)[i]*(*SIGMA)[i];
}
// printf("\n%f\n", sigmasqsum);
int k=0;
for(k=0; k<N; k++){
num += ((*SIGMA)[k]*(*SIGMA)[k])/sigmasqsum;
if(num >= retention/100.0){
break;
}
}
*K = k+1;
// double **newU;
// double **newU = (double**)malloc(sizeof(double*)*N*(k+1));
double **newU = (double**)malloc(sizeof(double*)*N);
for (int i=0; i<N; i++)
newU[i] = (double*)malloc(sizeof(double)*(k+1));
for(int i=0; i<N; i++){
for(int j=0;j<k+1;j++){
newU[i][j] = (u)[i][j];
}
}
// for(int i=0; i<N; i++){
// for(int j=0; j<(k+1); j++) printf("%f ", newU[i][j]);
// printf("\n");
// }
double **d_hat = (double**)malloc(sizeof(double*)*M);
for (int i=0; i<(k+1); i++)
d_hat[i] = (double*)malloc(sizeof(double)*(k+1));
d_hat = mat_mul(d, M, N, newU, N, (k+1));
*D_HAT = (double*) malloc(sizeof(double) * M*(k+1));
for(int i=0; i<M; i++){
for(int j=0;j<k+1;j++){
(*D_HAT)[i*(k+1)+j] = d_hat[i][j];
}
}
}
|
0c9c73ece563d1cd9ecc639c7223ea8484ea97f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define N 50000
#define numThreads 512
__global__ void init(
unsigned int seed,
hiprandState_t *states,
unsigned int size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
hiprand_init(
seed,
i,
0,
&states[i]);
}
}
__global__ void GeometricBrownianMotion(
float *d_a,
float mu,
float sigma,
float dt,
hiprandState_t *states,
unsigned int size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
d_a[i] += d_a[i] * ( (dt*mu) + (sigma*sqrt(dt)*hiprand_normal(&states[i])));
}
}
int main()
{
float mu = .01;
float sigma = 0.5;
float dt = 1/252.0f;
float h_a[N];
float *d_a;
for (int i = 0; i < N; i++)
{
h_a[i] = 100.0f;
}
hipMalloc((void**)&d_a, N*sizeof(float));
hipMemcpy(d_a, h_a, N*sizeof(float), hipMemcpyHostToDevice);
hiprandState_t *states;
hipMalloc((void**)&states, N * sizeof(hiprandState_t));
init << <(N + numThreads - 1)/numThreads, numThreads >> >(time(NULL), states, N);
for (int t = 0; t < 252; t++)
{
GeometricBrownianMotion << < (N + numThreads - 1) / numThreads, numThreads >> >(
d_a,
mu,
sigma,
dt,
states,
N);
}
hipMemcpy(h_a, d_a, N*sizeof(float), hipMemcpyDeviceToHost);
float avg = 0;
for (int i = 0; i < N; i++)
{
avg += h_a[i];
}
avg /= float(N);
std::cout << "The Average Value Is: " << avg << std::endl;
hipFree(d_a); hipFree(states);
return 0;
}
| 0c9c73ece563d1cd9ecc639c7223ea8484ea97f3.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <curand.h>
#include <curand_kernel.h>
#define N 50000
#define numThreads 512
__global__ void init(
unsigned int seed,
curandState_t *states,
unsigned int size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
curand_init(
seed,
i,
0,
&states[i]);
}
}
__global__ void GeometricBrownianMotion(
float *d_a,
float mu,
float sigma,
float dt,
curandState_t *states,
unsigned int size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
d_a[i] += d_a[i] * ( (dt*mu) + (sigma*sqrt(dt)*curand_normal(&states[i])));
}
}
int main()
{
float mu = .01;
float sigma = 0.5;
float dt = 1/252.0f;
float h_a[N];
float *d_a;
for (int i = 0; i < N; i++)
{
h_a[i] = 100.0f;
}
cudaMalloc((void**)&d_a, N*sizeof(float));
cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
curandState_t *states;
cudaMalloc((void**)&states, N * sizeof(curandState_t));
init << <(N + numThreads - 1)/numThreads, numThreads >> >(time(NULL), states, N);
for (int t = 0; t < 252; t++)
{
GeometricBrownianMotion << < (N + numThreads - 1) / numThreads, numThreads >> >(
d_a,
mu,
sigma,
dt,
states,
N);
}
cudaMemcpy(h_a, d_a, N*sizeof(float), cudaMemcpyDeviceToHost);
float avg = 0;
for (int i = 0; i < N; i++)
{
avg += h_a[i];
}
avg /= float(N);
std::cout << "The Average Value Is: " << avg << std::endl;
cudaFree(d_a); cudaFree(states);
return 0;
}
|
7614da54d00b3bc968c08bea8c4e995e44c9c3c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Illinois Open Source License
University of Illinois/NCSA
Open Source License
Copyright 2009, University of Illinois. All rights reserved.
Developed by:
Innovative Systems Lab
National Center for Supercomputing Applications
http://www.ncsa.uiuc.edu/AboutUs/Directorates/ISL.html
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the Software), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution.
* Neither the names of Innovative Systems Lab and National Center for Supercomputing Applications, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission.
THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
*/
#ifndef _GPU_COMPUTE_H_
#define _GPU_COMPUTE_H_
#include "ACF_kernel.cu"
#include "histogram_kernel.cu"
#include "model_io.cu"
#include "args.h"
#include <sys/time.h>
#define TDIFF(ts, te) (te.tv_sec - ts.tv_sec + (te.tv_usec - ts.tv_usec) * 1e-6)
#define GRID_SIZE (1 << LOG2_GRID_SIZE)
// Launch configuration for the ACF kernels: 128x128 blocks of 128 threads.
// NOTE(review): presumably sized so one launch covers a GRID_SIZE x GRID_SIZE
// tile of point pairs -- confirm against ACF_kernel.cu.
const dim3 grid(128, 128, 1);
const dim3 threads(128, 1, 1);
// Device-side data storage: one GRID_SIZE tile of x/y/z coordinates each.
cartesian d_idata1;
cartesian d_idata2;
// Device-side kernel output: packed histogram bin assignments.
unsigned int* d_odata1;
// Host-side data storage: the full (padded) data and random point sets.
cartesian h_idata1;
cartesian h_idata2;
// Performance timing scratch.
struct timeval t1, t0;
float t_Compute = 0.0f;
// Copies the histogram bin boundaries into GPU constant memory (binbounds).
void writeBoundaries(double *binbs) {
    const size_t boundaryBytes = (NUMBINS-1)*sizeof(double);
    hipMemcpyToSymbol(binbounds, (void*)binbs, boundaryBytes);
}
// Used to compute DD or RR; exploits symmetry to halve the number of dot
// products and waterfall searches required. Unfortunately, due to the
// limitations of the histogram kernel, every element of d_odata still
// represents a histogram bin assignment, so the histogram kernel does just as
// much work here as it does in tileCompute.
// type:    0 corresponds to DD (uses h_idata1), 1 corresponds to RR (h_idata2)
// size:    number of elements in the data or random set (dependent upon type)
// njk:     number of jackknives
// jkSizes: per-tile jackknife sizes, laid out as jkSizes[tile*njk + jk]
// nBins:   number of histogram bins
// histo:   the function outputs by adding on to this histogram
// stream:  HIP stream used for all async copies and kernel launches
void tileComputeSymm(int type, int size, int njk, int* jkSizes, int nBins, long long* histo, hipStream_t &stream) {
    // Host-side scratch for GPUHistogram output (one sub-histogram per jackknife).
    unsigned int* subHistoTemp = (unsigned int*)malloc(njk*nBins*sizeof(unsigned int));
    // Number of GRID_SIZE tiles needed to cover the input on each 'axis'.
    int nkernels = iDivUp(size, GRID_SIZE);
    // Running offset into d_odata1; used in GPUHistogram calls.
    int index;
    for(int i=0; i<nkernels; i++) {
        // Stage tile i of the selected input set into d_idata1.
        if(type == 0) {
            hipMemcpyAsync(d_idata1.x, &h_idata1.x[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            hipMemcpyAsync(d_idata1.y, &h_idata1.y[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            hipMemcpyAsync(d_idata1.z, &h_idata1.z[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
        }
        else {
            hipMemcpyAsync(d_idata1.x, &h_idata2.x[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            hipMemcpyAsync(d_idata1.y, &h_idata2.y[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            hipMemcpyAsync(d_idata1.z, &h_idata2.z[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
        }
        // Correlate tile i with itself using the symmetric kernel.
        hipLaunchKernelGGL(( ACFKernelSymm), dim3(grid), dim3(threads), 128*sizeof(double3), stream , d_idata1, d_odata1);
        index = 0;
        memset(subHistoTemp, 0, njk*nBins*sizeof(unsigned int));
        for(int k=0; k<njk; k++) {
            if(jkSizes[i*njk + k] != 0) {
                GPUHistogram(&subHistoTemp[nBins*k], &d_odata1[index], jkSizes[i*njk + k]*GRID_SIZE, stream);
                // d_odata1 packs four bin assignments per unsigned int, hence >> 2.
                index += jkSizes[i*njk + k] * (GRID_SIZE >> 2);
            }
        }
        // Accumulate this tile's sub-histograms onto the running totals.
        for(int k=0; k<njk*nBins; k++) {
            histo[k] += subHistoTemp[k];
        }
        // Correlate tile i against every later tile j (symmetry: only j > i).
        for(int j=i+1; j<nkernels; j++) {
            if(type == 0) {
                hipMemcpyAsync(d_idata2.x, &h_idata1.x[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
                hipMemcpyAsync(d_idata2.y, &h_idata1.y[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
                hipMemcpyAsync(d_idata2.z, &h_idata1.z[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            }
            else {
                hipMemcpyAsync(d_idata2.x, &h_idata2.x[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
                hipMemcpyAsync(d_idata2.y, &h_idata2.y[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
                hipMemcpyAsync(d_idata2.z, &h_idata2.z[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            }
            hipLaunchKernelGGL(( ACFKernel), dim3(grid), dim3(threads), 128*sizeof(double3), stream , d_idata1, d_idata2, d_odata1);
            index = 0;
            memset(subHistoTemp, 0, njk*nBins*sizeof(unsigned int));
            // NOTE(review): jkSizes is indexed by i (the d_idata1 tile); output
            // rows appear to follow the first operand's jackknife layout -- confirm.
            for(int k=0; k<njk; k++) {
                if(jkSizes[i*njk + k] != 0) {
                    GPUHistogram(&subHistoTemp[nBins*k], &d_odata1[index], jkSizes[i*njk + k]*GRID_SIZE, stream);
                    index += jkSizes[i*njk + k] * (GRID_SIZE >> 2);
                }
            }
            for(int k=0; k<njk*nBins; k++) {
                histo[k] += subHistoTemp[k];
            }
        }
    }
    // Fix: release the scratch buffer; it was previously leaked on every call.
    free(subHistoTemp);
}
// Used to compute DR (cross-correlation of the data set with one random set).
// dataSize:   size of data set
// randomSize: size of random set
// All else:   see descriptions in tileComputeSymm
void tileCompute(int dataSize, int randomSize, int njk, int* jkSizes, int nBins, long long* histo, hipStream_t &stream) {
    // Host-side scratch for GPUHistogram output (one sub-histogram per jackknife).
    unsigned int* subHistoTemp = (unsigned int*)malloc(njk*nBins*sizeof(unsigned int));
    int ndkernels = iDivUp(dataSize, GRID_SIZE);
    int nrkernels = iDivUp(randomSize, GRID_SIZE);
    // Running offset into d_odata1; used in GPUHistogram calls.
    int index;
    for(int i=0; i<ndkernels; i++) {
        // Stage data tile i into d_idata1.
        hipMemcpyAsync(d_idata1.x, &h_idata1.x[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
        hipMemcpyAsync(d_idata1.y, &h_idata1.y[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
        hipMemcpyAsync(d_idata1.z, &h_idata1.z[i*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
        for(int j=0; j<nrkernels; j++) {
            // Stage random tile j into d_idata2 and correlate against data tile i.
            hipMemcpyAsync(d_idata2.x, &h_idata2.x[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            hipMemcpyAsync(d_idata2.y, &h_idata2.y[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            hipMemcpyAsync(d_idata2.z, &h_idata2.z[j*GRID_SIZE], GRID_SIZE*sizeof(double), hipMemcpyHostToDevice, stream);
            hipLaunchKernelGGL(( ACFKernel), dim3(grid), dim3(threads), 128*sizeof(double3), stream , d_idata1, d_idata2, d_odata1);
            index = 0;
            memset(subHistoTemp, 0, njk*nBins*sizeof(unsigned int));
            for(int k=0; k<njk; k++) {
                if(jkSizes[i*njk + k] != 0) {
                    GPUHistogram(&subHistoTemp[nBins*k], &d_odata1[index], jkSizes[i*njk + k]*GRID_SIZE, stream);
                    // d_odata1 packs four bin assignments per unsigned int, hence >> 2.
                    index += jkSizes[i*njk + k] * (GRID_SIZE >> 2);
                }
            }
            // Accumulate this tile pair's sub-histograms onto the running totals.
            for(int k=0; k<njk*nBins; k++) {
                histo[k] += subHistoTemp[k];
            }
        }
    }
    // Fix: release the scratch buffer; it was previously leaked on every call.
    free(subHistoTemp);
}
// Computes histograms and writes to DDs, DRs, RRs. These must be compiled by
// the host program; the function outputs njk sub-histograms for each, the sum
// of which is the full histogram. Note that data should be sorted according
// to jackknife; otherwise results will be meaningless.
// dataName:    file name of data points file
// randomNames: file name stem of random points files
// nr:          number of random files, assumed named randomNames.i, 1 <= i <= nr
// dataSize:    number of elements to read from data points file
// randomSize:  number of elements to read from each random points file
// njk:         number of jackknives
// jkSizes:     ordered jackknife sizes; each must currently be a multiple of 4
// nBins:       number of histogram bins
// zeroBin:     index of bin which contains 0.0f; needed to correct for padding
// DDs, DRs, RRs: output subhistogram lists (allocated by this function)
void doComputeGPU(char* dataName, char* randomNames, int nr, int dataSize, int randomSize, int njk, int* jkSizes,
                  int nBins, int zeroBin, long long** DDs, long long** DRs, long long** RRs) {
    // DDs, DRs, RRs are not assumed to be allocated or cleared.
    *DDs = (long long*)malloc(nBins*njk*sizeof(long long));
    *DRs = (long long*)malloc(nBins*njk*nr*sizeof(long long));
    *RRs = (long long*)malloc(nBins*nr*sizeof(long long));
    memset(*DDs, 0, nBins*njk*sizeof(long long));
    memset(*DRs, 0, nBins*njk*nr*sizeof(long long));
    memset(*RRs, 0, nBins*nr*sizeof(long long));
    int ndkernels = iDivUp(dataSize, GRID_SIZE);
    int nrkernels = iDivUp(randomSize, GRID_SIZE);
    // Per-tile jackknife sizes for the data and random sets.
    int* dkerneljkSizes = (int*)malloc(njk*ndkernels*sizeof(int));
    int* rkerneljkSizes = (int*)malloc(njk*nrkernels*sizeof(int));
    memset(dkerneljkSizes, 0, njk*ndkernels*sizeof(int));
    memset(rkerneljkSizes, 0, njk*nrkernels*sizeof(int));
    // Distribute the data jackknives across GRID_SIZE-element tiles.
    int currentjk = 0;
    int numwrittencurrentjk = 0;
    for(int i=0; i<ndkernels; i++) {
        int remainder = GRID_SIZE;
        while(remainder > 0 && currentjk < njk) {
            if(remainder < jkSizes[currentjk] - numwrittencurrentjk) {
                // Current jackknife spills over into the next tile.
                dkerneljkSizes[i*njk + currentjk] += remainder;
                numwrittencurrentjk += remainder;
                remainder = 0;
            }
            else {
                // Current jackknife finishes inside this tile; advance to the next.
                remainder = remainder - (jkSizes[currentjk] - numwrittencurrentjk);
                dkerneljkSizes[i*njk + currentjk] += (jkSizes[currentjk] - numwrittencurrentjk);
                currentjk++;
                numwrittencurrentjk = 0;
            }
        }
    }
    // The random set has no jackknife structure; assign every tile to slot 0.
    for(int i=0; i<nrkernels-1; i++) {
        rkerneljkSizes[i*njk] += GRID_SIZE;
    }
    rkerneljkSizes[(nrkernels-1)*njk] += (randomSize % GRID_SIZE == 0) ? GRID_SIZE : randomSize % GRID_SIZE;
    // Kernel invocations require full GRID_SIZE tiles, so pad each set up to a
    // multiple of GRID_SIZE.
    int dataSizePadded = dataSize + ((GRID_SIZE - (dataSize % GRID_SIZE)) % GRID_SIZE);
    int randomSizePadded = randomSize + ((GRID_SIZE - (randomSize % GRID_SIZE)) % GRID_SIZE);
    // Use page-locked host memory; somewhat faster, and host-side memory
    // requirements are rather small.
    hipHostMalloc((void**)&h_idata1.x, dataSizePadded*sizeof(double));
    hipHostMalloc((void**)&h_idata1.y, dataSizePadded*sizeof(double));
    hipHostMalloc((void**)&h_idata1.z, dataSizePadded*sizeof(double));
    h_idata1.jk = (int*)malloc(dataSize*sizeof(int));
    hipHostMalloc((void**)&h_idata2.x, randomSizePadded*sizeof(double));
    hipHostMalloc((void**)&h_idata2.y, randomSizePadded*sizeof(double));
    hipHostMalloc((void**)&h_idata2.z, randomSizePadded*sizeof(double));
    h_idata2.jk = (int*)malloc(randomSize*sizeof(int));
    // Zero the padding so the dot product of a padding element and any other
    // element gets mapped to the bin containing 0.0f.
    for(int i=dataSize; i<dataSizePadded; i++) {
        h_idata1.x[i] = double(0.0);
        h_idata1.y[i] = double(0.0);
        h_idata1.z[i] = double(0.0);
    }
    for(int i=randomSize; i<randomSizePadded; i++) {
        h_idata2.x[i] = double(0.0);
        h_idata2.y[i] = double(0.0);
        h_idata2.z[i] = double(0.0);
    }
    // Allocate device memory for inputs and output (one GRID_SIZE tile each;
    // the output packs four bin assignments per unsigned int).
    hipMalloc((void**)&d_idata1.x, GRID_SIZE*sizeof(double));
    hipMalloc((void**)&d_idata1.y, GRID_SIZE*sizeof(double));
    hipMalloc((void**)&d_idata1.z, GRID_SIZE*sizeof(double));
    d_idata1.jk = NULL;
    hipMalloc((void**)&d_idata2.x, GRID_SIZE*sizeof(double));
    hipMalloc((void**)&d_idata2.y, GRID_SIZE*sizeof(double));
    hipMalloc((void**)&d_idata2.z, GRID_SIZE*sizeof(double));
    d_idata2.jk = NULL;
    hipMalloc((void**)&d_odata1, GRID_SIZE*GRID_SIZE*sizeof(unsigned int)/4);
    histoInit();
    hipStream_t stream;
    hipStreamCreate(&stream);
    struct timeval t3, t2, t1, t0;
    float t_computeDD=0, t_computeRRS=0, t_computeDRS=0, t_fileIO=0;
    gettimeofday(&t0, NULL);
    char fname[256];
    readdatafile(dataName, h_idata1, dataSize);
    gettimeofday(&t1, NULL);
    // Compute DD
    tileComputeSymm(0, dataSize, njk, dkerneljkSizes, nBins, *DDs, stream);
    gettimeofday(&t2, NULL);
    t_fileIO += TDIFF(t0, t1);
    t_computeDD = TDIFF(t1, t2);
    for(int i=0; i<nr; i++) {
        // Fix: bounded snprintf, and drop the redundant "\0" from the format.
        snprintf(fname, sizeof(fname), "%s.%i", randomNames, i+1);
        gettimeofday(&t0, NULL);
        readdatafile(fname, h_idata2, randomSize);
        gettimeofday(&t1, NULL);
        // Compute DR_i
        tileCompute(dataSize, randomSize, njk, dkerneljkSizes, nBins, &(*DRs)[njk*nBins*i], stream);
        gettimeofday(&t2, NULL);
        // Compute RR_i
        tileComputeSymm(1, randomSize, njk, rkerneljkSizes, nBins, &(*RRs)[nBins*i], stream);
        gettimeofday(&t3, NULL);
        t_fileIO += TDIFF(t0, t1);
        t_computeDRS += TDIFF(t1, t2);
        t_computeRRS += TDIFF(t2, t3);
    }
    // Correct for error introduced by padding vectors: every padding element
    // pairs with each valid row and lands in the zero bin.
    int padfactor = randomSizePadded - randomSize;
    for(int i=0; i<nr; i++) {
        (*RRs)[nBins*i + zeroBin] -= padfactor*randomSize;
        for(int j=0; j<njk; j++) {
            (*DRs)[nBins*njk*i + nBins*j + zeroBin] -= padfactor*jkSizes[j];
        }
    }
    // Fix: DD is built from the DATA set, so its correction must use the data
    // set's padding, not the random set's (the two differ whenever dataSize
    // and randomSize pad differently).
    int dataPadfactor = dataSizePadded - dataSize;
    for(int i=0; i<njk; i++) {
        (*DDs)[nBins*i + zeroBin] -= dataPadfactor*jkSizes[i];
    }
    // Tidy up.
    hipStreamDestroy(stream);
    histoClose();
    hipFree(d_idata1.x);
    hipFree(d_idata1.y);
    hipFree(d_idata1.z);
    hipFree(d_idata2.x);
    hipFree(d_idata2.y);
    hipFree(d_idata2.z);
    hipFree(d_odata1);
    hipHostFree(h_idata1.x);
    hipHostFree(h_idata1.y);
    hipHostFree(h_idata1.z);
    free(h_idata1.jk);
    hipHostFree(h_idata2.x);
    hipHostFree(h_idata2.y);
    hipHostFree(h_idata2.z);
    free(h_idata2.jk);
    // Fix: release the per-tile jackknife size tables (previously leaked).
    free(dkerneljkSizes);
    free(rkerneljkSizes);
    printf("================================================\n");
    printf("Time to compute DD: %.4f sec\n", t_computeDD);
    printf("Time to compute RRS: %.4f sec\n", t_computeRRS);
    printf("Time to compute DRS: %.4f sec\n", t_computeDRS);
    printf("Time to load data files: %.4f sec\n", t_fileIO);
    printf("Time to compute DD, RRS, & DRS: %.4f sec\n", t_computeDD+t_computeRRS+t_computeDRS);
    printf("TOTAL time (DD+RRS+DRS+IO): %.4f sec\n", t_computeDD+t_computeRRS+t_computeDRS+t_fileIO);
    printf("================================================\n");
}
// Collapses the per-jackknife sub-histograms into full and leave-one-out
// histograms. (*DD)[0] / (*DR)[0] / (*RR)[0] hold the totals over all
// jackknives (and all random files); (*DD)[k] and (*DR)[k] for k = 1..njk
// hold the totals with jackknife k-1's contribution removed.
void compileHistograms(long long* DDs, long long* DRs, long long* RRs, long long*** DD, long long*** DR,
                       long long*** RR, options *args) {
    const int njk = args->njk;
    const int nrand = args->random_count;
    // One histogram per jackknife plus the total for DD and DR; RR gets only
    // the total.
    *DD = (long long**)malloc((njk+1)*sizeof(long long*));
    *DR = (long long**)malloc((njk+1)*sizeof(long long*));
    *RR = (long long**)malloc(1*sizeof(long long*));
    for(int h=0; h<=njk; h++) {
        (*DD)[h] = (long long*)malloc(NUMBINS*sizeof(long long));
        (*DR)[h] = (long long*)malloc(NUMBINS*sizeof(long long));
        memset((*DD)[h], 0, NUMBINS*sizeof(long long));
        memset((*DR)[h], 0, NUMBINS*sizeof(long long));
    }
    (*RR)[0] = (long long*)malloc(NUMBINS*sizeof(long long));
    memset((*RR)[0], 0, NUMBINS*sizeof(long long));
    // Totals: sum every jackknife (and random-file) contribution per bin.
    for(int bin=0; bin<NUMBINS; bin++) {
        long long ddTotal = 0;
        for(int k=0; k<njk; k++) {
            ddTotal += DDs[NUMBINS*k + bin];
        }
        long long drTotal = 0;
        long long rrTotal = 0;
        for(int r=0; r<nrand; r++) {
            for(int k=0; k<njk; k++) {
                drTotal += DRs[(r*njk + k)*NUMBINS + bin];
            }
            rrTotal += RRs[r*NUMBINS + bin];
        }
        (*DD)[0][bin] = ddTotal;
        (*DR)[0][bin] = drTotal;
        (*RR)[0][bin] = rrTotal;
    }
    // Leave-one-out histograms: total minus jackknife k-1's contribution.
    for(int k=1; k<=njk; k++) {
        for(int bin=0; bin<NUMBINS; bin++) {
            (*DD)[k][bin] = (*DD)[0][bin] - DDs[(k-1)*NUMBINS + bin];
            long long drDropped = 0;
            for(int r=0; r<nrand; r++) {
                drDropped += DRs[(r*njk + k - 1)*NUMBINS + bin];
            }
            (*DR)[k][bin] = (*DR)[0][bin] - drDropped;
        }
    }
}
#endif
| 7614da54d00b3bc968c08bea8c4e995e44c9c3c8.cu | /*
Illinois Open Source License
University of Illinois/NCSA
Open Source License
Copyright © 2009, University of Illinois. All rights reserved.
Developed by:
Innovative Systems Lab
National Center for Supercomputing Applications
http://www.ncsa.uiuc.edu/AboutUs/Directorates/ISL.html
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution.
* Neither the names of Innovative Systems Lab and National Center for Supercomputing Applications, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
*/
#ifndef _GPU_COMPUTE_H_
#define _GPU_COMPUTE_H_
#include "ACF_kernel.cu"
#include "histogram_kernel.cu"
#include "model_io.cu"
#include "args.h"
#include <sys/time.h>
#define TDIFF(ts, te) (te.tv_sec - ts.tv_sec + (te.tv_usec - ts.tv_usec) * 1e-6)
#define GRID_SIZE (1 << LOG2_GRID_SIZE)
// Launch configuration for the ACF kernels: 128x128 blocks of 128 threads.
// NOTE(review): presumably sized so one launch covers a GRID_SIZE x GRID_SIZE
// tile of point pairs -- confirm against ACF_kernel.cu.
const dim3 grid(128, 128, 1);
const dim3 threads(128, 1, 1);
// Device-side data storage: one GRID_SIZE tile of x/y/z coordinates each.
cartesian d_idata1;
cartesian d_idata2;
// Device-side kernel output: packed histogram bin assignments.
unsigned int* d_odata1;
// Host-side data storage: the full (padded) data and random point sets.
cartesian h_idata1;
cartesian h_idata2;
// Performance timing scratch.
struct timeval t1, t0;
float t_Compute = 0.0f;
// Copies the histogram bin boundaries into GPU constant memory (binbounds).
void writeBoundaries(double *binbs) {
    const size_t boundaryBytes = (NUMBINS-1)*sizeof(double);
    cudaMemcpyToSymbol(binbounds, (void*)binbs, boundaryBytes);
}
// Used to compute DD or RR; exploits symmetry to halve the number of dot
// products and waterfall searches required. Unfortunately, due to the
// limitations of the histogram kernel, every element of d_odata still
// represents a histogram bin assignment, so the histogram kernel does just as
// much work here as it does in tileCompute.
// type:    0 corresponds to DD (uses h_idata1), 1 corresponds to RR (h_idata2)
// size:    number of elements in the data or random set (dependent upon type)
// njk:     number of jackknives
// jkSizes: per-tile jackknife sizes, laid out as jkSizes[tile*njk + jk]
// nBins:   number of histogram bins
// histo:   the function outputs by adding on to this histogram
// stream:  CUDA stream used for all async copies and kernel launches
void tileComputeSymm(int type, int size, int njk, int* jkSizes, int nBins, long long* histo, cudaStream_t &stream) {
    // Host-side scratch for GPUHistogram output (one sub-histogram per jackknife).
    unsigned int* subHistoTemp = (unsigned int*)malloc(njk*nBins*sizeof(unsigned int));
    // Number of GRID_SIZE tiles needed to cover the input on each 'axis'.
    int nkernels = iDivUp(size, GRID_SIZE);
    // Running offset into d_odata1; used in GPUHistogram calls.
    int index;
    for(int i=0; i<nkernels; i++) {
        // Stage tile i of the selected input set into d_idata1.
        if(type == 0) {
            cudaMemcpyAsync(d_idata1.x, &h_idata1.x[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            cudaMemcpyAsync(d_idata1.y, &h_idata1.y[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            cudaMemcpyAsync(d_idata1.z, &h_idata1.z[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
        }
        else {
            cudaMemcpyAsync(d_idata1.x, &h_idata2.x[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            cudaMemcpyAsync(d_idata1.y, &h_idata2.y[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            cudaMemcpyAsync(d_idata1.z, &h_idata2.z[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
        }
        // Correlate tile i with itself using the symmetric kernel.
        ACFKernelSymm<<< grid, threads, 128*sizeof(double3), stream >>>(d_idata1, d_odata1);
        index = 0;
        memset(subHistoTemp, 0, njk*nBins*sizeof(unsigned int));
        for(int k=0; k<njk; k++) {
            if(jkSizes[i*njk + k] != 0) {
                GPUHistogram(&subHistoTemp[nBins*k], &d_odata1[index], jkSizes[i*njk + k]*GRID_SIZE, stream);
                // d_odata1 packs four bin assignments per unsigned int, hence >> 2.
                index += jkSizes[i*njk + k] * (GRID_SIZE >> 2);
            }
        }
        // Accumulate this tile's sub-histograms onto the running totals.
        for(int k=0; k<njk*nBins; k++) {
            histo[k] += subHistoTemp[k];
        }
        // Correlate tile i against every later tile j (symmetry: only j > i).
        for(int j=i+1; j<nkernels; j++) {
            if(type == 0) {
                cudaMemcpyAsync(d_idata2.x, &h_idata1.x[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
                cudaMemcpyAsync(d_idata2.y, &h_idata1.y[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
                cudaMemcpyAsync(d_idata2.z, &h_idata1.z[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            }
            else {
                cudaMemcpyAsync(d_idata2.x, &h_idata2.x[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
                cudaMemcpyAsync(d_idata2.y, &h_idata2.y[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
                cudaMemcpyAsync(d_idata2.z, &h_idata2.z[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            }
            ACFKernel<<< grid, threads, 128*sizeof(double3), stream >>>(d_idata1, d_idata2, d_odata1);
            index = 0;
            memset(subHistoTemp, 0, njk*nBins*sizeof(unsigned int));
            // NOTE(review): jkSizes is indexed by i (the d_idata1 tile); output
            // rows appear to follow the first operand's jackknife layout -- confirm.
            for(int k=0; k<njk; k++) {
                if(jkSizes[i*njk + k] != 0) {
                    GPUHistogram(&subHistoTemp[nBins*k], &d_odata1[index], jkSizes[i*njk + k]*GRID_SIZE, stream);
                    index += jkSizes[i*njk + k] * (GRID_SIZE >> 2);
                }
            }
            for(int k=0; k<njk*nBins; k++) {
                histo[k] += subHistoTemp[k];
            }
        }
    }
    // Fix: release the scratch buffer; it was previously leaked on every call.
    free(subHistoTemp);
}
// Used to compute DR (cross-correlation of the data set with one random set).
// dataSize:   size of data set
// randomSize: size of random set
// All else:   see descriptions in tileComputeSymm
void tileCompute(int dataSize, int randomSize, int njk, int* jkSizes, int nBins, long long* histo, cudaStream_t &stream) {
    // Host-side scratch for GPUHistogram output (one sub-histogram per jackknife).
    unsigned int* subHistoTemp = (unsigned int*)malloc(njk*nBins*sizeof(unsigned int));
    int ndkernels = iDivUp(dataSize, GRID_SIZE);
    int nrkernels = iDivUp(randomSize, GRID_SIZE);
    // Running offset into d_odata1; used in GPUHistogram calls.
    int index;
    for(int i=0; i<ndkernels; i++) {
        // Stage data tile i into d_idata1.
        cudaMemcpyAsync(d_idata1.x, &h_idata1.x[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
        cudaMemcpyAsync(d_idata1.y, &h_idata1.y[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
        cudaMemcpyAsync(d_idata1.z, &h_idata1.z[i*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
        for(int j=0; j<nrkernels; j++) {
            // Stage random tile j into d_idata2 and correlate against data tile i.
            cudaMemcpyAsync(d_idata2.x, &h_idata2.x[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            cudaMemcpyAsync(d_idata2.y, &h_idata2.y[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            cudaMemcpyAsync(d_idata2.z, &h_idata2.z[j*GRID_SIZE], GRID_SIZE*sizeof(double), cudaMemcpyHostToDevice, stream);
            ACFKernel<<< grid, threads, 128*sizeof(double3), stream >>>(d_idata1, d_idata2, d_odata1);
            index = 0;
            memset(subHistoTemp, 0, njk*nBins*sizeof(unsigned int));
            for(int k=0; k<njk; k++) {
                if(jkSizes[i*njk + k] != 0) {
                    GPUHistogram(&subHistoTemp[nBins*k], &d_odata1[index], jkSizes[i*njk + k]*GRID_SIZE, stream);
                    // d_odata1 packs four bin assignments per unsigned int, hence >> 2.
                    index += jkSizes[i*njk + k] * (GRID_SIZE >> 2);
                }
            }
            // Accumulate this tile pair's sub-histograms onto the running totals.
            for(int k=0; k<njk*nBins; k++) {
                histo[k] += subHistoTemp[k];
            }
        }
    }
    // Fix: release the scratch buffer; it was previously leaked on every call.
    free(subHistoTemp);
}
// Computes histograms and writes to DDs, DRs, RRs. These must be compiled by
// the host program; the function outputs njk sub-histograms for each, the sum
// of which is the full histogram. Note that data should be sorted according
// to jackknife; otherwise results will be meaningless.
// dataName:    file name of data points file
// randomNames: file name stem of random points files
// nr:          number of random files, assumed named randomNames.i, 1 <= i <= nr
// dataSize:    number of elements to read from data points file
// randomSize:  number of elements to read from each random points file
// njk:         number of jackknives
// jkSizes:     ordered jackknife sizes; each must currently be a multiple of 4
// nBins:       number of histogram bins
// zeroBin:     index of bin which contains 0.0f; needed to correct for padding
// DDs, DRs, RRs: output subhistogram lists (allocated by this function)
void doComputeGPU(char* dataName, char* randomNames, int nr, int dataSize, int randomSize, int njk, int* jkSizes,
                  int nBins, int zeroBin, long long** DDs, long long** DRs, long long** RRs) {
    // DDs, DRs, RRs are not assumed to be allocated or cleared.
    *DDs = (long long*)malloc(nBins*njk*sizeof(long long));
    *DRs = (long long*)malloc(nBins*njk*nr*sizeof(long long));
    *RRs = (long long*)malloc(nBins*nr*sizeof(long long));
    memset(*DDs, 0, nBins*njk*sizeof(long long));
    memset(*DRs, 0, nBins*njk*nr*sizeof(long long));
    memset(*RRs, 0, nBins*nr*sizeof(long long));
    int ndkernels = iDivUp(dataSize, GRID_SIZE);
    int nrkernels = iDivUp(randomSize, GRID_SIZE);
    // Per-tile jackknife sizes for the data and random sets.
    int* dkerneljkSizes = (int*)malloc(njk*ndkernels*sizeof(int));
    int* rkerneljkSizes = (int*)malloc(njk*nrkernels*sizeof(int));
    memset(dkerneljkSizes, 0, njk*ndkernels*sizeof(int));
    memset(rkerneljkSizes, 0, njk*nrkernels*sizeof(int));
    // Distribute the data jackknives across GRID_SIZE-element tiles.
    int currentjk = 0;
    int numwrittencurrentjk = 0;
    for(int i=0; i<ndkernels; i++) {
        int remainder = GRID_SIZE;
        while(remainder > 0 && currentjk < njk) {
            if(remainder < jkSizes[currentjk] - numwrittencurrentjk) {
                // Current jackknife spills over into the next tile.
                dkerneljkSizes[i*njk + currentjk] += remainder;
                numwrittencurrentjk += remainder;
                remainder = 0;
            }
            else {
                // Current jackknife finishes inside this tile; advance to the next.
                remainder = remainder - (jkSizes[currentjk] - numwrittencurrentjk);
                dkerneljkSizes[i*njk + currentjk] += (jkSizes[currentjk] - numwrittencurrentjk);
                currentjk++;
                numwrittencurrentjk = 0;
            }
        }
    }
    // The random set has no jackknife structure; assign every tile to slot 0.
    for(int i=0; i<nrkernels-1; i++) {
        rkerneljkSizes[i*njk] += GRID_SIZE;
    }
    rkerneljkSizes[(nrkernels-1)*njk] += (randomSize % GRID_SIZE == 0) ? GRID_SIZE : randomSize % GRID_SIZE;
    // Kernel invocations require full GRID_SIZE tiles, so pad each set up to a
    // multiple of GRID_SIZE.
    int dataSizePadded = dataSize + ((GRID_SIZE - (dataSize % GRID_SIZE)) % GRID_SIZE);
    int randomSizePadded = randomSize + ((GRID_SIZE - (randomSize % GRID_SIZE)) % GRID_SIZE);
    // Use page-locked host memory; somewhat faster, and host-side memory
    // requirements are rather small.
    cudaMallocHost((void**)&h_idata1.x, dataSizePadded*sizeof(double));
    cudaMallocHost((void**)&h_idata1.y, dataSizePadded*sizeof(double));
    cudaMallocHost((void**)&h_idata1.z, dataSizePadded*sizeof(double));
    h_idata1.jk = (int*)malloc(dataSize*sizeof(int));
    cudaMallocHost((void**)&h_idata2.x, randomSizePadded*sizeof(double));
    cudaMallocHost((void**)&h_idata2.y, randomSizePadded*sizeof(double));
    cudaMallocHost((void**)&h_idata2.z, randomSizePadded*sizeof(double));
    h_idata2.jk = (int*)malloc(randomSize*sizeof(int));
    // Zero the padding so the dot product of a padding element and any other
    // element gets mapped to the bin containing 0.0f.
    for(int i=dataSize; i<dataSizePadded; i++) {
        h_idata1.x[i] = double(0.0);
        h_idata1.y[i] = double(0.0);
        h_idata1.z[i] = double(0.0);
    }
    for(int i=randomSize; i<randomSizePadded; i++) {
        h_idata2.x[i] = double(0.0);
        h_idata2.y[i] = double(0.0);
        h_idata2.z[i] = double(0.0);
    }
    // Allocate device memory for inputs and output (one GRID_SIZE tile each;
    // the output packs four bin assignments per unsigned int).
    cudaMalloc((void**)&d_idata1.x, GRID_SIZE*sizeof(double));
    cudaMalloc((void**)&d_idata1.y, GRID_SIZE*sizeof(double));
    cudaMalloc((void**)&d_idata1.z, GRID_SIZE*sizeof(double));
    d_idata1.jk = NULL;
    cudaMalloc((void**)&d_idata2.x, GRID_SIZE*sizeof(double));
    cudaMalloc((void**)&d_idata2.y, GRID_SIZE*sizeof(double));
    cudaMalloc((void**)&d_idata2.z, GRID_SIZE*sizeof(double));
    d_idata2.jk = NULL;
    cudaMalloc((void**)&d_odata1, GRID_SIZE*GRID_SIZE*sizeof(unsigned int)/4);
    histoInit();
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    struct timeval t3, t2, t1, t0;
    float t_computeDD=0, t_computeRRS=0, t_computeDRS=0, t_fileIO=0;
    gettimeofday(&t0, NULL);
    char fname[256];
    readdatafile(dataName, h_idata1, dataSize);
    gettimeofday(&t1, NULL);
    // Compute DD
    tileComputeSymm(0, dataSize, njk, dkerneljkSizes, nBins, *DDs, stream);
    gettimeofday(&t2, NULL);
    t_fileIO += TDIFF(t0, t1);
    t_computeDD = TDIFF(t1, t2);
    for(int i=0; i<nr; i++) {
        // Fix: bounded snprintf, and drop the redundant "\0" from the format.
        snprintf(fname, sizeof(fname), "%s.%i", randomNames, i+1);
        gettimeofday(&t0, NULL);
        readdatafile(fname, h_idata2, randomSize);
        gettimeofday(&t1, NULL);
        // Compute DR_i
        tileCompute(dataSize, randomSize, njk, dkerneljkSizes, nBins, &(*DRs)[njk*nBins*i], stream);
        gettimeofday(&t2, NULL);
        // Compute RR_i
        tileComputeSymm(1, randomSize, njk, rkerneljkSizes, nBins, &(*RRs)[nBins*i], stream);
        gettimeofday(&t3, NULL);
        t_fileIO += TDIFF(t0, t1);
        t_computeDRS += TDIFF(t1, t2);
        t_computeRRS += TDIFF(t2, t3);
    }
    // Correct for error introduced by padding vectors: every padding element
    // pairs with each valid row and lands in the zero bin.
    int padfactor = randomSizePadded - randomSize;
    for(int i=0; i<nr; i++) {
        (*RRs)[nBins*i + zeroBin] -= padfactor*randomSize;
        for(int j=0; j<njk; j++) {
            (*DRs)[nBins*njk*i + nBins*j + zeroBin] -= padfactor*jkSizes[j];
        }
    }
    // Fix: DD is built from the DATA set, so its correction must use the data
    // set's padding, not the random set's (the two differ whenever dataSize
    // and randomSize pad differently).
    int dataPadfactor = dataSizePadded - dataSize;
    for(int i=0; i<njk; i++) {
        (*DDs)[nBins*i + zeroBin] -= dataPadfactor*jkSizes[i];
    }
    // Tidy up.
    cudaStreamDestroy(stream);
    histoClose();
    cudaFree(d_idata1.x);
    cudaFree(d_idata1.y);
    cudaFree(d_idata1.z);
    cudaFree(d_idata2.x);
    cudaFree(d_idata2.y);
    cudaFree(d_idata2.z);
    cudaFree(d_odata1);
    cudaFreeHost(h_idata1.x);
    cudaFreeHost(h_idata1.y);
    cudaFreeHost(h_idata1.z);
    free(h_idata1.jk);
    cudaFreeHost(h_idata2.x);
    cudaFreeHost(h_idata2.y);
    cudaFreeHost(h_idata2.z);
    free(h_idata2.jk);
    // Fix: release the per-tile jackknife size tables (previously leaked).
    free(dkerneljkSizes);
    free(rkerneljkSizes);
    printf("================================================\n");
    printf("Time to compute DD: %.4f sec\n", t_computeDD);
    printf("Time to compute RRS: %.4f sec\n", t_computeRRS);
    printf("Time to compute DRS: %.4f sec\n", t_computeDRS);
    printf("Time to load data files: %.4f sec\n", t_fileIO);
    printf("Time to compute DD, RRS, & DRS: %.4f sec\n", t_computeDD+t_computeRRS+t_computeDRS);
    printf("TOTAL time (DD+RRS+DRS+IO): %.4f sec\n", t_computeDD+t_computeRRS+t_computeDRS+t_fileIO);
    printf("================================================\n");
}
// Collapses the per-jackknife sub-histograms into full and leave-one-out
// histograms. (*DD)[0] / (*DR)[0] / (*RR)[0] hold the totals over all
// jackknives (and all random files); (*DD)[k] and (*DR)[k] for k = 1..njk
// hold the totals with jackknife k-1's contribution removed.
void compileHistograms(long long* DDs, long long* DRs, long long* RRs, long long*** DD, long long*** DR,
                       long long*** RR, options *args) {
    const int njk = args->njk;
    const int nrand = args->random_count;
    // One histogram per jackknife plus the total for DD and DR; RR gets only
    // the total.
    *DD = (long long**)malloc((njk+1)*sizeof(long long*));
    *DR = (long long**)malloc((njk+1)*sizeof(long long*));
    *RR = (long long**)malloc(1*sizeof(long long*));
    for(int h=0; h<=njk; h++) {
        (*DD)[h] = (long long*)malloc(NUMBINS*sizeof(long long));
        (*DR)[h] = (long long*)malloc(NUMBINS*sizeof(long long));
        memset((*DD)[h], 0, NUMBINS*sizeof(long long));
        memset((*DR)[h], 0, NUMBINS*sizeof(long long));
    }
    (*RR)[0] = (long long*)malloc(NUMBINS*sizeof(long long));
    memset((*RR)[0], 0, NUMBINS*sizeof(long long));
    // Totals: sum every jackknife (and random-file) contribution per bin.
    for(int bin=0; bin<NUMBINS; bin++) {
        long long ddTotal = 0;
        for(int k=0; k<njk; k++) {
            ddTotal += DDs[NUMBINS*k + bin];
        }
        long long drTotal = 0;
        long long rrTotal = 0;
        for(int r=0; r<nrand; r++) {
            for(int k=0; k<njk; k++) {
                drTotal += DRs[(r*njk + k)*NUMBINS + bin];
            }
            rrTotal += RRs[r*NUMBINS + bin];
        }
        (*DD)[0][bin] = ddTotal;
        (*DR)[0][bin] = drTotal;
        (*RR)[0][bin] = rrTotal;
    }
    // Leave-one-out histograms: total minus jackknife k-1's contribution.
    for(int k=1; k<=njk; k++) {
        for(int bin=0; bin<NUMBINS; bin++) {
            (*DD)[k][bin] = (*DD)[0][bin] - DDs[(k-1)*NUMBINS + bin];
            long long drDropped = 0;
            for(int r=0; r<nrand; r++) {
                drDropped += DRs[(r*njk + k - 1)*NUMBINS + bin];
            }
            (*DR)[k][bin] = (*DR)[0][bin] - drDropped;
        }
    }
}
#endif
|
749bd2120802bee69fc16c9bfeaca534b28915b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffe2/core/context_gpu.h>
#include <caffe2/operator/mean_stdev_op.h>
namespace caffe2 {
namespace {
// For each of the N rows of X (each C elements wide), writes the row mean to
// M[i] and the row standard deviation to S[i], both normalized by the divisor
// D (the caller passes D equal to C).
__global__ void MeanStdevKernel(const int N, const int C, const float D,
                                const float* X, float* M, float* S) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float* row = X + i * C;
    // First pass: mean of the row.
    float total = 0;
    for (int c = 0; c < C; c++) {
      total += row[c];
    }
    M[i] = total / D;
    // Second pass: sum of squared deviations from the mean.
    float devsq = 0;
    for (int c = 0; c < C; c++) {
      float d = row[c] - M[i];
      devsq += d * d;
    }
    S[i] = sqrtf(devsq / D);
  }
}
} // namespace
// Treats Input(0) as a (dim(0), size/dim(0)) matrix and fills Output(0) with
// per-row means and Output(1) with per-row standard deviations.
template <>
bool MeanStdevOp<float, CUDAContext>::RunOnDevice() {
  auto& input = Input(0);
  auto* mean = Output(0);
  auto* stdev = Output(1);
  mean->Resize(input.dim(0));
  stdev->Resize(input.dim(0));
  if (input.size() > 0) {
    // Row width; also used as the normalizing divisor.
    auto cols = input.size() / input.dim(0);
    hipLaunchKernelGGL(( MeanStdevKernel), dim3(CAFFE_GET_BLOCKS(mean->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0,
                       context_.cuda_stream(),
                       mean->size(), cols, (float)cols, input.data<float>(),
                       mean->mutable_data<float>(), stdev->mutable_data<float>());
  }
  return true;
}
// Registers MeanStdevOp as the CUDA implementation of the "MeanStdev" operator.
REGISTER_CUDA_OPERATOR(MeanStdev, MeanStdevOp<float, CUDAContext>);
} // namespace caffe2
| 749bd2120802bee69fc16c9bfeaca534b28915b4.cu | #include <caffe2/core/context_gpu.h>
#include <caffe2/operator/mean_stdev_op.h>
namespace caffe2 {
namespace {
// For each of the N rows of X (each C elements wide), writes the row mean to
// M[i] and the row standard deviation to S[i], both normalized by the divisor
// D (the caller passes D equal to C).
__global__ void MeanStdevKernel(const int N, const int C, const float D,
                                const float* X, float* M, float* S) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float* row = X + i * C;
    // First pass: mean of the row.
    float total = 0;
    for (int c = 0; c < C; c++) {
      total += row[c];
    }
    M[i] = total / D;
    // Second pass: sum of squared deviations from the mean.
    float devsq = 0;
    for (int c = 0; c < C; c++) {
      float d = row[c] - M[i];
      devsq += d * d;
    }
    S[i] = sqrtf(devsq / D);
  }
}
} // namespace
// Treats Input(0) as a (dim(0), size/dim(0)) matrix and fills Output(0) with
// per-row means and Output(1) with per-row standard deviations.
template <>
bool MeanStdevOp<float, CUDAContext>::RunOnDevice() {
  auto& input = Input(0);
  auto* mean = Output(0);
  auto* stdev = Output(1);
  mean->Resize(input.dim(0));
  stdev->Resize(input.dim(0));
  if (input.size() > 0) {
    // Row width; also used as the normalizing divisor.
    auto cols = input.size() / input.dim(0);
    MeanStdevKernel<<<CAFFE_GET_BLOCKS(mean->size()), CAFFE_CUDA_NUM_THREADS, 0,
                      context_.cuda_stream()>>>(
        mean->size(), cols, (float)cols, input.data<float>(),
        mean->mutable_data<float>(), stdev->mutable_data<float>());
  }
  return true;
}
// Registers MeanStdevOp as the CUDA implementation of the "MeanStdev" operator.
REGISTER_CUDA_OPERATOR(MeanStdev, MeanStdevOp<float, CUDAContext>);
} // namespace caffe2
|
0a2440e029e5f48cb0f2f212ba9cb4fe4489cb50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
* Copyright 2010 Marco Hutter - http://www.jcuda.org
*/
/**
* Kernels for the JCudaDriverTextureTest class. These
* kernels will read data via the texture references at
* the given positions, and store the value that is
* read into the given output memory.
*/
texture<float, 1, hipReadModeElementType> texture_float_1D;
texture<float, 2, hipReadModeElementType> texture_float_2D;
texture<float, 3, hipReadModeElementType> texture_float_3D;
texture<float4, 1, hipReadModeElementType> texture_float4_1D;
texture<float4, 2, hipReadModeElementType> texture_float4_2D;
texture<float4, 3, hipReadModeElementType> texture_float4_3D;
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
__global__ void test_float4_3D(float4 *output, float posX, float posY, float posZ)
{
float4 result = tex3D(texture_float4_3D, posX, posY, posZ);
output[0] = result;
} | 0a2440e029e5f48cb0f2f212ba9cb4fe4489cb50.cu | #include "includes.h"
/*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
* Copyright 2010 Marco Hutter - http://www.jcuda.org
*/
/**
* Kernels for the JCudaDriverTextureTest class. These
* kernels will read data via the texture references at
* the given positions, and store the value that is
* read into the given output memory.
*/
texture<float, 1, cudaReadModeElementType> texture_float_1D;
texture<float, 2, cudaReadModeElementType> texture_float_2D;
texture<float, 3, cudaReadModeElementType> texture_float_3D;
texture<float4, 1, cudaReadModeElementType> texture_float4_1D;
texture<float4, 2, cudaReadModeElementType> texture_float4_2D;
texture<float4, 3, cudaReadModeElementType> texture_float4_3D;
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
__global__ void test_float4_3D(float4 *output, float posX, float posY, float posZ)
{
float4 result = tex3D(texture_float4_3D, posX, posY, posZ);
output[0] = result;
} |
0d207d48760f19e047bb7ec9a8c155d116d24669.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cnn/cuda.h"
#include "cnn/gpu-ops.h"
#include "cnn/gpu-kernels.h"
#include "cnn/functors.h"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include "gpu-ops.cuh"
namespace cnn {
namespace gpu {
// this wraps kernel dispatches for various operations (preventing us from
// having to compile a version of nodes.cc with NVCC)
void saxpy_fast(cnn::real A, thrust::device_vector<cnn::real>& X, thrust::device_vector<cnn::real>& Y)
{
// Y <- A * X + Y
thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(A));
}
void set_to_value_of(int n, cnn::real* x0, cnn::real val)
{
thrust::device_ptr<cnn::real> dev_ptr = thrust::device_pointer_cast(x0);
thrust::fill(thrust::device, dev_ptr, dev_ptr + n, val);
}
void set_to_value_of(int n, cnn::real* x0, cnn::real *val) {
thrust::device_ptr<cnn::real> dev_ptr = thrust::device_pointer_cast(x0);
thrust::device_ptr<cnn::real> src_dev_ptr = thrust::device_pointer_cast(val);
thrust::copy(src_dev_ptr, src_dev_ptr + n, dev_ptr);
}
void vpairwise_rank_loss(int n, cnn::real margin, const cnn::real* xgood, const cnn::real* xbad, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( binaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, xgood, xbad, y, FPairwiseRankLoss(margin));
}
void vpairwise_rank_loss_backward(int n, bool d_wrt_correct, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
if (d_wrt_correct) {
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, fx, dEdf, dEdx, FRectifyNegateBackward());
} else {
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, fx, dEdf, dEdx, FRectifyBackward());
}
}
void vcwise_product(int n, const cnn::real* x0, const cnn::real* x1, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( binaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x0, x1, y, FProduct());
}
void vcwise_product_backward(int n, const cnn::real* dEdy, const cnn::real* x_other, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, dEdy, x_other, dEdx, FProduct());
}
void vcwise_quotient(int n, const cnn::real* x0, const cnn::real* x1, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
binaryExprKernel << <tb.first, tb.second >> >(n, x0, x1, y, FQuotient());
}
void vcwise_quotient_backward(int n, const cnn::real* dEdy, const cnn::real* x_other, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, dEdy, x_other, dEdx, FQuotient());
}
void vconstant_minusx(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMinus(c));
}
void vconstant_minusx_backward(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
accUnaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMinus(c));
}
void vconstant_multiplyx(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMultiply(c));
}
void vconstant_multiplyx_backward(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
accUnaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMultiply(c));
}
void vexp(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FExp());
}
void vnegate(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( unaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x, y, FNegate());
}
void vnegate_backward(int n, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accUnaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, dEdf, dEdx, FNegate());
}
void vrelu(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( unaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x, y, FRectify());
}
void vrelu_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, fx, dEdf, dEdx, FRectifyBackward());
}
void vexponential_linear_units(int n, const cnn::real* x, const cnn::real scale, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FExponentialLinearUnits(scale));
}
void vexponential_linear_units_backward(int n, const cnn::real* fx, const cnn::real* dEdf, const cnn::real scale, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, fx, dEdf, dEdx, FExponentialLinearUnitsBackward(scale));
}
void vtanh(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( unaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x, y, FTanh());
}
void vtanh_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, fx, dEdf, dEdx, FTanhBackward());
}
void vlog(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( unaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x, y, FLog());
}
void vlog_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, fx, dEdf, dEdx, FLogBackward());
}
void vlogistic(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( unaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x, y, FLogisticSigmoid());
}
void vlogistic_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, fx, dEdf, dEdx, FLogisticSigmoidBackward());
}
void sqeucdist_backward(int n, const cnn::real* dEdy, const cnn::real* x0, const cnn::real* x1, cnn::real* dEdx, int i) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accBinaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x0, x1, dEdx, FEuclideanBackward(i, dEdy));
}
void sgd_update(int n, const cnn::real* g, cnn::real* x, cnn::real scale, cnn::real lambda) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, x, g, x, FL2SGDUpdate(lambda, scale));
}
void sgd_update(int n, const cnn::real* g, cnn::real* x, cnn::real* scale, cnn::real* lambda) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, x, g, x, FL2SGDUpdatePtrArguments (lambda, scale));
}
void sgd_momentum_update(int n, const cnn::real* g, cnn::real* x, cnn::real* v, cnn::real scale, cnn::real lambda, cnn::real momentum) {
auto tb = SizeToBlockThreadPair(n);
accTripletExprKernel << <tb.first, tb.second >> >(n, x, g, v, x, FL2SGDMomentumUpdate(lambda, scale, momentum));
}
void rmsprop_update(int n, const cnn::real* g, cnn::real* x, cnn::real *r, cnn::real scale, cnn::real lambda, cnn::real rho, cnn::real epsilon, cnn::real grd_squared_norm) {
auto tb = SizeToBlockThreadPair(n);
/// it may be more efficient to compute in cpu and not do reduce in gpu, but my observation is not
/// that case
*r = rho * (*r) + (1 - rho) * grd_squared_norm;
cnn::real den = sqrt(*r + epsilon);
accBinaryExprKernel << <tb.first, tb.second >> >(n, x, g, x, FL2SGDUpdate(lambda, scale / den));
//CUDA_CHECK(hipFree(sqnorm));
}
/** followed some examples of using thrust at
https://github.com/OrangeOwlSolutions/Thrust/blob/master/Calculating_the_norm_of_arrays.cu
*/
/// this is old code that computes gradient norm for every parameter
/*
void rmsprop_momentum_update(int n, const cnn::real* g, cnn::real* x, cnn::real* v, cnn::real *r, cnn::real scale, cnn::real lambda, cnn::real momentum, cnn::real rho, cnn::real epsilon) {
auto tb = SizeToBlockThreadPair(n);
/// it may be more efficient to compute in cpu and not do reduce in gpu, but my observation is not
/// that case
cnn::real squared_norm = thrust::transform_reduce(thrust::device_pointer_cast(g), thrust::device_pointer_cast(g + n), FSquare(), (cnn::real)0.0, thrust::plus<cnn::real>());
*r = rho * (*r) + (1 - rho) * squared_norm;
cnn::real den = sqrt(*r + epsilon);
accTripletExprKernel << <tb.first, tb.second >> >(n, x, g, v, x, FL2SGDMomentumUpdate(lambda, scale / den, momentum));
//CUDA_CHECK(hipFree(sqnorm));
}
*/
/// this is a newer code that uses gradient norms computed elsewhere.
/// potential speed-up can be achieved to compute all of gradient norms in GPU and then transfer them to
/// CPU in a bulk.
void rmsprop_momentum_update(int n, const cnn::real* g, cnn::real* x, cnn::real* v, cnn::real *r, cnn::real scale, cnn::real lambda, cnn::real momentum, cnn::real rho, cnn::real epsilon, cnn::real grd_squared_norm) {
auto tb = SizeToBlockThreadPair(n);
/// it may be more efficient to compute in cpu and not do reduce in gpu, but my observation is not
/// that case
*r = rho * (*r) + (1 - rho) * grd_squared_norm;
cnn::real den = sqrt(*r + epsilon);
accTripletExprKernel << <tb.first, tb.second >> >(n, x, g, v, x, FL2SGDMomentumUpdate(lambda, scale / den, momentum));
//CUDA_CHECK(hipFree(sqnorm));
}
void sqeucdist(int n, const cnn::real* x0, const cnn::real *x1, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( ker_sqeucdist), dim3(tb.first),dim3(tb.second), 0, 0, n, x0, x1, y);
}
void l2_norm_reducer(int n, const cnn::real* x0, cnn::real* y, bool square, bool accumulate) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( ker_l2_norm_reducer), dim3(tb.first),dim3(tb.second), 0, 0, n, x0, y, square, accumulate);
}
void sqrt_of_l2_norm_reducer(int n, cnn::real* x0, cnn::real& res)
{
thrust::device_ptr<cnn::real> dv_ptr = thrust::device_pointer_cast(x0);
FSquare unary_op;
thrust::plus<cnn::real> binary_op;
res = std::sqrt(thrust::transform_reduce(dv_ptr, dv_ptr + n, unary_op, 0.0, binary_op));
}
void vector_sum(int rows, int cols, const cnn::real * a, cnn::real* c, const bool isColWise)
{
assert(rows > 0 && cols > 0); // converting from size_t to int may cause overflow
int m = cols;
int n = rows;
hipEvent_t done = nullptr;
int blocksPerGrid = 0;
if (isColWise) // col-wise
{
blocksPerGrid = (int)ceil(1.0 * m / MAX_THREADS_PER_BLOCK);
}
else
{
blocksPerGrid = (int)ceil(1.0 * n / MAX_THREADS_PER_BLOCK);
}
hipEventCreate(&done);
_vector_sum<cnn::real> << <blocksPerGrid, MAX_THREADS_PER_BLOCK, 0, hipStreamDefault >> >(c, a, n, m, isColWise);
hipEventRecord(done);
hipEventSynchronize(done);
hipEventDestroy(done);
}
void vector_add_const(int rows, int cols, const cnn::real * a, int brow, int bcol, const cnn::real* b, cnn::real * c, bool isColWise)
{
assert(rows > 0 && cols > 0); // converting from size_t to int may cause overflow
int m = cols;
int n = rows;
if (brow != bcol && brow != 1)
cuda_exception("const dimension has to be a scalar");
hipEvent_t done = nullptr;
int blocksPerGrid = 0;
if (isColWise) // col-wise
{
blocksPerGrid = (int)ceil(1.0 * m / MAX_THREADS_PER_BLOCK);
}
else
{
blocksPerGrid = (int)ceil(1.0 * n / MAX_THREADS_PER_BLOCK);
}
hipEventCreate(&done);
_vector_add_const<cnn::real> << <blocksPerGrid, MAX_THREADS_PER_BLOCK, 0, hipStreamDefault >> >(c, a, n, m, b, isColWise);
hipEventRecord(done);
hipEventSynchronize(done);
hipEventDestroy(done);
}
/// assume that a is a vector with col dimension
void row_element_multiply_with(int arow, int acol, const cnn::real * a, int brow, int bcol, cnn::real * b)
{
if (arow != 1 || acol != bcol)
{
abort();
}
int N = brow;
int M = acol;
int blocksPerGrid = (int)ceil(1.0 * M / MAX_THREADS_PER_BLOCK);
hipEvent_t done = nullptr;
hipEventCreate(&done);
_rowElementMultiplyWith<cnn::real> << <blocksPerGrid, MAX_THREADS_PER_BLOCK >> >(b, a, N, M);
hipEventRecord(done);
hipEventSynchronize(done);
hipEventDestroy(done);
}
/**
logsoftmax opreations using cudnn
notice that cuNN uses rwo-major.
so the N here is col.
*/
void logsoftmax(int row, int col, const cnn::real* x0, cnn::real* y)
{
cudnnTensorDescriptor_t pInputDesc;
int n = col; int c = 1; int h = 1; int w = row;
cnn::real one = 1.0, zero = 0.0;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_INSTANCE,
&one, pInputDesc, x0,
&zero, pInputDesc, y));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
/*
old code
hipStream_t t_stream = hipStreamDefault;
int N = col;
int M = row;
hipEvent_t done = nullptr;
hipEventCreate(&done);
/// TO-DO: The N is the number of columns and is also the number of blocks. For small N, it is fine. For very large N, it may slow down computation.
_assignColumnwiseLogSoftmaxOf<cnn::real> << <N, 512, 0, t_stream >> >(x0, y, N, M);
hipEventRecord(done);
hipEventSynchronize(done);
hipEventDestroy(done);
*/
}
void logsoftmax_backward(int row, int col, const cnn::real *fx, const cnn::real *dEdf, cnn::real *dEdx)
{
cudnnTensorDescriptor_t pInputDesc;
int n = col; int c = 1; int h = 1; int w = row;
cnn::real one = 1.0;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSoftmaxBackward(cudnn_handle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_INSTANCE,
&one, pInputDesc, fx, pInputDesc, dEdf,
&one, pInputDesc, dEdx));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
/*
old code
vexp(row * col, fx, gpu_softmax);
vector_sum(row, col, dEdf, grd, true);
row_element_multiply_with(1, col, grd, row, col, gpu_softmax);
auto tb = SizeToBlockThreadPair(col * row);
accBinaryExprKernel << <tb.first, tb.second >> >(col * row, dEdf, gpu_softmax, dEdx, FSubtract());
*/
}
/**
softmax opreations using cudnn
notice that cuNN uses rwo-major.
so the N here is col.
*/
void softmax(int row, int col, const cnn::real* x0, cnn::real* y)
{
cudnnTensorDescriptor_t pInputDesc;
int n = col; int c = 1; int h = 1; int w = row;
cnn::real one = 1.0, zero = 0.0;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE,
&one, pInputDesc, x0,
&zero, pInputDesc, y));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
}
void softmax_backward(int row, int col, const cnn::real *fx, const cnn::real *dEdf, cnn::real *dEdx)
{
cudnnTensorDescriptor_t pInputDesc;
int n = col; int c = 1; int h = 1; int w = row;
cnn::real one = 1.0;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSoftmaxBackward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE,
&one, pInputDesc, fx, pInputDesc, dEdf,
&one, pInputDesc, dEdx));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
}
/*
old implementation
void softmax(int row, int col, const cnn::real* x0, cnn::real* y)
{
hipStream_t t_stream = hipStreamDefault;
int N = col;
int M = row;
hipEvent_t done = nullptr;
hipEventCreate(&done);
_assignColumnwiseSoftmaxOf<cnn::real> << <N, MAX_THREADS_PER_BLOCK, 0, t_stream >> >(x0, y, N, M);
hipEventRecord(done);
hipEventSynchronize(done);
hipEventDestroy(done);
}
///
/// see http://research.microsoft.com/pubs/226641/CNTKBook-20160217..pdf
/// input gradient += (\frac{\partial J}{\partial v_{ij}} - \sum_r \frac{\partial J}{\partial v_{rj} v_{rj}) v_{ij}
void softmax_backward(int row, int col, const cnn::real *fx, const cnn::real *dEdf, cnn::real *dEdx)
{
hipStream_t t_stream = hipStreamDefault;
hipEvent_t done = nullptr;
hipEventCreate(&done);
_assignColumnwiseSoftmaxOfBackward<cnn::real> << <col, MAX_THREADS_PER_BLOCK, 0, t_stream >> >(fx, dEdf, dEdx, col, row);
hipEventRecord(done);
hipEventSynchronize(done);
hipEventDestroy(done);
}
*/
// adapted from NVIDIA example
__global__ void ker_pnlsoftmax(int n, int elem_idx, const cnn::real *x0, cnn::real* res, cnn::real* logz) {
__shared__ cnn::real buf[256];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real me = __int_as_float(0xff800000);
for (int pos = i; pos < n; pos += 256) {
const cnn::real d = x0[pos];
me = d > me ? d : me;
}
buf[i] = me;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] = buf[i] > buf[stride + i] ? buf[i] : buf[stride + i];
}
__syncthreads();
const cnn::real max_elem = buf[0];
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256)
sum += expf(x0[pos] - max_elem);
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0) {
cnn::real lz = log(buf[0]) + max_elem;
logz[0] = lz;
res[0] = lz - x0[elem_idx];
}
}
void pnlsoftmax(int n, int elem_idx, const cnn::real* x0, cnn::real* y, cnn::real* logz) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( ker_pnlsoftmax), dim3(tb.first),dim3(tb.second), 0, 0, n, elem_idx, x0, y, logz);
}
__global__ void fixup_pnl(const cnn::real* dEdf, cnn::real* dEdxi, int i) {
if (threadIdx.x == 0) dEdxi[i] -= dEdf[0];
}
void pnlsoftmax_backward(int n, int elem_idx, const cnn::real* x0, const cnn::real* dEdf, const cnn::real* logz, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
hipLaunchKernelGGL(( accUnaryExprKernel), dim3(tb.first), dim3(tb.second), 0, 0, n, x0, dEdx, FPtrNegLogSoftmaxBackward(logz, dEdf));
hipLaunchKernelGGL(( fixup_pnl), dim3(1),dim3(1), 0, 0, dEdf, dEdx, elem_idx);
}
/**
conv1dnarrow using cuDNN, which is faster. however, cudnn is row-major as [n,c,h,w].
we always assume column-major,
to accomodate to cudnn, n,c,h,w are interprated as
[ncols, 1, nrows, 1]
can only do 1d convolution for each column
# CUDNN/Caffe sizes for various arrays in column-major notation:
conv x: (N,C,H,W): W,H=image size, C=channels, N=instances
conv w: (K,C,Y,X): X,Y=filter size, C=input channels, K=output channels
conv y: (N,K,H-Y+1,W-X+1)
conv b: (1,K,1,1)
*/
void conv2dnarrow(const cnn::real* kscalar_one, const cnn::real* kscalar_zero,
const int xrow, const int xcol, const cnn::real* xs,
const int i_wkspace_sz, cnn::real* wkspace,
const int frow, const int fcol, const cnn::real *fx,
const int yrow, const int ycol, cnn::real *fy)
{
/*
cudnnTensorDescriptor_t pInputDesc;
cudnnTensorDescriptor_t pOutputDesc;
cudnnFilterDescriptor_t pFilterDesc = nullptr;
cudnnConvolutionDescriptor_t pConvDesc = nullptr;
int n = 1; int c = 1; int h = xcol; int w = xrow;
int k_pFilter_in = 1; /// number of output feature maps
int c_pFilter_in = 1; /// number of input feature maps
int h_pFilter_in = fcol;
int w_pFilter_in = frow;
int n_out, c_out, h_out, w_out;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pOutputDesc));
CHECK_CUDNN(cudnnCreateFilterDescriptor(&pFilterDesc));
CHECK_CUDNN(cudnnCreateConvolutionDescriptor(&pConvDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSetFilter4dDescriptor(pFilterDesc, cudnnDataType, k_pFilter_in, c_pFilter_in, h_pFilter_in, w_pFilter_in));
CHECK_CUDNN(cudnnSetConvolution2dDescriptor(pConvDesc, 0, 0, 1, 1, 1, 1, CUDNN_CONVOLUTION));
/// get the output layout
CHECK_CUDNN(cudnnGetConvolution2dForwardOutputDim(pConvDesc, pInputDesc, pFilterDesc, &n_out, &c_out, &h_out, &w_out));
assert(n_out * c_out * h_out * w_out == yrow * ycol);
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pOutputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n_out, c_out, h_out, w_out));
size_t sz_wkspace;
bool bNeedAllocateNewSpace = false;
cnn::real *tmp_work_space;
CHECK_CUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle, pInputDesc, pFilterDesc, pConvDesc, pOutputDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, &sz_wkspace));
if (sz_wkspace < i_wkspace_sz)
{
tmp_work_space = wkspace;
}
else{
bNeedAllocateNewSpace = true;
CUDA_CHECK(hipMalloc(&tmp_work_space, sz_wkspace));
}
CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle, kscalar_one, pInputDesc, xs, pFilterDesc, fx,
pConvDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, tmp_work_space, sz_wkspace, kscalar_zero, pOutputDesc, fy));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pOutputDesc));
CHECK_CUDNN(cudnnDestroyFilterDescriptor(pFilterDesc));
CHECK_CUDNN(cudnnDestroyConvolutionDescriptor(pConvDesc));
if (bNeedAllocateNewSpace)
CUDA_CHECK(hipFree(tmp_work_space));
*/
}
void conv1dwide(const int n, const int m, const cnn::real* xs,
const int k, const cnn::real *fx, cnn::real *fy)
{
thrust::device_vector<cnn::real> dv((m + k) * n, 0.0);
thrust::device_ptr<cnn::real> vp = dv.data();
thrust::device_ptr<cnn::real> fp((cnn::real*)fx);
thrust::device_ptr<cnn::real> xp((cnn::real*)xs);
thrust::device_ptr<cnn::real> yp(fy);
for (size_t tk = 0; tk < k; tk++)
{
for (size_t j = 0; j < m; j++)
thrust::transform(xp + j * n, xp + (j + 1) * n, fp + tk * n, vp + tk * n + j * n, thrust::multiplies<cnn::real>());
}
thrust::copy(vp, vp + (m + k) * n, thrust::device_pointer_cast(fy));
}
void conv1dwide_backward(const int i, const int n, const int m, const cnn::real* xs, const int k, const cnn::real *fx, const cnn::real* dEdf, cnn::real *dEdx)
{
thrust::device_vector<cnn::real> dv(m * n, 0.0);
thrust::device_ptr<cnn::real> vp = dv.data();
thrust::device_ptr<cnn::real> fp((cnn::real*)fx);
thrust::device_ptr<cnn::real> xp((cnn::real*)xs);
thrust::device_ptr<cnn::real> d((cnn::real*)dEdf);
thrust::device_ptr<cnn::real> yp(dEdx);
for (size_t tk = 0; tk < k; tk++)
{
if (i == 0) { // derivative wrt input x
for (size_t j = 0; j < m; j++)
thrust::transform(d + j * n + tk*n, d + (j + 1) * n + tk*n, fp + tk * n, dv.data() + j * n, thrust::multiplies<cnn::real>());
}
else { // derivative wrt filter f
for (size_t j = 0; j < m; j++)
thrust::transform(d + j * n + tk*n, d + (j + 1) * n + tk*n, xp + j * n, dv.data() + tk * n, thrust::multiplies<cnn::real>());
}
}
if (i == 0)
thrust::transform(dv.data(), dv.data() + m * n, yp, yp, thrust::plus<cnn::real>());
else
thrust::transform(dv.data(), dv.data() + k * n, yp, yp, thrust::plus<cnn::real>());
}
void addVectorToAllColumns(const int n, const cnn::real * xs, const int m, const cnn::real* fx, cnn::real *fy)
{
thrust::device_ptr<cnn::real> fp((cnn::real*)fx);
thrust::device_ptr<cnn::real> xp((cnn::real*)xs);
thrust::device_ptr<cnn::real> yp(fy);
for (size_t j = 0; j < n / m; j++)
thrust::transform(xp + j * m, xp + (j + 1) * m, fp, yp + j * m, thrust::plus<cnn::real>());
}
void addVectorToAllColumns_backward(const int i, const int r, const int c, const cnn::real* dEdf, cnn::real *dEdxi)
{
thrust::device_ptr<const cnn::real> dp(dEdf);
thrust::device_ptr<cnn::real> dx(dEdxi);
if (i == 0)
{
// x
thrust::transform(dp, dp + r * c, dx, dx, thrust::plus<cnn::real>());
}
else
{
// bias
for (int k = 0; k < c; k++)
thrust::transform(dp + k * r, dp + (k + 1)*r, dx, dx, thrust::plus<cnn::real>());
}
}
/**
stride : the jump step
*/
void foldRows(const int n, const int m, const cnn::real *xs, const int stride, const int orows, cnn::real *fy)
{
thrust::device_ptr<cnn::real> xp((cnn::real*)xs), pp;
thrust::device_ptr<cnn::real> yp(fy);
thrust::host_vector<cnn::real> vo(orows * m);
pp = xp;
for (size_t j = 0; j < m; j++)
{
for (size_t r = 0; r < orows; r++)
{
vo[j * orows + r] = thrust::reduce(pp, pp + stride);
pp += stride;
}
}
}
void foldRows_backward(const int orows, const cnn::real* dEdf, const int n, const int m, cnn::real *fy)
{
thrust::device_ptr<cnn::real> dp((cnn::real*)dEdf);
thrust::device_ptr<cnn::real> yp(fy);
for (int i = 0; i < orows; ++i)
{
int stride = n / orows;
for (int j = 0; j < m; j++)
{ // loop over columns
for (int k = 0; k < stride; k++)
{
*(yp + i * stride + k + j * n) += *(dp + i + j * n);
}
}
}
}
void kMaxPooling(const int n, const int m, const cnn::real *xs, const int k, cnn::real *fy, int* aux_mem)
{
thrust::device_ptr<cnn::real> xp((cnn::real*)xs), pp;
thrust::device_ptr<cnn::real> yp(fy);
thrust::device_vector<cnn::real> vo(m);
thrust::device_vector<cnn::real> vp(k);
pp = xp;
int* maxmap = static_cast<int*>(aux_mem);
size_t mi = 0;
for (unsigned i = 0; i < n; ++i) {
for (size_t j = 0; j < m; j++)
vo[j] = (*(pp + i + j * n));
thrust::sort(thrust::device, vo.data(), vo.data() + m);
size_t mk = 0;
for (int j = 0; j < m; j++)
{
if (mk == k)
break;
if (*(pp + i + j * n) >= vo[m - k])
{
*(yp + i + mk * n) = *(pp + i + j*n);
hipMemcpy(&maxmap[mi], &j, sizeof(int), hipMemcpyHostToDevice);
mi++;
mk++;
}
}
}
}
void kMaxPooling_backward(const int n, const int m, const cnn::real *xs, const int k, const cnn::real * dEdf, cnn::real *dEdxi, const int* aux_mem)
{
const int* maxmap = aux_mem;
int mk = 0;
int oj;
thrust::device_ptr<const cnn::real> xp(xs);
thrust::device_ptr<const cnn::real> dp(dEdf);
thrust::device_ptr<cnn::real> yp(dEdxi);
thrust::host_vector<int> hv(n, 0);
hipMemcpy(hv.data(), maxmap, sizeof(int)*n, hipMemcpyDeviceToHost);
for (unsigned i = 0; i < n; ++i) {
for (unsigned j = 0; j < k; ++j) {
oj = hv[mk++];
if (oj < k && oj >= 0){
thrust::transform(dp + i + j * n, dp + i + j * n + 1, yp + i + oj * n, yp + i + oj * n, thrust::plus<cnn::real>());
}
}
}
}
} // namespace gpu
} // namespace cnn
| 0d207d48760f19e047bb7ec9a8c155d116d24669.cu | #include "cnn/cuda.h"
#include "cnn/gpu-ops.h"
#include "cnn/gpu-kernels.h"
#include "cnn/functors.h"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include "gpu-ops.cuh"
namespace cnn {
namespace gpu {
// this wraps kernel dispatches for various operations (preventing us from
// having to compile a version of nodes.cc with NVCC)
void saxpy_fast(cnn::real A, thrust::device_vector<cnn::real>& X, thrust::device_vector<cnn::real>& Y)
{
// Y <- A * X + Y
thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(A));
}
void set_to_value_of(int n, cnn::real* x0, cnn::real val)
{
thrust::device_ptr<cnn::real> dev_ptr = thrust::device_pointer_cast(x0);
thrust::fill(thrust::device, dev_ptr, dev_ptr + n, val);
}
void set_to_value_of(int n, cnn::real* x0, cnn::real *val) {
thrust::device_ptr<cnn::real> dev_ptr = thrust::device_pointer_cast(x0);
thrust::device_ptr<cnn::real> src_dev_ptr = thrust::device_pointer_cast(val);
thrust::copy(src_dev_ptr, src_dev_ptr + n, dev_ptr);
}
void vpairwise_rank_loss(int n, cnn::real margin, const cnn::real* xgood, const cnn::real* xbad, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
binaryExprKernel<<<tb.first, tb.second>>>(n, xgood, xbad, y, FPairwiseRankLoss(margin));
}
void vpairwise_rank_loss_backward(int n, bool d_wrt_correct, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
if (d_wrt_correct) {
accBinaryExprKernel<<<tb.first, tb.second>>>(n, fx, dEdf, dEdx, FRectifyNegateBackward());
} else {
accBinaryExprKernel<<<tb.first, tb.second>>>(n, fx, dEdf, dEdx, FRectifyBackward());
}
}
void vcwise_product(int n, const cnn::real* x0, const cnn::real* x1, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
binaryExprKernel<<<tb.first, tb.second>>>(n, x0, x1, y, FProduct());
}
// Elementwise (coefficient-wise) vector ops.  Each host wrapper derives a 1-D
// launch configuration from SizeToBlockThreadPair(n) and dispatches a functor
// kernel over n elements.  The *_backward variants launch the acc* kernels,
// which by naming convention accumulate into the destination gradient buffer
// rather than overwriting it (kernels defined earlier in this file).
void vcwise_product_backward(int n, const cnn::real* dEdy, const cnn::real* x_other, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel<<<tb.first, tb.second>>>(n, dEdy, x_other, dEdx, FProduct());
}
// y = x0 / x1, elementwise.
void vcwise_quotient(int n, const cnn::real* x0, const cnn::real* x1, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
binaryExprKernel << <tb.first, tb.second >> >(n, x0, x1, y, FQuotient());
}
void vcwise_quotient_backward(int n, const cnn::real* dEdy, const cnn::real* x_other, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, dEdy, x_other, dEdx, FQuotient());
}
// y = c - x, elementwise (c is a host scalar baked into the functor).
void vconstant_minusx(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMinus(c));
}
void vconstant_minusx_backward(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
accUnaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMinus(c));
}
// y = c * x, elementwise.
void vconstant_multiplyx(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMultiply(c));
}
void vconstant_multiplyx_backward(int n, cnn::real c, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
accUnaryExprKernel << <tb.first, tb.second >> >(n, x, y, FConstantMultiply(c));
}
// y = exp(x), elementwise.
void vexp(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FExp());
}
// y = -x, elementwise.
void vnegate(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel<<<tb.first, tb.second>>>(n, x, y, FNegate());
}
void vnegate_backward(int n, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accUnaryExprKernel<<<tb.first, tb.second>>>(n, dEdf, dEdx, FNegate());
}
// Rectified linear unit, elementwise.
void vrelu(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel<<<tb.first, tb.second>>>(n, x, y, FRectify());
}
void vrelu_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel<<<tb.first, tb.second>>>(n, fx, dEdf, dEdx, FRectifyBackward());
}
// Exponential linear unit (ELU) with the given scale, elementwise.
void vexponential_linear_units(int n, const cnn::real* x, const cnn::real scale, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel << <tb.first, tb.second >> >(n, x, y, FExponentialLinearUnits(scale));
}
void vexponential_linear_units_backward(int n, const cnn::real* fx, const cnn::real* dEdf, const cnn::real scale, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, fx, dEdf, dEdx, FExponentialLinearUnitsBackward(scale));
}
// y = tanh(x), elementwise.
void vtanh(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel<<<tb.first, tb.second>>>(n, x, y, FTanh());
}
void vtanh_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel<<<tb.first, tb.second>>>(n, fx, dEdf, dEdx, FTanhBackward());
}
// y = log(x), elementwise.
void vlog(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel<<<tb.first, tb.second>>>(n, x, y, FLog());
}
void vlog_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel<<<tb.first, tb.second>>>(n, fx, dEdf, dEdx, FLogBackward());
}
// Logistic sigmoid, elementwise.
void vlogistic(int n, const cnn::real* x, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
unaryExprKernel<<<tb.first, tb.second>>>(n, x, y, FLogisticSigmoid());
}
void vlogistic_backward(int n, const cnn::real* fx, const cnn::real* dEdf, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel<<<tb.first, tb.second>>>(n, fx, dEdf, dEdx, FLogisticSigmoidBackward());
}
// Backward pass of the squared-Euclidean-distance node; accumulates the
// gradient for input i into dEdx via the FEuclideanBackward functor.
void sqeucdist_backward(int n, const cnn::real* dEdy, const cnn::real* x0, const cnn::real* x1, cnn::real* dEdx, int i) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel<<<tb.first, tb.second>>>(n, x0, x1, dEdx, FEuclideanBackward(i, dEdy));
}
// In-place SGD step with L2 regularization; scale and lambda passed by value.
void sgd_update(int n, const cnn::real* g, cnn::real* x, cnn::real scale, cnn::real lambda) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, x, g, x, FL2SGDUpdate(lambda, scale));
}
// Overload taking scale/lambda as pointers; the functor reads them inside the
// kernel, so they must be dereferenceable on the device.
void sgd_update(int n, const cnn::real* g, cnn::real* x, cnn::real* scale, cnn::real* lambda) {
auto tb = SizeToBlockThreadPair(n);
accBinaryExprKernel << <tb.first, tb.second >> >(n, x, g, x, FL2SGDUpdatePtrArguments (lambda, scale));
}
// SGD with a momentum buffer v, updated alongside x.
void sgd_momentum_update(int n, const cnn::real* g, cnn::real* x, cnn::real* v, cnn::real scale, cnn::real lambda, cnn::real momentum) {
auto tb = SizeToBlockThreadPair(n);
accTripletExprKernel << <tb.first, tb.second >> >(n, x, g, v, x, FL2SGDMomentumUpdate(lambda, scale, momentum));
}
// RMSProp: r <- rho*r + (1-rho)*||g||^2 (norm precomputed by the caller), then
// an SGD step with the learning rate divided by sqrt(r + epsilon).
// NOTE(review): r is dereferenced on the host here, so it must be a host (or
// managed) pointer, unlike the device arrays g and x -- confirm with callers.
void rmsprop_update(int n, const cnn::real* g, cnn::real* x, cnn::real *r, cnn::real scale, cnn::real lambda, cnn::real rho, cnn::real epsilon, cnn::real grd_squared_norm) {
auto tb = SizeToBlockThreadPair(n);
/// it may be more efficient to compute in cpu and not do reduce in gpu, but my observation is not
/// that case
*r = rho * (*r) + (1 - rho) * grd_squared_norm;
cnn::real den = sqrt(*r + epsilon);
accBinaryExprKernel << <tb.first, tb.second >> >(n, x, g, x, FL2SGDUpdate(lambda, scale / den));
//CUDA_CHECK(cudaFree(sqnorm));
}
/** followed some examples of using thrust at
https://github.com/OrangeOwlSolutions/Thrust/blob/master/Calculating_the_norm_of_arrays.cu
*/
/// this is old code that computes gradient norm for every parameter
/*
void rmsprop_momentum_update(int n, const cnn::real* g, cnn::real* x, cnn::real* v, cnn::real *r, cnn::real scale, cnn::real lambda, cnn::real momentum, cnn::real rho, cnn::real epsilon) {
auto tb = SizeToBlockThreadPair(n);
/// it may be more efficient to compute in cpu and not do reduce in gpu, but my observation is not
/// that case
cnn::real squared_norm = thrust::transform_reduce(thrust::device_pointer_cast(g), thrust::device_pointer_cast(g + n), FSquare(), (cnn::real)0.0, thrust::plus<cnn::real>());
*r = rho * (*r) + (1 - rho) * squared_norm;
cnn::real den = sqrt(*r + epsilon);
accTripletExprKernel << <tb.first, tb.second >> >(n, x, g, v, x, FL2SGDMomentumUpdate(lambda, scale / den, momentum));
//CUDA_CHECK(cudaFree(sqnorm));
}
*/
/// this is a newer code that uses gradient norms computed elsewhere.
/// potential speed-up can be achieved to compute all of gradient norms in GPU and then transfer them to
/// CPU in a bulk.
// RMSProp with momentum.  The squared gradient norm is precomputed by the
// caller (see the comment above) and folded into the running average r on the
// host, then a momentum SGD step is launched with the rescaled learning rate.
// NOTE(review): r is dereferenced on the host -- it must be a host/managed
// pointer, unlike the device arrays g, x and v.
void rmsprop_momentum_update(int n, const cnn::real* g, cnn::real* x, cnn::real* v, cnn::real *r, cnn::real scale, cnn::real lambda, cnn::real momentum, cnn::real rho, cnn::real epsilon, cnn::real grd_squared_norm) {
auto tb = SizeToBlockThreadPair(n);
/// it may be more efficient to compute in cpu and not do reduce in gpu, but my observation is not
/// that case
*r = rho * (*r) + (1 - rho) * grd_squared_norm;
cnn::real den = sqrt(*r + epsilon);
accTripletExprKernel << <tb.first, tb.second >> >(n, x, g, v, x, FL2SGDMomentumUpdate(lambda, scale / den, momentum));
//CUDA_CHECK(cudaFree(sqnorm));
}
// Squared Euclidean distance between device vectors x0 and x1, written to y
// by the ker_sqeucdist reduction kernel.
void sqeucdist(int n, const cnn::real* x0, const cnn::real *x1, cnn::real* y) {
auto tb = SizeToBlockThreadPair(n);
ker_sqeucdist<<<tb.first,tb.second>>>(n, x0, x1, y);
}
// L2-norm style reduction of x0 into y; the 'square' and 'accumulate' flags
// are forwarded unchanged to ker_l2_norm_reducer.
void l2_norm_reducer(int n, const cnn::real* x0, cnn::real* y, bool square, bool accumulate) {
auto tb = SizeToBlockThreadPair(n);
ker_l2_norm_reducer<<<tb.first,tb.second>>>(n, x0, y, square, accumulate);
}
// Computes res = sqrt(sum_i x0[i]^2) for a device array x0 of length n,
// reducing on the GPU via thrust and taking the square root on the host.
void sqrt_of_l2_norm_reducer(int n, cnn::real* x0, cnn::real& res)
{
thrust::device_ptr<cnn::real> dv_ptr = thrust::device_pointer_cast(x0);
FSquare unary_op;
thrust::plus<cnn::real> binary_op;
// Fix: thrust::transform_reduce deduces its accumulation type from the init
// argument.  A bare 0.0 (double) conflicted with the
// thrust::plus<cnn::real> functor when cnn::real is float; cast the init to
// cnn::real, matching the transform_reduce call in the commented-out
// rmsprop_momentum_update earlier in this file.
res = std::sqrt(thrust::transform_reduce(dv_ptr, dv_ptr + n, unary_op, (cnn::real)0.0, binary_op));
}
// Launches the _vector_sum kernel over a (rows x cols) matrix a, writing the
// per-column (isColWise) or per-row sums into c.  The grid is sized by the
// number of reductions being produced.  The event record/synchronize makes
// this call fully blocking.
void vector_sum(int rows, int cols, const cnn::real * a, cnn::real* c, const bool isColWise)
{
assert(rows > 0 && cols > 0); // converting from size_t to int may cause overflow
int m = cols;
int n = rows;
cudaEvent_t done = nullptr;
int blocksPerGrid = 0;
if (isColWise) // col-wise
{
blocksPerGrid = (int)ceil(1.0 * m / MAX_THREADS_PER_BLOCK);
}
else
{
blocksPerGrid = (int)ceil(1.0 * n / MAX_THREADS_PER_BLOCK);
}
cudaEventCreate(&done);
_vector_sum<cnn::real> << <blocksPerGrid, MAX_THREADS_PER_BLOCK, 0, cudaStreamDefault >> >(c, a, n, m, isColWise);
cudaEventRecord(done);
cudaEventSynchronize(done);
cudaEventDestroy(done);
}
// Launches _vector_add_const: adds the scalar b (a 1x1 "matrix") to every
// element of the (rows x cols) matrix a, writing into c.  Blocking call
// (event synchronize).
void vector_add_const(int rows, int cols, const cnn::real * a, int brow, int bcol, const cnn::real* b, cnn::real * c, bool isColWise)
{
assert(rows > 0 && cols > 0); // converting from size_t to int may cause overflow
int m = cols;
int n = rows;
// b must be a scalar: 1x1 (brow == bcol) or a single row.
if (brow != bcol && brow != 1)
cuda_exception("const dimension has to be a scalar");
cudaEvent_t done = nullptr;
int blocksPerGrid = 0;
if (isColWise) // col-wise
{
blocksPerGrid = (int)ceil(1.0 * m / MAX_THREADS_PER_BLOCK);
}
else
{
blocksPerGrid = (int)ceil(1.0 * n / MAX_THREADS_PER_BLOCK);
}
cudaEventCreate(&done);
_vector_add_const<cnn::real> << <blocksPerGrid, MAX_THREADS_PER_BLOCK, 0, cudaStreamDefault >> >(c, a, n, m, b, isColWise);
cudaEventRecord(done);
cudaEventSynchronize(done);
cudaEventDestroy(done);
}
/// Multiplies each row of b (brow x bcol) elementwise by the row vector a
/// (1 x acol, acol == bcol), in place, via the _rowElementMultiplyWith kernel.
/// Aborts if a is not a row vector matching b's column count.  Blocking call.
void row_element_multiply_with(int arow, int acol, const cnn::real * a, int brow, int bcol, cnn::real * b)
{
if (arow != 1 || acol != bcol)
{
abort();
}
int N = brow;
int M = acol;
// NOTE(review): the grid covers only M (columns); the kernel presumably
// loops over the N rows internally -- confirm against _rowElementMultiplyWith.
int blocksPerGrid = (int)ceil(1.0 * M / MAX_THREADS_PER_BLOCK);
cudaEvent_t done = nullptr;
cudaEventCreate(&done);
_rowElementMultiplyWith<cnn::real> << <blocksPerGrid, MAX_THREADS_PER_BLOCK >> >(b, a, N, M);
cudaEventRecord(done);
cudaEventSynchronize(done);
cudaEventDestroy(done);
}
/**
log-softmax operation using cuDNN.
Note that cuDNN uses row-major layout,
so the N here is col.
*/
// Column-wise log-softmax via cuDNN.  The column-major (row x col) matrix is
// presented to cuDNN (row-major NCHW) as n=col instances of width w=row, so
// CUDNN_SOFTMAX_MODE_INSTANCE normalizes each column independently.
// y is overwritten (beta = 0).
void logsoftmax(int row, int col, const cnn::real* x0, cnn::real* y)
{
cudnnTensorDescriptor_t pInputDesc;
int n = col; int c = 1; int h = 1; int w = row;
cnn::real one = 1.0, zero = 0.0;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_INSTANCE,
&one, pInputDesc, x0,
&zero, pInputDesc, y));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
/*
old code
cudaStream_t t_stream = cudaStreamDefault;
int N = col;
int M = row;
cudaEvent_t done = nullptr;
cudaEventCreate(&done);
/// TO-DO: The N is the number of columns and is also the number of blocks. For small N, it is fine. For very large N, it may slow down computation.
_assignColumnwiseLogSoftmaxOf<cnn::real> << <N, 512, 0, t_stream >> >(x0, y, N, M);
cudaEventRecord(done);
cudaEventSynchronize(done);
cudaEventDestroy(done);
*/
}
// Backward of the column-wise log-softmax via cuDNN.  fx is the forward
// output, dEdf the incoming gradient; the result is ACCUMULATED into dEdx
// (both alpha and beta are 1).
void logsoftmax_backward(int row, int col, const cnn::real *fx, const cnn::real *dEdf, cnn::real *dEdx)
{
cudnnTensorDescriptor_t pInputDesc;
int n = col; int c = 1; int h = 1; int w = row;
cnn::real one = 1.0;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSoftmaxBackward(cudnn_handle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_INSTANCE,
&one, pInputDesc, fx, pInputDesc, dEdf,
&one, pInputDesc, dEdx));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
/*
old code
vexp(row * col, fx, gpu_softmax);
vector_sum(row, col, dEdf, grd, true);
row_element_multiply_with(1, col, grd, row, col, gpu_softmax);
auto tb = SizeToBlockThreadPair(col * row);
accBinaryExprKernel << <tb.first, tb.second >> >(col * row, dEdf, gpu_softmax, dEdx, FSubtract());
*/
}
/**
softmax operation using cuDNN.
Note that cuDNN uses row-major layout,
so the N here is col.
*/
// Column-wise softmax via cuDNN.  cuDNN tensors are row-major NCHW, so the
// column-major (row x col) matrix is described as col instances of width row;
// CUDNN_SOFTMAX_MODE_INSTANCE then normalizes each column independently.
// y is overwritten (beta = 0).
void softmax(int row, int col, const cnn::real* x0, cnn::real* y)
{
cnn::real alpha = 1.0, beta = 0.0;
cudnnTensorDescriptor_t desc;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&desc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, cudnnDataType, col, 1, 1, row));
CHECK_CUDNN(cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE,
&alpha, desc, x0,
&beta, desc, y));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(desc));
}
// Backward of the column-wise softmax via cuDNN; the result is ACCUMULATED
// into dEdx (alpha and beta both 1).
void softmax_backward(int row, int col, const cnn::real *fx, const cnn::real *dEdf, cnn::real *dEdx)
{
cudnnTensorDescriptor_t pInputDesc;
int n = col; int c = 1; int h = 1; int w = row;
cnn::real one = 1.0;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSoftmaxBackward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE,
&one, pInputDesc, fx, pInputDesc, dEdf,
&one, pInputDesc, dEdx));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
}
/*
old implementation
void softmax(int row, int col, const cnn::real* x0, cnn::real* y)
{
cudaStream_t t_stream = cudaStreamDefault;
int N = col;
int M = row;
cudaEvent_t done = nullptr;
cudaEventCreate(&done);
_assignColumnwiseSoftmaxOf<cnn::real> << <N, MAX_THREADS_PER_BLOCK, 0, t_stream >> >(x0, y, N, M);
cudaEventRecord(done);
cudaEventSynchronize(done);
cudaEventDestroy(done);
}
///
/// see http://research.microsoft.com/pubs/226641/CNTKBook-20160217..pdf
/// input gradient += (\frac{\partial J}{\partial v_{ij}} - \sum_r \frac{\partial J}{\partial v_{rj} v_{rj}) v_{ij}
void softmax_backward(int row, int col, const cnn::real *fx, const cnn::real *dEdf, cnn::real *dEdx)
{
cudaStream_t t_stream = cudaStreamDefault;
cudaEvent_t done = nullptr;
cudaEventCreate(&done);
_assignColumnwiseSoftmaxOfBackward<cnn::real> << <col, MAX_THREADS_PER_BLOCK, 0, t_stream >> >(fx, dEdf, dEdx, col, row);
cudaEventRecord(done);
cudaEventSynchronize(done);
cudaEventDestroy(done);
}
*/
// adapted from NVIDIA example
// Fused "pick negative log softmax": computes lz = log(sum_j exp(x0[j]))
// via a max-shifted reduction in a 256-slot shared buffer, then writes
// logz[0] = lz and res[0] = lz - x0[elem_idx].  If launched with several
// blocks, each block redundantly computes and writes the same values.
__global__ void ker_pnlsoftmax(int n, int elem_idx, const cnn::real *x0, cnn::real* res, cnn::real* logz) {
__shared__ cnn::real buf[256];
// Phase 1: per-slot running maxima over a 256-stride pass of x0.
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real me = __int_as_float(0xff800000); // -inf bit pattern (assumes cnn::real is float)
for (int pos = i; pos < n; pos += 256) {
const cnn::real d = x0[pos];
me = d > me ? d : me;
}
buf[i] = me;
}
// Tree-reduce the 256 partial maxima down to buf[0].
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] = buf[i] > buf[stride + i] ? buf[i] : buf[stride + i];
}
__syncthreads();
const cnn::real max_elem = buf[0];
// Bug fix: all threads must finish reading buf[0] before any thread starts
// overwriting buf[] with the exp-sum partials below; without this barrier a
// fast thread could clobber buf[0] first (shared-memory race).
__syncthreads();
// Phase 2: shifted exponential sums, then the same tree reduction.
for (int i = threadIdx.x; i < 256; i += blockDim.x) {
cnn::real sum = 0;
for (int pos = i; pos < n; pos += 256)
sum += expf(x0[pos] - max_elem);
buf[i] = sum;
}
for (int stride = 128; stride > 0; stride >>= 1) {
__syncthreads();
for (int i = threadIdx.x; i < stride; i += blockDim.x)
buf[i] += buf[stride + i];
}
__syncthreads();
if (threadIdx.x == 0) {
cnn::real lz = log(buf[0]) + max_elem;
logz[0] = lz;
res[0] = lz - x0[elem_idx];
}
}
// Launches the fused pick-negative-log-softmax kernel over the n logits in
// x0, producing y (the picked negative log-probability) and logz.
void pnlsoftmax(int n, int elem_idx, const cnn::real* x0, cnn::real* y, cnn::real* logz) {
auto launch_cfg = SizeToBlockThreadPair(n);
ker_pnlsoftmax<<<launch_cfg.first, launch_cfg.second>>>(n, elem_idx, x0, y, logz);
}
// Single-element fixup used by pnlsoftmax_backward: subtracts the incoming
// scalar gradient dEdf[0] from dEdxi[i].  Launched <<<1,1>>>.
__global__ void fixup_pnl(const cnn::real* dEdf, cnn::real* dEdxi, int i) {
if (threadIdx.x == 0) dEdxi[i] -= dEdf[0];
}
// Backward of pnlsoftmax: accumulates softmax(x0)*dEdf into dEdx for every
// element (logz and dEdf are read inside the functor on the device), then a
// one-thread fixup subtracts dEdf[0] at the picked index.
void pnlsoftmax_backward(int n, int elem_idx, const cnn::real* x0, const cnn::real* dEdf, const cnn::real* logz, cnn::real* dEdx) {
auto tb = SizeToBlockThreadPair(n);
accUnaryExprKernel<<<tb.first, tb.second>>>(n, x0, dEdx, FPtrNegLogSoftmaxBackward(logz, dEdf));
fixup_pnl<<<1,1>>>(dEdf, dEdx, elem_idx);
}
/**
conv1dnarrow using cuDNN, which is faster. However, cuDNN is row-major as [n,c,h,w].
We always assume column-major;
to accommodate cuDNN, n,c,h,w are interpreted as
[ncols, 1, nrows, 1].
Can only do 1-D convolution for each column.
# CUDNN/Caffe sizes for various arrays in column-major notation:
conv x: (N,C,H,W): W,H=image size, C=channels, N=instances
conv w: (K,C,Y,X): X,Y=filter size, C=input channels, K=output channels
conv y: (N,K,H-Y+1,W-X+1)
conv b: (1,K,1,1)
*/
// NOTE(review): the entire cuDNN implementation below is commented out, so
// this function is currently a NO-OP -- fy is never written.  Confirm whether
// callers rely on this or whether the body should be re-enabled.
void conv2dnarrow(const cnn::real* kscalar_one, const cnn::real* kscalar_zero,
const int xrow, const int xcol, const cnn::real* xs,
const int i_wkspace_sz, cnn::real* wkspace,
const int frow, const int fcol, const cnn::real *fx,
const int yrow, const int ycol, cnn::real *fy)
{
/*
cudnnTensorDescriptor_t pInputDesc;
cudnnTensorDescriptor_t pOutputDesc;
cudnnFilterDescriptor_t pFilterDesc = nullptr;
cudnnConvolutionDescriptor_t pConvDesc = nullptr;
int n = 1; int c = 1; int h = xcol; int w = xrow;
int k_pFilter_in = 1; /// number of output feature maps
int c_pFilter_in = 1; /// number of input feature maps
int h_pFilter_in = fcol;
int w_pFilter_in = frow;
int n_out, c_out, h_out, w_out;
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pInputDesc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&pOutputDesc));
CHECK_CUDNN(cudnnCreateFilterDescriptor(&pFilterDesc));
CHECK_CUDNN(cudnnCreateConvolutionDescriptor(&pConvDesc));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pInputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n, c, h, w));
CHECK_CUDNN(cudnnSetFilter4dDescriptor(pFilterDesc, cudnnDataType, k_pFilter_in, c_pFilter_in, h_pFilter_in, w_pFilter_in));
CHECK_CUDNN(cudnnSetConvolution2dDescriptor(pConvDesc, 0, 0, 1, 1, 1, 1, CUDNN_CONVOLUTION));
/// get the output layout
CHECK_CUDNN(cudnnGetConvolution2dForwardOutputDim(pConvDesc, pInputDesc, pFilterDesc, &n_out, &c_out, &h_out, &w_out));
assert(n_out * c_out * h_out * w_out == yrow * ycol);
CHECK_CUDNN(cudnnSetTensor4dDescriptor(pOutputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, n_out, c_out, h_out, w_out));
size_t sz_wkspace;
bool bNeedAllocateNewSpace = false;
cnn::real *tmp_work_space;
CHECK_CUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle, pInputDesc, pFilterDesc, pConvDesc, pOutputDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, &sz_wkspace));
if (sz_wkspace < i_wkspace_sz)
{
tmp_work_space = wkspace;
}
else{
bNeedAllocateNewSpace = true;
CUDA_CHECK(cudaMalloc(&tmp_work_space, sz_wkspace));
}
CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle, kscalar_one, pInputDesc, xs, pFilterDesc, fx,
pConvDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, tmp_work_space, sz_wkspace, kscalar_zero, pOutputDesc, fy));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pInputDesc));
CHECK_CUDNN(cudnnDestroyTensorDescriptor(pOutputDesc));
CHECK_CUDNN(cudnnDestroyFilterDescriptor(pFilterDesc));
CHECK_CUDNN(cudnnDestroyConvolutionDescriptor(pConvDesc));
if (bNeedAllocateNewSpace)
CUDA_CHECK(cudaFree(tmp_work_space));
*/
}
// "Wide" 1-D convolution built from thrust::transform products: for each
// filter tap tk and input column j, multiplies column j of xs (length n) by
// tap tk of fx into a temporary of size (m + k)*n, then copies it to fy.
// NOTE(review): thrust::transform OVERWRITES its output; taps with equal
// tk + j target the same region of dv, so later taps clobber earlier ones
// instead of accumulating -- confirm this is the intended semantics.
// NOTE(review): yp is declared but unused; the final copy casts fy directly.
void conv1dwide(const int n, const int m, const cnn::real* xs,
const int k, const cnn::real *fx, cnn::real *fy)
{
thrust::device_vector<cnn::real> dv((m + k) * n, 0.0);
thrust::device_ptr<cnn::real> vp = dv.data();
thrust::device_ptr<cnn::real> fp((cnn::real*)fx);
thrust::device_ptr<cnn::real> xp((cnn::real*)xs);
thrust::device_ptr<cnn::real> yp(fy);
for (size_t tk = 0; tk < k; tk++)
{
for (size_t j = 0; j < m; j++)
thrust::transform(xp + j * n, xp + (j + 1) * n, fp + tk * n, vp + tk * n + j * n, thrust::multiplies<cnn::real>());
}
thrust::copy(vp, vp + (m + k) * n, thrust::device_pointer_cast(fy));
}
// Backward of conv1dwide.  i selects which input the gradient is for:
// i == 0 -> gradient w.r.t. the input x (m*n values), otherwise gradient
// w.r.t. the filter f (k*n values).  Partial products are formed in the
// temporary dv and then accumulated (+=) into dEdx.
// NOTE(review): as in the forward pass, thrust::transform overwrites dv, so
// contributions from different tk (i == 0) or different j (i != 0) replace
// rather than sum -- confirm intended semantics.
void conv1dwide_backward(const int i, const int n, const int m, const cnn::real* xs, const int k, const cnn::real *fx, const cnn::real* dEdf, cnn::real *dEdx)
{
thrust::device_vector<cnn::real> dv(m * n, 0.0);
thrust::device_ptr<cnn::real> vp = dv.data();
thrust::device_ptr<cnn::real> fp((cnn::real*)fx);
thrust::device_ptr<cnn::real> xp((cnn::real*)xs);
thrust::device_ptr<cnn::real> d((cnn::real*)dEdf);
thrust::device_ptr<cnn::real> yp(dEdx);
for (size_t tk = 0; tk < k; tk++)
{
if (i == 0) { // derivative wrt input x
for (size_t j = 0; j < m; j++)
thrust::transform(d + j * n + tk*n, d + (j + 1) * n + tk*n, fp + tk * n, dv.data() + j * n, thrust::multiplies<cnn::real>());
}
else { // derivative wrt filter f
for (size_t j = 0; j < m; j++)
thrust::transform(d + j * n + tk*n, d + (j + 1) * n + tk*n, xp + j * n, dv.data() + tk * n, thrust::multiplies<cnn::real>());
}
}
if (i == 0)
thrust::transform(dv.data(), dv.data() + m * n, yp, yp, thrust::plus<cnn::real>());
else
thrust::transform(dv.data(), dv.data() + k * n, yp, yp, thrust::plus<cnn::real>());
}
// Treats xs (n elements) as n/m columns of length m and adds the vector fx
// (length m) to each column, writing the result into fy column by column.
void addVectorToAllColumns(const int n, const cnn::real * xs, const int m, const cnn::real* fx, cnn::real *fy)
{
thrust::device_ptr<cnn::real> fp((cnn::real*)fx);
thrust::device_ptr<cnn::real> xp((cnn::real*)xs);
thrust::device_ptr<cnn::real> yp(fy);
for (size_t j = 0; j < n / m; j++)
thrust::transform(xp + j * m, xp + (j + 1) * m, fp, yp + j * m, thrust::plus<cnn::real>());
}
// Backward of addVectorToAllColumns for an (r x c) gradient dEdf.
// i == 0: gradient w.r.t. the matrix input -- accumulate dEdf into dEdxi.
// i != 0: gradient w.r.t. the broadcast bias vector -- accumulate each of
// the c columns of dEdf into the length-r vector dEdxi.
void addVectorToAllColumns_backward(const int i, const int r, const int c, const cnn::real* dEdf, cnn::real *dEdxi)
{
thrust::device_ptr<const cnn::real> dp(dEdf);
thrust::device_ptr<cnn::real> dx(dEdxi);
if (i == 0)
{
// x
thrust::transform(dp, dp + r * c, dx, dx, thrust::plus<cnn::real>());
}
else
{
// bias
for (int k = 0; k < c; k++)
thrust::transform(dp + k * r, dp + (k + 1)*r, dx, dx, thrust::plus<cnn::real>());
}
}
/**
stride : the jump step
*/
void foldRows(const int n, const int m, const cnn::real *xs, const int stride, const int orows, cnn::real *fy)
{
// Folds each column of xs by summing groups of `stride` consecutive
// elements, producing an (orows x m) result:
//   fy[j * orows + r] = sum of the r-th group of `stride` elements of column j.
thrust::device_ptr<cnn::real> xp((cnn::real*)xs), pp;
thrust::device_ptr<cnn::real> yp(fy);
thrust::host_vector<cnn::real> vo(orows * m);
pp = xp;
for (size_t j = 0; j < m; j++)
{
for (size_t r = 0; r < orows; r++)
{
vo[j * orows + r] = thrust::reduce(pp, pp + stride);
pp += stride;
}
}
// Bug fix: the previous version accumulated the sums into the host buffer
// vo and then discarded it, leaving fy untouched.  Copy the folded result
// to the device output.
thrust::copy(vo.begin(), vo.end(), yp);
}
// Backward of foldRows: each output-row gradient is broadcast (accumulated)
// to the `stride` input rows that were summed into it, column by column.
// NOTE(review): dEdf is indexed as dp + i + j * n, i.e. with a row stride of
// n, but the forward output has orows rows -- this looks like it should be
// j * orows; confirm against callers before relying on this path.
// NOTE(review): each *(yp...) += *(dp...) dereferences device pointers from
// the host, i.e. one device round-trip per element -- very slow path.
void foldRows_backward(const int orows, const cnn::real* dEdf, const int n, const int m, cnn::real *fy)
{
thrust::device_ptr<cnn::real> dp((cnn::real*)dEdf);
thrust::device_ptr<cnn::real> yp(fy);
for (int i = 0; i < orows; ++i)
{
int stride = n / orows;
for (int j = 0; j < m; j++)
{ // loop over columns
for (int k = 0; k < stride; k++)
{
*(yp + i * stride + k + j * n) += *(dp + i + j * n);
}
}
}
}
// k-max pooling over the columns of an (n x m) matrix: for each of the n
// rows, keeps the k largest of the m values (in their original column order)
// in fy, and records each kept value's source column index into aux_mem --
// k indices per row, n*k ints in total, consumed by kMaxPooling_backward.
// NOTE(review): this is a slow host-driven path -- every element read goes
// through a device_ptr dereference and each index is written with its own
// cudaMemcpy.
void kMaxPooling(const int n, const int m, const cnn::real *xs, const int k, cnn::real *fy, int* aux_mem)
{
thrust::device_ptr<cnn::real> xp((cnn::real*)xs), pp;
thrust::device_ptr<cnn::real> yp(fy);
thrust::device_vector<cnn::real> vo(m);
thrust::device_vector<cnn::real> vp(k);
pp = xp;
int* maxmap = static_cast<int*>(aux_mem);
size_t mi = 0;
for (unsigned i = 0; i < n; ++i) {
// Copy row i into vo and sort it ascending; vo[m - k] is then the k-th
// largest value, used as the selection threshold below.
for (size_t j = 0; j < m; j++)
vo[j] = (*(pp + i + j * n));
thrust::sort(thrust::device, vo.data(), vo.data() + m);
size_t mk = 0;
for (int j = 0; j < m; j++)
{
if (mk == k)
break;
if (*(pp + i + j * n) >= vo[m - k])
{
*(yp + i + mk * n) = *(pp + i + j*n);
cudaMemcpy(&maxmap[mi], &j, sizeof(int), cudaMemcpyHostToDevice);
mi++;
mk++;
}
}
}
}
// Backward of kMaxPooling: routes each kept value's gradient back to the
// column it came from, using the index map recorded by the forward pass.
void kMaxPooling_backward(const int n, const int m, const cnn::real *xs, const int k, const cnn::real * dEdf, cnn::real *dEdxi, const int* aux_mem)
{
const int* maxmap = aux_mem;
int mk = 0;
int oj;
thrust::device_ptr<const cnn::real> xp(xs);
thrust::device_ptr<const cnn::real> dp(dEdf);
thrust::device_ptr<cnn::real> yp(dEdxi);
// Bug fix: kMaxPooling records k column indices per row, i.e. n*k ints in
// total.  The previous code sized the host buffer as n and copied only n
// ints, then indexed it with mk running up to n*k - 1 -- an out-of-bounds
// read past element n of the host_vector.
thrust::host_vector<int> hv(n * k, 0);
cudaMemcpy(hv.data(), maxmap, sizeof(int) * n * k, cudaMemcpyDeviceToHost);
for (unsigned i = 0; i < n; ++i) {
for (unsigned j = 0; j < k; ++j) {
oj = hv[mk++];
// NOTE(review): oj is an original column index in [0, m); the upper
// bound here arguably should be m rather than k -- confirm intent.
if (oj < k && oj >= 0){
thrust::transform(dp + i + j * n, dp + i + j * n + 1, yp + i + oj * n, yp + i + oj * n, thrust::plus<cnn::real>());
}
}
}
}
} // namespace gpu
} // namespace cnn
|
6c97b326fe072860bd8df173575fe618aa0c9a67.hip | // !!! This is a file automatically generated by hipify!!!
#include "Bottleneck.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
// Elementwise add of an input image block into an output image block, HIP
// variant.  Both buffers live inside word-typed arenas; *_position are
// offsets in unsigned ints.  type == 0 reinterprets the payload as unsigned
// char pixels, otherwise as floats.  One thread per input element of the
// (width x height x channels) volume.
__global__ void addKernel(const unsigned int* device_data_input,
const unsigned int input_position, const unsigned int width, const unsigned int height, const unsigned int channels,
unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels,
const unsigned int type
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < width * height * channels) {
// Decompose the flat index into (channel, x, y); layout of the index
// space is channel-major, but the pixel data itself is interleaved
// (y * width * channels + x * channels + channel).
int current_channel = i / (width * height);
int current_idx = i % (width * height);
int current_x = (current_idx % width);
int current_y = (current_idx / width);
if (type == 0) {
unsigned char* input = (unsigned char*)&device_data_input[input_position];
unsigned char* output = (unsigned char*)&device_data_output[output_position];
// current_channel >= channels cannot occur given the bound on i above;
// the empty branch is kept as a guard.
if (current_channel >= channels) {
} else {
output[(current_y) * (output_width * output_channels) + (current_x) * output_channels + current_channel] += input[current_y * (width * channels) + current_x * channels + current_channel];
}
} else {
float* input = (float*)&device_data_input[input_position];
float* output = (float*)&device_data_output[output_position];
if (current_channel >= channels) {
} else {
output[(current_y) * (output_width * output_channels) + (current_x)*output_channels + current_channel] += input[current_y * (width * channels) + current_x * channels + current_channel];
}
}
}
}
// Host wrapper for addKernel (HIP): sizes a 1-D grid and checks the launch
// result.  Errors are reported to stderr but not propagated.
// NOTE(review): the grid is sized with output_channels while the kernel
// bounds its work by width*height*channels -- if output_channels < channels
// some elements get no thread; confirm the intended relationship.
void launch_add(const unsigned int* device_data_input,
const unsigned int input_position, const unsigned int width, const unsigned int height, const unsigned int channels,
unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels,
const unsigned int type
) {
hipError_t err = hipSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (width * height * output_channels + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( addKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, device_data_input, input_position, width, height, channels, device_data_output, output_position, output_width, output_height, output_channels, type);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed in addKernel (error code %s)\n", hipGetErrorString(err));
}
}
// Walks the three convolutions of a bottleneck block, caching each stage's
// output dimensions on the struct (conv{1,2,3}_output_dim) and returning the
// dimensions of the stage with the largest element count -- i.e. the size a
// shared intermediate buffer must accommodate.
vector3<unsigned int> bottleneck_get_max_size(struct bottleneck* b, struct vector2<unsigned int> input_dim) {
vector3<unsigned int> max_output_dim;
unsigned int max_output_size = 0;
vector3<unsigned int> output_dim_0 = conv2d_get_output_dim(&b->conv1, input_dim);
unsigned int output_size_0 = output_dim_0[0] * output_dim_0[1] * output_dim_0[2];
b->conv1_output_dim = output_dim_0;
max_output_dim = output_dim_0;
max_output_size = output_size_0;
// Stage 2 consumes only the spatial dims of stage 1's output.
vector3<unsigned int> output_dim_1 = conv2d_get_output_dim(&b->conv2, struct vector2<unsigned int>(output_dim_0[0], output_dim_0[1]));
unsigned int output_size_1 = output_dim_1[0] * output_dim_1[1] * output_dim_1[2];
if (output_size_1 > max_output_size) {
max_output_size = output_size_1;
max_output_dim = output_dim_1;
}
b->conv2_output_dim = output_dim_1;
vector3<unsigned int> output_dim_2 = conv2d_get_output_dim(&b->conv3, struct vector2<unsigned int>(output_dim_1[0], output_dim_1[1]));
unsigned int output_size_2 = output_dim_2[0] * output_dim_2[1] * output_dim_2[2];
if (output_size_2 > max_output_size) {
max_output_size = output_size_2;
max_output_dim = output_dim_2;
}
b->conv3_output_dim = output_dim_2;
return max_output_dim;
} | 6c97b326fe072860bd8df173575fe618aa0c9a67.cu | #include "Bottleneck.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
// Elementwise add of an input image block into an output image block.  Both
// buffers live inside word-typed arenas; *_position are offsets in unsigned
// ints.  type == 0 reinterprets the payload as unsigned char pixels,
// otherwise as floats.  One thread per input element of the
// (width x height x channels) volume.
__global__ void addKernel(const unsigned int* device_data_input,
const unsigned int input_position, const unsigned int width, const unsigned int height, const unsigned int channels,
unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels,
const unsigned int type
) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= width * height * channels) return;
// Decompose the flat index: channel-major index space over an interleaved
// pixel layout (y * width * channels + x * channels + channel).
int ch = idx / (width * height);
int plane_idx = idx % (width * height);
int x = plane_idx % width;
int y = plane_idx / width;
if (ch >= channels) return; // unreachable given the bound above; kept as a guard
int dst = y * (output_width * output_channels) + x * output_channels + ch;
int src = y * (width * channels) + x * channels + ch;
if (type == 0) {
unsigned char* in = (unsigned char*)&device_data_input[input_position];
unsigned char* out = (unsigned char*)&device_data_output[output_position];
out[dst] += in[src];
} else {
float* in = (float*)&device_data_input[input_position];
float* out = (float*)&device_data_output[output_position];
out[dst] += in[src];
}
}
// Host wrapper for addKernel: sizes a 1-D grid and checks the launch result.
// Errors are reported to stderr but not propagated.
// NOTE(review): the grid is sized with output_channels while the kernel
// bounds its work by width*height*channels -- if output_channels < channels
// some elements get no thread; confirm the intended relationship.
void launch_add(const unsigned int* device_data_input,
const unsigned int input_position, const unsigned int width, const unsigned int height, const unsigned int channels,
unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels,
const unsigned int type
) {
cudaError_t err = cudaSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (width * height * output_channels + threadsPerBlock - 1) / threadsPerBlock;
addKernel<<<blocksPerGrid, threadsPerBlock>>> (device_data_input, input_position, width, height, channels, device_data_output, output_position, output_width, output_height, output_channels, type);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed in addKernel (error code %s)\n", cudaGetErrorString(err));
}
}
// Walks the three convolutions of a bottleneck block, caching each stage's
// output dimensions on the struct (conv{1,2,3}_output_dim) and returning the
// dimensions of the stage with the largest element count -- i.e. the size a
// shared intermediate buffer must accommodate.
vector3<unsigned int> bottleneck_get_max_size(struct bottleneck* b, struct vector2<unsigned int> input_dim) {
vector3<unsigned int> max_output_dim;
unsigned int max_output_size = 0;
vector3<unsigned int> output_dim_0 = conv2d_get_output_dim(&b->conv1, input_dim);
unsigned int output_size_0 = output_dim_0[0] * output_dim_0[1] * output_dim_0[2];
b->conv1_output_dim = output_dim_0;
max_output_dim = output_dim_0;
max_output_size = output_size_0;
// Stage 2 consumes only the spatial dims of stage 1's output.
vector3<unsigned int> output_dim_1 = conv2d_get_output_dim(&b->conv2, struct vector2<unsigned int>(output_dim_0[0], output_dim_0[1]));
unsigned int output_size_1 = output_dim_1[0] * output_dim_1[1] * output_dim_1[2];
if (output_size_1 > max_output_size) {
max_output_size = output_size_1;
max_output_dim = output_dim_1;
}
b->conv2_output_dim = output_dim_1;
vector3<unsigned int> output_dim_2 = conv2d_get_output_dim(&b->conv3, struct vector2<unsigned int>(output_dim_1[0], output_dim_1[1]));
unsigned int output_size_2 = output_dim_2[0] * output_dim_2[1] * output_dim_2[2];
if (output_size_2 > max_output_size) {
max_output_size = output_size_2;
max_output_dim = output_dim_2;
}
b->conv3_output_dim = output_dim_2;
return max_output_dim;
} |
b0b4ca8234d3b0f1c4f4d91acdad7c05b88ecbf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cmax(float *d_in, float *max, int len)
{
extern __shared__ float smax[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
smax[tid] = d_in[i]>d_in[i+len] ? d_in[i] : d_in[i+len];
__syncthreads();
if(blockDim.x > 512 && tid<512) {if(smax[tid] < smax[tid+512]) smax[tid] = smax[tid+512];} __syncthreads();
if(blockDim.x > 256 && tid<256) {if(smax[tid] < smax[tid+256]) smax[tid] = smax[tid+256];} __syncthreads();
if(blockDim.x > 128 && tid<128) {if(smax[tid] < smax[tid+128]) smax[tid] = smax[tid+128];} __syncthreads();
if(blockDim.x > 64 && tid<64) {if(smax[tid] < smax[tid+64]) smax[tid] = smax[tid+64];} __syncthreads();
if(tid<32) {
if(blockDim.x > 32 && smax[tid] < smax[tid+32]) smax[tid] = smax[tid+32];
if(blockDim.x > 16 && smax[tid] < smax[tid+16]) smax[tid] = smax[tid+16];
if(blockDim.x > 8 && smax[tid] < smax[tid+8]) smax[tid] = smax[tid+8];
if(blockDim.x > 4 && smax[tid] < smax[tid+4]) smax[tid] = smax[tid+4];
if(blockDim.x > 2 && smax[tid] < smax[tid+2]) smax[tid] = smax[tid+2];
if(smax[tid] < smax[tid+1]) smax[tid] = smax[tid+1];
__syncthreads();
}
if(tid == 0 )
{
max[blockIdx.x] = smax[0];
}
} | b0b4ca8234d3b0f1c4f4d91acdad7c05b88ecbf8.cu | #include "includes.h"
// Block-wise max reduction: each thread first takes the max of d_in[i] and
// d_in[i+len], then the block tree-reduces in dynamic shared memory; thread 0
// writes the block maximum to max[blockIdx.x].  The caller must provide
// blockDim.x * sizeof(float) of dynamic shared memory.
__global__ void cmax(float *d_in, float *max, int len)
{
extern __shared__ float smax[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
smax[tid] = d_in[i]>d_in[i+len] ? d_in[i] : d_in[i+len];
__syncthreads();
if(blockDim.x > 512 && tid<512) {if(smax[tid] < smax[tid+512]) smax[tid] = smax[tid+512];} __syncthreads();
if(blockDim.x > 256 && tid<256) {if(smax[tid] < smax[tid+256]) smax[tid] = smax[tid+256];} __syncthreads();
if(blockDim.x > 128 && tid<128) {if(smax[tid] < smax[tid+128]) smax[tid] = smax[tid+128];} __syncthreads();
if(blockDim.x > 64 && tid<64) {if(smax[tid] < smax[tid+64]) smax[tid] = smax[tid+64];} __syncthreads();
if(tid<32) {
// Final warp-level phase.  Two fixes vs. the previous version:
// 1) accesses go through a volatile pointer so every read/write hits
//    shared memory instead of being cached in registers (required for the
//    classic warp-synchronous reduction);
// 2) the __syncthreads() that sat inside this divergent branch is removed:
//    a barrier not reached by all threads of the block is undefined
//    behavior, and it is unnecessary because only thread 0 -- itself
//    inside this branch -- reads smax[0] afterwards.
volatile float* vsmax = smax;
if(blockDim.x > 32 && vsmax[tid] < vsmax[tid+32]) vsmax[tid] = vsmax[tid+32];
if(blockDim.x > 16 && vsmax[tid] < vsmax[tid+16]) vsmax[tid] = vsmax[tid+16];
if(blockDim.x > 8 && vsmax[tid] < vsmax[tid+8]) vsmax[tid] = vsmax[tid+8];
if(blockDim.x > 4 && vsmax[tid] < vsmax[tid+4]) vsmax[tid] = vsmax[tid+4];
if(blockDim.x > 2 && vsmax[tid] < vsmax[tid+2]) vsmax[tid] = vsmax[tid+2];
if(vsmax[tid] < vsmax[tid+1]) vsmax[tid] = vsmax[tid+1];
}
if(tid == 0 )
{
max[blockIdx.x] = smax[0];
}
}
d110ccc70c882c0614f5b100d2e4f6b47afcaa88.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <cblas.h>
/*
CUDA Tutorial, matrix-matrix multiply
UC Berkeley Reactor Design and Neutronics Group
Ryan M. Bergmann - 1/22/2014
*/
void matmul_cpu(unsigned len, float* a , float* b , float* c){
// initialize local variable to hold values while the sum is done
float sum;
unsigned row,col,k;
for(col=0 ; col<len ; col++ ){ //scan the rows
for(row=0 ; row<len ; row++ ){ //scan the cols
// zero out sum
sum = 0;
// scan the row of a, the col of b
for(k=0;k<len;k++){
sum += a[ row * len + k ] * b[ k * len + col ];
}
// write final value into output array
c[ len * row + col ] = sum;
}
}
}
__global__ void matmul_kernel( unsigned len, float* a , float* b , float* c){
//
// THIS IS THE SIMPLE WAY TO DO IT, NOT THE ***FAST WAY*** -> uses 2*N^3 global loads
//
// get index in c
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//return if over the length
if(row>=len | col>=len){return;}
// initialize local variable to hold values while the sum is done
float sum = 0;
unsigned j;
// scan the row of a, the col of b
for(j=0;j<len;j++){
sum += a[ row * len + j ] * b[ j * len + col ];
}
// write final value into output array
c[ len * row + col ] = sum;
}
__global__ void matmul_kernel_shared( unsigned len, float* a , float* b , float* c){
// take advantage of data resue w/ shared memory. Uses tiles and loops over through them. Global loads now 2N^3/(blockDim.x*blockDim.y)?
// Programmed for, but **NOT TESTED** FOR cases where the block dimensions do not line up exactly with the matrix dimensions
// get index in c
int offset_x = blockIdx.x * blockDim.x;
int offset_y = blockIdx.y * blockDim.y;
int row = offset_y + threadIdx.y;
int col = offset_x + threadIdx.x;
//return if over the length
if(row>=len | col>=len){return;}
// initialize local variable to hold values while the sum is done
float sum = 0;
unsigned j,g,sub_a_row,sub_a_col,sub_b_row,sub_b_col,sub_lim_x,sub_lim_y;
unsigned n_blocks_x = ( len + blockDim.x - 1 ) / blockDim.x;
unsigned n_blocks_y = ( len + blockDim.y - 1 ) / blockDim.y;
// declare shared memory
extern __shared__ float sub_a[];
float* sub_b = &sub_a[blockDim.x*blockDim.y];
//have 0,0 thread load in data to shared
for(g=0 ; g < n_blocks_x ; g++){ // tile row
// compute the global indicies of this submatrix
sub_a_row = offset_y; //const
sub_a_col = g * blockDim.x ;
sub_b_row = g * blockDim.y ;
sub_b_col = offset_x; //const
// compute limits
sub_lim_x = min( len - sub_a_col , blockDim.x );
sub_lim_y = min( len - sub_b_row , blockDim.y );
// load shared memory
if( threadIdx.x+threadIdx.y == 0){
// load a row by row (saves programming another loop and dealing with another index, icky) also saves having to transpose the second matrix, also icky
for( j = 0 ; j < sub_lim_y ; j++){ // j is row
memcpy( &sub_a[ j*blockDim.x ], &a[ (sub_a_row+j)*len + sub_a_col ], sub_lim_x*sizeof(float)); // copy row
}
// load b row by row
for( j = 0 ; j < sub_lim_y ; j++){ // j is row
memcpy( &sub_b[ j*blockDim.x ], &b[ (sub_b_row+j)*len + sub_b_col ], sub_lim_x*sizeof(float)); // copy row
}
}
// sync, other threads need to wait for data
__syncthreads();
// scan the submatrix, computing partial sum
for( j=0 ; j < sub_lim_x ; j++ ){
sum += sub_a[ threadIdx.y*blockDim.x + j ] * sub_b[ j*blockDim.x + threadIdx.x ];
}
// sync threads again before moving on to next tile
__syncthreads();
}
// write final value into output array
c[ len * row + col ] = sum;
}
double get_time(){
return ((double)clock())/((double)CLOCKS_PER_SEC);
}
int main(){
// declare
float* a;
float* b;
float* c;
float* d_a;
float* d_b;
float* d_c;
unsigned len_a, len_b, j, k;
unsigned bytes_a, bytes_b, bytes_c, shared_mem_size;
dim3 NUM_THREADS, blks;
// timing variables
double time_gpu, time_gpu_shared, time_cpu, time_blas;
//open files, get lengths, make sure they are equal
FILE* af = fopen("a","r");
FILE* bf = fopen("b","r");
FILE* cf;
FILE* cff;
fscanf(af,"%u\n",&len_a);
fscanf(bf,"%u\n",&len_b);
printf("------ Matrix Dimensions ------\n");
printf("dims a,b = %u , %u\n",len_a,len_b);
assert(len_a==len_b);
bytes_a = len_a * len_a * sizeof(float);
bytes_b = len_b * len_b * sizeof(float);
bytes_c = len_b * len_b * sizeof(float);
//allocate arrays
a = (float*) malloc( bytes_a );
b = (float*) malloc( bytes_b );
c = (float*) malloc( bytes_b );
//allocate device arrays
hipMalloc( &d_a , bytes_a ); //must be pointer to the point, since the actual point value is being changed, not the value it points to
hipMalloc( &d_b , bytes_b );
hipMalloc( &d_c , bytes_c );
// read in data
for(j=0;j<len_a;j++){
for(k=0;k<len_a;k++){
fscanf(af,"%E \n",&a[j*len_a+k]); //row major
fscanf(bf,"%E \n",&b[j*len_a+k]);
}
}
// close files
fclose(af); fclose(bf);
// determine gpu parameters, print them
NUM_THREADS.x = NUM_THREADS.y = 16;
blks.x = blks.y = (len_a + NUM_THREADS.x - 1 ) / NUM_THREADS.x;
NUM_THREADS.z = blks.z = 1;
shared_mem_size = 2*NUM_THREADS.y*NUM_THREADS.x*sizeof(float);
printf("------- CUDA Parameters -------\n");
printf("NUM_THREADS(%4u,%4u, 0)\n blks(%4u,%4u, 0)\n",NUM_THREADS.x,NUM_THREADS.y,blks.x,blks.y);
printf("shared_mem_size = %u\n",shared_mem_size);
printf("-------------------------------\n");
// copy data to device
hipMemcpy( d_a , a , bytes_a , hipMemcpyHostToDevice );
hipMemcpy( d_b , b , bytes_b , hipMemcpyHostToDevice );
// launch cpu version to compare
time_cpu = get_time();
matmul_cpu(len_a, a, b, c);
time_cpu = get_time() - time_cpu;
printf("CPU - %9.8f seconds\n",time_cpu);
// launch BLAS version for fair comparison
time_blas = get_time();
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, len_a, len_a, len_a, 1.0, a, len_a, b, len_a, 0.0, c, len_a);
time_blas = get_time() - time_blas;
printf("BLAS - %9.8f seconds\n",time_blas);
//calculate the number of blocks from the number of threads
time_gpu = get_time();
hipLaunchKernelGGL(( matmul_kernel) , dim3(blks), dim3(NUM_THREADS) , 0, 0, len_a , d_a , d_b , d_c);
time_gpu = get_time() - time_gpu;
printf("GPU - %9.8f seconds\n",time_gpu);
// launch kernel for shared memory implementation
time_gpu_shared = get_time();
hipLaunchKernelGGL(( matmul_kernel_shared) , dim3(blks), dim3(NUM_THREADS) , shared_mem_size , 0, len_a , d_a , d_b , d_c);
time_gpu_shared = get_time() - time_gpu_shared;
printf("GPU, shared mem - %9.8f seconds\n",time_gpu_shared);
printf("-------------------------------\n");
// check for errors
if(hipPeekAtLastError()){
printf("CUDA ERROR, %s\n",hipGetErrorString(hipPeekAtLastError()));
return 1;
}
//copy c back, will be values from last GPU implementation
hipMemcpy( c , d_c , bytes_b , hipMemcpyDeviceToHost );
// write a,b,c to files in matrix format to be read by matlab for plotting as well as a regular flat file for python
printf("writing outputs...");
af = fopen("a_matlab","w");
bf = fopen("b_matlab","w");
cf = fopen("c_matlab","w");
cff = fopen("c","w");
fprintf(cff,"%d \n",len_a);
for(j=0;j<len_a;j++){
for(k=0;k<len_a;k++){
fprintf(af,"%10.8E ",a[j*len_a+k]); //row major
fprintf(bf,"%10.8E ",b[j*len_a+k]);
fprintf(cf,"%10.8E ",c[j*len_a+k]);
fprintf(cff,"%10.8E \n",c[j*len_a+k]);
}
fprintf(af,"\n");
fprintf(bf,"\n");
fprintf(cf,"\n");
}
fclose(af);
fclose(bf);
fclose(cf);
fclose(cff);
printf(" done.\n");
printf("-------------------------------\n");
// return zero if all ok
return 0;
}
| d110ccc70c882c0614f5b100d2e4f6b47afcaa88.cu | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <time.h>
#include <cblas.h>
/*
CUDA Tutorial, matrix-matrix multiply
UC Berkeley Reactor Design and Neutronics Group
Ryan M. Bergmann - 1/22/2014
*/
void matmul_cpu(unsigned len, float* a , float* b , float* c){
// initialize local variable to hold values while the sum is done
float sum;
unsigned row,col,k;
for(col=0 ; col<len ; col++ ){ //scan the rows
for(row=0 ; row<len ; row++ ){ //scan the cols
// zero out sum
sum = 0;
// scan the row of a, the col of b
for(k=0;k<len;k++){
sum += a[ row * len + k ] * b[ k * len + col ];
}
// write final value into output array
c[ len * row + col ] = sum;
}
}
}
__global__ void matmul_kernel( unsigned len, float* a , float* b , float* c){
//
// THIS IS THE SIMPLE WAY TO DO IT, NOT THE ***FAST WAY*** -> uses 2*N^3 global loads
//
// get index in c
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//return if over the length
if(row>=len | col>=len){return;}
// initialize local variable to hold values while the sum is done
float sum = 0;
unsigned j;
// scan the row of a, the col of b
for(j=0;j<len;j++){
sum += a[ row * len + j ] * b[ j * len + col ];
}
// write final value into output array
c[ len * row + col ] = sum;
}
__global__ void matmul_kernel_shared( unsigned len, float* a , float* b , float* c){
// take advantage of data resue w/ shared memory. Uses tiles and loops over through them. Global loads now 2N^3/(blockDim.x*blockDim.y)?
// Programmed for, but **NOT TESTED** FOR cases where the block dimensions do not line up exactly with the matrix dimensions
// get index in c
int offset_x = blockIdx.x * blockDim.x;
int offset_y = blockIdx.y * blockDim.y;
int row = offset_y + threadIdx.y;
int col = offset_x + threadIdx.x;
//return if over the length
if(row>=len | col>=len){return;}
// initialize local variable to hold values while the sum is done
float sum = 0;
unsigned j,g,sub_a_row,sub_a_col,sub_b_row,sub_b_col,sub_lim_x,sub_lim_y;
unsigned n_blocks_x = ( len + blockDim.x - 1 ) / blockDim.x;
unsigned n_blocks_y = ( len + blockDim.y - 1 ) / blockDim.y;
// declare shared memory
extern __shared__ float sub_a[];
float* sub_b = &sub_a[blockDim.x*blockDim.y];
//have 0,0 thread load in data to shared
for(g=0 ; g < n_blocks_x ; g++){ // tile row
// compute the global indicies of this submatrix
sub_a_row = offset_y; //const
sub_a_col = g * blockDim.x ;
sub_b_row = g * blockDim.y ;
sub_b_col = offset_x; //const
// compute limits
sub_lim_x = min( len - sub_a_col , blockDim.x );
sub_lim_y = min( len - sub_b_row , blockDim.y );
// load shared memory
if( threadIdx.x+threadIdx.y == 0){
// load a row by row (saves programming another loop and dealing with another index, icky) also saves having to transpose the second matrix, also icky
for( j = 0 ; j < sub_lim_y ; j++){ // j is row
memcpy( &sub_a[ j*blockDim.x ], &a[ (sub_a_row+j)*len + sub_a_col ], sub_lim_x*sizeof(float)); // copy row
}
// load b row by row
for( j = 0 ; j < sub_lim_y ; j++){ // j is row
memcpy( &sub_b[ j*blockDim.x ], &b[ (sub_b_row+j)*len + sub_b_col ], sub_lim_x*sizeof(float)); // copy row
}
}
// sync, other threads need to wait for data
__syncthreads();
// scan the submatrix, computing partial sum
for( j=0 ; j < sub_lim_x ; j++ ){
sum += sub_a[ threadIdx.y*blockDim.x + j ] * sub_b[ j*blockDim.x + threadIdx.x ];
}
// sync threads again before moving on to next tile
__syncthreads();
}
// write final value into output array
c[ len * row + col ] = sum;
}
double get_time(){
return ((double)clock())/((double)CLOCKS_PER_SEC);
}
int main(){
// declare
float* a;
float* b;
float* c;
float* d_a;
float* d_b;
float* d_c;
unsigned len_a, len_b, j, k;
unsigned bytes_a, bytes_b, bytes_c, shared_mem_size;
dim3 NUM_THREADS, blks;
// timing variables
double time_gpu, time_gpu_shared, time_cpu, time_blas;
//open files, get lengths, make sure they are equal
FILE* af = fopen("a","r");
FILE* bf = fopen("b","r");
FILE* cf;
FILE* cff;
fscanf(af,"%u\n",&len_a);
fscanf(bf,"%u\n",&len_b);
printf("------ Matrix Dimensions ------\n");
printf("dims a,b = %u , %u\n",len_a,len_b);
assert(len_a==len_b);
bytes_a = len_a * len_a * sizeof(float);
bytes_b = len_b * len_b * sizeof(float);
bytes_c = len_b * len_b * sizeof(float);
//allocate arrays
a = (float*) malloc( bytes_a );
b = (float*) malloc( bytes_b );
c = (float*) malloc( bytes_b );
//allocate device arrays
cudaMalloc( &d_a , bytes_a ); //must be pointer to the point, since the actual point value is being changed, not the value it points to
cudaMalloc( &d_b , bytes_b );
cudaMalloc( &d_c , bytes_c );
// read in data
for(j=0;j<len_a;j++){
for(k=0;k<len_a;k++){
fscanf(af,"%E \n",&a[j*len_a+k]); //row major
fscanf(bf,"%E \n",&b[j*len_a+k]);
}
}
// close files
fclose(af); fclose(bf);
// determine gpu parameters, print them
NUM_THREADS.x = NUM_THREADS.y = 16;
blks.x = blks.y = (len_a + NUM_THREADS.x - 1 ) / NUM_THREADS.x;
NUM_THREADS.z = blks.z = 1;
shared_mem_size = 2*NUM_THREADS.y*NUM_THREADS.x*sizeof(float);
printf("------- CUDA Parameters -------\n");
printf("NUM_THREADS(%4u,%4u, 0)\n blks(%4u,%4u, 0)\n",NUM_THREADS.x,NUM_THREADS.y,blks.x,blks.y);
printf("shared_mem_size = %u\n",shared_mem_size);
printf("-------------------------------\n");
// copy data to device
cudaMemcpy( d_a , a , bytes_a , cudaMemcpyHostToDevice );
cudaMemcpy( d_b , b , bytes_b , cudaMemcpyHostToDevice );
// launch cpu version to compare
time_cpu = get_time();
matmul_cpu(len_a, a, b, c);
time_cpu = get_time() - time_cpu;
printf("CPU - %9.8f seconds\n",time_cpu);
// launch BLAS version for fair comparison
time_blas = get_time();
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, len_a, len_a, len_a, 1.0, a, len_a, b, len_a, 0.0, c, len_a);
time_blas = get_time() - time_blas;
printf("BLAS - %9.8f seconds\n",time_blas);
//calculate the number of blocks from the number of threads
time_gpu = get_time();
matmul_kernel <<< blks, NUM_THREADS >>> (len_a , d_a , d_b , d_c);
time_gpu = get_time() - time_gpu;
printf("GPU - %9.8f seconds\n",time_gpu);
// launch kernel for shared memory implementation
time_gpu_shared = get_time();
matmul_kernel_shared <<< blks, NUM_THREADS , shared_mem_size >>> (len_a , d_a , d_b , d_c);
time_gpu_shared = get_time() - time_gpu_shared;
printf("GPU, shared mem - %9.8f seconds\n",time_gpu_shared);
printf("-------------------------------\n");
// check for errors
if(cudaPeekAtLastError()){
printf("CUDA ERROR, %s\n",cudaGetErrorString(cudaPeekAtLastError()));
return 1;
}
//copy c back, will be values from last GPU implementation
cudaMemcpy( c , d_c , bytes_b , cudaMemcpyDeviceToHost );
// write a,b,c to files in matrix format to be read by matlab for plotting as well as a regular flat file for python
printf("writing outputs...");
af = fopen("a_matlab","w");
bf = fopen("b_matlab","w");
cf = fopen("c_matlab","w");
cff = fopen("c","w");
fprintf(cff,"%d \n",len_a);
for(j=0;j<len_a;j++){
for(k=0;k<len_a;k++){
fprintf(af,"%10.8E ",a[j*len_a+k]); //row major
fprintf(bf,"%10.8E ",b[j*len_a+k]);
fprintf(cf,"%10.8E ",c[j*len_a+k]);
fprintf(cff,"%10.8E \n",c[j*len_a+k]);
}
fprintf(af,"\n");
fprintf(bf,"\n");
fprintf(cf,"\n");
}
fclose(af);
fclose(bf);
fclose(cf);
fclose(cff);
printf(" done.\n");
printf("-------------------------------\n");
// return zero if all ok
return 0;
}
|
8acaddbb94611c3ff3cab96d0ca72f7501ba7408.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/norm_kernel.h"
#include <algorithm>
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
namespace phi {
__device__ __forceinline__ dtype::float16 square_root(dtype::float16 x) {
return static_cast<dtype::float16>(sqrtf(static_cast<float>(x)));
}
__device__ __forceinline__ float square_root(float x) { return sqrtf(x); }
__device__ __forceinline__ double square_root(double x) { return sqrt(x); }
template <typename T, int BlockDim>
__global__ void Normalize(const T* x,
const int pre,
const int axis_n, // dim in axis
const int post,
const float eps,
T* y,
T* out_norm) {
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
typedef hipcub::BlockReduce<MT, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int num = pre * post;
for (int i = blockIdx.x; i < num; i += gridDim.x) {
int base = (i / post) * post * axis_n + (i % post);
MT sum = 0.0;
__shared__ MT norm;
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const MT x_ij = static_cast<MT>(x[base + j * post]);
sum += x_ij * x_ij;
}
MT reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = square_root(reduce_result + static_cast<MT>(eps));
out_norm[i] = static_cast<T>(norm);
}
__syncthreads();
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const int index = base + j * post;
y[index] = static_cast<T>((static_cast<MT>(x[index]) / norm));
}
}
}
template <typename T, typename Context>
void NormKernel(const Context& ctx,
const DenseTensor& x,
int axis,
float epsilon,
bool is_test,
DenseTensor* out,
DenseTensor* norm) {
auto* in_x = &x;
auto* out_y = out;
auto xdim = in_x->dims();
if (axis < 0) axis = xdim.size() + axis;
DenseTensor* out_norm;
DenseTensor out_norm_tmp;
if (is_test) {
auto out_dim = in_x->dims();
out_dim[axis] = 1;
out_norm = &out_norm_tmp;
out_norm->Resize(out_dim);
} else {
out_norm = norm;
}
const T* x_ptr = in_x->data<T>();
ctx.template Alloc<T>(out_y);
ctx.template Alloc<T>(out_norm);
T* y = out_y->data<T>();
T* norm_ptr = out_norm->data<T>();
int pre, n, post;
funcs::GetPrePostNumel(xdim, axis, &pre, &n, &post);
#ifdef __HIPCC__
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
int grid = ::min(max_blocks, pre * post);
hipLaunchKernelGGL(( Normalize<T, block>), dim3(grid), dim3(block), 0, ctx.stream(),
x_ptr, pre, n, post, epsilon, y, norm_ptr);
}
} // namespace phi
PD_REGISTER_KERNEL(norm,
GPU,
ALL_LAYOUT,
phi::NormKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
| 8acaddbb94611c3ff3cab96d0ca72f7501ba7408.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/norm_kernel.h"
#include <algorithm>
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
namespace phi {
__device__ __forceinline__ dtype::float16 square_root(dtype::float16 x) {
return static_cast<dtype::float16>(sqrtf(static_cast<float>(x)));
}
__device__ __forceinline__ float square_root(float x) { return sqrtf(x); }
__device__ __forceinline__ double square_root(double x) { return sqrt(x); }
template <typename T, int BlockDim>
__global__ void Normalize(const T* x,
const int pre,
const int axis_n, // dim in axis
const int post,
const float eps,
T* y,
T* out_norm) {
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
typedef cub::BlockReduce<MT, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int num = pre * post;
for (int i = blockIdx.x; i < num; i += gridDim.x) {
int base = (i / post) * post * axis_n + (i % post);
MT sum = 0.0;
__shared__ MT norm;
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const MT x_ij = static_cast<MT>(x[base + j * post]);
sum += x_ij * x_ij;
}
MT reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = square_root(reduce_result + static_cast<MT>(eps));
out_norm[i] = static_cast<T>(norm);
}
__syncthreads();
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const int index = base + j * post;
y[index] = static_cast<T>((static_cast<MT>(x[index]) / norm));
}
}
}
template <typename T, typename Context>
void NormKernel(const Context& ctx,
const DenseTensor& x,
int axis,
float epsilon,
bool is_test,
DenseTensor* out,
DenseTensor* norm) {
auto* in_x = &x;
auto* out_y = out;
auto xdim = in_x->dims();
if (axis < 0) axis = xdim.size() + axis;
DenseTensor* out_norm;
DenseTensor out_norm_tmp;
if (is_test) {
auto out_dim = in_x->dims();
out_dim[axis] = 1;
out_norm = &out_norm_tmp;
out_norm->Resize(out_dim);
} else {
out_norm = norm;
}
const T* x_ptr = in_x->data<T>();
ctx.template Alloc<T>(out_y);
ctx.template Alloc<T>(out_norm);
T* y = out_y->data<T>();
T* norm_ptr = out_norm->data<T>();
int pre, n, post;
funcs::GetPrePostNumel(xdim, axis, &pre, &n, &post);
#ifdef __HIPCC__
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid = std::min(max_blocks, pre * post);
Normalize<T, block><<<grid, block, 0, ctx.stream()>>>(
x_ptr, pre, n, post, epsilon, y, norm_ptr);
}
} // namespace phi
PD_REGISTER_KERNEL(norm,
GPU,
ALL_LAYOUT,
phi::NormKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
|
20cbfe3f2128ff0f1bc34c3f009c9e782a73cf76.hip | // !!! This is a file automatically generated by hipify!!!
#include "SiftCameraParams.h"
#include "GlobalDefines.h"
__constant__ SiftCameraParams c_siftCameraParams;
extern "C" void updateConstantSiftCameraParams(const SiftCameraParams& params) {
size_t size;
cutilSafeCall(hipGetSymbolSize(&size, c_siftCameraParams));
cutilSafeCall(hipMemcpyToSymbol(c_siftCameraParams, ¶ms, size, 0, hipMemcpyHostToDevice));
#ifdef DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
} | 20cbfe3f2128ff0f1bc34c3f009c9e782a73cf76.cu | #include "SiftCameraParams.h"
#include "GlobalDefines.h"
__constant__ SiftCameraParams c_siftCameraParams;
extern "C" void updateConstantSiftCameraParams(const SiftCameraParams& params) {
size_t size;
cutilSafeCall(cudaGetSymbolSize(&size, c_siftCameraParams));
cutilSafeCall(cudaMemcpyToSymbol(c_siftCameraParams, ¶ms, size, 0, cudaMemcpyHostToDevice));
#ifdef DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
} |
proj.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
/*
TODO project: reciprocal
*/
/*
TODO project: negative number human readable
*/
//KEY CONSTANTS
//THIS IS THE STARTING SIZE FOR THE TWO TEMP BUFFERS
//THE LARGER YOU EXPECT YOUR NUMBERS TO GROW, INCREASES THE SIZE OF THIS VALUE TO INCREASE PERFORMANCE
const unsigned int ORIGINAL_TEMP_BUFFER_SIZE = 1024;
//THESE TWO WILL SERVE AS OUR TEMPORARY BUFFERS IN OUR COMPUTATIONS
int *temp_buffer1;
int *temp_buffer2;
int *temp_buffer3;
unsigned int temp_buffer_size;
//NORMALIZATION KEY VALUES
const unsigned int BITS_PER_DIGIT = 32;
const unsigned int NORMALIZATION_EXPANSION = (unsigned int)ceil((BITS_PER_DIGIT * log(2.0)) / (log(10.0)));
//KEY PROCESSING CONSTANTS
//THIS IS THE DEVICE NUMBER THAT WE WILL DO OUR CALCULATIONS ON
const int DEVICE_NUM = 0;
int MAX_THREADS_PER_BLOCK;
//THE FOLLOWING IS HELPFUL INPUT CODE
//BCD - binary coded decimal
//A BCD IS THE DATA STRUCTURE THAT WE WILL USE TO REPRESENT OUR LARGE NUMBERS
//decpos IS THE POSITION OF THE DECIMAL IN THE NUMBER
//length IS THE NUMBER OF DIGITS IN THE NUMBER
//values IS AN ARRAY OF THE DIGITS
//gpuP IS THE POINTER TO THE DIGITS THAT HAVE BEEN COPIED TO THE GPU'S MEMORY
typedef struct bcd {
unsigned int decpos;
unsigned int length;
int *values;
int *gpuP;
} bdc;
//THIS TAKES A STRING REPRESENTATION OF OUR NUMBER, SUCH AS "123456544.23" AND LOADS IT INTO A BCD
void bcdFromString(char* input, bcd* output);
//THIS CREATES A BCD THAT CAN STORE A NUMBER WITH len DIGITS
bcd* createBcd(unsigned int len);
//THIS PRINTS A BCD OUT TO THE CONSOLE
void printBcd(bcd* input);
void printBcdNotNormal(bcd* input);
void zeroBcd(bcd* input);
void freeBcd(bcd* input);
//THE IMPLEMENTATION OF THESE THREE FUNCTIONS FOLLOWS:
void bcdFromString(char* input, bcd* output)
{
unsigned int len = strlen(input);
unsigned int lenstore = len;
unsigned int x = 0;
unsigned char decFound = 0;
unsigned int negative = 0;
for (x = 0; x < len; ++x)
{
char temp = input[x];
switch (temp)
{
case '-':
//lenstore -= 1;
negative = 1;
break;
case '0':
if (decFound > 0)
{
output->values[x - 1] = 0;
}
else
{
output->values[x] = 0;
}
break;
case '1':
if (decFound > 0)
{
output->values[x - 1] = 1;
}
else
{
output->values[x] = 1;
}
break;
case '2':
if (decFound > 0)
{
output->values[x - 1] = 2;
}
else
{
output->values[x] = 2;
}
break;
case '3':
if (decFound > 0)
{
output->values[x - 1] = 3;
}
else
{
output->values[x] = 3;
}
break;
case '4':
if (decFound > 0)
{
output->values[x - 1] = 4;
}
else
{
output->values[x] = 4;
}
break;
case '5':
if (decFound > 0)
{
output->values[x - 1] = 5;
}
else
{
output->values[x] = 5;
}
break;
case '6':
if (decFound > 0)
{
output->values[x - 1] = 6;
}
else
{
output->values[x] = 6;
}
break;
case '7':
if (decFound > 0)
{
output->values[x - 1] = 7;
}
else
{
output->values[x] = 7;
}
break;
case '8':
if (decFound > 0)
{
output->values[x - 1] = 8;
}
else
{
output->values[x] = 8;
}
break;
case '9':
if (decFound > 0)
{
output->values[x - 1] = 9;
}
else
{
output->values[x] = 9;
}
break;
case '.':
output->decpos = x;
lenstore -= 1;
decFound = 1;
break;
}
}
output->length = lenstore;
if (negative == 1)
{
int i = 0;
for(i = 0; i < lenstore; i++)
{
output->values[i] = output->values[i] * (-1);
}
}
if (decFound == 0)
{
output->decpos = lenstore;
}
}
bcd* createBcd(unsigned int len)
{
bcd* output = (bcd *)malloc(sizeof(bcd));
output->length = len;
output->values = (int *)malloc(len * sizeof(int));
return output;
}
void zeroBcd(bcd* input)
{
int c = 0;
for (c = 0; c < input->length; ++c)
{
*(input->values + c) = 0;
}
}
void printBcd(bcd* input)
{
int i = 0;
for(i = 0; i < input->length; i++)
{
if (i == input->decpos)
{
printf(".");
}
printf("%i", input->values[i]);
}
printf("\n");
}
void printBcdNotNormal(bcd* input)
{
int i = 0;
for(i = 0; i < input->length; i++)
{
if (i == input->decpos)
{
printf(".");
}
printf("%i", input->values[i]);
printf("|");
}
printf("\n");
}
void freeBcd(bcd* input)
{
hipFree(input->gpuP);
free(input->values);
free(input);
}
//hipFree
//GPU CODE
//THIS FUNCTION LOADS THE VALUES OF A BCD INTO TEH GPU'S MEMORY AND SETS THE gpuP OF THE BCD TO POINT TO THE GPU-STORED VALUES
void loadBcdIntoGPU(bcd* input);
//THIS COPIES BACK THE RESULTS FROM THE GPU TO THE BCD
void getCompResult(bcd* output);
void loadBcdIntoGPU(bcd* input)
{
hipMalloc(&input->gpuP, input->length * sizeof(int));
hipMemcpy(input->gpuP, input->values, input->length * sizeof(int), hipMemcpyHostToDevice);
}
void getCompResult(bcd* output)
{
hipMemcpy(output->values,output->gpuP, output->length * sizeof(int), hipMemcpyDeviceToHost);
}
//THE FOLLOWING IS ALL SETUP CODE
void cudaSetup();
void initTempBuffers();
void reallocTempBuffers(unsigned int size);
void freeTempBuffers();
void zeroTempBuffers();
//THIS IS THE MAIN SETUP FUNCTION. CALL THIS EARLY ON IN MAIN. BEFORE ANY ADDITIONS OR MULTIPLICATIONS ON BCD'S
//will call initTempBuffers
void cudaSetup()
{
//LET'S FIGURE OUT THE MAXIMUM THREADS PER BLOCK
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, DEVICE_NUM);
MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock;
initTempBuffers();
}
void initTempBuffers()
{
hipMalloc(&temp_buffer1, ORIGINAL_TEMP_BUFFER_SIZE * sizeof(int));
hipMalloc(&temp_buffer2, ORIGINAL_TEMP_BUFFER_SIZE * sizeof(int));
hipMalloc(&temp_buffer3, ORIGINAL_TEMP_BUFFER_SIZE * sizeof(int));
hipMemset(temp_buffer1, 0, ORIGINAL_TEMP_BUFFER_SIZE * sizeof(int));
hipMemset(temp_buffer2, 0, ORIGINAL_TEMP_BUFFER_SIZE * sizeof(int));
hipMemset(temp_buffer3, 0, ORIGINAL_TEMP_BUFFER_SIZE * sizeof(int));
temp_buffer_size = ORIGINAL_TEMP_BUFFER_SIZE;
}
void reallocTempBuffers(unsigned int size)
{
if (temp_buffer_size < size)
{
freeTempBuffers();
hipMalloc(&temp_buffer1, size * sizeof(int));
hipMalloc(&temp_buffer2, size * sizeof(int));
hipMalloc(&temp_buffer3, size * sizeof(int));
hipMemset(temp_buffer1, 0, size * sizeof(int));
hipMemset(temp_buffer2, 0, size * sizeof(int));
hipMemset(temp_buffer3, 0, size * sizeof(int));
temp_buffer_size = size;
}
}
void zeroTempBuffers()
{
hipMemset(temp_buffer1, 0, temp_buffer_size * sizeof(int));
hipMemset(temp_buffer2, 0, temp_buffer_size * sizeof(int));
hipMemset(temp_buffer3, 0, temp_buffer_size * sizeof(int));
}
void freeTempBuffers()
{
hipFree(temp_buffer1);
hipFree(temp_buffer2);
hipFree(temp_buffer3);
}
//MEMORY REQUIREMENT CALCULATION CODE
unsigned int memReqForAddition(bcd* num1, bcd* num2);
unsigned int memReqForMulitiplcation(bcd* num1, bcd* num2);
unsigned int memReqForAddition(bcd* num1, bcd* num2)
{
unsigned int maxlen = 0;
if (num1->length > num2->length)
{
maxlen = num1->length;
}
else
{
maxlen = num2->length;
}
return (maxlen + NORMALIZATION_EXPANSION + 1);
}
unsigned int memReqForMulitiplcation(bcd* num1, bcd* num2)
{
return num1->length + num2->length + (2 * NORMALIZATION_EXPANSION);
}
//DECIMAL POSITION CHANGE CODE
unsigned int decimalMovementAddition(bcd* num1, bcd* num2, unsigned int memReq);
unsigned int decimalMovementMultiplication(bcd* num1, bcd* num2, unsigned int memReq);
unsigned int decimalMovementAddition(bcd* num1, bcd* num2, unsigned int memReq)
{
if (num1->length > num2->length)
{
return (memReq - num1->length) + num1->decpos;
}
else
{
return (memReq - num2->length) + num2->decpos;
}
}
/*
TODO START HERE
*/
unsigned int decimalMovementMultiplication(bcd* num1, bcd* num2, unsigned int memReq)
{
return memReq - ((num1->length - num1->decpos) + (num2->length - num2->decpos));
}
//KERNELS
__global__ void addition(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *output, unsigned int memReq, unsigned int reps);
__global__ void normalize(int *num,unsigned int num1Len, int *result,unsigned int memReq);
__global__ void multiplication(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *temp_buffer3, int *output, unsigned int memReq, unsigned int reps);
/*
TODO normalize: need to get this working for reps. EG: numbers longer the 512 digits
*/
/*
TODO normalize: need to make this work for negative numbers
*/
__global__ void normalize(int *num, unsigned int numLen, int *result,unsigned int memReq, unsigned int reps)
{
int x = threadIdx.x;
if (x < memReq)
{
if (reps == 1)
{
if (x >= memReq - numLen)
{
result[x] = num[x - (memReq - numLen)];
}
}
else
{
//result[x] = num[x - (memReq - numLen)];
int d = 0;
for (d = 0; d < reps; ++d)
{
if (d == 0)
{
if (x >= memReq - numLen)
{
result[x + (512 * d)] = num[x + (512 * d) - (memReq - numLen)];
}
}
else
{
if ((x + (512 * d) - (memReq - numLen)) < numLen)
result[x + (512 * d)] = num[x + (512 * d) - (memReq - numLen)];
}
}
}
}
__shared__ int carry;
carry = 1;
__syncthreads();
while (carry)
{
if (reps == 1)
{
int c = 0;
if (x < memReq)
{
c = result[x] / 10;
result[x] %= 10;
}
__syncthreads();
if (x < memReq && x != 0)
{
result[x - 1] += c;
}
}
else
{
int d = 0;
for(d = 0; d < reps; ++d)
{
int c = 0;
if ((x + (512 * d)) < memReq)
{
c = result[x + (512 * d)] / 10;
result[x + (512 * d)] %= 10;
}
__syncthreads();
if ((x + (512 * d)) < memReq && (x != 0))
{
result[x - 1 + (512 * d)] += c;
}
}
}
carry = 0;
__syncthreads();
if (x < memReq)
{
if (reps == 1)
{
if (abs(result[x]) > 9)
{
carry = 1;
}
}
else
{
int d = 0;
for(d = 0; d < reps; ++d)
{
if ((x + (512 * d)) < memReq && (abs(result[x + (512 * d)]) > 9))
{
carry = 1;
}
}
}
}
__syncthreads();
}
}
/*
TODO addition kernel: need to add in normalization
*/
// Kernel: digit-wise addition of two BCD numbers. Both inputs are first
// right-aligned into the host-zeroed temp_buffer1/temp_buffer2, reduced to
// single decimal digits, and then summed slot-by-slot into `output` with the
// decimal-alignment shifts num1offset/num2offset applied.
//
// Launched as a single block; for reps > 1 data is walked in hard-coded
// 512-wide chunks (assumes blockDim.x == 512 -- TODO confirm against the
// host-computed `reps`). NOTE(review): the final sum is NOT carry-normalized
// here; callers are expected to run the normalize kernel afterwards (see the
// TODO above this kernel).
__global__ void addition(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *output, unsigned int memReq, unsigned int reps)
{
    int x = threadIdx.x;
    // Step 1: copy both operands to the least-significant end of the temp
    // buffers (move everything to temp buffers).
    if (reps == 1)
    {
        if (x < memReq)
        {
            if (x >= memReq - num1Len)
            {
                temp_buffer1[x] = num1[x - (memReq - num1Len)];
            }
            if (x >= memReq - num2Len)
            {
                temp_buffer2[x] = num2[x - (memReq - num2Len)];
            }
        }
    }
    else
    {
        int d = 0;
        for (d = 0; d < reps; ++d)
        {
            if (d == 0)
            {
                if (x >= memReq - num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if (x >= memReq - num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
            else
            {
                // Later chunks: bound the reads so we never run past an input.
                if ((x + (512 * d) - (memReq - num1Len)) < num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if ((x + (512 * d) - (memReq - num2Len)) < num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
        }
    }
    // Step 2: decide whether normalization is needed at all by probing the
    // top two bits of every slot (values near 32-bit overflow).
    // `carry` is a block-shared flag; same-value racy writes are benign.
    __shared__ int carry;
    carry = 0;
    __syncthreads();
    if (reps == 1)
    {
        // NOTE(review): no `x < memReq` bound on this probe -- it relies on
        // the temp buffers being at least blockDim.x entries; confirm.
        if (((unsigned int)(temp_buffer1[x] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(temp_buffer2[x] & ((unsigned int)3 << 30)) > 0))
        {
            carry = 1;
        }
    }
    else
    {
        int d = 0;
        for (d = 0; d < reps; ++d)
        {
            if ((x + (512 * d)) < memReq)
            {
                if (((unsigned int)(temp_buffer1[x + (512 * d)] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(temp_buffer2[x + (512 * d)] & ((unsigned int)3 << 30)) > 0))
                {
                    carry = 1;
                }
            }
        }
    }
    __syncthreads();
    // Step 3: carry-normalize both temp buffers until every slot holds a
    // single decimal digit.
    while (carry)
    {
        if (reps == 1)
        {
            int c1 = 0;
            int c2 = 0;
            if (x < memReq)
            {
                c1 = temp_buffer1[x] / 10;
                c2 = temp_buffer2[x] / 10;
                temp_buffer1[x] %= 10;
                temp_buffer2[x] %= 10;
            }
            __syncthreads();
            if (x < memReq && x != 0)
            {
                temp_buffer1[x - 1] += c1;
                temp_buffer2[x - 1] += c2;
            }
            carry = 0;
            __syncthreads();
            if (x < memReq)
            {
                if ((abs(temp_buffer1[x]) > 9) || (abs(temp_buffer2[x]) > 9))
                {
                    carry = 1;
                }
            }
            __syncthreads();
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                int c1 = 0;
                int c2 = 0;
                if ((x + (512 * d)) < memReq)
                {
                    c1 = temp_buffer1[x + (512 * d)] / 10;
                    c2 = temp_buffer2[x + (512 * d)] / 10;
                    temp_buffer1[x + (512 * d)] %= 10;
                    temp_buffer2[x + (512 * d)] %= 10;
                }
                __syncthreads();
                if ((x + (512 * d)) < memReq && (x != 0))
                {
                    temp_buffer1[x - 1 + (512 * d)] += c1;
                    temp_buffer2[x - 1 + (512 * d)] += c2;
                }
            }
        }
        carry = 0;
        __syncthreads();
        // Re-scan: run another pass while any slot is still multi-digit.
        if (x < memReq)
        {
            if (reps == 1)
            {
                if ((abs(temp_buffer1[x]) > 9) || (abs(temp_buffer2[x]) > 9))
                {
                    carry = 1;
                }
            }
            else
            {
                int d = 0;
                for(d = 0; d < reps; ++d)
                {
                    if ((x + (512 * d)) < memReq && ((abs(temp_buffer1[x + (512 * d)]) > 9) || (abs(temp_buffer2[x + (512 * d)]) > 9)))
                    {
                        carry = 1;
                    }
                }
            }
        }
        __syncthreads();
    }
    // Step 4: slot-wise sum with the decimal-alignment offsets applied;
    // slots whose shifted index falls outside the buffer contribute nothing.
    if (x < memReq)
    {
        if (reps == 1)
        {
            if (((x + num1offset) < memReq) && ((x + num2offset) < memReq))
            {
                output[x] = temp_buffer1[x + num1offset] + temp_buffer2[x + num2offset];
            }
            else if ((x + num2offset) < memReq)
            {
                output[x] = temp_buffer2[x + num2offset];
            }
            else if ((x + num1offset) < memReq)
            {
                output[x] = temp_buffer1[x + num1offset];
            }
            else
            {
                //do nothing
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if ((((x + (512 * d)) + num1offset) < memReq) && (((x + (512 * d)) + num2offset) < memReq))
                {
                    output[(x + (512 * d))] = temp_buffer1[(x + (512 * d)) + num1offset] + temp_buffer2[(x + (512 * d)) + num2offset];
                }
                else if (((x + (512 * d)) + num2offset) < memReq)
                {
                    output[(x + (512 * d))] = temp_buffer2[(x + (512 * d)) + num2offset];
                }
                else if (((x + (512 * d)) + num1offset) < memReq)
                {
                    output[(x + (512 * d))] = temp_buffer1[(x + (512 * d)) + num1offset];
                }
                else
                {
                    //do nothing
                }
            }
        }
    }
}
//multiplication<<<1,MAX_THREADS_PER_BLOCK>>>(num1->gpuP, num2->gpuP, num1->length, num2->length, dec1_offset, dec2_offset, temp_buffer1, temp_buffer2, temp_buffer3, output->gpuP, result_req, reps);
// Kernel: schoolbook multiplication of two BCD digit arrays.
// Steps: (1) right-align both operands into the host-zeroed
// temp_buffer1/temp_buffer2 and carry-normalize them; (2) for each digit of
// num2 (least significant first), multiply it against all of temp_buffer1
// into the scratch temp_buffer3, carry-normalize, and accumulate into
// output2.
//
// Launched as a single block; for reps > 1 data is walked in hard-coded
// 512-wide chunks (assumes blockDim.x == 512 -- TODO confirm against the
// host-computed `reps`).
//
// FIX(review): the reps == 1 overflow re-scan used `abs((output2[x]) > 9)`
// -- abs() of a comparison result -- which misses negative multi-digit
// slots; corrected to `(abs(output2[x]) > 9)` to match every other overflow
// test in this file.
__global__ void multiplication(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *temp_buffer3, int *output2, unsigned int memReq, unsigned int reps)
{
    int x = threadIdx.x;
    // Step 1a: copy both operands to the least-significant end of the temp
    // buffers.
    if (reps == 1)
    {
        if (x < memReq)
        {
            if (x >= memReq - num1Len)
            {
                temp_buffer1[x] = num1[x - (memReq - num1Len)];
            }
            if (x >= memReq - num2Len)
            {
                temp_buffer2[x] = num2[x - (memReq - num2Len)];
            }
        }
    }
    else
    {
        int d = 0;
        for (d = 0; d < reps; ++d)
        {
            if (d == 0)
            {
                if (x >= memReq - num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if (x >= memReq - num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
            else
            {
                // Later chunks: bound the reads so we never run past an input.
                if ((x + (512 * d) - (memReq - num1Len)) < num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if ((x + (512 * d) - (memReq - num2Len)) < num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
        }
    }
    // Step 1b: carry-normalize both temp buffers (same scheme as the
    // normalize kernel). `carry` is a block-shared "another pass" flag; the
    // same-value racy writes are benign.
    __shared__ int carry;
    carry = 1;
    __syncthreads();
    while (carry)
    {
        if (reps == 1)
        {
            int c1 = 0;
            int c2 = 0;
            if (x < memReq)
            {
                c1 = temp_buffer1[x] / 10;
                c2 = temp_buffer2[x] / 10;
                temp_buffer1[x] %= 10;
                temp_buffer2[x] %= 10;
            }
            __syncthreads();
            if (x < memReq && x != 0)
            {
                temp_buffer1[x - 1] += c1;
                temp_buffer2[x - 1] += c2;
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                int c1 = 0;
                int c2 = 0;
                if ((x + (512 * d)) < memReq)
                {
                    c1 = temp_buffer1[x + (512 * d)] / 10;
                    c2 = temp_buffer2[x + (512 * d)] / 10;
                    temp_buffer1[x + (512 * d)] %= 10;
                    temp_buffer2[x + (512 * d)] %= 10;
                }
                __syncthreads();
                if ((x + (512 * d)) < memReq && (x != 0))
                {
                    temp_buffer1[x - 1 + (512 * d)] += c1;
                    temp_buffer2[x - 1 + (512 * d)] += c2;
                }
            }
        }
        carry = 0;
        __syncthreads();
        if (reps == 1)
        {
            if ((x < memReq) && ((abs(temp_buffer1[x]) > 9) || (abs(temp_buffer2[x]) > 9)))
            {
                carry = 1;
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if ((x + (512 * d)) < memReq && ((abs(temp_buffer1[x + (512 * d)]) > 9) || (abs(temp_buffer2[x + (512 * d)]) > 9)))
                {
                    carry = 1;
                }
            }
        }
        __syncthreads();
    }
    // Step 2: one pass per digit of num2, least significant first.
    // multCount is the index (from the right) of the num2 digit in play.
    __shared__ int multCount;
    multCount = 0;
    __syncthreads();
    while (multCount < num2Len)
    {
        int tempMultCountStore = multCount;
        tempMultCountStore += 1;
        // Partial product: current num2 digit times every num1 digit,
        // shifted left by multCount slots into temp_buffer3.
        if (reps == 1)
        {
            if (x < memReq)
            {
                if (x > multCount)
                {
                    temp_buffer3[x - multCount] = temp_buffer2[memReq - multCount - 1] * temp_buffer1[x];
                }
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if (d == 0)
                {
                    if ((x > multCount) && ((x + (512 * d)) < memReq))
                    {
                        temp_buffer3[(x + (512 * d)) - multCount] = temp_buffer2[memReq - multCount - 1] * temp_buffer1[(x + (512 * d))];
                    }
                }
                else
                {
                    if ((x + (512 * d)) < memReq)
                    {
                        temp_buffer3[(x + (512 * d)) - multCount] = temp_buffer2[memReq - multCount - 1] * temp_buffer1[(x + (512 * d))];
                    }
                }
            }
        }
        carry = 0;
        __syncthreads();
        // Probe the top two bits of each slot to detect values approaching
        // 32-bit overflow; if any are set, normalize before accumulating.
        int d = 0;
        if (reps == 1)
        {
            for (d = 0; d <= reps; ++d)
            {
                if ((x + (512 * d)) < memReq)
                {
                    if (((unsigned int)(temp_buffer3[x + (512 * d)] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(output2[x + (512 * d)] & ((unsigned int)3 << 30)) > 0))
                    {
                        carry = 1;
                    }
                }
            }
        }
        else
        {
            for (d = 0; d < reps; ++d)
            {
                if ((x + (512 * d)) < memReq)
                {
                    if (((unsigned int)(temp_buffer3[x + (512 * d)] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(output2[x + (512 * d)] & ((unsigned int)3 << 30)) > 0))
                    {
                        carry = 1;
                    }
                }
            }
        }
        __syncthreads();
        // Carry-normalize the partial product and the accumulator together.
        while (carry)
        {
            if (reps == 1)
            {
                int c1 = 0;
                int c2 = 0;
                if ((reps == 1) && (x < memReq))
                {
                    c1 = temp_buffer3[x] / 10;
                    c2 = output2[x] / 10;
                    temp_buffer3[x] %= 10;
                    output2[x] %= 10;
                }
                __syncthreads();
                if (x < memReq && x != 0)
                {
                    temp_buffer3[x - 1] += c1;
                    output2[x - 1] += c2;
                }
                carry = 0;
                __syncthreads();
                if (x < memReq)
                {
                    // was: `abs((output2[x]) > 9)` -- abs of a bool (bug)
                    if ((abs(temp_buffer3[x]) > 9) || (abs(output2[x]) > 9))
                    {
                        carry = 1;
                    }
                }
                __syncthreads();
            }
            else
            {
                int d = 0;
                for(d = 0; d < reps; ++d)
                {
                    int c1 = 0;
                    int c2 = 0;
                    if ((x + (512 * d)) < memReq)
                    {
                        c1 = temp_buffer3[x + (512 * d)] / 10;
                        c2 = output2[x + (512 * d)] / 10;
                        temp_buffer3[x + (512 * d)] %= 10;
                        output2[x + (512 * d)] %= 10;
                    }
                    __syncthreads();
                    if (d == 0)
                    {
                        if ((x != 0) && ((x + (512 * d)) < memReq))
                        {
                            //SOMEHOW DESYNCRONIZED
                            temp_buffer3[x - 1 + (512 * d)] += c1;
                            output2[x - 1 + (512 * d)] += c2;
                        }
                    }
                    else
                    {
                        if (((x + (512 * d)) < memReq))
                        {
                            //SOMEHOW DESYNCRONIZED
                            temp_buffer3[x - 1 + (512 * d)] += c1;
                            output2[x - 1 + (512 * d)] += c2;
                        }
                    }
                    __syncthreads();
                    carry = 0;
                    // NOTE(review): this inner `d` deliberately shadows the
                    // chunk-loop `d` above so the full re-scan does not
                    // disturb the outer iteration.
                    int d = 0;
                    for(d = 0; d < reps; ++d)
                    {
                        if ((x + (512 * d)) < memReq && ((abs(temp_buffer3[x + (512 * d)]) > 9) || (abs(output2[x + (512 * d)]) > 9)))
                        {
                            carry = 1;
                        }
                    }
                    __syncthreads();
                }
            }
        }
        // Accumulate the normalized partial product and clear the scratch
        // buffer for the next num2 digit.
        if (reps == 1)
        {
            if (x < memReq)
            {
                output2[x] += temp_buffer3[x];
                temp_buffer3[x] = 0;
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if ((x + (512 * d)) < memReq)
                {
                    output2[x + (512 * d)] += temp_buffer3[x + (512 * d)];
                    temp_buffer3[x + (512 * d)] = 0;
                }
            }
        }
        // Advance to the next num2 digit (same value written by all threads).
        multCount = tempMultCountStore;
        __syncthreads();
    }
}
//ARITHMETIC FUNCTIONS
// Host wrapper: carry-normalize `num` on the GPU into a freshly allocated
// bcd that is NORMALIZATION_EXPANSION digits wider, and return it.
bcd* normalize(bcd *num)
{
    unsigned int paddedLen = NORMALIZATION_EXPANSION + num->length;
    bcd *output = createBcd(paddedLen);
    zeroBcd(output);
    loadBcdIntoGPU(output);
    // The decimal point shifts right by exactly the padding added in front.
    output->decpos = num->decpos + (paddedLen - num->length);
    unsigned int chunkReps = (paddedLen / MAX_THREADS_PER_BLOCK) + 1;
    hipLaunchKernelGGL(( normalize), dim3(1),dim3(MAX_THREADS_PER_BLOCK), 0, 0, num->gpuP, num->length, output->gpuP, paddedLen, chunkReps);
    return output;
}
// Host wrapper: GPU addition of num1 + num2 into a newly allocated bcd.
// Sizes the result, grows the device scratch buffers if needed, aligns the
// decimal points, and launches the addition kernel.
bcd* add(bcd *num1, bcd *num2)
{
    unsigned int result_req = memReqForAddition(num1, num2);
    bcd *output = createBcd(result_req);
    if (result_req > temp_buffer_size)
    {
        reallocTempBuffers(result_req * 2);
    }
    zeroBcd(output);
    loadBcdIntoGPU(output);
    output->decpos = decimalMovementAddition(num1, num2, result_req);
    unsigned int reps = (result_req / MAX_THREADS_PER_BLOCK) + 1;
    // Align the decimal points: shift the operand with the shorter
    // fractional part so both fractions end at the same slot.
    unsigned int frac1 = num1->length - num1->decpos;
    unsigned int frac2 = num2->length - num2->decpos;
    unsigned int dec1_offset = (frac1 < frac2) ? (frac2 - frac1) : 0;
    unsigned int dec2_offset = (frac1 > frac2) ? (frac1 - frac2) : 0;
    zeroTempBuffers();
    hipLaunchKernelGGL(( addition), dim3(1),dim3(MAX_THREADS_PER_BLOCK), 0, 0, num1->gpuP, num2->gpuP, num1->length, num2->length, dec1_offset, dec2_offset, temp_buffer1, temp_buffer2, output->gpuP, result_req, reps);
    return output;
}
// Host wrapper: GPU multiplication of num1 * num2 into a new bcd.
// Mirrors add(): size the result, grow the scratch buffers, align decimals,
// launch the multiplication kernel.
bcd *multiply(bcd *num1, bcd *num2)
{
    unsigned int result_req = memReqForMulitiplcation(num1, num2);
    bcd *output = createBcd(result_req);
    if (result_req > temp_buffer_size)
    {
        reallocTempBuffers(result_req * 2);
    }
    zeroBcd(output);
    loadBcdIntoGPU(output);
    output->decpos = decimalMovementMultiplication(num1, num2, result_req);
    unsigned int reps = (result_req / MAX_THREADS_PER_BLOCK) + 1;
    // Align the decimal points by shifting whichever operand has fewer
    // fractional digits.
    unsigned int frac1 = num1->length - num1->decpos;
    unsigned int frac2 = num2->length - num2->decpos;
    unsigned int dec1_offset = (frac1 < frac2) ? (frac2 - frac1) : 0;
    unsigned int dec2_offset = (frac1 > frac2) ? (frac1 - frac2) : 0;
    zeroTempBuffers();
    hipLaunchKernelGGL(( multiplication), dim3(1),dim3(MAX_THREADS_PER_BLOCK), 0, 0, num1->gpuP, num2->gpuP, num1->length, num2->length, dec1_offset, dec2_offset, temp_buffer1, temp_buffer2, temp_buffer3, output->gpuP, result_req, reps);
    return output;
}
//THIS IS A TESTBED FOR OUR LIBRARY
//AN EXAMPLE
int main()
{
//bcd *num1 = createBcd(903);
bcd *num1 = createBcd(7);
bcd *num2 = createBcd(2);
//bcdFromString("111111111111111111111111111111111111111111111112111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111121111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111.9", num1);
bcdFromString("112341.9", num1);
bcdFromString("1.2", num2);
printBcdNotNormal(num1);
printBcd(num2);
cudaSetup();
loadBcdIntoGPU(num1);
loadBcdIntoGPU(num2);
bcd* result = multiply(num1, num2);
bcd* normResult = normalize(result);
getCompResult(normResult);
printf("\n");
printBcd(normResult);
//printBcd(result);
freeBcd(num1);
freeBcd(num2);
freeBcd(result);
freeBcd(normResult);
freeTempBuffers();
return 0;
} | proj.cu | #include <stdio.h>
#include <string.h>
#include <math.h>
/*
TODO project: reciprocal
*/
/*
TODO project: negative number human readable
*/
//KEY CONSTANTS
//STARTING SIZE (IN INTS) OF EACH DEVICE SCRATCH BUFFER BELOW.
//IF YOU EXPECT YOUR NUMBERS TO GROW LARGER, INCREASE THIS VALUE TO AVOID REALLOCATIONS.
const unsigned int ORIGINAL_TEMP_BUFFER_SIZE = 1024;
//THESE THREE DEVICE BUFFERS SERVE AS SCRATCH SPACE FOR THE KERNELS
//(allocated by initTempBuffers, grown on demand by reallocTempBuffers)
int *temp_buffer1;
int *temp_buffer2;
int *temp_buffer3;
unsigned int temp_buffer_size;  // current capacity (in ints) of each buffer
//NORMALIZATION KEY VALUES
const unsigned int BITS_PER_DIGIT = 32;  // each decimal digit lives in a 32-bit int
// Decimal digits needed to print a full 32-bit value: ceil(32 * log10(2)) = 10.
const unsigned int NORMALIZATION_EXPANSION = (unsigned int)ceil((BITS_PER_DIGIT * log(2.0)) / (log(10.0)));
//KEY PROCESSING CONSTANTS
//THIS IS THE DEVICE NUMBER THAT WE WILL DO OUR CALCULATIONS ON
const int DEVICE_NUM = 0;
int MAX_THREADS_PER_BLOCK;  // queried from the device in cudaSetup()
//THE FOLLOWING IS HELPFUL INPUT CODE
//BCD - binary coded decimal
//A BCD IS THE DATA STRUCTURE THAT WE WILL USE TO REPRESENT OUR LARGE NUMBERS
//decpos IS THE POSITION OF THE DECIMAL IN THE NUMBER
//length IS THE NUMBER OF DIGITS IN THE NUMBER
//values IS AN ARRAY OF THE DIGITS
//gpuP IS THE POINTER TO THE DIGITS THAT HAVE BEEN COPIED TO THE GPU'S MEMORY
// One signed int per decimal digit, most significant digit first (see the
// field notes above).
typedef struct bcd {
    unsigned int decpos;  // index of the decimal point within the digits
    unsigned int length;  // number of digits stored in `values`
    int *values;          // host-side digit array
    int *gpuP;            // device-side copy (set by loadBcdIntoGPU)
} bcd;  // FIX(review): typedef name was misspelled "bdc"; the rest of the
        // file uses `bcd`, which only compiled because C++ lets the struct
        // tag stand alone. Naming the typedef `bcd` makes it valid C too.
//THIS TAKES A STRING REPRESENTATION OF OUR NUMBER, SUCH AS "123456544.23" AND LOADS IT INTO A BCD
void bcdFromString(char* input, bcd* output);
//THIS CREATES A BCD THAT CAN STORE A NUMBER WITH len DIGITS
bcd* createBcd(unsigned int len);
//THIS PRINTS A BCD OUT TO THE CONSOLE
void printBcd(bcd* input);
void printBcdNotNormal(bcd* input);
void zeroBcd(bcd* input);
void freeBcd(bcd* input);
//THE IMPLEMENTATION OF THESE THREE FUNCTIONS FOLLOWS:
// Parse a decimal string such as "-123.45" into `output` (one int per
// digit). A '.' sets decpos and shrinks the stored length by one; a '-'
// flags the number as negative, in which case every stored digit is negated
// afterwards. Any other character is ignored.
//
// NOTE(review): as in the original, the '-' still occupies a digit slot
// (the commented-out `lenstore -= 1` was never enabled) -- confirm intended.
//
// Replaces the original 130-line per-digit switch with '0'..'9' arithmetic;
// behavior is unchanged.
void bcdFromString(char* input, bcd* output)
{
    unsigned int len = strlen(input);
    unsigned int lenstore = len;       // digits actually stored
    unsigned int x = 0;
    unsigned char decFound = 0;        // 1 once the '.' has been seen
    unsigned int negative = 0;
    for (x = 0; x < len; ++x)
    {
        char temp = input[x];
        if (temp >= '0' && temp <= '9')
        {
            // After the decimal point every digit lands one slot earlier,
            // because the '.' itself is not stored.
            if (decFound > 0)
            {
                output->values[x - 1] = temp - '0';
            }
            else
            {
                output->values[x] = temp - '0';
            }
        }
        else if (temp == '.')
        {
            output->decpos = x;
            lenstore -= 1;
            decFound = 1;
        }
        else if (temp == '-')
        {
            //lenstore -= 1;
            negative = 1;
        }
    }
    output->length = lenstore;
    if (negative == 1)
    {
        // Negative numbers are represented by negating every digit.
        unsigned int i = 0;
        for(i = 0; i < lenstore; i++)
        {
            output->values[i] = output->values[i] * (-1);
        }
    }
    if (decFound == 0)
    {
        // No decimal point: treat the number as an integer.
        output->decpos = lenstore;
    }
}
// Allocate a bcd able to hold `len` digits. Digits are left uninitialized
// (call zeroBcd to clear them). decpos and gpuP are now defaulted so that
// freeBcd on a bcd that was never loaded to the GPU cannot cudaFree a
// garbage pointer; both mallocs are checked (returns NULL on failure).
bcd* createBcd(unsigned int len)
{
    bcd* output = (bcd *)malloc(sizeof(bcd));
    if (output == NULL)
    {
        return NULL;
    }
    output->length = len;
    output->decpos = 0;
    output->gpuP = NULL;
    output->values = (int *)malloc(len * sizeof(int));
    if (output->values == NULL)
    {
        free(output);
        return NULL;
    }
    return output;
}
// Set every digit of `input` to zero. memset is safe here because the
// all-zero byte pattern is the int value 0; this replaces the hand-rolled
// loop (which also compared a signed counter against the unsigned length).
void zeroBcd(bcd* input)
{
    memset(input->values, 0, input->length * sizeof(int));
}
// Print the digits of `input`, inserting a '.' at decpos, then a newline.
void printBcd(bcd* input)
{
    unsigned int pos;
    for (pos = 0; pos < input->length; ++pos)
    {
        if (pos == input->decpos)
        {
            printf(".");
        }
        printf("%i", input->values[pos]);
    }
    printf("\n");
}
// Debug print: like printBcd, but each (possibly multi-digit, i.e. not yet
// normalized) slot is followed by a '|' separator.
void printBcdNotNormal(bcd* input)
{
    unsigned int pos;
    for (pos = 0; pos < input->length; ++pos)
    {
        if (pos == input->decpos)
        {
            printf(".");
        }
        printf("%i", input->values[pos]);
        printf("|");
    }
    printf("\n");
}
// Release all storage owned by `input`: the device digits, the host digits,
// and the struct itself. Now safe to call with NULL (previously a NULL
// input dereferenced input->gpuP).
void freeBcd(bcd* input)
{
    if (input == NULL)
    {
        return;
    }
    cudaFree(input->gpuP);
    free(input->values);
    free(input);
}
//cudaFree
//GPU CODE
//THIS FUNCTION LOADS THE VALUES OF A BCD INTO TEH GPU'S MEMORY AND SETS THE gpuP OF THE BCD TO POINT TO THE GPU-STORED VALUES
void loadBcdIntoGPU(bcd* input);
//THIS COPIES BACK THE RESULTS FROM THE GPU TO THE BCD
void getCompResult(bcd* output);
// Copy the host digits of `input` into a fresh device allocation and store
// the device pointer in input->gpuP. Both CUDA calls were previously
// unchecked; failures are now reported on stderr (best-effort, no abort).
void loadBcdIntoGPU(bcd* input)
{
    size_t bytes = input->length * sizeof(int);
    cudaError_t err = cudaMalloc(&input->gpuP, bytes);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "loadBcdIntoGPU: cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return;
    }
    err = cudaMemcpy(input->gpuP, input->values, bytes, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "loadBcdIntoGPU: cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    }
}
// Copy the computed digits back from the GPU into output->values.
// The copy was previously unchecked; failures are now reported on stderr.
void getCompResult(bcd* output)
{
    cudaError_t err = cudaMemcpy(output->values, output->gpuP, output->length * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "getCompResult: cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    }
}
//THE FOLLOWING IS ALL SETUP CODE
void cudaSetup();
void initTempBuffers();
void reallocTempBuffers(unsigned int size);
void freeTempBuffers();
void zeroTempBuffers();
//THIS IS THE MAIN SETUP FUNCTION. CALL THIS EARLY ON IN MAIN. BEFORE ANY ADDITIONS OR MULTIPLICATIONS ON BCD'S
//will call initTempBuffers
// One-time setup: query the device, size the launch configuration, and
// allocate the device scratch buffers. MAX_THREADS_PER_BLOCK is capped at
// 512 because every kernel in this file walks its data in hard-coded
// 512-wide chunks; launching 1024 threads (common maxThreadsPerBlock) would
// disagree with the kernels' striding and the host-computed `reps`.
// The property query was previously unchecked.
void cudaSetup()
{
    cudaDeviceProp deviceProp;
    cudaError_t err = cudaGetDeviceProperties(&deviceProp, DEVICE_NUM);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaSetup: cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        MAX_THREADS_PER_BLOCK = 512;  // conservative fallback
    }
    else
    {
        MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock;
        if (MAX_THREADS_PER_BLOCK > 512)
        {
            MAX_THREADS_PER_BLOCK = 512;  // match the kernels' 512-entry chunking
        }
    }
    initTempBuffers();
}
// Allocate and zero the three device scratch buffers at their initial size.
void initTempBuffers()
{
    const size_t bytes = ORIGINAL_TEMP_BUFFER_SIZE * sizeof(int);
    cudaMalloc(&temp_buffer1, bytes);
    cudaMalloc(&temp_buffer2, bytes);
    cudaMalloc(&temp_buffer3, bytes);
    cudaMemset(temp_buffer1, 0, bytes);
    cudaMemset(temp_buffer2, 0, bytes);
    cudaMemset(temp_buffer3, 0, bytes);
    temp_buffer_size = ORIGINAL_TEMP_BUFFER_SIZE;
}
// Grow (never shrink) the three device scratch buffers to hold `size` ints
// each. No-op when the current capacity already suffices.
void reallocTempBuffers(unsigned int size)
{
    if (size <= temp_buffer_size)
    {
        return;  // already big enough
    }
    const size_t bytes = size * sizeof(int);
    freeTempBuffers();
    cudaMalloc(&temp_buffer1, bytes);
    cudaMalloc(&temp_buffer2, bytes);
    cudaMalloc(&temp_buffer3, bytes);
    cudaMemset(temp_buffer1, 0, bytes);
    cudaMemset(temp_buffer2, 0, bytes);
    cudaMemset(temp_buffer3, 0, bytes);
    temp_buffer_size = size;
}
// Clear all three device scratch buffers to zero.
void zeroTempBuffers()
{
    const size_t bytes = temp_buffer_size * sizeof(int);
    cudaMemset(temp_buffer1, 0, bytes);
    cudaMemset(temp_buffer2, 0, bytes);
    cudaMemset(temp_buffer3, 0, bytes);
}
// Release the device scratch buffers and NULL the pointers so a later
// realloc or free cannot operate on dangling device addresses
// (cudaFree(NULL) is a harmless no-op).
void freeTempBuffers()
{
    cudaFree(temp_buffer1);
    cudaFree(temp_buffer2);
    cudaFree(temp_buffer3);
    temp_buffer1 = NULL;
    temp_buffer2 = NULL;
    temp_buffer3 = NULL;
}
//MEMORY REQUIREMENT CALCULATION CODE
unsigned int memReqForAddition(bcd* num1, bcd* num2);
unsigned int memReqForMulitiplcation(bcd* num1, bcd* num2);
// Digits needed to hold num1 + num2: the longer operand, plus one overflow
// digit, plus the standard normalization padding.
unsigned int memReqForAddition(bcd* num1, bcd* num2)
{
    unsigned int maxlen = (num1->length > num2->length) ? num1->length : num2->length;
    return (maxlen + NORMALIZATION_EXPANSION + 1);
}
// Digits needed to hold num1 * num2: the operand lengths combined, plus
// normalization padding for each operand.
unsigned int memReqForMulitiplcation(bcd* num1, bcd* num2)
{
    unsigned int combined = num1->length + num2->length;
    return combined + (2 * NORMALIZATION_EXPANSION);
}
//DECIMAL POSITION CHANGE CODE
unsigned int decimalMovementAddition(bcd* num1, bcd* num2, unsigned int memReq);
unsigned int decimalMovementMultiplication(bcd* num1, bcd* num2, unsigned int memReq);
// Decimal-point index of an addition result of width memReq: keep the
// decimal position of the longer operand, shifted right by the zero padding
// inserted in front of it.
unsigned int decimalMovementAddition(bcd* num1, bcd* num2, unsigned int memReq)
{
    bcd* longer = (num1->length > num2->length) ? num1 : num2;
    return (memReq - longer->length) + longer->decpos;
}
/*
TODO START HERE
*/
// Decimal-point index of a multiplication result: the fractional digit
// counts of the two operands add together, measured from the right edge.
unsigned int decimalMovementMultiplication(bcd* num1, bcd* num2, unsigned int memReq)
{
    unsigned int fracDigits1 = num1->length - num1->decpos;
    unsigned int fracDigits2 = num2->length - num2->decpos;
    return memReq - (fracDigits1 + fracDigits2);
}
//KERNELS
__global__ void addition(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *output, unsigned int memReq, unsigned int reps);
// FIX(review): this prototype was missing the trailing `unsigned int reps`
// parameter and therefore declared a second, never-defined overload of the
// normalize kernel; it now matches the definition below.
__global__ void normalize(int *num,unsigned int num1Len, int *result,unsigned int memReq, unsigned int reps);
__global__ void multiplication(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *temp_buffer3, int *output, unsigned int memReq, unsigned int reps);
/*
TODO normalize: need to get this working for reps. EG: numbers longer the 512 digits
*/
/*
TODO normalize: need to make this work for negative numbers
*/
// Kernel: left-align the digits of `num` into the wider, host-zeroed buffer
// `result`, then iteratively propagate base-10 carries until every slot of
// `result` holds a single decimal digit (|digit| <= 9).
//
// Launched as <<<1, MAX_THREADS_PER_BLOCK>>> by the host wrapper. For
// reps > 1 the data is walked in hard-coded 512-wide chunks, so the kernel
// effectively assumes blockDim.x == 512 -- NOTE(review): the host computes
// `reps` from MAX_THREADS_PER_BLOCK (often 1024); confirm these agree.
//
//   num    : input digit array, numLen entries, most significant first
//   result : output buffer of memReq entries (pre-zeroed by the host)
//   reps   : number of 512-entry chunks each thread covers
__global__ void normalize(int *num, unsigned int numLen, int *result,unsigned int memReq, unsigned int reps)
{
    int x = threadIdx.x;
    // Phase 1: copy input digits to the least-significant end of `result`;
    // the leading (memReq - numLen) slots keep their zero padding.
    if (x < memReq)
    {
        if (reps == 1)
        {
            if (x >= memReq - numLen)
            {
                result[x] = num[x - (memReq - numLen)];
            }
        }
        else
        {
            //result[x] = num[x - (memReq - numLen)];
            int d = 0;
            for (d = 0; d < reps; ++d)
            {
                if (d == 0)
                {
                    if (x >= memReq - numLen)
                    {
                        result[x + (512 * d)] = num[x + (512 * d) - (memReq - numLen)];
                    }
                }
                else
                {
                    // Later chunks: bound the read so we never run past num.
                    if ((x + (512 * d) - (memReq - numLen)) < numLen)
                        result[x + (512 * d)] = num[x + (512 * d) - (memReq - numLen)];
                }
            }
        }
    }
    // Phase 2: carry propagation. `carry` is a block-shared "another pass
    // needed" flag; every thread writes the same value, so the unsynchronized
    // writes are benign.
    __shared__ int carry;
    carry = 1;
    __syncthreads();
    while (carry)
    {
        if (reps == 1)
        {
            int c = 0;
            if (x < memReq)
            {
                c = result[x] / 10;  // carry out of this slot
                result[x] %= 10;     // reduce slot to a single decimal digit
            }
            __syncthreads();
            if (x < memReq && x != 0)
            {
                result[x - 1] += c;  // add carry into the next digit up
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                int c = 0;
                if ((x + (512 * d)) < memReq)
                {
                    c = result[x + (512 * d)] / 10;
                    result[x + (512 * d)] %= 10;
                }
                __syncthreads();
                // NOTE(review): a carry out of slot 0 of a chunk (x == 0) is
                // never pushed into the previous chunk -- confirm intended.
                if ((x + (512 * d)) < memReq && (x != 0))
                {
                    result[x - 1 + (512 * d)] += c;
                }
            }
        }
        carry = 0;
        __syncthreads();
        // Re-scan: request another pass while any slot still holds a
        // multi-digit value.
        if (x < memReq)
        {
            if (reps == 1)
            {
                if (abs(result[x]) > 9)
                {
                    carry = 1;
                }
            }
            else
            {
                int d = 0;
                for(d = 0; d < reps; ++d)
                {
                    if ((x + (512 * d)) < memReq && (abs(result[x + (512 * d)]) > 9))
                    {
                        carry = 1;
                    }
                }
            }
        }
        __syncthreads();
    }
}
/*
TODO addition kernel: need to add in normalization
*/
// Kernel: digit-wise addition of two BCD numbers. Both inputs are first
// right-aligned into the host-zeroed temp_buffer1/temp_buffer2, reduced to
// single decimal digits, and then summed slot-by-slot into `output` with the
// decimal-alignment shifts num1offset/num2offset applied.
//
// Launched as <<<1, MAX_THREADS_PER_BLOCK>>>; for reps > 1 data is walked in
// hard-coded 512-wide chunks (assumes blockDim.x == 512 -- TODO confirm
// against the host-computed `reps`). NOTE(review): the final sum is NOT
// carry-normalized here; callers are expected to run the normalize kernel
// afterwards (see the TODO above this kernel).
__global__ void addition(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *output, unsigned int memReq, unsigned int reps)
{
    int x = threadIdx.x;
    // Step 1: copy both operands to the least-significant end of the temp
    // buffers (move everything to temp buffers).
    if (reps == 1)
    {
        if (x < memReq)
        {
            if (x >= memReq - num1Len)
            {
                temp_buffer1[x] = num1[x - (memReq - num1Len)];
            }
            if (x >= memReq - num2Len)
            {
                temp_buffer2[x] = num2[x - (memReq - num2Len)];
            }
        }
    }
    else
    {
        int d = 0;
        for (d = 0; d < reps; ++d)
        {
            if (d == 0)
            {
                if (x >= memReq - num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if (x >= memReq - num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
            else
            {
                // Later chunks: bound the reads so we never run past an input.
                if ((x + (512 * d) - (memReq - num1Len)) < num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if ((x + (512 * d) - (memReq - num2Len)) < num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
        }
    }
    // Step 2: decide whether normalization is needed at all by probing the
    // top two bits of every slot (values near 32-bit overflow).
    // `carry` is a block-shared flag; same-value racy writes are benign.
    __shared__ int carry;
    carry = 0;
    __syncthreads();
    if (reps == 1)
    {
        // NOTE(review): no `x < memReq` bound on this probe -- it relies on
        // the temp buffers being at least blockDim.x entries; confirm.
        if (((unsigned int)(temp_buffer1[x] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(temp_buffer2[x] & ((unsigned int)3 << 30)) > 0))
        {
            carry = 1;
        }
    }
    else
    {
        int d = 0;
        for (d = 0; d < reps; ++d)
        {
            if ((x + (512 * d)) < memReq)
            {
                if (((unsigned int)(temp_buffer1[x + (512 * d)] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(temp_buffer2[x + (512 * d)] & ((unsigned int)3 << 30)) > 0))
                {
                    carry = 1;
                }
            }
        }
    }
    __syncthreads();
    // Step 3: carry-normalize both temp buffers until every slot holds a
    // single decimal digit.
    while (carry)
    {
        if (reps == 1)
        {
            int c1 = 0;
            int c2 = 0;
            if (x < memReq)
            {
                c1 = temp_buffer1[x] / 10;
                c2 = temp_buffer2[x] / 10;
                temp_buffer1[x] %= 10;
                temp_buffer2[x] %= 10;
            }
            __syncthreads();
            if (x < memReq && x != 0)
            {
                temp_buffer1[x - 1] += c1;
                temp_buffer2[x - 1] += c2;
            }
            carry = 0;
            __syncthreads();
            if (x < memReq)
            {
                if ((abs(temp_buffer1[x]) > 9) || (abs(temp_buffer2[x]) > 9))
                {
                    carry = 1;
                }
            }
            __syncthreads();
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                int c1 = 0;
                int c2 = 0;
                if ((x + (512 * d)) < memReq)
                {
                    c1 = temp_buffer1[x + (512 * d)] / 10;
                    c2 = temp_buffer2[x + (512 * d)] / 10;
                    temp_buffer1[x + (512 * d)] %= 10;
                    temp_buffer2[x + (512 * d)] %= 10;
                }
                __syncthreads();
                if ((x + (512 * d)) < memReq && (x != 0))
                {
                    temp_buffer1[x - 1 + (512 * d)] += c1;
                    temp_buffer2[x - 1 + (512 * d)] += c2;
                }
            }
        }
        carry = 0;
        __syncthreads();
        // Re-scan: run another pass while any slot is still multi-digit.
        if (x < memReq)
        {
            if (reps == 1)
            {
                if ((abs(temp_buffer1[x]) > 9) || (abs(temp_buffer2[x]) > 9))
                {
                    carry = 1;
                }
            }
            else
            {
                int d = 0;
                for(d = 0; d < reps; ++d)
                {
                    if ((x + (512 * d)) < memReq && ((abs(temp_buffer1[x + (512 * d)]) > 9) || (abs(temp_buffer2[x + (512 * d)]) > 9)))
                    {
                        carry = 1;
                    }
                }
            }
        }
        __syncthreads();
    }
    // Step 4: slot-wise sum with the decimal-alignment offsets applied;
    // slots whose shifted index falls outside the buffer contribute nothing.
    if (x < memReq)
    {
        if (reps == 1)
        {
            if (((x + num1offset) < memReq) && ((x + num2offset) < memReq))
            {
                output[x] = temp_buffer1[x + num1offset] + temp_buffer2[x + num2offset];
            }
            else if ((x + num2offset) < memReq)
            {
                output[x] = temp_buffer2[x + num2offset];
            }
            else if ((x + num1offset) < memReq)
            {
                output[x] = temp_buffer1[x + num1offset];
            }
            else
            {
                //do nothing
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if ((((x + (512 * d)) + num1offset) < memReq) && (((x + (512 * d)) + num2offset) < memReq))
                {
                    output[(x + (512 * d))] = temp_buffer1[(x + (512 * d)) + num1offset] + temp_buffer2[(x + (512 * d)) + num2offset];
                }
                else if (((x + (512 * d)) + num2offset) < memReq)
                {
                    output[(x + (512 * d))] = temp_buffer2[(x + (512 * d)) + num2offset];
                }
                else if (((x + (512 * d)) + num1offset) < memReq)
                {
                    output[(x + (512 * d))] = temp_buffer1[(x + (512 * d)) + num1offset];
                }
                else
                {
                    //do nothing
                }
            }
        }
    }
}
//multiplication<<<1,MAX_THREADS_PER_BLOCK>>>(num1->gpuP, num2->gpuP, num1->length, num2->length, dec1_offset, dec2_offset, temp_buffer1, temp_buffer2, temp_buffer3, output->gpuP, result_req, reps);
// Kernel: schoolbook multiplication of two BCD digit arrays.
// Steps: (1) right-align both operands into the host-zeroed
// temp_buffer1/temp_buffer2 and carry-normalize them; (2) for each digit of
// num2 (least significant first), multiply it against all of temp_buffer1
// into the scratch temp_buffer3, carry-normalize, and accumulate into
// output2.
//
// Launched as <<<1, MAX_THREADS_PER_BLOCK>>>; for reps > 1 data is walked in
// hard-coded 512-wide chunks (assumes blockDim.x == 512 -- TODO confirm
// against the host-computed `reps`).
//
// FIX(review): the reps == 1 overflow re-scan used `abs((output2[x]) > 9)`
// -- abs() of a comparison result -- which misses negative multi-digit
// slots; corrected to `(abs(output2[x]) > 9)` to match every other overflow
// test in this file.
__global__ void multiplication(int *num1, int *num2, unsigned int num1Len, unsigned int num2Len, unsigned int num1offset, unsigned int num2offset, int *temp_buffer1, int *temp_buffer2, int *temp_buffer3, int *output2, unsigned int memReq, unsigned int reps)
{
    int x = threadIdx.x;
    // Step 1a: copy both operands to the least-significant end of the temp
    // buffers.
    if (reps == 1)
    {
        if (x < memReq)
        {
            if (x >= memReq - num1Len)
            {
                temp_buffer1[x] = num1[x - (memReq - num1Len)];
            }
            if (x >= memReq - num2Len)
            {
                temp_buffer2[x] = num2[x - (memReq - num2Len)];
            }
        }
    }
    else
    {
        int d = 0;
        for (d = 0; d < reps; ++d)
        {
            if (d == 0)
            {
                if (x >= memReq - num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if (x >= memReq - num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
            else
            {
                // Later chunks: bound the reads so we never run past an input.
                if ((x + (512 * d) - (memReq - num1Len)) < num1Len)
                {
                    temp_buffer1[x + (512 * d)] = num1[x + (512 * d) - (memReq - num1Len)];
                }
                if ((x + (512 * d) - (memReq - num2Len)) < num2Len)
                {
                    temp_buffer2[x + (512 * d)] = num2[x + (512 * d) - (memReq - num2Len)];
                }
            }
        }
    }
    // Step 1b: carry-normalize both temp buffers (same scheme as the
    // normalize kernel). `carry` is a block-shared "another pass" flag; the
    // same-value racy writes are benign.
    __shared__ int carry;
    carry = 1;
    __syncthreads();
    while (carry)
    {
        if (reps == 1)
        {
            int c1 = 0;
            int c2 = 0;
            if (x < memReq)
            {
                c1 = temp_buffer1[x] / 10;
                c2 = temp_buffer2[x] / 10;
                temp_buffer1[x] %= 10;
                temp_buffer2[x] %= 10;
            }
            __syncthreads();
            if (x < memReq && x != 0)
            {
                temp_buffer1[x - 1] += c1;
                temp_buffer2[x - 1] += c2;
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                int c1 = 0;
                int c2 = 0;
                if ((x + (512 * d)) < memReq)
                {
                    c1 = temp_buffer1[x + (512 * d)] / 10;
                    c2 = temp_buffer2[x + (512 * d)] / 10;
                    temp_buffer1[x + (512 * d)] %= 10;
                    temp_buffer2[x + (512 * d)] %= 10;
                }
                __syncthreads();
                if ((x + (512 * d)) < memReq && (x != 0))
                {
                    temp_buffer1[x - 1 + (512 * d)] += c1;
                    temp_buffer2[x - 1 + (512 * d)] += c2;
                }
            }
        }
        carry = 0;
        __syncthreads();
        if (reps == 1)
        {
            if ((x < memReq) && ((abs(temp_buffer1[x]) > 9) || (abs(temp_buffer2[x]) > 9)))
            {
                carry = 1;
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if ((x + (512 * d)) < memReq && ((abs(temp_buffer1[x + (512 * d)]) > 9) || (abs(temp_buffer2[x + (512 * d)]) > 9)))
                {
                    carry = 1;
                }
            }
        }
        __syncthreads();
    }
    // Step 2: one pass per digit of num2, least significant first.
    // multCount is the index (from the right) of the num2 digit in play.
    __shared__ int multCount;
    multCount = 0;
    __syncthreads();
    while (multCount < num2Len)
    {
        int tempMultCountStore = multCount;
        tempMultCountStore += 1;
        // Partial product: current num2 digit times every num1 digit,
        // shifted left by multCount slots into temp_buffer3.
        if (reps == 1)
        {
            if (x < memReq)
            {
                if (x > multCount)
                {
                    temp_buffer3[x - multCount] = temp_buffer2[memReq - multCount - 1] * temp_buffer1[x];
                }
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if (d == 0)
                {
                    if ((x > multCount) && ((x + (512 * d)) < memReq))
                    {
                        temp_buffer3[(x + (512 * d)) - multCount] = temp_buffer2[memReq - multCount - 1] * temp_buffer1[(x + (512 * d))];
                    }
                }
                else
                {
                    if ((x + (512 * d)) < memReq)
                    {
                        temp_buffer3[(x + (512 * d)) - multCount] = temp_buffer2[memReq - multCount - 1] * temp_buffer1[(x + (512 * d))];
                    }
                }
            }
        }
        carry = 0;
        __syncthreads();
        // Probe the top two bits of each slot to detect values approaching
        // 32-bit overflow; if any are set, normalize before accumulating.
        int d = 0;
        if (reps == 1)
        {
            for (d = 0; d <= reps; ++d)
            {
                if ((x + (512 * d)) < memReq)
                {
                    if (((unsigned int)(temp_buffer3[x + (512 * d)] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(output2[x + (512 * d)] & ((unsigned int)3 << 30)) > 0))
                    {
                        carry = 1;
                    }
                }
            }
        }
        else
        {
            for (d = 0; d < reps; ++d)
            {
                if ((x + (512 * d)) < memReq)
                {
                    if (((unsigned int)(temp_buffer3[x + (512 * d)] & ((unsigned int)3 << 30)) > 0) || ((unsigned int)(output2[x + (512 * d)] & ((unsigned int)3 << 30)) > 0))
                    {
                        carry = 1;
                    }
                }
            }
        }
        __syncthreads();
        // Carry-normalize the partial product and the accumulator together.
        while (carry)
        {
            if (reps == 1)
            {
                int c1 = 0;
                int c2 = 0;
                if ((reps == 1) && (x < memReq))
                {
                    c1 = temp_buffer3[x] / 10;
                    c2 = output2[x] / 10;
                    temp_buffer3[x] %= 10;
                    output2[x] %= 10;
                }
                __syncthreads();
                if (x < memReq && x != 0)
                {
                    temp_buffer3[x - 1] += c1;
                    output2[x - 1] += c2;
                }
                carry = 0;
                __syncthreads();
                if (x < memReq)
                {
                    // was: `abs((output2[x]) > 9)` -- abs of a bool (bug)
                    if ((abs(temp_buffer3[x]) > 9) || (abs(output2[x]) > 9))
                    {
                        carry = 1;
                    }
                }
                __syncthreads();
            }
            else
            {
                int d = 0;
                for(d = 0; d < reps; ++d)
                {
                    int c1 = 0;
                    int c2 = 0;
                    if ((x + (512 * d)) < memReq)
                    {
                        c1 = temp_buffer3[x + (512 * d)] / 10;
                        c2 = output2[x + (512 * d)] / 10;
                        temp_buffer3[x + (512 * d)] %= 10;
                        output2[x + (512 * d)] %= 10;
                    }
                    __syncthreads();
                    if (d == 0)
                    {
                        if ((x != 0) && ((x + (512 * d)) < memReq))
                        {
                            //SOMEHOW DESYNCRONIZED
                            temp_buffer3[x - 1 + (512 * d)] += c1;
                            output2[x - 1 + (512 * d)] += c2;
                        }
                    }
                    else
                    {
                        if (((x + (512 * d)) < memReq))
                        {
                            //SOMEHOW DESYNCRONIZED
                            temp_buffer3[x - 1 + (512 * d)] += c1;
                            output2[x - 1 + (512 * d)] += c2;
                        }
                    }
                    __syncthreads();
                    carry = 0;
                    // NOTE(review): this inner `d` deliberately shadows the
                    // chunk-loop `d` above so the full re-scan does not
                    // disturb the outer iteration.
                    int d = 0;
                    for(d = 0; d < reps; ++d)
                    {
                        if ((x + (512 * d)) < memReq && ((abs(temp_buffer3[x + (512 * d)]) > 9) || (abs(output2[x + (512 * d)]) > 9)))
                        {
                            carry = 1;
                        }
                    }
                    __syncthreads();
                }
            }
        }
        // Accumulate the normalized partial product and clear the scratch
        // buffer for the next num2 digit.
        if (reps == 1)
        {
            if (x < memReq)
            {
                output2[x] += temp_buffer3[x];
                temp_buffer3[x] = 0;
            }
        }
        else
        {
            int d = 0;
            for(d = 0; d < reps; ++d)
            {
                if ((x + (512 * d)) < memReq)
                {
                    output2[x + (512 * d)] += temp_buffer3[x + (512 * d)];
                    temp_buffer3[x + (512 * d)] = 0;
                }
            }
        }
        // Advance to the next num2 digit (same value written by all threads).
        multCount = tempMultCountStore;
        __syncthreads();
    }
}
//ARITHMETIC FUNCTIONS
// Host wrapper: carry-normalize `num` on the GPU into a freshly allocated
// bcd that is NORMALIZATION_EXPANSION digits wider, and return it.
bcd* normalize(bcd *num)
{
    unsigned int paddedLen = NORMALIZATION_EXPANSION + num->length;
    bcd *output = createBcd(paddedLen);
    zeroBcd(output);
    loadBcdIntoGPU(output);
    // The decimal point shifts right by exactly the padding added in front.
    output->decpos = num->decpos + (paddedLen - num->length);
    unsigned int chunkReps = (paddedLen / MAX_THREADS_PER_BLOCK) + 1;
    normalize<<<1,MAX_THREADS_PER_BLOCK>>>(num->gpuP, num->length, output->gpuP, paddedLen, chunkReps);
    return output;
}
// Host wrapper: GPU addition of num1 + num2 into a newly allocated bcd.
// Sizes the result, grows the device scratch buffers if needed, aligns the
// decimal points, and launches the addition kernel.
bcd* add(bcd *num1, bcd *num2)
{
    unsigned int result_req = memReqForAddition(num1, num2);
    bcd *output = createBcd(result_req);
    if (result_req > temp_buffer_size)
    {
        reallocTempBuffers(result_req * 2);
    }
    zeroBcd(output);
    loadBcdIntoGPU(output);
    output->decpos = decimalMovementAddition(num1, num2, result_req);
    unsigned int reps = (result_req / MAX_THREADS_PER_BLOCK) + 1;
    // Align the decimal points: shift the operand with the shorter
    // fractional part so both fractions end at the same slot.
    unsigned int frac1 = num1->length - num1->decpos;
    unsigned int frac2 = num2->length - num2->decpos;
    unsigned int dec1_offset = (frac1 < frac2) ? (frac2 - frac1) : 0;
    unsigned int dec2_offset = (frac1 > frac2) ? (frac1 - frac2) : 0;
    zeroTempBuffers();
    addition<<<1,MAX_THREADS_PER_BLOCK>>>(num1->gpuP, num2->gpuP, num1->length, num2->length, dec1_offset, dec2_offset, temp_buffer1, temp_buffer2, output->gpuP, result_req, reps);
    return output;
}
// Host wrapper for BCD multiplication: allocates the result, lines the two
// operands up on their decimal markers, and launches the multiplication
// kernel (single block, shared scratch buffers).
bcd *multiply(bcd *num1, bcd *num2)
{
    const unsigned int result_req = memReqForMulitiplcation(num1, num2);
    bcd *output = createBcd(result_req);
    // Grow the shared scratch buffers if the product will not fit.
    if (result_req > temp_buffer_size)
    {
        reallocTempBuffers(result_req * 2);
    }
    zeroBcd(output);
    loadBcdIntoGPU(output);
    output->decpos = decimalMovementMultiplication(num1, num2, result_req);
    const unsigned int reps = result_req / MAX_THREADS_PER_BLOCK + 1;
    // Offset whichever operand has the smaller (length - decpos) gap so the
    // digit columns of both numbers line up.
    const unsigned int decdiff1 = num1->length - num1->decpos;
    const unsigned int decdiff2 = num2->length - num2->decpos;
    const unsigned int dec1_offset = (decdiff1 < decdiff2) ? decdiff2 - decdiff1 : 0;
    const unsigned int dec2_offset = (decdiff1 > decdiff2) ? decdiff1 - decdiff2 : 0;
    zeroTempBuffers();
    multiplication<<<1, MAX_THREADS_PER_BLOCK>>>(num1->gpuP, num2->gpuP, num1->length, num2->length, dec1_offset, dec2_offset, temp_buffer1, temp_buffer2, temp_buffer3, output->gpuP, result_req, reps);
    return output;
}
//THIS IS A TESTBED FOR OUR LIBRARY
//AN EXAMPLE
// Demonstration driver: multiplies two decimal literals on the GPU and
// prints the carry-normalized product.
int main()
{
    // Operands sized to hold their string literals' digit counts.
    bcd *num1 = createBcd(7);
    bcd *num2 = createBcd(2);
    bcdFromString("112341.9", num1);
    bcdFromString("1.2", num2);
    printBcdNotNormal(num1);
    printBcd(num2);
    // Bring up the device and upload both operands.
    cudaSetup();
    loadBcdIntoGPU(num1);
    loadBcdIntoGPU(num2);
    // product = num1 * num2; normalize resolves carries so digits are 0-9.
    bcd* result = multiply(num1, num2);
    bcd* normResult = normalize(result);
    getCompResult(normResult);
    printf("\n");
    printBcd(normResult);
    // Release every bcd we created plus the shared GPU scratch buffers.
    freeBcd(num1);
    freeBcd(num2);
    freeBcd(result);
    freeBcd(normResult);
    freeTempBuffers();
    return 0;
}
34f746bf09dec8aa07b5c055be76e27a1a670919.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
  // Read (and clear) the most recent runtime error; nothing to do on success.
  hipError_t err = hipGetLastError();
  if (err == hipSuccess) {
    return;
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  // msg identifies the call site; the runtime supplies the error text.
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************6
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_orderedPos;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Bit-mixing integer hash. The constants look like Robert Jenkins'
// 32-bit integer mix -- TODO confirm provenance. Used below (see
// generateRandomVec3) to turn a boid index/time product into a
// well-scattered RNG seed. Each add/xor-with-shift round diffuses the
// input bits across the whole word; the rounds are order-sensitive,
// so do not reorder them.
__host__ __device__ unsigned int hash(unsigned int a) {
  a = (a + 0x7ed55d16) + (a << 12);
  a = (a ^ 0xc761c23c) ^ (a >> 19);
  a = (a + 0x165667b1) + (a << 5);
  a = (a + 0xd3a2646c) ^ (a << 9);
  a = (a + 0xfd7046c5) + (a << 3);
  a = (a ^ 0xb55a4f09) ^ (a >> 16);
  return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  // Seed a per-thread engine from the (time, index) pair so each boid gets
  // an independent stream (hash decorrelates adjacent indices).
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  // Draw the three components as separately sequenced statements. The
  // original drew them inline as constructor arguments, but C++ leaves the
  // evaluation order of function arguments unspecified, so the component
  // assignment was compiler-dependent. Named locals pin x, y, z to the
  // first, second, and third draw respectively.
  const float x = (float)unitDistrib(rng);
  const float y = (float)unitDistrib(rng);
  const float z = (float)unitDistrib(rng);
  return glm::vec3(x, y, z);
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  // One thread per boid; threads past N in the tail block do nothing.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Uniform random point in [-scale, scale]^3 (component-wise scaling).
  glm::vec3 rand = generateRandomVec3(time, index);
  arr[index] = scale * rand;
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  // LOOK-1.2 - This is basic CUDA memory management and error checking.
  // Don't forget to hipFree in Boids::endSimulation.
  hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
  hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
  hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
  // BUG FIX: start both velocity buffers at zero. hipMalloc does not zero
  // memory, and the velocity kernels accumulate into vel2, so uninitialized
  // buffers would feed garbage into the very first frame.
  hipMemset(dev_vel1, 0, N * sizeof(glm::vec3));
  hipMemset(dev_vel2, 0, N * sizeof(glm::vec3));
  // LOOK-1.2 - This is a typical CUDA kernel invocation.
  hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // LOOK-2.1 computing grid params. Cell width is twice the largest rule
  // radius so an 8-cell neighborhood covers every possible neighbor.
  gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  // gridMinimum is a zero-initialized file-scope vec3; these subtractions
  // center the grid on the origin.
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // TODO-2.1 TODO-2.3 - Allocate additional buffers here.
  hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
  hipMemset(dev_particleArrayIndices, 0, N * sizeof(int));
  hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
  hipMemset(dev_particleGridIndices, 0, N * sizeof(int));
  hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
  hipMemset(dev_gridCellStartIndices, 0, gridCellCount * sizeof(int));
  hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
  // BUG FIX: fill end indices with 0xFF bytes (-1 per int) instead of 0.
  // With a 0/0 start/end pair, every cell that never receives boids looked
  // like it contained boid 0; with start=0 > end=-1 the neighbor-search
  // loops skip empty cells entirely.
  hipMemset(dev_gridCellEndIndices, 0xFF, gridCellCount * sizeof(int));
  hipMalloc((void**)&dev_orderedPos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_orderedPos failed!");
  hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  // Negate and rescale positions into the renderer's coordinate range;
  // w is fixed at 1 for a point in homogeneous coordinates.
  const float c_scale = -1.0f / s_scale;
  const glm::vec3 p = pos[index];
  float *out = vbo + 4 * index;
  out[0] = p.x * c_scale;
  out[1] = p.y * c_scale;
  out[2] = p.z * c_scale;
  out[3] = 1.0f;
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  // Bias each component by +0.3 before writing -- presumably so slow boids
  // still get a visible color; s_scale is unused here (kept for symmetry
  // with the position copy).
  const glm::vec3 v = vel[index];
  float *out = vbo + 4 * index;
  out[0] = v.x + 0.3f;
  out[1] = v.y + 0.3f;
  out[2] = v.z + 0.3f;
  out[3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  // One copy kernel per VBO, then block until both finish so the GL side
  // never reads a half-written buffer.
  const int blocks = (numObjects + blockSize - 1) / blockSize;
  kernCopyPositionsToVBO<<<blocks, blockSize>>>(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO<<<blocks, blockSize>>>(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
// Brute-force velocity delta for boid `iSelf`: scans all N boids and
// applies the three classic rules (cohesion, separation, alignment).
// Returns the change to add to the boid's velocity, not the new velocity.
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel)
{
  glm::vec3 centerAccum(0.0f, 0.0f, 0.0f);  // rule 1: sum of neighbor positions
  glm::vec3 separation(0.0f, 0.0f, 0.0f);   // rule 2: keep-away displacement
  glm::vec3 velAccum(0.0f, 0.0f, 0.0f);     // rule 3: sum of neighbor velocities
  int cohesionCount = 0;
  int alignmentCount = 0;
  for (int other = 0; other < N; ++other)
  {
    if (other == iSelf)
      continue;
    const float dist = glm::distance(pos[other], pos[iSelf]);
    // Rule 1: fly toward the local perceived center of mass.
    if (dist < rule1Distance)
    {
      centerAccum += pos[other];
      ++cohesionCount;
    }
    // Rule 2: keep a minimum distance from very close boids.
    if (dist < rule2Distance)
    {
      separation -= (pos[other] - pos[iSelf]);
    }
    // Rule 3: match the velocity of nearby boids.
    if (dist < rule3Distance)
    {
      velAccum += vel[other];
      ++alignmentCount;
    }
  }
  glm::vec3 delta(0.0f, 0.0f, 0.0f);
  if (cohesionCount > 0)
    delta += (centerAccum / float(cohesionCount) - pos[iSelf]) * rule1Scale;
  delta += separation * rule2Scale;
  if (alignmentCount > 0)
    delta += (velAccum / float(alignmentCount) - vel[iSelf]) * rule3Scale;
  return delta;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(
  int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
  const int iSelf = threadIdx.x + (blockIdx.x * blockDim.x);
  if (iSelf >= N) {
    return;
  }
  // Read from vel1 and accumulate into vel2 so other threads keep seeing
  // the untouched vel1 while this frame is in flight (ping-pong buffers).
  glm::vec3 updated = vel2[iSelf] + computeVelocityChange(N, iSelf, pos, vel1);
  // Clamp the speed to maxSpeed while preserving direction.
  const float speed = glm::length(updated);
  if (speed > maxSpeed)
  {
    updated *= maxSpeed / speed;
  }
  vel2[iSelf] = updated;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  // Euler-step each boid along its velocity, then wrap around the scene.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  // Wrap each axis into [-scene_scale, scene_scale] so boids are not lost.
  p.x = p.x < -scene_scale ? scene_scale : (p.x > scene_scale ? -scene_scale : p.x);
  p.y = p.y < -scene_scale ? scene_scale : (p.y > scene_scale ? -scene_scale : p.y);
  p.z = p.z < -scene_scale ? scene_scale : (p.z > scene_scale ? -scene_scale : p.z);
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Linearize a 3D cell coordinate: x varies fastest, then y, then z.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices)
{
  // TODO-2.1
  // - Label each boid with the index of its grid cell.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  // Truncating the scaled offset from the grid minimum yields the 3D cell
  // coordinate. NOTE(review): assumes every position lies inside the grid,
  // so the offset is non-negative before the unsigned conversion -- confirm
  // against the wrap range in kernUpdatePos.
  glm::highp_uvec3 gridIdx3D = (pos[index] - gridMin) * inverseCellWidth;
  int gridIdx1D = gridIndex3Dto1D(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z, gridResolution);
  gridIndices[index] = gridIdx1D;
  // - Set up a parallel array of integer indices as pointers to the actual
  //   boid data in pos and vel1/vel2; sorting this array by cell index
  //   later gives cell-grouped boid lookups.
  indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  // Parallel fill: thread i writes slot i; out-of-range threads idle.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  // particleGridIndices must be sorted, so each cell's boids occupy one
  // contiguous run. A boid whose cell differs from its left neighbor starts
  // a run; one whose cell differs from its right neighbor ends it. One
  // thread per sorted boid entry.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  const int cell = particleGridIndices[index];
  const bool firstOfCell = (index == 0) || (particleGridIndices[index - 1] != cell);
  const bool lastOfCell = (index == N - 1) || (particleGridIndices[index + 1] != cell);
  if (firstOfCell)
  {
    gridCellStartIndices[cell] = index;
  }
  if (lastOfCell)
  {
    gridCellEndIndices[cell] = index;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
  // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
  // the number of boids that need to be checked.
  const int iSelf = threadIdx.x + (blockIdx.x * blockDim.x);
  if (iSelf >= N) {
    return;
  }
  // - Identify the grid cell that this particle is in
  glm::vec3 gridIdx3D = (pos[iSelf] - gridMin) * inverseCellWidth;
  // Truncation of the float cell coordinate gives the integer cell.
  glm::highp_uvec3 gridUIdx3D(gridIdx3D);
  // NOTE(review): boidCellIdx is computed but never read below.
  const int boidCellIdx = gridIndex3Dto1D(gridUIdx3D.x, gridUIdx3D.y, gridUIdx3D.z, gridResolution);
  // - Identify which cells may contain neighbors. This isn't always 8.
  // roundVals rounds the fractional in-cell position to 0 or 1 per axis;
  // nghbrsDir then points toward the half of the cell the boid sits in,
  // so only the 8 cells on that side need scanning (cell width is twice
  // the largest rule radius).
  glm::vec3 roundVals = glm::round( gridIdx3D - glm::vec3(gridUIdx3D) );
  glm::highp_ivec3 nghbrsDir(roundVals.x > 0.5f ? 1 : -1, roundVals.y > 0.5f ? 1 : -1, roundVals.z > 0.5f ? 1 : -1);
  // Own cell plus the 7 cells offset toward nghbrsDir (floats truncate on
  // conversion into the ivec3 entries).
  const glm::highp_ivec3 neighborCells3D[] =
  {
    glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
  };
  // - For each cell, read the start/end indices in the boid pointer array.
  for (int i = 0; i < 8; ++i)
  {
    // Skip candidate cells that fall outside the grid.
    if (neighborCells3D[i].x < 0 || neighborCells3D[i].y < 0 || neighborCells3D[i].z < 0)
      continue;
    if (neighborCells3D[i].x >= gridResolution || neighborCells3D[i].y >= gridResolution || neighborCells3D[i].z >= gridResolution)
      continue;
    const int cellIdx = gridIndex3Dto1D(neighborCells3D[i].x, neighborCells3D[i].y, neighborCells3D[i].z, gridResolution);
    // NOTE(review): assumes every cell's [start, end] pair was written this
    // frame; values left over from initialization or a previous frame are
    // scanned as-is -- confirm the host resets these buffers per frame so
    // empty cells have end < start.
    const int boidPtrStartIdx = gridCellStartIndices[cellIdx];
    const int boidPtrEndIdx = gridCellEndIndices[cellIdx];
    // - Access each boid in the cell and compute velocity change from
    //   the boids rules, if this boid is within the neighborhood distance.
    glm::vec3 sumCOM(0.0f, 0.0f, 0.0f);
    glm::vec3 sumDelta(0.0f, 0.0f, 0.0f);
    glm::vec3 sumVel(0.0f, 0.0f, 0.0f);
    int numBoidsRule[2] = { 0, 0 };
    for (int boidPtrIdx = boidPtrStartIdx; boidPtrIdx <= boidPtrEndIdx; ++boidPtrIdx)
    {
      // Extra indirection: sorted pointer -> actual boid index.
      const int boidIdx = particleArrayIndices[boidPtrIdx];
      if (boidIdx == iSelf)
        continue;
      const float distance = glm::distance(pos[boidIdx], pos[iSelf]);
      // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
      if (distance < rule1Distance)
      {
        sumCOM += pos[boidIdx];
        ++numBoidsRule[0];
      }
      // Rule 2: boids try to stay a distance d away from each other
      if (distance < rule2Distance)
      {
        sumDelta -= (pos[boidIdx] - pos[iSelf]);
      }
      // Rule 3: boids try to match the speed of surrounding boids
      if (distance < rule3Distance)
      {
        sumVel += vel1[boidIdx];
        ++numBoidsRule[1];
      }
    }
    // Fold this cell's contribution into vel2 (accumulated per cell).
    glm::vec3 totalVel(0.0f, 0.0f, 0.0f);
    if (numBoidsRule[0] > 0)
      totalVel += (sumCOM / float(numBoidsRule[0]) - pos[iSelf]) * rule1Scale;
    totalVel += sumDelta * rule2Scale;
    if (numBoidsRule[1] > 0)
      totalVel += (sumVel / float(numBoidsRule[1]) - vel1[iSelf]) * rule3Scale;;
    vel2[iSelf] += totalVel;
  }
  // - Clamp the speed change before putting the new speed in vel2
  const float newSpeed = glm::length(vel2[iSelf]);
  if (newSpeed > maxSpeed)
  {
    vel2[iSelf] *= maxSpeed / newSpeed;
  }
}
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
  // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
  // except with one less level of indirection.
  // This should expect gridCellStartIndices and gridCellEndIndices to refer
  // directly to pos and vel1.
  const int iSelf = threadIdx.x + (blockIdx.x * blockDim.x);
  if (iSelf >= N) {
    return;
  }
  // - Identify the grid cell that this particle is in
  glm::vec3 gridIdx3D = (pos[iSelf] - gridMin) * inverseCellWidth;
  // Truncation of the float cell coordinate gives the integer cell.
  glm::highp_uvec3 gridUIdx3D(gridIdx3D);
  // NOTE(review): boidCellIdx is computed but never read below.
  const int boidCellIdx = gridIndex3Dto1D(gridUIdx3D.x, gridUIdx3D.y, gridUIdx3D.z, gridResolution);
  // - Identify which cells may contain neighbors. This isn't always 8.
  // nghbrsDir points toward the half of the cell the boid sits in, so only
  // the 8 cells on that side are scanned.
  glm::vec3 roundVals = glm::round(gridIdx3D - glm::vec3(gridUIdx3D));
  glm::highp_ivec3 nghbrsDir(roundVals.x > 0.5f ? 1 : -1, roundVals.y > 0.5f ? 1 : -1, roundVals.z > 0.5f ? 1 : -1);
  // First Increment by x, then y, then z. Faster for consecutive call to gridIndex3Dto1D().
  const glm::highp_ivec3 neighborCells3D[] =
  {
    glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
    glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
  };
  // - For each cell, read the start/end indices in the boid pointer array.
  //   DIFFERENCE: For best results, consider what order the cells should be
  //   checked in to maximize the memory benefits of reordering the boids data.
  for (int i = 0; i < 8; ++i)
  {
    // Skip candidate cells that fall outside the grid.
    if (neighborCells3D[i].x < 0 || neighborCells3D[i].y < 0 || neighborCells3D[i].z < 0)
      continue;
    if (neighborCells3D[i].x >= gridResolution || neighborCells3D[i].y >= gridResolution || neighborCells3D[i].z >= gridResolution)
      continue;
    const int cellIdx = gridIndex3Dto1D(neighborCells3D[i].x, neighborCells3D[i].y, neighborCells3D[i].z, gridResolution);
    // NOTE(review): assumes every cell's [start, end] pair was written this
    // frame; stale or default entries are scanned as-is -- confirm the host
    // resets these buffers per frame so empty cells have end < start.
    const int boidStartIdx = gridCellStartIndices[cellIdx];
    const int boidEndIdx = gridCellEndIndices[cellIdx];
    // - Access each boid in the cell and compute velocity change from
    //   the boids rules, if this boid is within the neighborhood distance.
    //   Data is cell-coherent, so boidIdx indexes pos/vel1 directly.
    glm::vec3 sumCOM(0.0f, 0.0f, 0.0f);
    glm::vec3 sumDelta(0.0f, 0.0f, 0.0f);
    glm::vec3 sumVel(0.0f, 0.0f, 0.0f);
    int numBoidsRule[2] = { 0, 0 };
    for (int boidIdx = boidStartIdx; boidIdx <= boidEndIdx; ++boidIdx)
    {
      if (boidIdx == iSelf)
        continue;
      const float distance = glm::distance(pos[boidIdx], pos[iSelf]);
      // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
      if (distance < rule1Distance)
      {
        sumCOM += pos[boidIdx];
        ++numBoidsRule[0];
      }
      // Rule 2: boids try to stay a distance d away from each other
      if (distance < rule2Distance)
      {
        sumDelta -= (pos[boidIdx] - pos[iSelf]);
      }
      // Rule 3: boids try to match the speed of surrounding boids
      if (distance < rule3Distance)
      {
        sumVel += vel1[boidIdx];
        ++numBoidsRule[1];
      }
    }
    // Fold this cell's contribution into vel2 (accumulated per cell).
    glm::vec3 totalVel(0.0f, 0.0f, 0.0f);
    if (numBoidsRule[0] > 0)
      totalVel += (sumCOM / float(numBoidsRule[0]) - pos[iSelf]) * rule1Scale;
    totalVel += sumDelta * rule2Scale;
    if (numBoidsRule[1] > 0)
      totalVel += (sumVel / float(numBoidsRule[1]) - vel1[iSelf]) * rule3Scale;;
    vel2[iSelf] += totalVel;
  }
  // - Clamp the speed change before putting the new speed in vel2
  const float newSpeed = glm::length(vel2[iSelf]);
  if (newSpeed > maxSpeed)
  {
    vel2[iSelf] *= maxSpeed / newSpeed;
  }
}
// Gather pass used by the coherent grid: outArray[i] receives the element
// that sorted position i points at, making per-cell data contiguous.
__global__ void kernShuffleParticleData(
  int N, int* particleArrayIndices,
  glm::vec3 *inArray, glm::vec3 *outArray)
{
  const int iSelf = threadIdx.x + (blockIdx.x * blockDim.x);
  if (iSelf < N) {
    outArray[iSelf] = inArray[particleArrayIndices[iSelf]];
  }
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt)
{
  // Brute-force O(N^2) step: velocity pass reads vel1 and writes vel2,
  // then positions advance along the freshly written vel2.
  const dim3 grid((numObjects + blockSize - 1) / blockSize);
  kernUpdateVelocityBruteForce<<<grid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  kernUpdatePos<<<grid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong by copy (not pointer swap): the velocity kernel accumulates
  // into vel2, so both buffers must hold identical data at frame start.
  hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyKind::hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationScatteredGrid(float dt)
{
  // TODO-2.1 - Uniform grid neighbor search using Thrust sort; boid data is
  // accessed through the sorted index array (scattered).
  dim3 fullCellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
  dim3 fullBoidBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // - Label each particle with its array index as well as its grid index.
  kernComputeIndices << <fullBoidBlocksPerGrid, blockSize >> >
    (numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // - Unstable key sort using Thrust: boid indices sorted by cell index.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  // - BUG FIX: reset the per-cell ranges before rebuilding them. A cell that
  //   ends up empty this frame keeps start=0 > end=-1, so the neighbor
  //   search skips it instead of reading a stale range from a previous
  //   frame (or phantom boid 0 from the zero-initialized buffers).
  kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, 0);
  kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // - Find each cell's [start, end] run in the sorted index array.
  //   BUG FIX: this kernel runs one thread per BOID, so the launch must be
  //   sized by numObjects; the previous cell-sized launch silently dropped
  //   boids whenever numObjects exceeded that thread count.
  kernIdentifyCellStartEnd << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // - Perform velocity updates using neighbor search.
  kernUpdateVelNeighborSearchScattered << <fullBoidBlocksPerGrid, blockSize >> > (
    numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_particleArrayIndices,
    dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
  // - Update positions along the new velocities.
  kernUpdatePos << <fullBoidBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // - Ping-pong by copy: the velocity kernel accumulates into vel2, so vel1
  //   must be refreshed rather than pointer-swapped.
  hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyKind::hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt)
{
  // TODO-2.3 - Uniform grid neighbor search on cell-coherent data: boid
  // pos/vel are physically reshuffled so each cell's boids are contiguous.
  dim3 fullCellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
  dim3 fullBoidBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // - Label each particle with its array index as well as its grid index.
  kernComputeIndices << <fullBoidBlocksPerGrid, blockSize >> >
    (numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // - Unstable key sort using Thrust: boid indices sorted by cell index.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  // - BUG FIX: reset the per-cell ranges before rebuilding them so cells
  //   that are empty this frame keep start=0 > end=-1 and get skipped,
  //   instead of exposing stale ranges from a previous frame.
  kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, 0);
  kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // - Find each cell's [start, end] run. BUG FIX: one thread per BOID, so
  //   size the launch by numObjects, not by gridCellCount.
  kernIdentifyCellStartEnd << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // - BIG DIFFERENCE: reshuffle pos and vel1 into cell order. BUG FIX: the
  //   shuffle kernel also runs one thread per boid -- the original
  //   cell-sized launch left boids unshuffled whenever numObjects exceeded
  //   the cell-grid thread count.
  kernShuffleParticleData << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_orderedPos);
  hipMemcpy(dev_pos, dev_orderedPos, numObjects * sizeof(glm::vec3), hipMemcpyKind::hipMemcpyDeviceToDevice);
  kernShuffleParticleData << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_vel1, dev_vel2);
  hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyKind::hipMemcpyDeviceToDevice);
  // - Perform velocity updates using the coherent neighbor search (start/end
  //   indices now refer directly to pos and vel1).
  kernUpdateVelNeighborSearchCoherent << <fullBoidBlocksPerGrid, blockSize >> > (
    numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  // - Update positions along the new velocities.
  kernUpdatePos << <fullBoidBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // - Ping-pong by copy: the velocity kernel accumulates into vel2, so vel1
  //   must be refreshed rather than pointer-swapped.
  hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyKind::hipMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
  // Release every device allocation made in Boids::initSimulation.
  hipFree(dev_pos);
  hipFree(dev_vel1);
  hipFree(dev_vel2);
  // Grid / sorting helper buffers (TODO-2.1 / TODO-2.3).
  hipFree(dev_particleArrayIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_gridCellEndIndices);
  hipFree(dev_orderedPos);
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // test unstable sort
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  // Keys intentionally contain duplicates (four 0s, two 2s) so the sort's
  // handling of equal keys is visible in the printed output.
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  hipMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
  hipMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key -- the same pattern the
  // grid step functions use to sort boid indices by cell.
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup
  delete[] intKeys;
  delete[] intValues;
  hipFree(dev_intKeys);
  hipFree(dev_intValues);
  checkCUDAErrorWithLine("hipFree failed!");
  return;
}
| 34f746bf09dec8aa07b5c055be76e27a1a670919.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
 * Check the CUDA runtime for a pending error; print a diagnostic and
 * terminate the process if one is found.
 * @param msg  context string to include in the error report
 * @param line source line number to report, or -1 to omit it
 */
void checkCUDAError(const char *msg, int line = -1) {
  const cudaError_t status = cudaGetLastError();
  if (status == cudaSuccess) {
    return;
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************6
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_orderedPos;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
/**
 * Bit-mixing integer hash: scrambles a 32-bit key through a fixed sequence
 * of add/xor/shift rounds.  Callable from both host and device code.
 */
__host__ __device__ unsigned int hash(unsigned int a) {
  unsigned int h = a;
  h = (h + 0x7ed55d16) + (h << 12);
  h = (h ^ 0xc761c23c) ^ (h >> 19);
  h = (h + 0x165667b1) + (h << 5);
  h = (h + 0xd3a2646c) ^ (h << 9);
  h = (h + 0xfd7046c5) + (h << 3);
  h = (h ^ 0xb55a4f09) ^ (h >> 16);
  return h;
}
/**
 * LOOK-1.2 - this is a typical helper function for a CUDA kernel.
 * Produce a pseudo-random vec3 with each component drawn uniformly from
 * [-1, 1].  The engine is seeded from hash(index * time), so the result is
 * deterministic for a given (time, index) pair.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine engine(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> dist(-1, 1);
  // Kept as a single expression; each call advances the engine once.
  return glm::vec3((float)dist(engine), (float)dist(engine), (float)dist(engine));
}
/**
 * LOOK-1.2 - This is a basic CUDA kernel.
 * Kernel: fill `arr` with N pseudo-random positions, each component scaled
 * by `scale`.  Expects a 1D launch covering at least N threads.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  const glm::vec3 r = generateRandomVec3(time, index);
  arr[index] = glm::vec3(scale * r.x, scale * r.y, scale * r.z);
}
/**
 * Allocate all device buffers, scatter the boids randomly around the scene,
 * and derive the uniform-grid parameters from the rule radii.
 * Counterpart: Boids::endSimulation frees everything allocated here.
 * @param N number of boids to simulate
 */
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);

  // LOOK-1.2 - Position and ping-pong velocity buffers.
  cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
  cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
  cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");

  // Randomize the starting positions on the device.
  kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");

  // LOOK-2.1 computing grid params: the cell width is twice the largest rule
  // radius, so a boid's neighborhood spans at most 8 cells.
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;

  // Uniform-grid bookkeeping: boid->cell labels plus per-cell [start, end]
  // ranges into the sorted boid index array.  Each cudaMemset is checked so
  // an early failure is reported at the failing call, not later.
  cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
  cudaMemset(dev_particleArrayIndices, 0, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMemset dev_particleArrayIndices failed!");
  cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  cudaMemset(dev_particleGridIndices, 0, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMemset dev_particleGridIndices failed!");
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  cudaMemset(dev_gridCellStartIndices, 0, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMemset dev_gridCellStartIndices failed!");
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
  cudaMemset(dev_gridCellEndIndices, 0, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMemset dev_gridCellEndIndices failed!");
  // Scratch buffer used by the coherent grid to reorder positions (2.3).
  cudaMalloc((void**)&dev_orderedPos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_orderedPos failed!");

  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent.  Blocks until init work above has completed.
  cudaDeviceSynchronize();
  checkCUDAErrorWithLine("cudaDeviceSynchronize failed!");
}
/******************
* copyBoidsToVBO *
******************/
/**
 * Kernel: copy the boid positions into the VBO (4 floats per boid) so they
 * can be drawn by OpenGL.  Positions are rescaled by -1/s_scale.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  const int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  const float c_scale = -1.0f / s_scale;
  float *out = vbo + 4 * index;
  out[0] = pos[index].x * c_scale;
  out[1] = pos[index].y * c_scale;
  out[2] = pos[index].z * c_scale;
  out[3] = 1.0f;  // w component fixed at 1
}
/**
 * Kernel: copy the boid velocities into the VBO (4 floats per boid), offset
 * by +0.3 per component so they can be visualized as colors.
 */
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  const int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float *out = vbo + 4 * index;
  out[0] = vel[index].x + 0.3f;
  out[1] = vel[index].y + 0.3f;
  out[2] = vel[index].z + 0.3f;
  out[3] = 1.0f;  // w component fixed at 1
}
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernels.
 * Copies positions and velocities into the mapped VBO pointers, then blocks
 * until both kernels finish so OpenGL can safely consume the buffers.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);

  kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");

  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported replacement with the same blocking semantics.
  cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
 * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
 * __device__ code can be called from a __global__ context
 * Compute the new velocity on the body with index `iSelf` due to the `N` boids
 * in the `pos` and `vel` arrays.
 *
 * Brute-force: scans every other boid once, accumulates the three classic
 * flocking rules within their respective radii, and returns the summed
 * velocity *change* (not an absolute velocity).
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel)
{
    glm::vec3 sumCOM(0.0f, 0.0f, 0.0f);    // accumulated neighbor positions (rule 1)
    glm::vec3 sumDelta(0.0f, 0.0f, 0.0f);  // accumulated separation offsets (rule 2)
    glm::vec3 sumVel(0.0f, 0.0f, 0.0f);    // accumulated neighbor velocities (rule 3)
    int numBoidsRule[2] = { 0, 0 };        // neighbor counts for rules 1 and 3 (rule 2 needs no count)
    for (int boidIdx = 0; boidIdx < N; ++boidIdx)
    {
        // A boid never influences itself.
        if (boidIdx == iSelf)
            continue;
        const float distance = glm::distance(pos[boidIdx], pos[iSelf]);
        // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
        if (distance < rule1Distance)
        {
            sumCOM += pos[boidIdx];
            ++numBoidsRule[0];
        }
        // Rule 2: boids try to stay a distance d away from each other
        if (distance < rule2Distance)
        {
            sumDelta -= (pos[boidIdx] - pos[iSelf]);
        }
        // Rule 3: boids try to match the speed of surrounding boids
        if (distance < rule3Distance)
        {
            sumVel += vel[boidIdx];
            ++numBoidsRule[1];
        }
    }
    // Combine the three contributions; averages are taken only when at least
    // one neighbor participated, which avoids dividing by zero.
    glm::vec3 totalVel(0.0f, 0.0f, 0.0f);
    if (numBoidsRule[0] > 0)
        totalVel += (sumCOM / float(numBoidsRule[0]) - pos[iSelf]) * rule1Scale;
    totalVel += sumDelta * rule2Scale;
    if (numBoidsRule[1] > 0)
        totalVel += (sumVel / float(numBoidsRule[1]) - vel[iSelf]) * rule3Scale;;
    return totalVel;
}
/**
 * TODO-1.2 implement basic flocking
 * For each of the `N` bodies, update its velocity based on its current velocity.
 *
 * One thread per boid: computes the rule-based velocity change from the
 * snapshot in `vel1`, accumulates it into `vel2`, then clamps the speed.
 */
__global__ void kernUpdateVelocityBruteForce(
    int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
    const int iSelf = threadIdx.x + (blockIdx.x * blockDim.x);
    if (iSelf >= N) {
        return;
    }
    // Compute a new velocity based on pos and vel1
    glm::vec3 newVel = computeVelocityChange(N, iSelf, pos, vel1);
    // Record the new velocity into vel2. Question: why NOT vel1?
    // Writing vel1 in place would race with other threads still reading their
    // neighbors' old velocities from vel1.
    // NOTE(review): the += assumes vel2 already holds this boid's previous
    // velocity (stepSimulationNaive copies vel2 back into vel1 each step, so
    // the two buffers match at kernel entry) — confirm callers preserve this.
    vel2[iSelf] += newVel;
    // Clamp the speed
    const float newSpeed = glm::length(vel2[iSelf]);
    if (newSpeed > maxSpeed)
    {
        vel2[iSelf] *= maxSpeed / newSpeed;
    }
}
/**
 * LOOK-1.2 Since this is pretty trivial, we implemented it for you.
 * Kernel: advance each of the `N` boids by vel * dt, then wrap positions
 * into the [-scene_scale, scene_scale] cube so no boid is lost.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  const int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  // Explicit Euler step.
  glm::vec3 p = pos[index] + vel[index] * dt;
  // Wrap each axis independently: leaving through one face re-enters
  // through the opposite face.
  if (p.x < -scene_scale) { p.x = scene_scale; }
  if (p.y < -scene_scale) { p.y = scene_scale; }
  if (p.z < -scene_scale) { p.z = scene_scale; }
  if (p.x > scene_scale) { p.x = -scene_scale; }
  if (p.y > scene_scale) { p.y = -scene_scale; }
  if (p.z > scene_scale) { p.z = -scene_scale; }
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
//   for(x)
//     for(y)
//       for(z)? Or some other order?
// Flattens a 3D cell coordinate into a 1D index; x varies fastest, then y,
// then z (same arithmetic as x + y*res + z*res*res).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return (z * gridResolution + y) * gridResolution + x;
}
/**
 * Kernel: label each boid with the 1D index of the uniform-grid cell that
 * contains it, and record its own array index in a parallel array so the two
 * can be sorted together as a key/value pair.
 */
__global__ void kernComputeIndices(int N, int gridResolution,
    glm::vec3 gridMin, float inverseCellWidth,
    glm::vec3 *pos, int *indices, int *gridIndices)
{
    // TODO-2.1
    // - Label each boid with the index of its grid cell.
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= N) {
        return;
    }
    // Shift by gridMin so coordinates become non-negative, scale into cell
    // units, and truncate to integer cell coordinates via the unsigned-vector
    // conversion.
    glm::highp_uvec3 gridIdx3D = (pos[index] - gridMin) * inverseCellWidth;
    int gridIdx1D = gridIndex3Dto1D(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z, gridResolution);
    gridIndices[index] = gridIdx1D;
    // - Set up a parallel array of integer indices as pointers to the actual
    // boid data in pos and vel1/vel2
    indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids.
// Kernel: fill an N-element int buffer with `value`.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
/**
 * Kernel: from the cell-sorted boid labels, record each cell's INCLUSIVE
 * [start, end] range of entries in the sorted index array.  One thread per
 * sorted entry; cells with no boids this pass are left untouched.
 */
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
    int *gridCellStartIndices, int *gridCellEndIndices) {
    // TODO-2.1
    // Identify the start point of each cell in the gridIndices array.
    // This is basically a parallel unrolling of a loop that goes
    // "this index doesn't match the one before it, must be a new cell!"
    // Precondition: particleGridIndices is sorted, so each cell's boids
    // occupy one contiguous run.
    //ASSERT(particleGridIndices.isSorted());
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= N) {
        return;
    }
    const int curCellIdx = particleGridIndices[index];
    // First entry of a run (or of the array) starts its cell's range.
    if (index == 0 || particleGridIndices[index - 1] != curCellIdx)
    {
        gridCellStartIndices[curCellIdx] = index;
    }
    // Last entry of a run (or of the array) ends its cell's range.
    if (index == N - 1 || particleGridIndices[index + 1] != curCellIdx)
    {
        gridCellEndIndices[curCellIdx] = index;
    }
}
/**
 * Kernel: grid-accelerated velocity update (scattered layout).  Each boid
 * examines only the boids listed in up to 8 nearby grid cells, reaching the
 * actual boid data through `particleArrayIndices` (one level of indirection).
 * The rule math matches computeVelocityChange.
 */
__global__ void kernUpdateVelNeighborSearchScattered(
    int N, int gridResolution, glm::vec3 gridMin,
    float inverseCellWidth, float cellWidth,
    int *gridCellStartIndices, int *gridCellEndIndices,
    int *particleArrayIndices,
    glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
    // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
    // the number of boids that need to be checked.
    const int iSelf = threadIdx.x + (blockIdx.x * blockDim.x);
    if (iSelf >= N) {
        return;
    }
    // - Identify the grid cell that this particle is in
    glm::vec3 gridIdx3D = (pos[iSelf] - gridMin) * inverseCellWidth;
    glm::highp_uvec3 gridUIdx3D(gridIdx3D);
    const int boidCellIdx = gridIndex3Dto1D(gridUIdx3D.x, gridUIdx3D.y, gridUIdx3D.z, gridResolution);
    // - Identify which cells may contain neighbors. This isn't always 8.
    // The fractional position within the cell picks, per axis, whether the
    // previous (-1) or next (+1) cell is the candidate neighbor.
    glm::vec3 roundVals = glm::round( gridIdx3D - glm::vec3(gridUIdx3D) );
    glm::highp_ivec3 nghbrsDir(roundVals.x > 0.5f ? 1 : -1, roundVals.y > 0.5f ? 1 : -1, roundVals.z > 0.5f ? 1 : -1);
    // The boid's own cell plus the 7 cells offset along the chosen directions.
    const glm::highp_ivec3 neighborCells3D[] =
    {
        glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    };
    // - For each cell, read the start/end indices in the boid pointer array.
    for (int i = 0; i < 8; ++i)
    {
        // Skip candidate cells that fall outside the grid.
        if (neighborCells3D[i].x < 0 || neighborCells3D[i].y < 0 || neighborCells3D[i].z < 0)
            continue;
        if (neighborCells3D[i].x >= gridResolution || neighborCells3D[i].y >= gridResolution || neighborCells3D[i].z >= gridResolution)
            continue;
        const int cellIdx = gridIndex3Dto1D(neighborCells3D[i].x, neighborCells3D[i].y, neighborCells3D[i].z, gridResolution);
        const int boidPtrStartIdx = gridCellStartIndices[cellIdx];
        const int boidPtrEndIdx = gridCellEndIndices[cellIdx];
        // - Access each boid in the cell and compute velocity change from
        // the boids rules, if this boid is within the neighborhood distance.
        // Rule accumulators are per-cell; the combined contribution of each
        // cell is added to vel2 below.
        glm::vec3 sumCOM(0.0f, 0.0f, 0.0f);
        glm::vec3 sumDelta(0.0f, 0.0f, 0.0f);
        glm::vec3 sumVel(0.0f, 0.0f, 0.0f);
        int numBoidsRule[2] = { 0, 0 };
        // NOTE(review): this inclusive loop assumes the cell's start/end
        // entries are valid for the current frame — confirm the host resets
        // or rewrites them every step.
        for (int boidPtrIdx = boidPtrStartIdx; boidPtrIdx <= boidPtrEndIdx; ++boidPtrIdx)
        {
            const int boidIdx = particleArrayIndices[boidPtrIdx];
            if (boidIdx == iSelf)
                continue;
            const float distance = glm::distance(pos[boidIdx], pos[iSelf]);
            // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
            if (distance < rule1Distance)
            {
                sumCOM += pos[boidIdx];
                ++numBoidsRule[0];
            }
            // Rule 2: boids try to stay a distance d away from each other
            if (distance < rule2Distance)
            {
                sumDelta -= (pos[boidIdx] - pos[iSelf]);
            }
            // Rule 3: boids try to match the speed of surrounding boids
            if (distance < rule3Distance)
            {
                sumVel += vel1[boidIdx];
                ++numBoidsRule[1];
            }
        }
        glm::vec3 totalVel(0.0f, 0.0f, 0.0f);
        if (numBoidsRule[0] > 0)
            totalVel += (sumCOM / float(numBoidsRule[0]) - pos[iSelf]) * rule1Scale;
        totalVel += sumDelta * rule2Scale;
        if (numBoidsRule[1] > 0)
            totalVel += (sumVel / float(numBoidsRule[1]) - vel1[iSelf]) * rule3Scale;;
        vel2[iSelf] += totalVel;
    }
    // - Clamp the speed change before putting the new speed in vel2
    const float newSpeed = glm::length(vel2[iSelf]);
    if (newSpeed > maxSpeed)
    {
        vel2[iSelf] *= maxSpeed / newSpeed;
    }
}
/**
 * Kernel: grid-accelerated velocity update (coherent layout).  Same search
 * as the scattered version but without the index indirection: the per-cell
 * [start, end] ranges index `pos`/`vel1` directly, so the caller must have
 * reshuffled those buffers into cell order first.
 */
__global__ void kernUpdateVelNeighborSearchCoherent(
    int N, int gridResolution, glm::vec3 gridMin,
    float inverseCellWidth, float cellWidth,
    int *gridCellStartIndices, int *gridCellEndIndices,
    glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
    // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
    // except with one less level of indirection.
    // This should expect gridCellStartIndices and gridCellEndIndices to refer
    // directly to pos and vel1.
    const int iSelf = threadIdx.x + (blockIdx.x * blockDim.x);
    if (iSelf >= N) {
        return;
    }
    // - Identify the grid cell that this particle is in
    glm::vec3 gridIdx3D = (pos[iSelf] - gridMin) * inverseCellWidth;
    glm::highp_uvec3 gridUIdx3D(gridIdx3D);
    const int boidCellIdx = gridIndex3Dto1D(gridUIdx3D.x, gridUIdx3D.y, gridUIdx3D.z, gridResolution);
    // - Identify which cells may contain neighbors. This isn't always 8.
    // Per axis, the fractional in-cell position picks the previous (-1) or
    // next (+1) cell as the candidate neighbor.
    glm::vec3 roundVals = glm::round(gridIdx3D - glm::vec3(gridUIdx3D));
    glm::highp_ivec3 nghbrsDir(roundVals.x > 0.5f ? 1 : -1, roundVals.y > 0.5f ? 1 : -1, roundVals.z > 0.5f ? 1 : -1);
    // First Increment by x, then y, then z. Faster for consecutive call to gridIndex3Dto1D().
    const glm::highp_ivec3 neighborCells3D[] =
    {
        glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, gridIdx3D.z),
        glm::highp_ivec3(gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
        glm::highp_ivec3(gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
        glm::highp_ivec3(nghbrsDir.x + gridIdx3D.x, nghbrsDir.y + gridIdx3D.y, nghbrsDir.z + gridIdx3D.z),
    };
    // - For each cell, read the start/end indices in the boid pointer array.
    //  DIFFERENCE: For best results, consider what order the cells should be
    //  checked in to maximize the memory benefits of reordering the boids data.
    for (int i = 0; i < 8; ++i)
    {
        // Skip candidate cells that fall outside the grid.
        if (neighborCells3D[i].x < 0 || neighborCells3D[i].y < 0 || neighborCells3D[i].z < 0)
            continue;
        if (neighborCells3D[i].x >= gridResolution || neighborCells3D[i].y >= gridResolution || neighborCells3D[i].z >= gridResolution)
            continue;
        const int cellIdx = gridIndex3Dto1D(neighborCells3D[i].x, neighborCells3D[i].y, neighborCells3D[i].z, gridResolution);
        const int boidStartIdx = gridCellStartIndices[cellIdx];
        const int boidEndIdx = gridCellEndIndices[cellIdx];
        // - Access each boid in the cell and compute velocity change from
        // the boids rules, if this boid is within the neighborhood distance.
        // Accumulators are per-cell; each cell's contribution is added to
        // vel2 at the end of the iteration.
        glm::vec3 sumCOM(0.0f, 0.0f, 0.0f);
        glm::vec3 sumDelta(0.0f, 0.0f, 0.0f);
        glm::vec3 sumVel(0.0f, 0.0f, 0.0f);
        int numBoidsRule[2] = { 0, 0 };
        // NOTE(review): the inclusive range assumes start/end were rewritten
        // for the current frame — confirm the host resets stale cell entries.
        for (int boidIdx = boidStartIdx; boidIdx <= boidEndIdx; ++boidIdx)
        {
            if (boidIdx == iSelf)
                continue;
            const float distance = glm::distance(pos[boidIdx], pos[iSelf]);
            // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
            if (distance < rule1Distance)
            {
                sumCOM += pos[boidIdx];
                ++numBoidsRule[0];
            }
            // Rule 2: boids try to stay a distance d away from each other
            if (distance < rule2Distance)
            {
                sumDelta -= (pos[boidIdx] - pos[iSelf]);
            }
            // Rule 3: boids try to match the speed of surrounding boids
            if (distance < rule3Distance)
            {
                sumVel += vel1[boidIdx];
                ++numBoidsRule[1];
            }
        }
        glm::vec3 totalVel(0.0f, 0.0f, 0.0f);
        if (numBoidsRule[0] > 0)
            totalVel += (sumCOM / float(numBoidsRule[0]) - pos[iSelf]) * rule1Scale;
        totalVel += sumDelta * rule2Scale;
        if (numBoidsRule[1] > 0)
            totalVel += (sumVel / float(numBoidsRule[1]) - vel1[iSelf]) * rule3Scale;;
        vel2[iSelf] += totalVel;
    }
    // - Clamp the speed change before putting the new speed in vel2
    const float newSpeed = glm::length(vel2[iSelf]);
    if (newSpeed > maxSpeed)
    {
        vel2[iSelf] *= maxSpeed / newSpeed;
    }
}
/**
 * Kernel: gather `inArray` into `outArray` through the sorted boid index
 * map, so entry i of the output is the data of the i-th cell-sorted boid.
 * Expects a 1D launch covering at least N threads.
 */
__global__ void kernShuffleParticleData(
    int N, int* particleArrayIndices,
    glm::vec3 *inArray, glm::vec3 *outArray)
{
    const int dst = threadIdx.x + (blockIdx.x * blockDim.x);
    if (dst >= N) {
        return;
    }
    outArray[dst] = inArray[particleArrayIndices[dst]];
}
/**
 * Step the entire N-body simulation by `dt` seconds.
 * Naive O(N^2) step: brute-force velocity update, position integration, then
 * a copy of vel2 back into vel1 for the next step.
 */
void Boids::stepSimulationNaive(float dt)
{
    // TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
    dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
    kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2);
    checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
    // Positions advance using the freshly computed velocities in vel2.
    kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
    checkCUDAErrorWithLine("kernUpdatePos failed!");
    // TODO-1.2 ping-pong the velocity buffers
    // An actual copy (not a pointer swap) is required here: the velocity
    // kernel accumulates with vel2 +=, so vel2 must equal vel1 when the next
    // step begins.
    cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyKind::cudaMemcpyDeviceToDevice);
}
/**
 * Step the simulation by `dt` seconds using the scattered uniform grid:
 * boids are bucketed into cells, the cell ranges are rebuilt, and the
 * velocity update only inspects boids in nearby cells.
 * @param dt timestep in seconds
 */
void Boids::stepSimulationScatteredGrid(float dt)
{
    dim3 fullCellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
    dim3 fullBoidBlocksPerGrid((numObjects + blockSize - 1) / blockSize);

    // - Label each boid with its grid-cell index and its own array index.
    kernComputeIndices << <fullBoidBlocksPerGrid, blockSize >> >
        (numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth,
        dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
    checkCUDAErrorWithLine("kernComputeIndices failed!");

    // - Unstable key sort using Thrust: group the boid indices by cell.
    thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
    thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
    thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);

    // BUGFIX: reset the per-cell ranges every frame.  Cells that are empty
    // this frame would otherwise keep stale start/end values from an earlier
    // frame and the search kernel would read the wrong boids.  start = 0 with
    // end = -1 makes the kernel's inclusive per-cell loop a no-op.
    kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellStartIndices, 0);
    kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");

    // - Find each cell's [start, end] range in the sorted index array.
    // BUGFIX: this kernel walks numObjects entries, so it must be launched
    // with the boid-sized grid, not the cell-sized one (if numObjects exceeds
    // gridCellCount, part of the array would never be processed).
    kernIdentifyCellStartEnd << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
    checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");

    // - Velocity update via neighbor search over at most 8 cells.
    kernUpdateVelNeighborSearchScattered << <fullBoidBlocksPerGrid, blockSize >> > (
        numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth, gridCellWidth,
        dev_gridCellStartIndices, dev_gridCellEndIndices,
        dev_particleArrayIndices,
        dev_pos, dev_vel1, dev_vel2);
    checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");

    // - Integrate positions with the new velocities.
    kernUpdatePos << <fullBoidBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
    checkCUDAErrorWithLine("kernUpdatePos failed!");

    // - Ping-pong: the velocity kernel accumulates (+=) into vel2, so vel1
    // must be a fresh copy of vel2 before the next step.
    cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyKind::cudaMemcpyDeviceToDevice);
}
/**
 * Step the simulation by `dt` seconds using the coherent uniform grid: like
 * the scattered version, but pos/vel1 are reshuffled into cell order each
 * frame so neighbor reads are contiguous (no index indirection).
 * @param dt timestep in seconds
 */
void Boids::stepSimulationCoherentGrid(float dt)
{
    dim3 fullCellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
    dim3 fullBoidBlocksPerGrid((numObjects + blockSize - 1) / blockSize);

    // - Label each boid with its grid-cell index and its own array index.
    kernComputeIndices << <fullBoidBlocksPerGrid, blockSize >> >
        (numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth,
        dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
    checkCUDAErrorWithLine("kernComputeIndices failed!");

    // - Unstable key sort using Thrust: group the boid indices by cell.
    thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
    thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
    thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);

    // BUGFIX: clear the per-cell ranges every frame; otherwise cells that are
    // empty this frame keep stale start/end values from an earlier frame.
    // start = 0 with end = -1 makes the inclusive per-cell loop a no-op.
    kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellStartIndices, 0);
    kernResetIntBuffer << <fullCellBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");

    // - Find each cell's [start, end] range in the sorted index array.
    // BUGFIX: this kernel walks numObjects entries, so it needs the
    // boid-sized launch, not the cell-sized one.
    kernIdentifyCellStartEnd << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
    checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");

    // - BIG DIFFERENCE: reshuffle pos and vel1 into cell order so the search
    // kernel can index them directly.
    // BUGFIX: both shuffles process numObjects elements, so they must also
    // use the boid-sized launch configuration.
    kernShuffleParticleData << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_orderedPos);
    checkCUDAErrorWithLine("kernShuffleParticleData (pos) failed!");
    cudaMemcpy(dev_pos, dev_orderedPos, numObjects * sizeof(glm::vec3), cudaMemcpyKind::cudaMemcpyDeviceToDevice);
    kernShuffleParticleData << <fullBoidBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_vel1, dev_vel2);
    checkCUDAErrorWithLine("kernShuffleParticleData (vel) failed!");
    cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyKind::cudaMemcpyDeviceToDevice);

    // - Velocity update; start/end indices now refer directly to pos/vel1.
    kernUpdateVelNeighborSearchCoherent << <fullBoidBlocksPerGrid, blockSize >> > (
        numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth, gridCellWidth,
        dev_gridCellStartIndices, dev_gridCellEndIndices,
        dev_pos, dev_vel1, dev_vel2);
    checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");

    // - Integrate positions with the new velocities.
    kernUpdatePos << <fullBoidBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
    checkCUDAErrorWithLine("kernUpdatePos failed!");

    // - Ping-pong: the velocity kernel accumulates (+=) into vel2, so vel1
    // must be a fresh copy of vel2 before the next step.
    cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyKind::cudaMemcpyDeviceToDevice);
}
/**
 * Free every device buffer allocated in Boids::initSimulation.
 */
void Boids::endSimulation() {
  // Core boid state.
  cudaFree(dev_vel1);
  cudaFree(dev_vel2);
  cudaFree(dev_pos);
  // Uniform-grid bookkeeping (2.1) and coherent-grid scratch (2.3).
  cudaFree(dev_particleArrayIndices);
  cudaFree(dev_particleGridIndices);
  cudaFree(dev_gridCellStartIndices);
  cudaFree(dev_gridCellEndIndices);
  cudaFree(dev_orderedPos);
  // Surface any failure, matching the error-checking style used elsewhere
  // in this file (e.g. Boids::unitTest checks its cudaFree calls).
  checkCUDAErrorWithLine("cudaFree failed!");
}
/**
 * LOOK-1.2 Feel free to write additional tests here.
 * Smoke test: round-trips a small key/value set through the GPU and sorts it
 * with thrust::sort_by_key, printing the arrays before and after.
 */
void Boids::unitTest() {
  // Table-driven fixture for the unstable sort test.
  const int N = 10;
  const int initialKeys[N]   = { 0, 1, 0, 3, 0, 2, 2, 0, 5, 6 };
  const int initialValues[N] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };

  int *intKeys = new int[N];
  int *intValues = new int[N];
  for (int i = 0; i < N; i++) {
    intKeys[i] = initialKeys[i];
    intValues[i] = initialValues[i];
  }

  int *dev_intKeys;
  int *dev_intValues;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");

  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);

  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }

  // How to copy data to the GPU
  cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);

  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);

  // How to copy data back to the CPU side from the GPU
  cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");

  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }

  // cleanup
  delete[] intKeys;
  delete[] intValues;
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
}
|
21f1398150acc602cd08ac2d875a6c667624265c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "belief-propagation-kernels.hpp"
__constant__ struct joint_probability edge_joint_probability[1];
/**
 * Sets up the current buffer by copying each node's belief entries into the
 * per-node message buffer.
 * Uses 2D grid-stride loops: x threads cover nodes, y threads cover the
 * belief entries of each node.
 * @param message_buffer The message buffer to init
 * @param node_states The states to copy from
 * @param num_variables The number of entries per belief
 * @param num_nodes The number of nodes in the graph
 */
__global__
void init_message_buffer_kernel(struct belief * __restrict__ message_buffer,
                                const struct belief * __restrict__ node_states, const size_t num_variables,
                                const size_t num_nodes){
    const size_t node_stride = blockDim.x * gridDim.x;
    const size_t state_stride = blockDim.y * gridDim.y;
    for(size_t node = blockIdx.x*blockDim.x + threadIdx.x; node < num_nodes; node += node_stride){
        for(size_t state = blockIdx.y*blockDim.y + threadIdx.y; state < num_variables; state += state_stride){
            message_buffer[node].data[state] = node_states[node].data[state];
        }
    }
}
/**
 * Combines the incoming messages with the given belief
 * (elementwise product: dest[node].data[i] *= edge_messages[edge].data[i],
 * one entry per thread, indexed by threadIdx.z; non-finite products are
 * flushed to 0).
 * @param dest The belief to update
 * @param edge_messages The buffered messages on the edge
 * @param num_vertices The number of nodes in the graph
 * @param node_index The index of the destination node
 * @param edge_offset The index offset for the edge
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for adjusting shared memory (unused in this body)
 * @param warp_size The warp size of the GPU (unused in this body)
 */
__device__
void combine_message_cuda(struct belief * __restrict__ dest, const struct belief * __restrict__ edge_messages, const size_t num_vertices, size_t node_index,
                          const size_t edge_offset, const size_t num_edges, char n_is_pow_2, size_t warp_size){
    // NOTE(review): each thread writes and reads only its own slot of these
    // shared arrays and there is no __syncthreads(); the shared staging
    // appears to add nothing over plain registers — confirm intent.
    __shared__ float shared_dest[BLOCK_SIZE_3_D_Z];
    __shared__ float shared_src[BLOCK_SIZE_3_D_Z];
    size_t index = threadIdx.z;
    if(index < num_vertices && edge_offset < num_edges){
        shared_dest[index] = dest[node_index].data[index];
        shared_src[index] = edge_messages[edge_offset].data[index];
        float value = shared_dest[index] * shared_src[index];
        // Flush NaN/Inf products to zero so they cannot poison later steps.
        if(isnan(value) || isinf(value)) {
            dest[node_index].data[index] = 0.0f;
        }
        else {
            dest[node_index].data[index] = value;
        }
    }
}
/**
 * Combines the incoming messages with the given PageRank
 * (elementwise sum: dest[node].data[i] += edge_messages[edge].data[i], one
 * entry per thread, indexed by threadIdx.z; non-finite sums flushed to 0).
 * @param dest The belief to update
 * @param edge_messages The buffered messages on the edge
 * @param num_vertices The number of nodes in the graph
 * @param node_index The index of the destination node
 * @param edge_offset The index offset for the edge
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for adjusting shared memory (unused in this body)
 * @param warp_size The warp size of the GPU (unused in this body)
 */
__device__
void combine_page_rank_message_cuda(struct belief * __restrict__ dest, const struct belief * __restrict__ edge_messages, const size_t num_vertices, const size_t node_index,
                                    const size_t edge_offset, const size_t num_edges, char n_is_pow_2, size_t warp_size){
    // NOTE(review): same single-thread-per-slot shared staging as
    // combine_message_cuda, with no __syncthreads() — confirm intent.
    __shared__ float shared_dest[BLOCK_SIZE_3_D_Z];
    __shared__ float shared_src[BLOCK_SIZE_3_D_Z];
    size_t index = threadIdx.z;
    if(index < num_vertices && edge_offset < num_edges){
        shared_dest[index] = dest[node_index].data[index];
        shared_src[index] = edge_messages[edge_offset].data[index];
        float value = shared_dest[index] + shared_src[index];
        // Flush NaN/Inf sums to zero so they cannot poison later steps.
        if(isnan(value) || isinf(value)) {
            dest[node_index].data[index] = 0.0f;
        }
        else {
            dest[node_index].data[index] = value;
        }
    }
}
/**
 * Computes the argmax of the incoming messages with the given belief
 * (elementwise max via fmaxf: dest[node].data[i] =
 * max(dest[node].data[i], edge_messages[edge].data[i]), one entry per
 * thread, indexed by threadIdx.z; non-finite results flushed to 0).
 * @param dest The belief to update
 * @param edge_messages The buffered messages on the edge
 * @param num_vertices The number of nodes in the graph
 * @param node_index The index of the destination node
 * @param edge_offset The index offset for the edge
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for adjusting shared memory (unused in this body)
 * @param warp_size The warp size of the GPU (unused in this body)
 */
__device__
void combine_viterbi_message_cuda(struct belief * __restrict__ dest, const struct belief * __restrict__ edge_messages, const size_t num_vertices, const size_t node_index,
                                  const size_t edge_offset, const size_t num_edges, char n_is_pow_2, size_t warp_size){
    // NOTE(review): same single-thread-per-slot shared staging as
    // combine_message_cuda, with no __syncthreads() — confirm intent.
    __shared__ float shared_dest[BLOCK_SIZE_3_D_Z];
    __shared__ float shared_src[BLOCK_SIZE_3_D_Z];
    size_t index = threadIdx.z;
    if(index < num_vertices && edge_offset < num_edges){
        shared_dest[index] = dest[node_index].data[index];
        shared_src[index] = edge_messages[edge_offset].data[index];
        float value = fmaxf(shared_dest[index], shared_src[index]);
        // Flush NaN/Inf results to zero so they cannot poison later steps.
        if(isinf(value) || isnan(value)) {
            dest[node_index].data[index] = 0.0f;
        }
        else {
            dest[node_index].data[index] = value;
        }
    }
}
/**
 * Reads the incoming messages and buffers them on the edge.
 *
 * Grid layout: x dimension grid-strides over nodes, y dimension enumerates a
 * node's incoming edges (one thread per candidate edge slot).
 *
 * @param message_buffer The message buffer of the edge
 * @param num_variables The number of states per belief
 * @param previous_messages The previous messages sent
 * @param dest_node_to_edges_nodes Parallel array; maps each node to its first index in dest_node_to_edges_edges
 * @param dest_node_to_edges_edges Parallel array; maps those indices to edge ids
 * @param current_num_edges The number of edges in the graph
 * @param num_vertices The number of vertices in the graph
 * @param n_is_pow_2 Flag for determining how to adjust shared memory
 * @param warp_size The warp size of the GPU
 */
__global__
void read_incoming_messages_kernel(struct belief * __restrict__ message_buffer,
                                   const size_t num_variables,
                                   const struct belief * __restrict__ previous_messages,
                                   const size_t * __restrict__ dest_node_to_edges_nodes,
                                   const size_t * __restrict__ dest_node_to_edges_edges,
                                   const size_t current_num_edges,
                                   const size_t num_vertices,
                                   char n_is_pow_2, const size_t warp_size){
    const size_t local_edge = blockIdx.y * blockDim.y + threadIdx.y;
    const size_t node_stride = blockDim.x * gridDim.x;
    for(size_t node = blockIdx.x * blockDim.x + threadIdx.x; node < num_vertices; node += node_stride) {
        const size_t first = dest_node_to_edges_nodes[node];
        // the last node's edge range is terminated by the total edge count
        const size_t last = (node + 1 >= num_vertices) ? current_num_edges
                                                       : dest_node_to_edges_nodes[node + 1];
        if (local_edge < last - first) {
            const size_t resolved_edge = dest_node_to_edges_edges[first + local_edge];
            combine_message_cuda(message_buffer, previous_messages, num_variables, node,
                                 resolved_edge, current_num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
 * Combines the belief with the joint probability table and writes the
 * normalized result to the edge's outbound buffer.
 *
 * Also rotates the edge's normalization bookkeeping: the prior value of
 * edges_messages_current is saved into edge_messages_previous before being
 * replaced with the new sum (these feed the convergence delta).
 *
 * @param message_buffer The array of incoming beliefs
 * @param edge_index The index of the edge on which the belief is sent
 * @param node_index The index of the source node
 * @param num_src Number of rows in the joint probability table
 * @param num_dest Number of columns in the joint probability table
 * @param edge_messages The outbound buffer
 * @param edge_messages_previous Per-edge previous normalization sums
 * @param edges_messages_current Per-edge current normalization sums
 */
__device__
void send_message_for_edge_cuda(const struct belief * __restrict__ message_buffer,
                                const size_t edge_index, const size_t node_index,
                                const size_t num_src, const size_t num_dest,
                                struct belief * __restrict__ edge_messages,
                                float * __restrict__ edge_messages_previous,
                                float * __restrict__ edges_messages_current){
    size_t i, j;
    // BUG FIX: the previous implementation accumulated through __shared__
    // arrays indexed by threadIdx.x only; in the 2-D blocks this is launched
    // from, threads with equal threadIdx.x but different threadIdx.y raced on
    // those slots. The accumulation is strictly per-thread, so registers are
    // correct (and avoid the shared-memory traffic entirely).
    float sum = 0.0f;
    for(i = 0; i < num_src; ++i){
        float partial = 0.0f;
        for(j = 0; j < num_dest; ++j){
            partial += edge_joint_probability->data[i][j] * message_buffer[node_index].data[j];
        }
        edge_messages[edge_index].data[i] = partial;
        sum += partial;
    }
    // avoid division by zero below; an all-zero message is left unscaled
    if(sum <= 0.0f){
        sum = 1.0f;
    }
    edge_messages_previous[edge_index] = edges_messages_current[edge_index];
    edges_messages_current[edge_index] = sum;
    for(i = 0; i < num_src; ++i){
        edge_messages[edge_index].data[i] /= sum;
    }
}
/**
 * Sends the messages for all nodes in the graph.
 *
 * Grid layout: x dimension grid-strides over source nodes, y dimension
 * enumerates each node's outgoing edge slots.
 *
 * @param message_buffer The incoming beliefs
 * @param current_num_edges The number of edges in the graph
 * @param joint_probabilities_dim_x Rows of the joint probability table
 * @param joint_probabilities_dim_y Columns of the joint probability table
 * @param current_edge_messages The outbound message buffers
 * @param current_edge_message_previous Per-edge previous normalization sums
 * @param current_edge_message_current Per-edge current normalization sums
 * @param src_node_to_edges_nodes Parallel array; maps each node to its first index in src_node_to_edges_edges
 * @param src_node_to_edges_edges Parallel array; maps those indices to edge ids
 * @param num_vertices The number of vertices (nodes) in the graph
 */
__global__
void send_message_for_node_kernel(const struct belief * __restrict__ message_buffer, const size_t current_num_edges,
                                  const size_t joint_probabilities_dim_x,
                                  const size_t joint_probabilities_dim_y,
                                  struct belief * __restrict__ current_edge_messages,
                                  float * __restrict__ current_edge_message_previous,
                                  float * __restrict__ current_edge_message_current,
                                  const size_t * __restrict__ src_node_to_edges_nodes,
                                  const size_t * __restrict__ src_node_to_edges_edges,
                                  const size_t num_vertices){
    size_t node_index, edge_index, tmp_edge_index, start_index, end_index, diff_index;
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;
    for(node_index = blockIdx.x*blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x){
        start_index = src_node_to_edges_nodes[node_index];
        if(node_index + 1 >= num_vertices){
            end_index = current_num_edges;
        }
        else{
            end_index = src_node_to_edges_nodes[node_index + 1];
        }
        diff_index = end_index - start_index;
        if (edge_index < diff_index) {
            // BUG FIX: the original overwrote edge_index with the resolved
            // edge id here, corrupting the thread's per-node edge offset for
            // every later iteration of the grid-stride loop over nodes.
            // Resolve into a temporary instead (matching
            // read_incoming_messages_kernel).
            tmp_edge_index = src_node_to_edges_edges[edge_index + start_index];
            send_message_for_edge_cuda(message_buffer, tmp_edge_index, node_index,
                                       joint_probabilities_dim_x, joint_probabilities_dim_y, current_edge_messages,
                                       current_edge_message_previous, current_edge_message_current);
        }
    }
}
/**
 * Combines each node's incoming edge messages into its scratch belief.
 *
 * The scratch belief is first reset to the multiplicative identity (1.0f),
 * then combine_message_cuda folds in one buffered message per y-thread.
 *
 * @param message_buffer The scratch beliefs to build
 * @param node_states The current states of the nodes (unused here)
 * @param num_variables The number of states per belief
 * @param current_edges_messages The current buffered messages on the graph
 * @param dest_node_to_edges_nodes Parallel array; maps each node to its first index in dest_node_to_edges_edges
 * @param dest_node_to_edges_edges Parallel array; maps those indices to edge ids
 * @param num_vertices The number of vertices (nodes) in the graph
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for determining if padding needed for shared memory
 * @param warp_size The size of the warp of the GPU
 */
__global__
void marginalize_node_combine_kernel(struct belief * __restrict__ message_buffer,
                                     const struct belief * __restrict__ node_states, const size_t num_variables,
                                     const struct belief * __restrict__ current_edges_messages,
                                     const size_t * __restrict__ dest_node_to_edges_nodes,
                                     const size_t * __restrict__ dest_node_to_edges_edges,
                                     const size_t num_vertices,
                                     const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    const size_t local_edge = blockIdx.y * blockDim.y + threadIdx.y;
    const size_t node_stride = blockDim.x * gridDim.x;
    for(size_t node = blockIdx.x * blockDim.x + threadIdx.x; node < num_vertices; node += node_stride) {
        if(local_edge < num_variables){
            message_buffer[node].data[local_edge] = 1.0f;
        }
        const size_t first = dest_node_to_edges_nodes[node];
        const size_t last = (node + 1 >= num_vertices) ? num_edges
                                                       : dest_node_to_edges_nodes[node + 1];
        if(local_edge < last - first){
            const size_t resolved_edge = dest_node_to_edges_edges[first + local_edge];
            combine_message_cuda(message_buffer, current_edges_messages, num_variables, node, resolved_edge, num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
 * Accumulates each node's incoming edge messages into its scratch belief
 * for PageRank.
 *
 * The scratch belief is first reset to the additive identity (0.0f), then
 * combine_page_rank_message_cuda adds in one buffered message per y-thread.
 *
 * @param message_buffer The scratch beliefs to build
 * @param node_states The current states of the nodes (unused here)
 * @param num_variables The number of states per belief
 * @param current_edges_messages The current buffered messages on the graph
 * @param dest_node_to_edges_nodes Parallel array; maps each node to its first index in dest_node_to_edges_edges
 * @param dest_node_to_edges_edges Parallel array; maps those indices to edge ids
 * @param num_vertices The number of vertices (nodes) in the graph
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for determining if padding needed for shared memory
 * @param warp_size The size of the warp of the GPU
 */
__global__
void marginalize_page_rank_node_combine_kernel(struct belief * __restrict__ message_buffer,
                                               const struct belief * __restrict__ node_states, const size_t num_variables,
                                               const struct belief * __restrict__ current_edges_messages,
                                               const size_t * __restrict__ dest_node_to_edges_nodes,
                                               const size_t * __restrict__ dest_node_to_edges_edges,
                                               const size_t num_vertices,
                                               const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    const size_t local_edge = blockIdx.y * blockDim.y + threadIdx.y;
    const size_t node_stride = blockDim.x * gridDim.x;
    for(size_t node = blockIdx.x * blockDim.x + threadIdx.x; node < num_vertices; node += node_stride) {
        if(local_edge < num_variables){
            message_buffer[node].data[local_edge] = 0.0f;
        }
        const size_t first = dest_node_to_edges_nodes[node];
        const size_t last = (node + 1 >= num_vertices) ? num_edges
                                                       : dest_node_to_edges_nodes[node + 1];
        if(local_edge < last - first){
            const size_t resolved_edge = dest_node_to_edges_edges[first + local_edge];
            combine_page_rank_message_cuda(message_buffer, current_edges_messages, num_variables, node, resolved_edge, num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
 * Folds each node's incoming edge messages into its scratch belief using the
 * element-wise maximum (Viterbi / max-product combine).
 *
 * The scratch belief is first reset to 1.0f, then combine_viterbi_message_cuda
 * applies one buffered message per y-thread.
 *
 * @param message_buffer The scratch beliefs to build
 * @param node_states The current states of the nodes (unused here)
 * @param num_variables The number of states per belief
 * @param current_edges_messages The current buffered messages on the graph
 * @param dest_node_to_edges_nodes Parallel array; maps each node to its first index in dest_node_to_edges_edges
 * @param dest_node_to_edges_edges Parallel array; maps those indices to edge ids
 * @param num_vertices The number of vertices (nodes) in the graph
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for determining if padding needed for shared memory
 * @param warp_size The size of the warp of the GPU
 */
__global__
void argmax_node_combine_kernel(struct belief * __restrict__ message_buffer,
                                const struct belief * __restrict__ node_states, const size_t num_variables,
                                const struct belief * __restrict__ current_edges_messages,
                                const size_t * __restrict__ dest_node_to_edges_nodes,
                                const size_t * __restrict__ dest_node_to_edges_edges,
                                const size_t num_vertices,
                                const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    const size_t local_edge = blockIdx.y * blockDim.y + threadIdx.y;
    const size_t node_stride = blockDim.x * gridDim.x;
    for(size_t node = blockIdx.x * blockDim.x + threadIdx.x; node < num_vertices; node += node_stride) {
        if(local_edge < num_variables){
            message_buffer[node].data[local_edge] = 1.0f;
        }
        const size_t first = dest_node_to_edges_nodes[node];
        const size_t last = (node + 1 >= num_vertices) ? num_edges
                                                       : dest_node_to_edges_nodes[node + 1];
        if(local_edge < last - first){
            const size_t resolved_edge = dest_node_to_edges_edges[first + local_edge];
            combine_viterbi_message_cuda(message_buffer, current_edges_messages, num_variables, node, resolved_edge, num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
 * Normalizes each node's combined scratch belief and writes it into
 * node_states: each state value is divided by the sum over all states of
 * that node (sum-product marginalization).
 *
 * Grid layout: x indexes nodes, y indexes states; the per-node sum is
 * accumulated in shared memory, one accumulator per x-column of the block.
 *
 * @param message_buffer The combined incoming beliefs (one per node)
 * @param node_states The destination beliefs to update
 * @param num_variables The number of states per belief
 * @param current_edges_messages The buffered beliefs on the edges (unused here)
 * @param dest_node_to_edges_nodes Parallel array (unused here)
 * @param dest_node_to_edges_edges Parallel array (unused here)
 * @param num_vertices The number of vertices (nodes) in the graph
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for determining if padding is needed for shared memory
 * @param warp_size The size of the warp of the GPU
 */
__global__
void marginalize_sum_node_kernel(const struct belief * __restrict__ message_buffer,
                                 struct belief * __restrict__ node_states, const size_t num_variables,
                                 const struct belief * __restrict__ current_edges_messages,
                                 const size_t * __restrict__ dest_node_to_edges_nodes,
                                 const size_t * __restrict__ dest_node_to_edges_edges,
                                 const size_t num_vertices,
                                 const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    size_t node_index, edge_index;
    __shared__ float sum[BLOCK_SIZE_2_D_X];
    __shared__ float shared_message_buffer[BLOCK_SIZE_2_D_X][BLOCK_SIZE_2_D_Y];
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;
    // NOTE(review): threads within a block can have differing trip counts
    // through this grid-stride loop when num_vertices is not a multiple of
    // the x extent, which makes the barriers below divergent on the last
    // iteration; this hazard is inherited from the original — confirm the
    // launch configuration rules it out.
    for(node_index = blockIdx.x*blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x) {
        const char active = edge_index < num_variables;
        // BUG FIX: the accumulator was only zeroed when edge_index == 0,
        // which holds solely in blocks with blockIdx.y == 0; every other
        // block summed into uninitialized shared memory. Seed one slot per
        // x-column unconditionally, with a barrier before the accumulation.
        if (threadIdx.y == 0) {
            sum[threadIdx.x] = 0.0f;
        }
        __syncthreads();
        if (active) {
            // BUG FIX: the original used '*=' here, multiplying the incoming
            // value into an uninitialized shared slot; a plain store is the
            // intended semantics (the product was already formed in
            // message_buffer by the combine kernel).
            shared_message_buffer[threadIdx.x][threadIdx.y] = message_buffer[node_index].data[edge_index];
            atomicAdd(&sum[threadIdx.x], shared_message_buffer[threadIdx.x][threadIdx.y]);
        }
        // Barriers hoisted out of the divergent 'active' branch:
        // __syncthreads must be reached by every thread in the block.
        __syncthreads();
        // guard against division by zero on an all-zero (or negative) sum
        if (threadIdx.y == 0 && sum[threadIdx.x] <= 0.0f) {
            sum[threadIdx.x] = 1.0f;
        }
        __syncthreads();
        if (active) {
            node_states[node_index].data[edge_index] = shared_message_buffer[threadIdx.x][threadIdx.y] / sum[threadIdx.x];
        }
        __syncthreads();
    }
}
/**
 * Applies the PageRank dampening update to each node:
 *   state = (1 - d) / degree + d * accumulated_message
 * where degree is the node's number of incoming edges.
 *
 * Grid layout: x indexes nodes, y indexes states; the per-node factor is
 * staged in shared memory, one slot per x-column of the block.
 *
 * @param message_buffer The accumulated incoming beliefs (one per node)
 * @param node_states The destination beliefs to update
 * @param num_variables The number of states per belief
 * @param current_edges_messages The buffered beliefs on the edges (unused here)
 * @param dest_node_to_edges_nodes Maps each node to its first incoming-edge index
 * @param dest_node_to_edges_edges Parallel array (unused here)
 * @param num_vertices The number of vertices (nodes) in the graph
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for determining if padding is needed for shared memory
 * @param warp_size The size of the warp of the GPU
 */
__global__
void marginalize_dampening_factor_kernel(const struct belief * __restrict__ message_buffer,
                                         struct belief * __restrict__ node_states, const size_t num_variables,
                                         const struct belief * __restrict__ current_edges_messages,
                                         const size_t * __restrict__ dest_node_to_edges_nodes,
                                         const size_t * __restrict__ dest_node_to_edges_edges,
                                         const size_t num_vertices,
                                         const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    size_t node_index, edge_index, end_index, start_index;
    __shared__ float factor[BLOCK_SIZE_2_D_X];
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;
    for(node_index = blockIdx.x*blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x) {
        start_index = dest_node_to_edges_nodes[node_index];
        if(node_index + 1 >= num_vertices){
            end_index = num_edges;
        }
        else{
            end_index = dest_node_to_edges_nodes[node_index + 1];
        }
        // BUG FIX: factor was only written when edge_index == 0, which holds
        // solely in blocks with blockIdx.y == 0; all other blocks read
        // uninitialized shared memory. One thread per x-column now seeds it.
        // NOTE(review): a node with zero incoming edges makes the divisor 0
        // (inherited from the original) — confirm such nodes cannot occur.
        if (threadIdx.y == 0) {
            factor[threadIdx.x] = (1 - DAMPENING_FACTOR) / (end_index - start_index);
        }
        // Barrier hoisted out of the divergent 'edge_index < num_variables'
        // branch: __syncthreads must be reached by every thread in the block.
        __syncthreads();
        if(edge_index < num_variables) {
            // per-thread result needs no shared staging
            node_states[node_index].data[edge_index] = factor[threadIdx.x] + DAMPENING_FACTOR * message_buffer[node_index].data[edge_index];
        }
        // keep factor stable until every reader is done with this node row
        __syncthreads();
    }
}
/**
 * Normalizes every node's belief so its states sum to one (grid-stride
 * over nodes; each thread handles all states of its node).
 *
 * @param nodes The beliefs to normalize in place
 * @param nodes_size The number of states per belief
 * @param num_nodes The number of nodes in the graph
 */
__global__
void marginalize_viterbi_beliefs(struct belief * __restrict__ nodes, const size_t nodes_size, const size_t num_nodes){
    const size_t stride = blockDim.x * gridDim.x;
    for(size_t node = blockIdx.x * blockDim.x + threadIdx.x; node < num_nodes; node += stride){
        float total = 0.0f;
        for(size_t state = 0; state < nodes_size; ++state){
            total += nodes[node].data[state];
        }
        for(size_t state = 0; state < nodes_size; ++state){
            nodes[node].data[state] = nodes[node].data[state] / total;
        }
    }
}
/**
 * Writes each node's combined belief into node_states, clamped from below
 * at -1.0f (the seed value of the original shared-memory max).
 *
 * Grid layout: x grid-strides over nodes, y indexes states.
 *
 * @param message_buffer The combined incoming beliefs (one per node)
 * @param node_states The destination beliefs to update
 * @param num_variables The number of states per belief
 * @param current_edges_messages The buffered beliefs on the edges (unused here)
 * @param dest_node_to_edges_nodes Parallel array (unused here)
 * @param dest_node_to_edges_edges Parallel array (unused here)
 * @param num_vertices The number of vertices (nodes) in the graph
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Flag for determining if padding is needed for shared memory
 * @param warp_size The size of the warp of the GPU
 */
__global__
void argmax_kernel(const struct belief * __restrict__ message_buffer,
                   struct belief * __restrict__ node_states,
                   const size_t num_variables,
                   const struct belief * __restrict__ current_edges_messages,
                   const size_t * __restrict__ dest_node_to_edges_nodes,
                   const size_t * __restrict__ dest_node_to_edges_edges,
                   const size_t num_vertices,
                   const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    size_t node_index, edge_index;
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;
    for(node_index = blockIdx.x*blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x) {
        if(edge_index < num_variables) {
            // BUG FIX: the original seeded -1.0f only into the shared slot of
            // the thread with edge_index == 0 and then took fmaxf against the
            // (uninitialized) slots of every other thread. Each thread only
            // ever read and wrote its own [x][y] slot, so the shared staging
            // — and the __syncthreads calls sitting inside this divergent
            // branch — are dropped; clamping every element at -1.0f is the
            // behavior the seed was meant to produce.
            node_states[node_index].data[edge_index] =
                fmaxf(-1.0f, message_buffer[node_index].data[edge_index]);
        }
    }
}
/**
 * Calculates the delta for a given message: the absolute difference between
 * the previous and current normalization sums, with NaN treated as zero.
 *
 * @param i The message's index
 * @param current_messages_previous Per-edge previous sums
 * @param current_messages_current Per-edge current sums
 * @return The absolute delta between the two sums (0.0f if the difference is NaN)
 */
__device__
float calculate_local_delta(size_t i, const float * __restrict__ current_messages_previous, const float * __restrict__ current_messages_current){
    float diff = current_messages_previous[i] - current_messages_current[i];
    // NaN is the only value that compares unequal to itself
    if(diff != diff){
        diff = 0.0f;
    }
    // fabsf stays in single precision; the original's fabs() forced a
    // float -> double -> float round trip
    return fabsf(diff);
}
/**
 * Calculates the delta used for testing for convergence via reduction.
 *
 * Two phases: (1) a grid-stride pass materializes per-edge deltas into
 * delta_array, then (2) a block-level tree reduction (two loads per thread,
 * shared-memory halving, warp-level finish) sums them.
 *
 * Requires dynamic shared memory of at least blockDim.x floats and assumes
 * blockDim.x == BLOCK_SIZE (the second load and warp finish are sized by it).
 *
 * NOTE(review): thread 0 of EVERY block writes its partial sum straight to
 * *delta, so with more than one block the final value is whichever block
 * stored last, not the global sum — confirm single-block launches or
 * combine per-block results.
 *
 * @param current_messages_previous Per-edge previous normalization sums
 * @param current_messages_current Per-edge current normalization sums
 * @param delta The delta to write back
 * @param delta_array Temp array used to store partial deltas for reduction
 * @param num_edges The number of edges in the graph
 */
__global__
void calculate_delta(const float * __restrict__ current_messages_previous, const float * __restrict__ current_messages_current,
float * __restrict__ delta, float * __restrict__ delta_array, const size_t num_edges){
extern __shared__ float shared_delta[];
size_t tid, idx, i, s;
tid = threadIdx.x;
// each thread owns two elements a block-width apart (hence blockDim.x * 2)
i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
// phase 1: per-edge deltas into the scratch array
for(idx = blockIdx.x*blockDim.x + threadIdx.x; idx < num_edges; idx += blockDim.x * gridDim.x){
delta_array[idx] = calculate_local_delta(idx, current_messages_previous, current_messages_current);
}
__syncthreads();
// phase 2: first level of the reduction — load up to two elements per thread
float my_delta = (i < num_edges) ? delta_array[i] : 0;
if(i + BLOCK_SIZE < num_edges){
my_delta += delta_array[i + BLOCK_SIZE];
}
shared_delta[tid] = my_delta;
__syncthreads();
// tree reduction in shared memory down to the final warp
for(s= blockDim.x / 2; s > 32; s>>=1){
if(tid < s){
shared_delta[tid] = my_delta = my_delta + shared_delta[tid + s];
}
__syncthreads();
}
#if (__CUDA_ARCH__ >= 300)
if(tid < 32){
//fetch final intermediate sum from second warp
if(BLOCK_SIZE >= 64){
my_delta += shared_delta[tid + 32];
}
// finish within the warp using shuffles (no shared memory needed)
for(s = WARP_SIZE/2; s > 0; s /= 2){
my_delta += __shfl_down(my_delta, s);
}
}
#else
// pre-Kepler fallback: fully unrolled shared-memory finish
if((BLOCK_SIZE >= 64) && (tid < 32)){
shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
}
__syncthreads();
if((BLOCK_SIZE >= 32) && (tid < 16)){
shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
}
__syncthreads();
if((BLOCK_SIZE >= 16) && (tid < 8)){
shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
}
__syncthreads();
if((BLOCK_SIZE >= 8) && (tid < 4)){
shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
}
__syncthreads();
if((BLOCK_SIZE >= 4) && (tid < 2)){
shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
}
__syncthreads();
if((BLOCK_SIZE >= 2) && (tid < 1)){
shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
}
__syncthreads();
#endif
if(tid == 0) {
*delta = my_delta;
}
}
/**
 * Calculates the delta used for testing for convergence via reduction.
 * @details Adapted from the reduce6 sample in the NVIDIA CUDA SDK: a
 * grid-stride pass materializes per-edge deltas, then each thread accumulates
 * two elements per stride before the shared-memory / warp-level finish.
 *
 * Requires dynamic shared memory of at least blockDim.x floats. n_is_pow_2
 * must be true only when num_edges is a power of two (it skips the bounds
 * check on the second load).
 *
 * NOTE(review): thread 0 of every block writes its partial sum straight to
 * *delta (inherited from the original) — with multiple blocks the final
 * value is the last writer's, not the global sum; confirm the launch or
 * combine per-block results.
 *
 * @param current_messages_previous Per-edge previous normalization sums
 * @param current_messages_current Per-edge current normalization sums
 * @param delta The delta to write back
 * @param delta_array Temp array used to store partial deltas for reduction
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Whether num_edges is a power of two
 * @param warp_size The warp size of the GPU
 */
__global__
void calculate_delta_6(const float * __restrict__ current_messages_previous, const float * __restrict__ current_messages_current,
                       float * __restrict__ delta, float * __restrict__ delta_array,
                       const size_t num_edges, char n_is_pow_2, const size_t warp_size) {
    extern __shared__ float shared_delta[];
    size_t offset;
    // perform first level of reduce
    // reading from global memory, writing to shared memory
    size_t idx;
    size_t tid = threadIdx.x;
    size_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    size_t grid_size = blockDim.x * 2 * gridDim.x;
    for(idx = blockIdx.x*blockDim.x + threadIdx.x; idx < num_edges; idx += blockDim.x * gridDim.x){
        delta_array[idx] = calculate_local_delta(idx, current_messages_previous, current_messages_current);
    }
    __syncthreads();
    float my_delta = 0.0f;
    while (i < num_edges) {
        // BUG FIX: the original assigned ('=') here instead of accumulating
        // ('+='), discarding the contributions of earlier strides.
        my_delta += delta_array[i];
        // ensure we don't read out of bounds
        if (n_is_pow_2 || i + blockDim.x < num_edges) {
            // BUG FIX: the original re-read delta_array[i] here, doubling the
            // same element; the SDK pattern reads the partner element one
            // block-width away.
            my_delta += delta_array[i + blockDim.x];
        }
        i += grid_size;
    }
    //each thread puts its local sum into shared memory
    shared_delta[tid] = my_delta;
    __syncthreads();
    // do reduction in shared mem
    if ((blockDim.x >= 512) && (tid < 256)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 256];
    }
    __syncthreads();
    if ((blockDim.x >= 256) && (tid < 128)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 128];
    }
    __syncthreads();
    if ((blockDim.x >= 128) && (tid < 64)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 64];
    }
    __syncthreads();
#if (__CUDA_ARCH__ >= 300)
    if( tid < 32){
        // fetch final intermediate sum from 2nd warp
        if(blockDim.x >= 64){
            my_delta += shared_delta[tid + 32];
        }
        // finish the reduction within the warp using shuffles
        for(offset = warp_size/2; offset > 0; offset /= 2 ){
            my_delta += __shfl_down(my_delta, offset);
        }
    }
#else
    // fully unroll reduction within a single warp
    if ((blockDim.x >= 64) && (tid < 32)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
    }
    __syncthreads();
    if ((blockDim.x >= 32) && (tid < 16)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
    }
    __syncthreads();
    if((blockDim.x >= 16) && (tid < 8)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
    }
    __syncthreads();
    if((blockDim.x >= 8) && (tid < 4)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
    }
    __syncthreads();
    if((blockDim.x >= 4) && (tid < 2)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
    }
    __syncthreads();
    if((blockDim.x >= 2) && (tid < 1)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
    }
    __syncthreads();
#endif
    //write result for this block to global mem
    if(tid == 0){
        *delta = my_delta;
    }
}
/**
 * Calculates the delta used for testing for convergence via reduction.
 * @details Simple kernel used for testing: a grid-stride pass materializes
 * per-edge deltas, then an interleaved-addressing tree reduction sums one
 * block's worth in shared memory.
 *
 * Requires dynamic shared memory of at least blockDim.x floats; assumes
 * blockDim.x is a power of two (otherwise the interleaved step can read
 * past the active range).
 *
 * NOTE(review): thread 0 of every block writes its partial sum straight to
 * *delta, so with more than one block the result is the last writer's block
 * sum — confirm single-block use.
 *
 * @param current_messages_previous Per-edge previous normalization sums
 * @param current_messages_current Per-edge current normalization sums
 * @param delta The delta to write back
 * @param delta_array Temp array used to store partial deltas for reduction
 * @param num_edges The number of edges in the graph
 */
__global__
void calculate_delta_simple(const float * __restrict__ current_messages_previous,
const float * __restrict__ current_messages_current,
float * __restrict__ delta, float * __restrict__ delta_array,
const size_t num_edges) {
extern __shared__ float shared_delta[];
size_t tid, idx, i, s;
tid = threadIdx.x;
// phase 1: per-edge deltas into the scratch array
for(idx = blockIdx.x*blockDim.x + threadIdx.x; idx < num_edges; idx += blockDim.x * gridDim.x){
delta_array[idx] = calculate_local_delta(idx, current_messages_previous, current_messages_current);
}
__syncthreads();
// phase 2: load this block's slice into shared memory (0 past the end)
idx = blockIdx.x * blockDim.x + threadIdx.x;
shared_delta[tid] = (idx < num_edges) ? delta_array[idx] : 0;
__syncthreads();
// interleaved-addressing tree reduction in shared memory
for(s = 1; s < blockDim.x; s *= 2){
i = 2 * s * tid;
if( i < blockDim.x ) {
shared_delta[i] += shared_delta[i + s];
}
__syncthreads();
}
//write result for this block to global mem
if(tid == 0){
*delta = shared_delta[0];
}
}
/**
 * Helper function to test for errors for kernel calls: fetches the most
 * recent runtime error and aborts the process with a message if one is set.
 */
void check_cuda_kernel_return_code(){
    const hipError_t status = hipGetLastError();
    if (status != hipSuccess) {
        fprintf(stderr, "Error: %s\n", hipGetErrorString(status));
        exit(-1);
    }
}
/**
 * Runs loopy BP on the graph until the message delta falls below the
 * convergence threshold or max_iterations is reached.
 *
 * Copies the graph's arrays to the device, runs batches of BATCH_SIZE
 * iterations (init buffers -> read messages -> send messages -> combine ->
 * marginalize), checks convergence after each batch, then copies the node
 * states and edge messages back to the host and frees all device memory.
 *
 * @param graph The graph to use
 * @param convergence The convergence threshold; if the delta falls below it, execution will stop
 * @param max_iterations The maximum number of iterations to run for
 * @return The actual number of iterations executed
 */
int loopy_propagate_until_cuda_kernels(Graph_t graph, const float convergence, const int max_iterations){
    size_t i, j;
    int num_iter;
    float * delta;
    float * delta_array;
    float previous_delta, host_delta;
    char is_pow_2;
    struct belief * message_buffer;
    struct belief * current_messages;
    float * current_messages_previous;
    float * current_messages_current;
    size_t * src_nodes_to_edges_nodes;
    size_t * src_nodes_to_edges_edges;
    size_t * dest_nodes_to_edges_nodes;
    size_t * dest_nodes_to_edges_edges;
    struct belief * node_states;
    host_delta = 0.0f;
    // BUG FIX: previous_delta was read in the first convergence test before
    // ever being written. Initializing to 0.0f makes the first-batch test
    // equivalent to the plain 'host_delta < convergence' check.
    previous_delta = 0.0f;
    const size_t num_vertices = graph->current_num_vertices;
    const size_t num_edges = graph->current_num_edges;
    const size_t edge_joint_probability_dim_x = graph->edge_joint_probability_dim_x;
    const size_t edge_joint_probability_dim_y = graph->edge_joint_probability_dim_y;
    const size_t node_states_size = graph->node_states_size;
    /*printf("Before=====");
    print_edges(graph);
    print_nodes(graph);*/
    // BUG FIX: the original computed 'num_vertices % 2 == 0', which tests
    // evenness, not power-of-two. The flag is consumed by calculate_delta_6
    // to skip a bounds check on reads into delta_array (sized by num_edges),
    // so it must be a true power-of-two test on num_edges.
    is_pow_2 = (num_edges > 0) && ((num_edges & (num_edges - 1)) == 0);
    // allocate device-side copies of the graph arrays
    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages, sizeof(struct belief) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages_current, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages_previous, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&node_states, sizeof(struct belief) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(hipMalloc((void **)&delta_array, sizeof(float) * num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&message_buffer, sizeof(struct belief) * num_vertices));
    // copy graph data to the device (joint probability goes to constant memory)
    CUDA_CHECK_RETURN(hipMemcpyToSymbol(edge_joint_probability, &(graph->edge_joint_probability), sizeof(struct joint_probability)));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages, graph->edges_messages, sizeof(struct belief) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages_previous, graph->edges_messages_previous, sizeof(float) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages_current, graph->edges_messages_current, sizeof(float) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(node_states, graph->node_states, sizeof(struct belief) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, hipMemcpyHostToDevice));
    // ceil-div grid sizes for the various launch shapes
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1)/ BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1)/BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockDegreeCount = (graph->max_degree + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1)/BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = ( graph->max_degree + BLOCK_SIZE_3_D_Y - 1)/BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = ( MAX_STATES + BLOCK_SIZE_3_D_Z - 1)/BLOCK_SIZE_3_D_Z;
    num_iter = 0;
    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);
    dim3 dimDegreeGrid(blockNodeCount, blockDegreeCount, 1);
    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);
    // run BATCH_SIZE iterations between convergence checks to amortize the
    // device->host delta transfer
    for(i = BATCH_SIZE; i <= max_iterations; i+= BATCH_SIZE){
        for(j = 0; j < BATCH_SIZE; ++j) {
            hipLaunchKernelGGL(( init_message_buffer_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, node_states, node_states_size, num_vertices);
            check_cuda_kernel_return_code();
            hipLaunchKernelGGL(( read_incoming_messages_kernel) , dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, message_buffer, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            hipLaunchKernelGGL(( send_message_for_node_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, num_edges,
                    edge_joint_probability_dim_x, edge_joint_probability_dim_y,
                    current_messages, current_messages_previous, current_messages_current,
                    src_nodes_to_edges_nodes, src_nodes_to_edges_edges, num_vertices);
            check_cuda_kernel_return_code();
            hipLaunchKernelGGL(( marginalize_node_combine_kernel), dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, message_buffer,
                    node_states, node_states_size,
                    current_messages,
                    dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            hipLaunchKernelGGL(( marginalize_sum_node_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer,
                    node_states, node_states_size,
                    current_messages,
                    dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            num_iter++;
        }
        hipLaunchKernelGGL(( calculate_delta_6), dim3(dimReduceGrid), dim3(dimReduceBlock), reduceSmemSize, 0, current_messages_previous, current_messages_current, delta, delta_array, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(hipMemcpy(&host_delta, delta, sizeof(float), hipMemcpyDeviceToHost));
        // converged if the delta is small, or if it has stopped moving
        if(host_delta < convergence || fabs(host_delta - previous_delta) < convergence){
            break;
        }
        if(i < max_iterations - BATCH_SIZE) {
            previous_delta = host_delta;
        }
        if(i >= max_iterations){
            printf("No Convergence: previous: %f vs current: %f\n", previous_delta, host_delta);
        }
    }
    // copy results back to the host
    CUDA_CHECK_RETURN(hipMemcpy(graph->node_states, node_states, sizeof(struct belief) * num_vertices, hipMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(hipMemcpy(graph->edges_messages, current_messages, sizeof(struct belief) * num_edges, hipMemcpyDeviceToHost));
    // NOTE(review): edges_messages_previous/current are not copied back —
    // confirm the host never reads them after this call.
    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(hipFree(current_messages));
    CUDA_CHECK_RETURN(hipFree(current_messages_previous));
    CUDA_CHECK_RETURN(hipFree(current_messages_current));
    CUDA_CHECK_RETURN(hipFree(message_buffer));
    CUDA_CHECK_RETURN(hipFree(node_states));
    CUDA_CHECK_RETURN(hipFree(delta));
    CUDA_CHECK_RETURN(hipFree(delta_array));
    /*printf("After=====");
    print_nodes(graph);
    print_edges(graph);*/
    return num_iter;
}
/**
 * Runs PageRank on the graph until convergence or until max_iterations is reached.
 *
 * Node states and edge messages are copied to the device, iterated on in
 * batches of BATCH_SIZE, and copied back into @p graph on exit.
 *
 * @param graph The graph to use; node states and edge messages are updated in place
 * @param convergence The convergence threshold; if the delta (or its change between
 *                    batches) falls below it, execution stops
 * @param max_iterations The maximum number of iterations to run for
 * @return The actual number of iterations executed
 */
int page_rank_until_cuda_kernels(Graph_t graph, const float convergence, const int max_iterations){
    int num_iter;
    size_t i, j;
    float * delta;
    float * delta_array;
    float previous_delta, host_delta;
    char is_pow_2;
    struct belief * message_buffer;
    struct belief * current_messages;
    float * current_messages_previous;
    float * current_messages_current;
    size_t * src_nodes_to_edges_nodes;
    size_t * src_nodes_to_edges_edges;
    size_t * dest_nodes_to_edges_nodes;
    size_t * dest_nodes_to_edges_edges;
    struct belief * node_states;

    host_delta = 0.0f;
    previous_delta = INFINITY;

    const size_t num_vertices = graph->current_num_vertices;
    const size_t num_edges = graph->current_num_edges;
    const size_t edge_joint_probability_dim_x = graph->edge_joint_probability_dim_x;
    const size_t edge_joint_probability_dim_y = graph->edge_joint_probability_dim_y;
    const size_t node_states_size = graph->node_states_size;

    // NOTE(review): this tests evenness, not a true power of two
    // ((n & (n - 1)) == 0); kept as-is for consistency with the sibling
    // solvers in this file -- confirm the reduction kernels only need evenness.
    is_pow_2 = num_vertices % 2 == 0;

    // allocate device buffers
    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages, sizeof(struct belief) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages_previous, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages_current, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&node_states, sizeof(struct belief) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(hipMalloc((void **)&delta_array, sizeof(float) * num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&message_buffer, sizeof(struct belief) * num_vertices));

    // copy graph data to the device
    CUDA_CHECK_RETURN(hipMemcpyToSymbol(edge_joint_probability, &(graph->edge_joint_probability), sizeof(struct joint_probability)));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages, graph->edges_messages, sizeof(struct belief) * graph->current_num_edges, hipMemcpyHostToDevice));
    // BUG FIX: the previous/current message scalars were both being copied over
    // current_messages (clobbering the beliefs and leaving the two float buffers
    // uninitialized); each source now goes to its matching device buffer, as in
    // viterbi_until_cuda_kernels.
    CUDA_CHECK_RETURN(hipMemcpy(current_messages_previous, graph->edges_messages_previous, sizeof(float) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages_current, graph->edges_messages_current, sizeof(float) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(node_states, graph->node_states, sizeof(struct belief) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, hipMemcpyHostToDevice));

    // launch geometry (ceil-division so partial blocks cover the tail)
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1)/ BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1)/BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1)/BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = ( graph->max_degree + BLOCK_SIZE_3_D_Y - 1)/BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = ( MAX_STATES + BLOCK_SIZE_3_D_Z - 1)/BLOCK_SIZE_3_D_Z;

    num_iter = 0;

    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);
    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);

    // run BATCH_SIZE iterations between each host-side convergence check
    for(i = BATCH_SIZE; i <= max_iterations; i+= BATCH_SIZE){
        for(j = 0; j < BATCH_SIZE; ++j) {
            // seed the per-node buffers from the current node states
            hipLaunchKernelGGL(( init_message_buffer_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, node_states, node_states_size, num_vertices);
            check_cuda_kernel_return_code();
            // fold incoming edge messages into each destination node's buffer
            hipLaunchKernelGGL(( read_incoming_messages_kernel) , dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, message_buffer, node_states_size,
                current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            // propagate each node's buffer out along its source edges
            hipLaunchKernelGGL(( send_message_for_node_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, num_edges,
                edge_joint_probability_dim_x, edge_joint_probability_dim_y,
                current_messages, current_messages_previous, current_messages_current,
                src_nodes_to_edges_nodes, src_nodes_to_edges_edges, num_vertices);
            check_cuda_kernel_return_code();
            // accumulate the PageRank contributions and apply the dampening factor
            hipLaunchKernelGGL(( marginalize_page_rank_node_combine_kernel), dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, message_buffer,
                node_states, node_states_size,
                current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            hipLaunchKernelGGL(( marginalize_dampening_factor_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, node_states, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            num_iter++;
        }
        // reduce the per-edge deltas and pull the scalar back to the host
        hipLaunchKernelGGL(( calculate_delta_6), dim3(dimReduceGrid), dim3(dimReduceBlock), reduceSmemSize, 0, current_messages_previous, current_messages_current, delta, delta_array, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(hipMemcpy(&host_delta, delta, sizeof(float), hipMemcpyDeviceToHost));
        if(host_delta < convergence || fabs(host_delta - previous_delta) < convergence){
            break;
        }
        previous_delta = host_delta;
    }

    // copy results back to the host graph
    CUDA_CHECK_RETURN(hipMemcpy(graph->node_states, node_states, sizeof(struct belief) * num_vertices, hipMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(hipMemcpy(graph->edges_messages, current_messages, sizeof(struct belief) * num_edges, hipMemcpyDeviceToHost));

    // release device buffers
    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(hipFree(current_messages));
    CUDA_CHECK_RETURN(hipFree(current_messages_previous));
    CUDA_CHECK_RETURN(hipFree(current_messages_current));
    CUDA_CHECK_RETURN(hipFree(message_buffer));
    CUDA_CHECK_RETURN(hipFree(node_states));
    CUDA_CHECK_RETURN(hipFree(delta));
    CUDA_CHECK_RETURN(hipFree(delta_array));

    return num_iter;
}
/**
 * Runs Viterbi on the graph until convergence or until max_iterations is reached.
 *
 * Node states and edge messages are copied to the device, iterated on in
 * batches of BATCH_SIZE, and copied back into @p graph on exit. On convergence
 * the node beliefs are argmax-marginalized before copy-back.
 *
 * @param graph The graph to use; node states and edge messages are updated in place
 * @param convergence The convergence threshold; if the delta (or its change between
 *                    batches) falls below it, execution stops
 * @param max_iterations The maximum number of iterations to run for
 * @return The actual number of iterations executed
 */
int viterbi_until_cuda_kernels(Graph_t graph, const float convergence, const int max_iterations){
    int num_iter;
    size_t i, j;
    float * delta;
    float * delta_array;
    float previous_delta, host_delta;
    char is_pow_2;
    struct belief * message_buffer;
    struct belief * current_messages;
    float * current_messages_previous;
    float * current_messages_current;
    size_t * src_nodes_to_edges_nodes;
    size_t * src_nodes_to_edges_edges;
    size_t * dest_nodes_to_edges_nodes;
    size_t * dest_nodes_to_edges_edges;
    struct belief * node_states;

    previous_delta = INFINITY;
    host_delta = 0.0f;

    const size_t num_vertices = graph->current_num_vertices;
    const size_t num_edges = graph->current_num_edges;
    const size_t edge_joint_probability_dim_x = graph->edge_joint_probability_dim_x;
    const size_t edge_joint_probability_dim_y = graph->edge_joint_probability_dim_y;
    const size_t node_states_size = graph->node_states_size;

    // NOTE(review): this tests evenness, not a true power of two
    // ((n & (n - 1)) == 0); kept as-is for consistency with the sibling
    // solvers in this file -- confirm the reduction kernels only need evenness.
    is_pow_2 = num_vertices % 2 == 0;

    // allocate device buffers
    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages, sizeof(struct belief) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages_previous, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages_current, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&node_states, sizeof(struct belief) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(hipMalloc((void **)&delta_array, sizeof(float) * num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&message_buffer, sizeof(struct belief) * num_vertices));

    // copy graph data to the device
    CUDA_CHECK_RETURN(hipMemcpyToSymbol(edge_joint_probability, &(graph->edge_joint_probability), sizeof(struct joint_probability)));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages, graph->edges_messages, sizeof(struct belief) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages_previous, graph->edges_messages_previous, sizeof(float) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages_current, graph->edges_messages_current, sizeof(float) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(node_states, graph->node_states, sizeof(struct belief) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, hipMemcpyHostToDevice));

    // launch geometry (ceil-division so partial blocks cover the tail)
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1)/ BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1)/BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1)/BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = ( graph->max_degree + BLOCK_SIZE_3_D_Y - 1)/BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = ( MAX_STATES + BLOCK_SIZE_3_D_Z - 1)/BLOCK_SIZE_3_D_Z;

    num_iter = 0;

    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);
    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);

    // run BATCH_SIZE iterations between each host-side convergence check
    for(i = BATCH_SIZE; i <= max_iterations; i+= BATCH_SIZE){
        for(j = 0; j < BATCH_SIZE; ++j) {
            // seed the per-node buffers from the current node states
            hipLaunchKernelGGL(( init_message_buffer_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, node_states, node_states_size, num_vertices);
            check_cuda_kernel_return_code();
            // fold incoming edge messages into each destination node's buffer
            hipLaunchKernelGGL(( read_incoming_messages_kernel) , dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, message_buffer, node_states_size,
                current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            // propagate each node's buffer out along its source edges
            hipLaunchKernelGGL(( send_message_for_node_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, num_edges,
                edge_joint_probability_dim_x, edge_joint_probability_dim_y,
                current_messages, current_messages_previous, current_messages_current,
                src_nodes_to_edges_nodes, src_nodes_to_edges_edges, num_vertices);
            check_cuda_kernel_return_code();
            // take the per-state maximum over the incoming messages (Viterbi)
            hipLaunchKernelGGL(( argmax_node_combine_kernel), dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, message_buffer, node_states, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            hipLaunchKernelGGL(( argmax_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, node_states, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            num_iter++;
        }
        // reduce the per-edge deltas and pull the scalar back to the host
        hipLaunchKernelGGL(( calculate_delta_6), dim3(dimReduceGrid), dim3(dimReduceBlock), reduceSmemSize, 0, current_messages_previous, current_messages_current, delta, delta_array, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(hipMemcpy(&host_delta, delta, sizeof(float), hipMemcpyDeviceToHost));
        if(host_delta < convergence || fabs(host_delta - previous_delta) < convergence){
            // converged: finalize the node beliefs before copy-back
            hipLaunchKernelGGL(( marginalize_viterbi_beliefs), dim3(num_vertices), dim3(BLOCK_SIZE), 0, 0, node_states, node_states_size, num_vertices);
            break;
        }
        previous_delta = host_delta;
    }

    // copy results back to the host graph
    CUDA_CHECK_RETURN(hipMemcpy(graph->node_states, node_states, sizeof(struct belief) * num_vertices, hipMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(hipMemcpy(graph->edges_messages, current_messages, sizeof(struct belief) * num_edges, hipMemcpyDeviceToHost));

    // release device buffers
    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(hipFree(current_messages));
    CUDA_CHECK_RETURN(hipFree(current_messages_previous));
    CUDA_CHECK_RETURN(hipFree(current_messages_current));
    CUDA_CHECK_RETURN(hipFree(message_buffer));
    CUDA_CHECK_RETURN(hipFree(node_states));
    CUDA_CHECK_RETURN(hipFree(delta));
    CUDA_CHECK_RETURN(hipFree(delta_array));

    return num_iter;
}
/**
 * Runs loopy BP on the file and prints a timing line to stdout.
 *
 * @param file_name The path of the BNF file to read and parse
 */
void test_loopy_belief_propagation_kernels(char * file_name){
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    // BUG FIX: fopen was never checked; a missing file previously crashed
    // inside the lexer instead of failing with a clear assertion.
    assert(in != NULL);
    yyset_in(in, scanner);
    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);
    graph = build_graph(expression);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    init_previous_edge(graph);

    // time only the propagation itself, not parsing or graph construction
    start = clock();
    loopy_propagate_until_cuda_kernels(graph, PRECISION, NUM_ITERATIONS);
    end = clock();
    time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;

    printf("%s,loopy,%ld,%ld,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, time_elapsed);

    delete_expression(expression);
    graph_destroy(graph);
}
/**
 * Runs loopy BP starting from a parsed BNF AST and appends a timing row to a CSV.
 *
 * @param expression The BNF AST root node
 * @param file_name The input file path (used only for the CSV label)
 * @param out The open file handle for the output CSV
 */
void run_test_loopy_belief_propagation_kernels(struct expression * expression, const char * file_name, FILE * out){
    clock_t t_begin, t_end;

    // build the graph and its node/edge adjacency maps
    Graph_t bp_graph = build_graph(expression);
    assert(bp_graph != NULL);
    set_up_src_nodes_to_edges(bp_graph);
    set_up_dest_nodes_to_edges(bp_graph);
    init_previous_edge(bp_graph);

    // time only the propagation itself
    t_begin = clock();
    int iterations = loopy_propagate_until_cuda_kernels(bp_graph, PRECISION, NUM_ITERATIONS);
    t_end = clock();
    double elapsed_seconds = (double)(t_end - t_begin)/CLOCKS_PER_SEC;

    fprintf(out, "%s,loopy,%ld,%ld,%d,%d,%lf\n", file_name, bp_graph->current_num_vertices, bp_graph->current_num_edges, bp_graph->diameter, iterations, elapsed_seconds);
    fflush(out);

    graph_destroy(bp_graph);
}
/**
 * Parses an XML graph file, runs loopy BP on it, and appends a timing row to a CSV.
 *
 * @param file_name The input XML file path
 * @param out The open output CSV file handle
 */
void run_test_loopy_belief_propagation_xml_file_kernels(const char * file_name, FILE * out){
    clock_t t_begin, t_end;

    // parse the graph and prepare its node/edge adjacency maps
    Graph_t bp_graph = parse_xml_file(file_name);
    assert(bp_graph != NULL);
    set_up_src_nodes_to_edges(bp_graph);
    set_up_dest_nodes_to_edges(bp_graph);
    init_previous_edge(bp_graph);

    // time only the propagation itself
    t_begin = clock();
    int iterations = loopy_propagate_until_cuda_kernels(bp_graph, PRECISION, NUM_ITERATIONS);
    t_end = clock();
    double elapsed_seconds = (double)(t_end - t_begin)/CLOCKS_PER_SEC;

    fprintf(out, "%s,loopy,%ld,%ld,%d,%d,%lf\n", file_name, bp_graph->current_num_vertices, bp_graph->current_num_edges, bp_graph->diameter, iterations, elapsed_seconds);
    fflush(out);

    graph_destroy(bp_graph);
}
/**
 * Builds a graph from SNAP edge/node files, runs loopy BP on it, and appends a
 * timing row to a CSV.
 *
 * @param edge_file_name The file to read for the SNAP edges
 * @param node_file_name The file to read for the SNAP observed nodes
 * @param out The open CSV file handle to append to
 */
void run_test_loopy_belief_propagation_snap_file_kernels(const char * edge_file_name, const char * node_file_name,
                                                         FILE * out){
    clock_t t_begin, t_end;

    // parse the graph and prepare its node/edge adjacency maps
    Graph_t bp_graph = parse_graph_from_snap_files(edge_file_name, node_file_name);
    assert(bp_graph != NULL);
    set_up_src_nodes_to_edges(bp_graph);
    set_up_dest_nodes_to_edges(bp_graph);
    init_previous_edge(bp_graph);

    // time only the propagation itself
    t_begin = clock();
    int iterations = loopy_propagate_until_cuda_kernels(bp_graph, PRECISION, NUM_ITERATIONS);
    t_end = clock();
    double elapsed_seconds = (double)(t_end - t_begin)/CLOCKS_PER_SEC;

    fprintf(out, "%s-%s,loopy,%ld,%ld,%d,%d,%lf\n", edge_file_name, node_file_name, bp_graph->current_num_vertices, bp_graph->current_num_edges, bp_graph->diameter, iterations, elapsed_seconds);
    fflush(out);

    graph_destroy(bp_graph);
}
/**
 * Builds a graph from Matrix Market edge/node files, runs loopy BP on it, and
 * appends a timing row (including degree statistics and per-iteration time) to a CSV.
 *
 * @param edges_mtx The Matrix Market file describing the edges
 * @param nodes_mtx The Matrix Market file describing the observed nodes
 * @param edge_probability The joint probability table shared by all edges
 * @param dim_x The x-dimension of the joint probability table
 * @param dim_y The y-dimension of the joint probability table
 * @param out The open CSV file handle to append to
 */
void run_test_loopy_belief_propagation_mtx_files_kernels(const char * edges_mtx, const char * nodes_mtx,
                                                         const struct joint_probability * edge_probability,
                                                         int dim_x, int dim_y,
                                                         FILE * out){
    Graph_t graph;
    clock_t start, end, begin;
    double time_elapsed, total_time_elapsed, per_iteration;
    int num_iterations;

    // total_time includes graph construction; time_elapsed is propagation only
    begin = clock();
    graph = build_graph_from_mtx(edges_mtx, nodes_mtx, edge_probability, dim_x, dim_y);
    assert(graph != NULL);
    set_up_src_nodes_to_edges_no_hsearch(graph);
    set_up_dest_nodes_to_edges_no_hsearch(graph);
    init_previous_edge(graph);

    start = clock();
    num_iterations = loopy_propagate_until_cuda_kernels(graph, PRECISION, NUM_ITERATIONS);
    end = clock();
    time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;
    total_time_elapsed = (double)(end - begin)/CLOCKS_PER_SEC;
    // BUG FIX: guard the per-iteration average against num_iterations == 0
    // (previously divided by zero and printed "inf" when no iterations ran)
    per_iteration = (num_iterations > 0) ? time_elapsed/num_iterations : 0.0;

    fprintf(out, "%s-%s,loopy,%ld,%ld,%d,%d,%lf,%d,%lf,%d,%lf,%lf,%lf\n", edges_mtx, nodes_mtx, graph->current_num_vertices, graph->current_num_edges, graph->diameter, graph->max_in_degree, graph->avg_in_degree, graph->max_out_degree, graph->avg_out_degree, num_iterations, time_elapsed, per_iteration, total_time_elapsed);
    fflush(out);

    graph_destroy(graph);
}
/**
 * Reports a failed CUDA/HIP API call and aborts the process.
 *
 * Used by the CUDA_CHECK_RETURN macro; a hipSuccess status is a no-op.
 *
 * @param file The source code file of the failing call
 * @param line The line within file
 * @param statement The text of the failing statement
 * @param err The error code returned by the call
 */
void CheckCudaErrorAux (const char *file, int line, const char *statement, hipError_t err)
{
    if (err == hipSuccess) {
        return;
    }
    // BUG FIX: diagnostics belong on stderr, not stdout (stdout carries the
    // CSV/timing output in this program)
    fprintf(stderr, "%s returned %s (%d) at %s:%d\n", statement, hipGetErrorString(err), err, file, line);
    exit (1);
}
| 21f1398150acc602cd08ac2d875a6c667624265c.cu | #include "belief-propagation-kernels.hpp"
__constant__ struct joint_probability edge_joint_probability[1];
/**
 * Copies the current node states into the per-node message buffer.
 *
 * Launched with a 2-D grid: x covers nodes, y covers states; both dimensions
 * use grid-stride loops, so any launch geometry is valid.
 *
 * @param message_buffer The per-node message buffer to initialize
 * @param node_states The node states to copy from
 * @param num_variables The number of valid entries in each belief's data array
 * @param num_nodes The number of nodes in the graph
 */
__global__
void init_message_buffer_kernel(struct belief * __restrict__ message_buffer,
                                const struct belief * __restrict__ node_states, const size_t num_variables,
                                const size_t num_nodes){
    size_t node, state;
    const size_t node_stride = blockDim.x * gridDim.x;
    const size_t state_stride = blockDim.y * gridDim.y;
    for(node = blockIdx.x*blockDim.x + threadIdx.x; node < num_nodes; node += node_stride){
        for(state = blockIdx.y*blockDim.y + threadIdx.y; state < num_variables; state += state_stride){
            message_buffer[node].data[state] = node_states[node].data[state];
        }
    }
}
/**
 * Multiplies one incoming edge message into the destination belief, one state
 * per thread (state index = threadIdx.z). Non-finite products are clamped to 0.
 *
 * @param dest The beliefs to update
 * @param edge_messages The buffered messages on the edges
 * @param num_vertices Upper bound on the state index (the call site passes the
 *                     node-state size, not the vertex count -- the parameter
 *                     name is historical)
 * @param node_index The index of the destination node
 * @param edge_offset The index of the edge supplying the message
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Unused here; kept for interface compatibility
 * @param warp_size Unused here; kept for interface compatibility
 */
__device__
void combine_message_cuda(struct belief * __restrict__ dest, const struct belief * __restrict__ edge_messages, const size_t num_vertices, size_t node_index,
                          const size_t edge_offset, const size_t num_edges, char n_is_pow_2, size_t warp_size){
    size_t index = threadIdx.z;
    if(index < num_vertices && edge_offset < num_edges){
        // BUG FIX: the per-thread scratch values were previously staged in
        // __shared__ arrays indexed only by threadIdx.z; with a 3-D block,
        // threads differing in x/y raced on the same slot. Registers are
        // private per thread and remove the race.
        const float dest_value = dest[node_index].data[index];
        const float src_value = edge_messages[edge_offset].data[index];
        float value = dest_value * src_value;
        if(isnan(value) || isinf(value)) {
            dest[node_index].data[index] = 0.0f;
        }
        else {
            dest[node_index].data[index] = value;
        }
        // NOTE(review): when several edges target the same (node, state), this
        // read-modify-write is still non-atomic across threads -- confirm the
        // launch geometry serializes edges per node or accept the race.
    }
}
/**
 * Adds one incoming edge message into the destination PageRank belief, one
 * state per thread (state index = threadIdx.z). Non-finite sums are clamped to 0.
 *
 * @param dest The beliefs to update
 * @param edge_messages The buffered messages on the edges
 * @param num_vertices Upper bound on the state index (the call site passes the
 *                     node-state size, not the vertex count -- the parameter
 *                     name is historical)
 * @param node_index The index of the destination node
 * @param edge_offset The index of the edge supplying the message
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Unused here; kept for interface compatibility
 * @param warp_size Unused here; kept for interface compatibility
 */
__device__
void combine_page_rank_message_cuda(struct belief * __restrict__ dest, const struct belief * __restrict__ edge_messages, const size_t num_vertices, const size_t node_index,
                                    const size_t edge_offset, const size_t num_edges, char n_is_pow_2, size_t warp_size){
    size_t index = threadIdx.z;
    if(index < num_vertices && edge_offset < num_edges){
        // BUG FIX: the per-thread scratch values were previously staged in
        // __shared__ arrays indexed only by threadIdx.z; with a 3-D block,
        // threads differing in x/y raced on the same slot. Registers are
        // private per thread and remove the race.
        const float dest_value = dest[node_index].data[index];
        const float src_value = edge_messages[edge_offset].data[index];
        float value = dest_value + src_value;
        if(isnan(value) || isinf(value)) {
            dest[node_index].data[index] = 0.0f;
        }
        else {
            dest[node_index].data[index] = value;
        }
        // NOTE(review): when several edges target the same (node, state), this
        // read-modify-write is still non-atomic across threads -- confirm the
        // launch geometry serializes edges per node or accept the race.
    }
}
/**
 * Takes the elementwise maximum of one incoming edge message and the
 * destination belief, one state per thread (state index = threadIdx.z).
 * Non-finite results are clamped to 0.
 *
 * @param dest The beliefs to update
 * @param edge_messages The buffered messages on the edges
 * @param num_vertices Upper bound on the state index (the call site passes the
 *                     node-state size, not the vertex count -- the parameter
 *                     name is historical)
 * @param node_index The index of the destination node
 * @param edge_offset The index of the edge supplying the message
 * @param num_edges The number of edges in the graph
 * @param n_is_pow_2 Unused here; kept for interface compatibility
 * @param warp_size Unused here; kept for interface compatibility
 */
__device__
void combine_viterbi_message_cuda(struct belief * __restrict__ dest, const struct belief * __restrict__ edge_messages, const size_t num_vertices, const size_t node_index,
                                  const size_t edge_offset, const size_t num_edges, char n_is_pow_2, size_t warp_size){
    size_t index = threadIdx.z;
    if(index < num_vertices && edge_offset < num_edges){
        // BUG FIX: the per-thread scratch values were previously staged in
        // __shared__ arrays indexed only by threadIdx.z; with a 3-D block,
        // threads differing in x/y raced on the same slot. Registers are
        // private per thread and remove the race.
        const float dest_value = dest[node_index].data[index];
        const float src_value = edge_messages[edge_offset].data[index];
        float value = fmaxf(dest_value, src_value);
        if(isinf(value) || isnan(value)) {
            dest[node_index].data[index] = 0.0f;
        }
        else {
            dest[node_index].data[index] = value;
        }
        // NOTE(review): when several edges target the same (node, state), this
        // read-modify-write is still non-atomic across threads -- confirm the
        // launch geometry serializes edges per node or accept the race.
    }
}
/**
 * Folds the buffered incoming messages into each destination node's buffer.
 *
 * Launched with a 3-D grid: x covers nodes (grid-stride), y selects which of a
 * node's incoming edges this thread handles, z indexes the state inside
 * combine_message_cuda.
 *
 * @param message_buffer The per-node message buffer to update
 * @param num_variables The number of valid entries in each belief's data array
 * @param previous_messages The previously sent edge messages
 * @param dest_node_to_edges_nodes Parallel array; maps each node to its first index in dest_node_to_edges_edges
 * @param dest_node_to_edges_edges Parallel array; maps those indices to edge ids
 * @param current_num_edges The number of edges in the graph
 * @param num_vertices The number of vertices in the graph
 * @param n_is_pow_2 Forwarded to combine_message_cuda
 * @param warp_size Forwarded to combine_message_cuda
 */
__global__
void read_incoming_messages_kernel(struct belief * __restrict__ message_buffer,
                                   const size_t num_variables,
                                   const struct belief * __restrict__ previous_messages,
                                   const size_t * __restrict__ dest_node_to_edges_nodes,
                                   const size_t * __restrict__ dest_node_to_edges_edges,
                                   const size_t current_num_edges,
                                   const size_t num_vertices,
                                   char n_is_pow_2, const size_t warp_size){
    size_t node;
    const size_t local_edge = blockIdx.y*blockDim.y + threadIdx.y;
    for(node = blockIdx.x*blockDim.x + threadIdx.x; node < num_vertices; node += blockDim.x * gridDim.x) {
        // [first, last) is this node's slice of dest_node_to_edges_edges;
        // the final node's slice runs to the end of the edge list
        const size_t first = dest_node_to_edges_nodes[node];
        const size_t last = (node + 1 >= num_vertices) ? current_num_edges : dest_node_to_edges_nodes[node + 1];
        if (local_edge < last - first) {
            const size_t edge = dest_node_to_edges_edges[first + local_edge];
            combine_message_cuda(message_buffer, previous_messages, num_variables, node,
                                 edge, current_num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
 * Multiplies a node's buffered belief through the edge joint probability table
 * and writes the normalized result to the edge's outbound message. The
 * pre-normalization sum is recorded in the previous/current scalars used by the
 * delta reduction.
 *
 * @param message_buffer The array of per-node buffered beliefs
 * @param edge_index The index of the edge on which the message is sent
 * @param node_index The index of the sending node
 * @param num_src The number of source states (rows of the joint table)
 * @param num_dest The number of destination states (columns of the joint table)
 * @param edge_messages The outbound message buffer
 * @param edge_messages_previous Per-edge previous normalization sums
 * @param edges_messages_current Per-edge current normalization sums
 */
__device__
void send_message_for_edge_cuda(const struct belief * __restrict__ message_buffer,
                                const size_t edge_index, const size_t node_index,
                                const size_t num_src, const size_t num_dest,
                                struct belief * __restrict__ edge_messages,
                                float * __restrict__ edge_messages_previous,
                                float * __restrict__ edges_messages_current){
    size_t i, j;
    // BUG FIX: scratch values were previously staged in __shared__ arrays
    // indexed only by threadIdx.x; callers launch 2-D blocks, so threads
    // differing in threadIdx.y raced on the same slot. Plain registers are
    // private per thread and remove the race.
    float partial_sum;
    float sum = 0.0f;
    for(i = 0; i < num_src; ++i){
        partial_sum = 0.0f;
        for(j = 0; j < num_dest; ++j){
            partial_sum += edge_joint_probability->data[i][j] * message_buffer[node_index].data[j];
        }
        edge_messages[edge_index].data[i] = partial_sum;
        sum += partial_sum;
    }
    // avoid dividing by zero (or flipping signs) when everything cancelled out
    if(sum <= 0.0f){
        sum = 1.0f;
    }
    // rotate the normalization scalars used by the convergence delta
    edge_messages_previous[edge_index] = edges_messages_current[edge_index];
    edges_messages_current[edge_index] = sum;
    for(i = 0; i < num_src; ++i){
        edge_messages[edge_index].data[i] /= sum;
    }
}
/**
* Sends the messages for all nodes in the graph
* @param message_buffer The incoming beliefs
* @param current_num_edges The number of edges in the graph
* @param joint_probabilities The joint probability table of the graph
 * @param current_edge_messages The outbound buffer of messages currently on the edges
* @param src_node_to_edges_nodes Parallel array; maps nodes to the edges in which they are source nodes; first half; maps nodes to their index in src_node_to_edges_edges
* @param src_node_to_edges_edges Parallel array; maps nodes to the edges in which they are source nodes; second half; maps the indices to the edges
* @param num_vertices The number of vertices (nodes) in the graph
*/
__global__
void send_message_for_node_kernel(const struct belief * __restrict__ message_buffer, const size_t current_num_edges,
                                  const size_t joint_probabilities_dim_x,
                                  const size_t joint_probabilities_dim_y,
                                  struct belief * __restrict__ current_edge_messages,
                                  float * __restrict__ current_edge_message_previous,
                                  float * __restrict__ current_edge_message_current,
                                  const size_t * __restrict__ src_node_to_edges_nodes,
                                  const size_t * __restrict__ src_node_to_edges_edges,
                                  const size_t num_vertices){
    // For each (node, outgoing-edge-slot) pair, sends the node's buffered belief
    // along the corresponding edge. y dimension = edge slot, x dimension = node.
    size_t node_index, edge_index, temp_edge_index, start_index, end_index, diff_index;

    edge_index = blockIdx.y * blockDim.y + threadIdx.y;
    for (node_index = blockIdx.x * blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x) {
        start_index = src_node_to_edges_nodes[node_index];
        if (node_index + 1 >= num_vertices) {
            end_index = current_num_edges;
        }
        else {
            end_index = src_node_to_edges_nodes[node_index + 1];
        }
        diff_index = end_index - start_index;
        if (edge_index < diff_index) {
            // BUGFIX: previously this assigned the looked-up edge id back into
            // edge_index, clobbering the per-thread slot offset and corrupting
            // every subsequent node_index iteration of the grid-stride loop.
            // Use a temporary, matching the sibling combine kernels.
            temp_edge_index = src_node_to_edges_edges[edge_index + start_index];
            send_message_for_edge_cuda(message_buffer, temp_edge_index, node_index,
                                       joint_probabilities_dim_x, joint_probabilities_dim_y, current_edge_messages,
                                       current_edge_message_previous, current_edge_message_current);
        }
    }
}
/**
* Marginalizes and normalizes the beliefs in the graph
* @param message_buffer The source beliefs
* @param node_states The current states of the nodes
* @param current_edges_messages The current buffered messages on the graph
* @param dest_node_to_edges_nodes Parallel array; maps nodes to the edges in which they are the destination nodes; first half; maps nodes to their indices in dest_node_to_edges_edges
* @param dest_node_to_edges_edges Parallel array; maps nodes to the edges in which they are the destination nodes; second half; maps the indices to the edges
* @param num_vertices The number of vertices (nodes) in the graph
* @param num_edges The number of edges in the graph
* @param n_is_pow_2 Flag for determining if padding needed for shared memory
* @param warp_size The size of the warp of the GPU
*/
__global__
void marginalize_node_combine_kernel(struct belief * __restrict__ message_buffer,
                                     const struct belief * __restrict__ node_states, const size_t num_variables,
                                     const struct belief * __restrict__ current_edges_messages,
                                     const size_t * __restrict__ dest_node_to_edges_nodes,
                                     const size_t * __restrict__ dest_node_to_edges_edges,
                                     const size_t num_vertices,
                                     const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    // Resets each node's buffer entry to the multiplicative identity, then folds
    // in the message from each incoming edge. y dimension = edge slot / variable.
    const size_t edge_slot = blockIdx.y * blockDim.y + threadIdx.y;
    size_t node_id;

    for (node_id = blockIdx.x * blockDim.x + threadIdx.x; node_id < num_vertices; node_id += blockDim.x * gridDim.x) {
        if (edge_slot < num_variables) {
            message_buffer[node_id].data[edge_slot] = 1.0;
        }
        const size_t first = dest_node_to_edges_nodes[node_id];
        const size_t last = (node_id + 1 < num_vertices) ? dest_node_to_edges_nodes[node_id + 1] : num_edges;
        if (edge_slot < last - first) {
            const size_t edge_id = dest_node_to_edges_edges[first + edge_slot];
            combine_message_cuda(message_buffer, current_edges_messages, num_variables, node_id, edge_id, num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
* Marginalizes and normalizes the beliefs in the graph
* @param message_buffer The source beliefs
* @param node_states The current states of the nodes
* @param current_edges_messages The current buffered messages on the graph
* @param dest_node_to_edges_nodes Parallel array; maps nodes to the edges in which they are the destination nodes; first half; maps nodes to their indices in dest_node_to_edges_edges
* @param dest_node_to_edges_edges Parallel array; maps nodes to the edges in which they are the destination nodes; second half; maps the indices to the edges
* @param num_vertices The number of vertices (nodes) in the graph
* @param num_edges The number of edges in the graph
* @param n_is_pow_2 Flag for determining if padding needed for shared memory
* @param warp_size The size of the warp of the GPU
*/
__global__
void marginalize_page_rank_node_combine_kernel(struct belief * __restrict__ message_buffer,
                                               const struct belief * __restrict__ node_states, const size_t num_variables,
                                               const struct belief * __restrict__ current_edges_messages,
                                               const size_t * __restrict__ dest_node_to_edges_nodes,
                                               const size_t * __restrict__ dest_node_to_edges_edges,
                                               const size_t num_vertices,
                                               const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    // PageRank variant: buffer entries reset to the additive identity (0),
    // then each incoming edge's contribution is accumulated.
    const size_t edge_slot = blockIdx.y * blockDim.y + threadIdx.y;
    size_t node_id;

    for (node_id = blockIdx.x * blockDim.x + threadIdx.x; node_id < num_vertices; node_id += blockDim.x * gridDim.x) {
        if (edge_slot < num_variables) {
            message_buffer[node_id].data[edge_slot] = 0.0;
        }
        const size_t first = dest_node_to_edges_nodes[node_id];
        const size_t last = (node_id + 1 < num_vertices) ? dest_node_to_edges_nodes[node_id + 1] : num_edges;
        if (edge_slot < last - first) {
            const size_t edge_id = dest_node_to_edges_edges[first + edge_slot];
            combine_page_rank_message_cuda(message_buffer, current_edges_messages, num_variables, node_id, edge_id, num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
* Computes the argmax of the beliefs in the graph
* @param message_buffer The source beliefs
* @param node_states The current states of the nodes
* @param current_edges_messages The current buffered messages on the graph
* @param dest_node_to_edges_nodes Parallel array; maps nodes to the edges in which they are the destination nodes; first half; maps nodes to their indices in dest_node_to_edges_edges
* @param dest_node_to_edges_edges Parallel array; maps nodes to the edges in which they are the destination nodes; second half; maps the indices to the edges
* @param num_vertices The number of vertices (nodes) in the graph
* @param num_edges The number of edges in the graph
* @param n_is_pow_2 Flag for determining if padding needed for shared memory
* @param warp_size The size of the warp of the GPU
*/
__global__
void argmax_node_combine_kernel(struct belief * __restrict__ message_buffer,
                                const struct belief * __restrict__ node_states, const size_t num_variables,
                                const struct belief * __restrict__ current_edges_messages,
                                const size_t * __restrict__ dest_node_to_edges_nodes,
                                const size_t * __restrict__ dest_node_to_edges_edges,
                                const size_t num_vertices,
                                const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    // Viterbi variant: buffer entries reset to 1, then combined with each
    // incoming edge via the max-product rule.
    const size_t edge_slot = blockIdx.y * blockDim.y + threadIdx.y;
    size_t node_id;

    for (node_id = blockIdx.x * blockDim.x + threadIdx.x; node_id < num_vertices; node_id += blockDim.x * gridDim.x) {
        if (edge_slot < num_variables) {
            message_buffer[node_id].data[edge_slot] = 1.0;
        }
        const size_t first = dest_node_to_edges_nodes[node_id];
        const size_t last = (node_id + 1 < num_vertices) ? dest_node_to_edges_nodes[node_id + 1] : num_edges;
        if (edge_slot < last - first) {
            const size_t edge_id = dest_node_to_edges_edges[first + edge_slot];
            combine_viterbi_message_cuda(message_buffer, current_edges_messages, num_variables, node_id, edge_id, num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
* Marginalizes and normalizes nodes in the graph
* @param message_buffer The incoming beliefs
* @param node_states The destination belief to update
* @param current_edges_messages The buffered beliefs on the edges
* @param dest_node_to_edges_nodes Parallel array; maps nodes to the edges in which they are the destination node; first half; maps nodes to their indices in dest_node_to_edges_edges
* @param dest_node_to_edges_edges Parallel array; maps nodes to the edges in which they are the destination node; second half; maps the indices to the edges
* @param num_vertices The number of vertices (nodes) in the graph
* @param num_edges The number of edges in the graph
* @param n_is_pow_2 Flag for determining if padding is needed for shared memory
* @param warp_size The size of the warp of the GPU
*/
__global__
void marginalize_sum_node_kernel(const struct belief * __restrict__ message_buffer,
                                 struct belief * __restrict__ node_states, const size_t num_variables,
                                 const struct belief * __restrict__ current_edges_messages,
                                 const size_t * __restrict__ dest_node_to_edges_nodes,
                                 const size_t * __restrict__ dest_node_to_edges_edges,
                                 const size_t num_vertices,
                                 const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    // Normalizes each node's combined belief: node_states[v][k] =
    // message_buffer[v][k] / sum_k(message_buffer[v][k]).
    size_t node_index, edge_index;
    __shared__ float sum[BLOCK_SIZE_2_D_X];
    __shared__ float shared_message_buffer[BLOCK_SIZE_2_D_X][BLOCK_SIZE_2_D_Y];

    edge_index = blockIdx.y * blockDim.y + threadIdx.y;
    for (node_index = blockIdx.x * blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x) {
        // BUGFIX: the accumulator was previously zeroed only when the *global*
        // edge_index was 0, leaving it uninitialized in blocks with
        // blockIdx.y > 0; use the block-local y index instead.
        if (threadIdx.y == 0) {
            sum[threadIdx.x] = 0.0f;
        }
        if (edge_index < num_variables) {
            // BUGFIX: was `*=`, which multiplied into uninitialized shared
            // memory; the sibling dampening kernel assigns (`=`), and the
            // buffered belief is what gets normalized here.
            shared_message_buffer[threadIdx.x][threadIdx.y] = message_buffer[node_index].data[edge_index];
        }
        // BUGFIX: barriers hoisted out of the divergent `edge_index < num_variables`
        // branch — __syncthreads() must be reached by every thread in the block.
        __syncthreads();
        if (edge_index < num_variables) {
            atomicAdd(&sum[threadIdx.x], shared_message_buffer[threadIdx.x][threadIdx.y]);
        }
        __syncthreads();
        // guard against a non-positive normalizer
        if (threadIdx.y == 0 && sum[threadIdx.x] <= 0.0f) {
            sum[threadIdx.x] = 1.0f;
        }
        __syncthreads();
        if (edge_index < num_variables) {
            node_states[node_index].data[edge_index] = shared_message_buffer[threadIdx.x][threadIdx.y] / sum[threadIdx.x];
        }
        // keep iterations separated so sum is not re-zeroed while still being read
        __syncthreads();
        // NOTE(review): as in the original, the grid-stride loop can give threads of
        // one block different trip counts near num_vertices, which is unsafe with
        // in-loop barriers — confirm launch config guarantees uniform trip counts.
    }
}
__global__
void marginalize_dampening_factor_kernel(const struct belief * __restrict__ message_buffer,
                                         struct belief * __restrict__ node_states, const size_t num_variables,
                                         const struct belief * __restrict__ current_edges_messages,
                                         const size_t * __restrict__ dest_node_to_edges_nodes,
                                         const size_t * __restrict__ dest_node_to_edges_edges,
                                         const size_t num_vertices,
                                         const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    // PageRank update: state = (1 - d)/in_degree + d * accumulated_message.
    size_t node_index, edge_index, end_index, start_index;
    __shared__ float factor[BLOCK_SIZE_2_D_X];
    __shared__ float shared_message_buffer[BLOCK_SIZE_2_D_X][BLOCK_SIZE_2_D_Y];

    edge_index = blockIdx.y * blockDim.y + threadIdx.y;
    for (node_index = blockIdx.x * blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x) {
        start_index = dest_node_to_edges_nodes[node_index];
        if (node_index + 1 >= num_vertices) {
            end_index = num_edges;
        }
        else {
            end_index = dest_node_to_edges_nodes[node_index + 1];
        }
        // BUGFIX: factor was previously computed only when the *global*
        // edge_index was 0, so blocks with blockIdx.y > 0 read an
        // uninitialized factor; use the block-local y index.
        // NOTE(review): end_index == start_index (a node with no incoming
        // edges) still divides by zero, as in the original — confirm callers
        // never pass such nodes.
        if (threadIdx.y == 0) {
            factor[threadIdx.x] = (1.0f - DAMPENING_FACTOR) / (end_index - start_index);
        }
        // BUGFIX: barrier hoisted out of the divergent branch so every thread
        // in the block reaches it.
        __syncthreads();
        if (edge_index < num_variables) {
            // each thread only ever touches its own shared slot, so no barrier
            // is needed between the write and the read-back
            shared_message_buffer[threadIdx.x][threadIdx.y] = factor[threadIdx.x] + DAMPENING_FACTOR * message_buffer[node_index].data[edge_index];
            node_states[node_index].data[edge_index] = shared_message_buffer[threadIdx.x][threadIdx.y];
        }
        // keep iterations separated so factor is not overwritten while still in use
        __syncthreads();
    }
}
__global__
void marginalize_viterbi_beliefs(struct belief * __restrict__ nodes, const size_t nodes_size, const size_t num_nodes){
    // Normalizes each node's belief vector to sum to 1 (grid-stride over nodes).
    size_t idx, i;
    float sum;

    for (idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_nodes; idx += blockDim.x * gridDim.x) {
        sum = 0.0f;
        for (i = 0; i < nodes_size; ++i) {
            sum += nodes[idx].data[i];
        }
        // BUGFIX: guard against a non-positive sum, which previously produced
        // NaN/Inf via division by zero; matches the sums<=0 -> 1 convention
        // used by send_message_for_edge_cuda.
        if (sum <= 0.0f) {
            sum = 1.0f;
        }
        for (i = 0; i < nodes_size; ++i) {
            nodes[idx].data[i] = nodes[idx].data[i] / sum;
        }
    }
}
/**
* Marginalizes and normalizes nodes in the graph
* @param message_buffer The incoming beliefs
* @param node_states The destination belief to update
* @param current_edges_messages The buffered beliefs on the edges
* @param dest_node_to_edges_nodes Parallel array; maps nodes to the edges in which they are the destination node; first half; maps nodes to their indices in dest_node_to_edges_edges
* @param dest_node_to_edges_edges Parallel array; maps nodes to the edges in which they are the destination node; second half; maps the indices to the edges
* @param num_vertices The number of vertices (nodes) in the graph
* @param num_edges The number of edges in the graph
* @param n_is_pow_2 Flag for determining if padding is needed for shared memory
* @param warp_size The size of the warp of the GPU
*/
__global__
void argmax_kernel(const struct belief * __restrict__ message_buffer,
                   struct belief * __restrict__ node_states,
                   const size_t num_variables,
                   const struct belief * __restrict__ current_edges_messages,
                   const size_t * __restrict__ dest_node_to_edges_nodes,
                   const size_t * __restrict__ dest_node_to_edges_edges,
                   const size_t num_vertices,
                   const size_t num_edges, char n_is_pow_2, const size_t warp_size){
    // Writes max(-1, message_buffer[v][k]) into node_states[v][k].
    size_t node_index, edge_index;
    __shared__ float shared_message_buffer[BLOCK_SIZE_2_D_X][BLOCK_SIZE_2_D_Y];

    edge_index = blockIdx.y * blockDim.y + threadIdx.y;
    for (node_index = blockIdx.x * blockDim.x + threadIdx.x; node_index < num_vertices; node_index += blockDim.x * gridDim.x) {
        if (edge_index < num_variables) {
            // BUGFIX: every thread must seed its own shared slot; previously
            // only the edge_index == 0 thread did, so all other threads ran
            // fmaxf against uninitialized shared memory.
            shared_message_buffer[threadIdx.x][threadIdx.y] = -1.0f;
            shared_message_buffer[threadIdx.x][threadIdx.y] = fmaxf(shared_message_buffer[threadIdx.x][threadIdx.y], message_buffer[node_index].data[edge_index]);
            node_states[node_index].data[edge_index] = shared_message_buffer[threadIdx.x][threadIdx.y];
        }
        // BUGFIX: removed the __syncthreads() calls that sat inside this
        // divergent branch (deadlock hazard); each thread only touches its own
        // shared slot, so no cross-thread synchronization is required.
    }
}
/**
* Calculates the delta for a given message
* @param i The message's index
* @param current_messages The current messages
* @return The delta between the messages
*/
__device__
float calculate_local_delta(size_t i, const float * __restrict__ current_messages_previous, const float * __restrict__ current_messages_current){
    // Absolute change of message i between iterations; NaN differences
    // (e.g. from uninitialized messages) count as zero change.
    float change = current_messages_previous[i] - current_messages_current[i];
    if (isnan(change)) {
        change = 0.0f;
    }
    return fabsf(change);
}
/**
* Calculates the delta used for testing for convergence via reduction
* @param current_messages The current beliefs of the graph
* @param delta The delta to write back
* @param delta_array Temp array used to store partial deltas for reduction
* @param num_edges The number of edges in the graph
*/
__global__
void calculate_delta(const float * __restrict__ current_messages_previous, const float * __restrict__ current_messages_current,
                     float * __restrict__ delta, float * __restrict__ delta_array, const size_t num_edges){
    // Sum-reduction of per-edge deltas into *delta.
    // Expects dynamic shared memory of at least blockDim.x floats.
    // NOTE(review): the final `*delta = my_delta` is a plain per-block store, so
    // with more than one block the last writer wins and *delta holds only that
    // block's partial sum — confirm this kernel is only meaningful for
    // single-block grids or that callers tolerate the approximation.
    extern __shared__ float shared_delta[];
    size_t tid, idx, i, s;
    tid = threadIdx.x;
    // i uses a 2-elements-per-thread layout (block covers blockDim.x * 2 entries)
    i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    // phase 1: materialize per-edge deltas into the global scratch array
    for(idx = blockIdx.x*blockDim.x + threadIdx.x; idx < num_edges; idx += blockDim.x * gridDim.x){
        delta_array[idx] = calculate_local_delta(idx, current_messages_previous, current_messages_current);
    }
    // NOTE(review): __syncthreads() only orders threads within a block; the
    // delta_array[i + BLOCK_SIZE] read below may cross into entries written by
    // a different block, which this barrier does not order — verify.
    __syncthreads();
    // phase 2: each thread folds its (up to) two entries into a private partial sum
    float my_delta = (i < num_edges) ? delta_array[i] : 0;
    if(i + BLOCK_SIZE < num_edges){
        my_delta += delta_array[i + BLOCK_SIZE];
    }
    shared_delta[tid] = my_delta;
    __syncthreads();
    // do reduction in shared memory (tree reduction down to one warp)
    for(s= blockDim.x / 2; s > 32; s>>=1){
        if(tid < s){
            shared_delta[tid] = my_delta = my_delta + shared_delta[tid + s];
        }
        __syncthreads();
    }
#if (__CUDA_ARCH__ >= 300)
    // warp-level finish via shuffles (no shared memory needed inside a warp)
    // NOTE(review): __shfl_down is the legacy mask-less intrinsic, removed for
    // compute capability 7.0+; __shfl_down_sync(0xffffffff, ...) is the
    // modern replacement — confirm target toolchain/architecture.
    if(tid < 32){
        //fetch final intermediate sum from second warp
        if(BLOCK_SIZE >= 64){
            my_delta += shared_delta[tid + 32];
        }
        for(s = WARP_SIZE/2; s > 0; s /= 2){
            my_delta += __shfl_down(my_delta, s);
        }
    }
#else
    // pre-Kepler fallback: fully unrolled shared-memory reduction
    if((BLOCK_SIZE >= 64) && (tid < 32)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 32) && (tid < 16)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 16) && (tid < 8)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 8) && (tid < 4)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 4) && (tid < 2)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 2) && (tid < 1)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
    }
    __syncthreads();
#endif
    // thread 0 holds the block's total
    if(tid == 0) {
        *delta = my_delta;
    }
}
/**
* Calculates the delta used for testing for convergence via reduction
* @details Copied from the sample in the NVIDIA CUDA SDK
* @param current_messages The current beliefs of the graph
* @param delta The delta to write back
* @param delta_array Temp array used to store partial deltas for reduction
* @param num_edges The number of edges in the graph
*/
__global__
void calculate_delta_6(const float * __restrict__ current_messages_previous, const float * __restrict__ current_messages_current,
                       float * __restrict__ delta, float * __restrict__ delta_array,
                       const size_t num_edges, char n_is_pow_2, const size_t warp_size) {
    // Sum-reduction of per-edge deltas, patterned on the CUDA SDK "reduce6"
    // kernel. Expects dynamic shared memory of at least blockDim.x floats.
    // NOTE(review): the final `*delta = my_delta` is a per-block store; with
    // more than one block the last writer wins — confirm callers launch a
    // single block or tolerate a partial sum.
    extern __shared__ float shared_delta[];
    size_t offset;
    size_t idx;
    size_t tid = threadIdx.x;
    // two elements per thread per grid-stride step
    size_t i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    size_t grid_size = blockDim.x * 2 * gridDim.x;

    // phase 1: materialize per-edge deltas into the global scratch array
    for (idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_edges; idx += blockDim.x * gridDim.x) {
        delta_array[idx] = calculate_local_delta(idx, current_messages_previous, current_messages_current);
    }
    __syncthreads();

    // phase 2: each thread accumulates its strided pairs into a private sum
    float my_delta = 0.0f;
    while (i < num_edges) {
        // BUGFIX: was `my_delta = delta_array[i]`, which discarded the sum
        // accumulated on previous grid-stride iterations.
        my_delta += delta_array[i];
        // ensure we don't read out of bounds
        // BUGFIX: the partner element is blockDim.x away; the original
        // re-added delta_array[i] (double-counting i and dropping i+blockDim.x).
        if (n_is_pow_2 || i + blockDim.x < num_edges) {
            my_delta += delta_array[i + blockDim.x];
        }
        i += grid_size;
    }

    // each thread puts its local sum into shared memory
    shared_delta[tid] = my_delta;
    __syncthreads();

    // phase 3: tree reduction in shared memory
    if ((blockDim.x >= 512) && (tid < 256)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 256];
    }
    __syncthreads();
    if ((blockDim.x >= 256) && (tid < 128)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 128];
    }
    __syncthreads();
    if ((blockDim.x >= 128) && (tid < 64)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 64];
    }
    __syncthreads();
#if (__CUDA_ARCH__ >= 300)
    // NOTE(review): __shfl_down is the legacy mask-less intrinsic (removed on
    // compute 7.0+); __shfl_down_sync(0xffffffff, ...) is the modern form —
    // confirm target toolchain before migrating.
    if (tid < 32) {
        // fetch final intermediate sum from 2nd warp
        if (blockDim.x >= 64) {
            my_delta += shared_delta[tid + 32];
        }
        for (offset = warp_size / 2; offset > 0; offset /= 2) {
            my_delta += __shfl_down(my_delta, offset);
        }
    }
#else
    // fully unroll reduction within a single warp
    if ((blockDim.x >= 64) && (tid < 32)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
    }
    __syncthreads();
    if ((blockDim.x >= 32) && (tid < 16)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
    }
    __syncthreads();
    if ((blockDim.x >= 16) && (tid < 8)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
    }
    __syncthreads();
    if ((blockDim.x >= 8) && (tid < 4)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
    }
    __syncthreads();
    if ((blockDim.x >= 4) && (tid < 2)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
    }
    __syncthreads();
    if ((blockDim.x >= 2) && (tid < 1)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
    }
    __syncthreads();
#endif
    // write result for this block to global mem
    if (tid == 0) {
        *delta = my_delta;
    }
}
/**
* Calculates the delta used for testing for convergence via reduction
* @details Simple kernel used for testing
* @param previous_messages The previous beliefs of the graph
* @param current_messages The current beliefs of the graph
* @param delta The delta to write back
* @param delta_array Temp array used to store partial deltas for reduction
* @param num_edges The number of edges in the graph
*/
__global__
void calculate_delta_simple(const float * __restrict__ current_messages_previous,
                            const float * __restrict__ current_messages_current,
                            float * __restrict__ delta, float * __restrict__ delta_array,
                            const size_t num_edges) {
    // Naive sum-reduction of per-edge deltas (testing/reference version).
    // Expects dynamic shared memory of at least blockDim.x floats.
    extern __shared__ float shared_delta[];
    const size_t tid = threadIdx.x;
    size_t idx;

    // phase 1: per-edge deltas into the global scratch array
    for (idx = blockIdx.x * blockDim.x + tid; idx < num_edges; idx += blockDim.x * gridDim.x) {
        delta_array[idx] = calculate_local_delta(idx, current_messages_previous, current_messages_current);
    }
    __syncthreads();

    // phase 2: stage this block's slice in shared memory, zero-padding the tail
    idx = blockIdx.x * blockDim.x + tid;
    shared_delta[tid] = (idx < num_edges) ? delta_array[idx] : 0;
    __syncthreads();

    // phase 3: interleaved tree reduction over the block
    size_t stride, target;
    for (stride = 1; stride < blockDim.x; stride <<= 1) {
        target = 2 * stride * tid;
        if (target < blockDim.x) {
            shared_delta[target] += shared_delta[target + stride];
        }
        __syncthreads();
    }

    // write result for this block to global mem
    if (tid == 0) {
        *delta = shared_delta[0];
    }
}
/**
* Helper function to test for errors for kernel calls
*/
void check_cuda_kernel_return_code(){
    // Aborts the process if the most recent kernel launch reported an error.
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) {
        return;
    }
    fprintf(stderr, "Error: %s\n", cudaGetErrorString(status));
    exit(-1);
}
/**
* Runs loopy BP on the graph
* @param graph The graph to use
* @param convergence The convergence threshold; if the delta falls below it, execution will stop
* @param max_iterations The maximum number of iterations to run for
* @return The actual number of iterations executed
*/
int loopy_propagate_until_cuda_kernels(Graph_t graph, const float convergence, const int max_iterations){
    size_t i, j;
    int num_iter;
    float * delta;
    float * delta_array;
    float previous_delta, host_delta;
    char is_pow_2;
    struct belief * message_buffer;
    struct belief * current_messages;
    float * current_messages_previous;
    float * current_messages_current;
    size_t * src_nodes_to_edges_nodes;
    size_t * src_nodes_to_edges_edges;
    size_t * dest_nodes_to_edges_nodes;
    size_t * dest_nodes_to_edges_edges;
    struct belief * node_states;

    host_delta = 0.0f;
    // BUGFIX: previous_delta was read uninitialized on the first convergence
    // check below; initialize like page_rank_until_cuda_kernels does.
    previous_delta = INFINITY;

    const size_t num_vertices = graph->current_num_vertices;
    const size_t num_edges = graph->current_num_edges;
    const size_t edge_joint_probability_dim_x = graph->edge_joint_probability_dim_x;
    const size_t edge_joint_probability_dim_y = graph->edge_joint_probability_dim_y;
    const size_t node_states_size = graph->node_states_size;

    // BUGFIX: the old test (num_vertices % 2 == 0) checked *evenness* of the
    // vertex count, but the flag lets calculate_delta_6 skip bounds checks on
    // num_edges reads, so a wrong "true" causes out-of-bounds access. Use a
    // real power-of-two test on num_edges.
    is_pow_2 = (num_edges > 0) && ((num_edges & (num_edges - 1)) == 0);

    // allocate device data
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages, sizeof(struct belief) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages_current, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages_previous, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&node_states, sizeof(struct belief) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta_array, sizeof(float) * num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&message_buffer, sizeof(struct belief) * num_vertices));

    // copy graph data to the device
    CUDA_CHECK_RETURN(cudaMemcpyToSymbol(edge_joint_probability, &(graph->edge_joint_probability), sizeof(struct joint_probability)));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages, graph->edges_messages, sizeof(struct belief) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages_previous, graph->edges_messages_previous, sizeof(float) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages_current, graph->edges_messages_current, sizeof(float) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(node_states, graph->node_states, sizeof(struct belief) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, cudaMemcpyHostToDevice));

    // launch geometry (ceil-div so partial blocks cover the tails)
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1) / BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1) / BLOCK_SIZE_2_D_Y;
    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1) / BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = (graph->max_degree + BLOCK_SIZE_3_D_Y - 1) / BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = (MAX_STATES + BLOCK_SIZE_3_D_Z - 1) / BLOCK_SIZE_3_D_Z;

    num_iter = 0;

    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);
    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);

    // iterate in batches of BATCH_SIZE, checking convergence once per batch
    for (i = BATCH_SIZE; i <= max_iterations; i += BATCH_SIZE) {
        for (j = 0; j < BATCH_SIZE; ++j) {
            init_message_buffer_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, node_states, node_states_size, num_vertices);
            check_cuda_kernel_return_code();
            read_incoming_messages_kernel<<<dimMessagesGrid, dimMessagesBuffer>>>(message_buffer, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            send_message_for_node_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, num_edges,
                                                                                edge_joint_probability_dim_x, edge_joint_probability_dim_y,
                                                                                current_messages, current_messages_previous, current_messages_current,
                                                                                src_nodes_to_edges_nodes, src_nodes_to_edges_edges, num_vertices);
            check_cuda_kernel_return_code();
            marginalize_node_combine_kernel<<<dimMessagesGrid, dimMessagesBuffer>>>(message_buffer,
                                                                                    node_states, node_states_size,
                                                                                    current_messages,
                                                                                    dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            marginalize_sum_node_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer,
                                                                               node_states, node_states_size,
                                                                               current_messages,
                                                                               dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            num_iter++;
        }
        // convergence check: reduce |previous - current| over all edges
        calculate_delta_6<<<dimReduceGrid, dimReduceBlock, reduceSmemSize>>>(current_messages_previous, current_messages_current, delta, delta_array, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(cudaMemcpy(&host_delta, delta, sizeof(float), cudaMemcpyDeviceToHost));
        if (host_delta < convergence || fabs(host_delta - previous_delta) < convergence) {
            break;
        }
        if (i < max_iterations - BATCH_SIZE) {
            previous_delta = host_delta;
        }
        if (i >= max_iterations) {
            printf("No Convergence: previous: %f vs current: %f\n", previous_delta, host_delta);
        }
    }

    // copy results back to the host graph
    CUDA_CHECK_RETURN(cudaMemcpy(graph->node_states, node_states, sizeof(struct belief) * num_vertices, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(graph->edges_messages, current_messages, sizeof(struct belief) * num_edges, cudaMemcpyDeviceToHost));
    // BUGFIX: the previous/current normalization sums were copied in and
    // updated on the device but never copied back, leaving the host arrays
    // stale; mirror them back as well.
    CUDA_CHECK_RETURN(cudaMemcpy(graph->edges_messages_previous, current_messages_previous, sizeof(float) * num_edges, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(graph->edges_messages_current, current_messages_current, sizeof(float) * num_edges, cudaMemcpyDeviceToHost));

    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(cudaFree(current_messages));
    CUDA_CHECK_RETURN(cudaFree(current_messages_previous));
    CUDA_CHECK_RETURN(cudaFree(current_messages_current));
    CUDA_CHECK_RETURN(cudaFree(message_buffer));
    CUDA_CHECK_RETURN(cudaFree(node_states));
    CUDA_CHECK_RETURN(cudaFree(delta));
    CUDA_CHECK_RETURN(cudaFree(delta_array));

    return num_iter;
}
/**
* Runs PageRank on the graph
* @param graph The graph to use
* @param convergence The convergence threshold; if the delta falls below it, execution will stop
* @param max_iterations The maximum number of iterations to run for
* @return The actual number of iterations executed
*/
int page_rank_until_cuda_kernels(Graph_t graph, const float convergence, const int max_iterations){
    int num_iter;
    size_t i, j;
    float * delta;                      // device scalar: reduced delta after each batch
    float * delta_array;                // device scratch: per-edge deltas for the reduction
    float previous_delta, host_delta;
    char is_pow_2;
    struct belief * message_buffer;     // device: per-vertex working beliefs
    struct belief * current_messages;   // device: per-edge message beliefs
    float * current_messages_previous;  // device: per-edge previous message values
    float * current_messages_current;   // device: per-edge current message values
    size_t * src_nodes_to_edges_nodes;
    size_t * src_nodes_to_edges_edges;
    size_t * dest_nodes_to_edges_nodes;
    size_t * dest_nodes_to_edges_edges;
    struct belief * node_states;

    host_delta = 0.0f;
    previous_delta = INFINITY;

    const size_t num_vertices = graph->current_num_vertices;
    const size_t num_edges = graph->current_num_edges;
    const size_t edge_joint_probability_dim_x = graph->edge_joint_probability_dim_x;
    const size_t edge_joint_probability_dim_y = graph->edge_joint_probability_dim_y;
    const size_t node_states_size = graph->node_states_size;

    // NOTE(review): despite the name this records "vertex count is even", not
    // "power of two"; it matches the sibling loopy/viterbi variants in this file.
    is_pow_2 = num_vertices % 2 == 0;

    // allocate device buffers
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages, sizeof(struct belief) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages_previous, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages_current, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&node_states, sizeof(struct belief) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta_array, sizeof(float) * num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&message_buffer, sizeof(struct belief) * num_vertices));

    // copy graph data to the device
    CUDA_CHECK_RETURN(cudaMemcpyToSymbol(edge_joint_probability, &(graph->edge_joint_probability), sizeof(struct joint_probability)));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages, graph->edges_messages, sizeof(struct belief) * graph->current_num_edges, cudaMemcpyHostToDevice));
    // Fix: edges_messages_previous/edges_messages_current were previously both
    // copied into current_messages, clobbering the belief data and leaving the
    // previous/current buffers uninitialized (cf. the correct viterbi variant).
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages_previous, graph->edges_messages_previous, sizeof(float) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages_current, graph->edges_messages_current, sizeof(float) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(node_states, graph->node_states, sizeof(struct belief) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, cudaMemcpyHostToDevice));

    // launch geometry (ceil-division block counts)
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1)/ BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1)/BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockDegreeCount = (graph->max_degree + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1)/BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = ( graph->max_degree + BLOCK_SIZE_3_D_Y - 1)/BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = ( MAX_STATES + BLOCK_SIZE_3_D_Z - 1)/BLOCK_SIZE_3_D_Z;

    num_iter = 0;

    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);
    dim3 dimDegreeGrid(blockNodeCount, blockDegreeCount, 1);  // kept for parity with siblings
    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);

    // Run BATCH_SIZE iterations between convergence checks to amortize the
    // device-to-host delta transfer.
    for(i = BATCH_SIZE; i <= max_iterations; i+= BATCH_SIZE){
        for(j = 0; j < BATCH_SIZE; ++j) {
            init_message_buffer_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, node_states, node_states_size, num_vertices);
            check_cuda_kernel_return_code();
            read_incoming_messages_kernel <<<dimMessagesGrid, dimMessagesBuffer>>>(message_buffer, node_states_size,
                    current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            send_message_for_node_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, num_edges,
                    edge_joint_probability_dim_x, edge_joint_probability_dim_y,
                    current_messages, current_messages_previous, current_messages_current,
                    src_nodes_to_edges_nodes, src_nodes_to_edges_edges, num_vertices);
            check_cuda_kernel_return_code();
            marginalize_page_rank_node_combine_kernel<<<dimMessagesGrid, dimMessagesBuffer>>>(message_buffer,
                    node_states, node_states_size,
                    current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            marginalize_dampening_factor_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, node_states, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            num_iter++;
        }
        // Reduce per-edge deltas and check convergence on the host.
        calculate_delta_6<<<dimReduceGrid, dimReduceBlock, reduceSmemSize>>>(current_messages_previous, current_messages_current, delta, delta_array, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(cudaMemcpy(&host_delta, delta, sizeof(float), cudaMemcpyDeviceToHost));
        if(host_delta < convergence || fabs(host_delta - previous_delta) < convergence){
            break;
        }
        previous_delta = host_delta;
    }

    // copy results back to the host
    CUDA_CHECK_RETURN(cudaMemcpy(graph->node_states, node_states, sizeof(struct belief) * num_vertices, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(graph->edges_messages, current_messages, sizeof(struct belief) * num_edges, cudaMemcpyDeviceToHost));

    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(cudaFree(current_messages));
    CUDA_CHECK_RETURN(cudaFree(current_messages_previous));
    CUDA_CHECK_RETURN(cudaFree(current_messages_current));
    CUDA_CHECK_RETURN(cudaFree(message_buffer));
    CUDA_CHECK_RETURN(cudaFree(node_states));
    CUDA_CHECK_RETURN(cudaFree(delta));
    CUDA_CHECK_RETURN(cudaFree(delta_array));

    return num_iter;
}
/**
* Runs Viterbi on the graph
* @param graph The graph to use
* @param convergence The convergence threshold; if the delta falls below it, execution will stop
* @param max_iterations The maximum number of iterations to run for
* @return The actual number of iterations executed
*/
int viterbi_until_cuda_kernels(Graph_t graph, const float convergence, const int max_iterations){
    int num_iter;
    size_t i, j;
    float * delta;                      // device scalar: reduced delta after each batch
    float * delta_array;                // device scratch: per-edge deltas for the reduction
    float previous_delta, host_delta;
    char is_pow_2;
    struct belief * message_buffer;     // device: per-vertex working beliefs
    struct belief * current_messages;   // device: per-edge message beliefs
    float * current_messages_previous;  // device: per-edge previous message values
    float * current_messages_current;   // device: per-edge current message values
    size_t * src_nodes_to_edges_nodes;
    size_t * src_nodes_to_edges_edges;
    size_t * dest_nodes_to_edges_nodes;
    size_t * dest_nodes_to_edges_edges;
    struct belief * node_states;

    previous_delta = INFINITY;
    host_delta = 0.0f;

    const size_t num_vertices = graph->current_num_vertices;
    const size_t num_edges = graph->current_num_edges;
    const size_t edge_joint_probability_dim_x = graph->edge_joint_probability_dim_x;
    const size_t edge_joint_probability_dim_y = graph->edge_joint_probability_dim_y;
    const size_t node_states_size = graph->node_states_size;

    // NOTE(review): despite the name this records "vertex count is even", not
    // "power of two"; it matches the sibling variants in this file.
    is_pow_2 = num_vertices % 2 == 0;

    // allocate device buffers
    // (fixed: the three allocations below had their '&current_' prefixes
    // corrupted into '¤t_', which does not compile)
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_nodes, sizeof(size_t) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_edges, sizeof(size_t) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages, sizeof(struct belief) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages_previous, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages_current, sizeof(float) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&node_states, sizeof(struct belief) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta_array, sizeof(float) * num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&message_buffer, sizeof(struct belief) * num_vertices));

    // copy graph data to the device
    CUDA_CHECK_RETURN(cudaMemcpyToSymbol(edge_joint_probability, &(graph->edge_joint_probability), sizeof(struct joint_probability)));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages, graph->edges_messages, sizeof(struct belief) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages_previous, graph->edges_messages_previous, sizeof(float) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(current_messages_current, graph->edges_messages_current, sizeof(float) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(node_states, graph->node_states, sizeof(struct belief) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(size_t) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(size_t) * graph->current_num_edges, cudaMemcpyHostToDevice));

    // launch geometry (ceil-division block counts)
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1)/ BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1)/BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockDegreeCount = (graph->max_degree + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1)/BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = ( graph->max_degree + BLOCK_SIZE_3_D_Y - 1)/BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = ( MAX_STATES + BLOCK_SIZE_3_D_Z - 1)/BLOCK_SIZE_3_D_Z;

    num_iter = 0;

    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);
    dim3 dimDegreeGrid(blockNodeCount, blockDegreeCount, 1);  // kept for parity with siblings
    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);

    // Run BATCH_SIZE iterations between convergence checks to amortize the
    // device-to-host delta transfer.
    for(i = BATCH_SIZE; i <= max_iterations; i+= BATCH_SIZE){
        for(j = 0; j < BATCH_SIZE; ++j) {
            init_message_buffer_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, node_states, node_states_size, num_vertices);
            check_cuda_kernel_return_code();
            read_incoming_messages_kernel <<<dimMessagesGrid, dimMessagesBuffer>>>(message_buffer, node_states_size,
                    current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            send_message_for_node_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, num_edges,
                    edge_joint_probability_dim_x, edge_joint_probability_dim_y,
                    current_messages, current_messages_previous, current_messages_current,
                    src_nodes_to_edges_nodes, src_nodes_to_edges_edges, num_vertices);
            check_cuda_kernel_return_code();
            argmax_node_combine_kernel<<<dimMessagesGrid, dimMessagesBuffer>>>(message_buffer, node_states, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            argmax_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, node_states, node_states_size, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            num_iter++;
        }
        // Reduce per-edge deltas and check convergence on the host.
        calculate_delta_6<<<dimReduceGrid, dimReduceBlock, reduceSmemSize>>>(current_messages_previous, current_messages_current, delta, delta_array, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(cudaMemcpy(&host_delta, delta, sizeof(float), cudaMemcpyDeviceToHost));
        if(host_delta < convergence || fabs(host_delta - previous_delta) < convergence){
            // NOTE(review): the final marginalization only runs when convergence
            // is reached before max_iterations — matches the original behavior.
            marginalize_viterbi_beliefs<<<num_vertices, BLOCK_SIZE>>>(node_states, node_states_size, num_vertices);
            break;
        }
        previous_delta = host_delta;
    }

    // copy results back to the host
    CUDA_CHECK_RETURN(cudaMemcpy(graph->node_states, node_states, sizeof(struct belief) * num_vertices, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(graph->edges_messages, current_messages, sizeof(struct belief) * num_edges, cudaMemcpyDeviceToHost));

    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(cudaFree(current_messages));
    CUDA_CHECK_RETURN(cudaFree(current_messages_previous));
    CUDA_CHECK_RETURN(cudaFree(current_messages_current));
    CUDA_CHECK_RETURN(cudaFree(message_buffer));
    CUDA_CHECK_RETURN(cudaFree(node_states));
    CUDA_CHECK_RETURN(cudaFree(delta));
    CUDA_CHECK_RETURN(cudaFree(delta_array));

    return num_iter;
}
/**
* Runs loopy BP on the file
* @param file_name The path of the file to read
*/
void test_loopy_belief_propagation_kernels(char * file_name){
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    // Fail fast on a bad path instead of handing a NULL stream to the lexer.
    assert(in != NULL);
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);

    graph = build_graph(expression);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    init_previous_edge(graph);

    // Time only the propagation itself, not parsing or graph construction.
    start = clock();
    loopy_propagate_until_cuda_kernels(graph, PRECISION, NUM_ITERATIONS);
    end = clock();
    time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;
    printf("%s,loopy,%ld,%ld,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, time_elapsed);

    delete_expression(expression);
    graph_destroy(graph);
}
/**
* Runs loopy BP on the AST root node
* @param expression The BNF AST root node
* @param file_name The input file path
* @param out The file handle for the output CSV
*/
void run_test_loopy_belief_propagation_kernels(struct expression * expression, const char * file_name, FILE * out){
    // Build the graph from the AST and prepare the edge index tables.
    Graph_t graph = build_graph(expression);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    init_previous_edge(graph);

    // Time only the propagation itself.
    const clock_t begin = clock();
    const int num_iterations = loopy_propagate_until_cuda_kernels(graph, PRECISION, NUM_ITERATIONS);
    const clock_t finish = clock();
    const double elapsed = (double)(finish - begin) / CLOCKS_PER_SEC;

    // Emit one CSV row and flush so partial runs still record results.
    fprintf(out, "%s,loopy,%ld,%ld,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, num_iterations, elapsed);
    fflush(out);

    graph_destroy(graph);
}
/**
* Reads the XML file and runs loopy BP on it
* @param file_name The input XML file path
* @param out The output CSV file handle
*/
void run_test_loopy_belief_propagation_xml_file_kernels(const char * file_name, FILE * out){
    // Load the graph description from the XML file.
    Graph_t graph = parse_xml_file(file_name);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    init_previous_edge(graph);

    // Time only the propagation itself.
    const clock_t begin = clock();
    const int num_iterations = loopy_propagate_until_cuda_kernels(graph, PRECISION, NUM_ITERATIONS);
    const clock_t finish = clock();
    const double elapsed = (double)(finish - begin) / CLOCKS_PER_SEC;

    // Emit one CSV row and flush so partial runs still record results.
    fprintf(out, "%s,loopy,%ld,%ld,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, num_iterations, elapsed);
    fflush(out);

    graph_destroy(graph);
}
/**
* Reads the XML file and runs loopy BP on it
* @param edge_file_name The file to read for the SNAP edges
* @param node_file_name The file to read for the SNAP observed nodes
* @param out The CSV file to output to
*/
void run_test_loopy_belief_propagation_snap_file_kernels(const char * edge_file_name, const char * node_file_name,
                                                         FILE * out){
    // Build the graph from the SNAP edge list and observed-node file.
    Graph_t graph = parse_graph_from_snap_files(edge_file_name, node_file_name);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    init_previous_edge(graph);

    // Time only the propagation itself.
    const clock_t begin = clock();
    const int num_iterations = loopy_propagate_until_cuda_kernels(graph, PRECISION, NUM_ITERATIONS);
    const clock_t finish = clock();
    const double elapsed = (double)(finish - begin) / CLOCKS_PER_SEC;

    // Emit one CSV row and flush so partial runs still record results.
    fprintf(out, "%s-%s,loopy,%ld,%ld,%d,%d,%lf\n", edge_file_name, node_file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, num_iterations, elapsed);
    fflush(out);

    graph_destroy(graph);
}
void run_test_loopy_belief_propagation_mtx_files_kernels(const char * edges_mtx, const char * nodes_mtx,
                                                         const struct joint_probability * edge_probability,
                                                         int dim_x, int dim_y,
                                                         FILE * out){
    // Total wall time includes graph construction; BP time does not.
    const clock_t overall_start = clock();

    Graph_t graph = build_graph_from_mtx(edges_mtx, nodes_mtx, edge_probability, dim_x, dim_y);
    assert(graph != NULL);

    // This variant uses the hsearch-free index builders.
    set_up_src_nodes_to_edges_no_hsearch(graph);
    set_up_dest_nodes_to_edges_no_hsearch(graph);
    init_previous_edge(graph);

    const clock_t bp_start = clock();
    const int num_iterations = loopy_propagate_until_cuda_kernels(graph, PRECISION, NUM_ITERATIONS);
    const clock_t bp_end = clock();

    const double time_elapsed = (double)(bp_end - bp_start) / CLOCKS_PER_SEC;
    const double total_time_elapsed = (double)(bp_end - overall_start) / CLOCKS_PER_SEC;

    // One CSV row: sizes, degree statistics, iteration counts, and timings.
    fprintf(out, "%s-%s,loopy,%ld,%ld,%d,%d,%lf,%d,%lf,%d,%lf,%lf,%lf\n", edges_mtx, nodes_mtx, graph->current_num_vertices, graph->current_num_edges, graph->diameter, graph->max_in_degree, graph->avg_in_degree, graph->max_out_degree, graph->avg_out_degree, num_iterations, time_elapsed, time_elapsed/num_iterations, total_time_elapsed);
    fflush(out);

    graph_destroy(graph);
}
/**
* Function for printing errors with kernel execution
* @param file The source code file
* @param line The line within file
* @param statement The name of the kernel
* @param err The error message
*/
void CheckCudaErrorAux (const char *file, int line, const char *statement, cudaError_t err)
{
    if (err == cudaSuccess) {
        return;
    }
    // Report on stderr so diagnostics do not interleave with the CSV results
    // this program writes to stdout.
    fprintf(stderr, "%s returned %s (%d) at %s:%d\n", statement, cudaGetErrorString(err), err, file, line);
    exit (1);
}
|
7b69b032277bcc5ef174c7093556f654b43724c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scc.h"
#include<map>
#include<set>
#include<algorithm>
#include<vector>
#include "scc_kernels.h"
using namespace std;
// Record pushed into the device-global vector: the two endpoints
// (u1 = source row, u2 = neighbouring vertex) of a candidate edge.
typedef struct {
uint32_t u1, u2;
} Ele;
// Fixed-capacity device-global output buffer (10001 entries).
__device__ Ele devData[10001];
// Number of slots claimed so far; advanced atomically by VecPushBack.
__device__ int devCount = 0;
// Appends e to the device-global vector. Returns the slot index used,
// or -1 when the buffer is already at capacity (the claimed slot is
// simply discarded in that case).
__device__ int VecPushBack(Ele &e) {
    const int slot = atomicAdd(&devCount, 1);
    if (slot >= 10001) {
        return -1;
    }
    devData[slot] = e;
    return slot;
}
// Scans each unfinished row's forward adjacency list and records every edge
// whose endpoint lies in the partner range (range id with the low bit
// flipped), pushing (row, neighbour) pairs into the device-global vector.
// Rows are 1-based; rows already range-set or marked in Occ are skipped.
__global__ void identifyTransEdges(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, uint32_t *Pr, bool *Occ){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;

    if (row > num_rows || isRangeSet(tags[row]) || Occ[row])
        return;

    // Partner range id: toggling the low bit is equivalent to the original
    // "odd -> minus one, even -> plus one" computation.
    const uint32_t partner = Pr[row] ^ 1u;

    const uint32_t first = Fr[row];
    const uint32_t last = Fr[row + 1];
    for (uint32_t e = first; e < last; ++e) {
        const uint32_t nbr = Fc[e];
        if (!isRangeSet(tags[nbr]) && Pr[nbr] == partner && !Occ[nbr]) {
            Ele rec = { .u1 = row, .u2 = nbr };
            VecPushBack(rec);
        }
    }
}
void wHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2, int warpSize){
//Set the device which exclusively used by this program
hipSetDevice(7);
float sccTime=0;
hipEvent_t sccTimeStart, sccTimeStop;
hipEventCreate(&sccTimeStart);
hipEventCreate(&sccTimeStop);
hipEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize;
hipError_t e1, e2, e3, e4, e5, e6, e7, e8, e9;
CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e9 = hipMalloc( (void**) &d_terminateb, sizeof(bool) ));
if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation ||
e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation ||
e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation ||
e7 == hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation || e9 == hipErrorMemoryAllocation) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
dim3 gridfb;
if((RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
int dim = ceill(sqrt(RSize * warpSize / BLOCKSIZE));
gridfb.x = dim;
gridfb.y = dim;
gridfb.z = 1;
}else{
gridfb.x = (RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE;
gridfb.y = 1;
gridfb.z = 1;
}
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
int dim = ceill(sqrt(RSize / BLOCKSIZE));
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
hipEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
hipEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
hipEventCreate(&bTimeStart);
hipEventCreate(&bTimeStop);
hipEventCreate(&pivotTimeStart);
hipEventCreate(&pivotTimeStop);
hipEventCreate(&trim1TimeStart);
hipEventCreate(&trim1TimeStop);
hipEventCreate(&trim2TimeStart);
hipEventCreate(&trim2TimeStop);
hipEventCreate(&updateTimeStart);
hipEventCreate(&updateTimeStop);
hipEventCreate(&wccTimeStart);
hipEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots, d_Fr, d_Br);
hipLaunchKernelGGL(( selectFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
switch(warpSize){
case 1:
hipLaunchKernelGGL(( fwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 2:
hipLaunchKernelGGL(( fwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 4:
hipLaunchKernelGGL(( fwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 8:
hipLaunchKernelGGL(( fwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 16:
hipLaunchKernelGGL(( fwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 32:
hipLaunchKernelGGL(( fwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
}
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
switch(warpSize){
case 1:
hipLaunchKernelGGL(( fwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 2:
hipLaunchKernelGGL(( fwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 4:
hipLaunchKernelGGL(( fwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 8:
hipLaunchKernelGGL(( fwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 16:
hipLaunchKernelGGL(( fwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 32:
hipLaunchKernelGGL(( fwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
}
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
switch(warpSize){
case 1:
hipLaunchKernelGGL(( bwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 2:
hipLaunchKernelGGL(( bwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 4:
hipLaunchKernelGGL(( bwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 8:
hipLaunchKernelGGL(( bwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 16:
hipLaunchKernelGGL(( bwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 32:
hipLaunchKernelGGL(( bwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
}
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim2TimeStart, 0);
#endif
if(t2)
hipLaunchKernelGGL(( trim2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
hipEventRecord(trim2TimeStop, 0);
hipEventSynchronize(trim2TimeStop);
hipEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(wccTimeStart, 0);
#endif
//Now WCC decomposition
hipLaunchKernelGGL(( assignUniqueRange), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize);
do{
CUDA_SAFE_CALL( hipMemset((void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange1), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef || !terminateb);
#ifdef _DEBUG
hipEventRecord(wccTimeStop, 0);
hipEventSynchronize(wccTimeStop);
hipEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
wccTime+=temp;
#endif
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
hipLaunchKernelGGL(( selectPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
numberOf1Sccs++;
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(isPivot(tags[i]))
numberOfPivotSccs++;
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
hipEventRecord(sccTimeStop, 0);
hipEventSynchronize(sccTimeStop);
hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf("bfs time (ms), %.3lf\n", bTime);
printf("trim_1 time (ms), %.3lf\n", trim1Time);
printf("trim_2 time (ms), %.3lf\n", trim2Time);
printf("pivot time (ms), %.3lf\n", pivotTime);
printf("update time (ms), %.3lf\n", updateTime);
printf("wcc time (ms), %.3lf\n", wccTime);
//printf(", %f", bTime);
//printf(", %f", trim1Time);
//printf(", %f", trim2Time);
//printf(", %f", pivotTime);
//printf(", %f", updateTime);
//printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime (ms): %.3lf\n", sccTime );
CUDA_SAFE_CALL( hipFree( d_Fc ));
CUDA_SAFE_CALL( hipFree( d_Fr ));
CUDA_SAFE_CALL( hipFree( d_Bc ));
CUDA_SAFE_CALL( hipFree( d_Br ));
CUDA_SAFE_CALL( hipFree( d_range));
CUDA_SAFE_CALL( hipFree( d_tags));
CUDA_SAFE_CALL( hipFree( d_pivots ));
CUDA_SAFE_CALL( hipFree( (void *)d_terminatef));
CUDA_SAFE_CALL( hipFree( (void *)d_terminateb));
hipEventDestroy(sccTimeStart);
hipEventDestroy(sccTimeStop);
#ifdef _DEBUG
hipEventDestroy(bTimeStart);
hipEventDestroy(bTimeStop);
hipEventDestroy(trim1TimeStart);
hipEventDestroy(trim1TimeStop);
hipEventDestroy(trim2TimeStart);
hipEventDestroy(trim2TimeStop);
hipEventDestroy(pivotTimeStart);
hipEventDestroy(pivotTimeStop);
hipEventDestroy(updateTimeStart);
hipEventDestroy(updateTimeStop);
hipEventDestroy(wccTimeStart);
hipEventDestroy(wccTimeStop);
#endif
return;
}
void vHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2){
//Set the device which exclusively used by this program
hipSetDevice(7);
float sccTime=0;
hipEvent_t sccTimeStart, sccTimeStop;
hipEventCreate(&sccTimeStart);
hipEventCreate(&sccTimeStop);
hipEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize;
hipError_t e1, e2, e3, e4, e5, e6, e7, e8;
CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminateb, sizeof(bool) ));
if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation ||
e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation ||
e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation ||
e7 == hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation ) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
int dim = ceill(sqrt(RSize / BLOCKSIZE));
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
hipEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
hipEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
hipEventCreate(&bTimeStart);
hipEventCreate(&bTimeStop);
hipEventCreate(&pivotTimeStart);
hipEventCreate(&pivotTimeStop);
hipEventCreate(&trim1TimeStart);
hipEventCreate(&trim1TimeStop);
hipEventCreate(&trim2TimeStart);
hipEventCreate(&trim2TimeStop);
hipEventCreate(&updateTimeStart);
hipEventCreate(&updateTimeStop);
hipEventCreate(&wccTimeStart);
hipEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots, d_Fr, d_Br);
hipLaunchKernelGGL(( selectFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim2TimeStart, 0);
#endif
if(t2)
hipLaunchKernelGGL(( trim2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
hipEventRecord(trim2TimeStop, 0);
hipEventSynchronize(trim2TimeStop);
hipEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(wccTimeStart, 0);
#endif
//Now WCC decomposition
hipLaunchKernelGGL(( assignUniqueRange), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize);
do{
CUDA_SAFE_CALL( hipMemset((void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange1), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef || !terminateb);
#ifdef _DEBUG
hipEventRecord(wccTimeStop, 0);
hipEventSynchronize(wccTimeStop);
hipEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
wccTime+=temp;
#endif
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
hipLaunchKernelGGL(( selectPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
numberOf1Sccs++;
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(isPivot(tags[i]))
numberOfPivotSccs++;
printf("numberOf1Sccs: %d\n", numberOf1Sccs);
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
hipEventRecord(sccTimeStop, 0);
hipEventSynchronize(sccTimeStop);
hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf("bfs time (ms), %.3lf\n", bTime);
printf("trim_1 time (ms), %.3lf\n", trim1Time);
printf("trim_2 time (ms), %.3lf\n", trim2Time);
printf("pivot time (ms), %.3lf\n", pivotTime);
printf("update time (ms), %.3lf\n", updateTime);
printf("wcc time (ms), %.3lf\n", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime (ms): %f\n", sccTime );
CUDA_SAFE_CALL( hipFree( d_Fc ));
CUDA_SAFE_CALL( hipFree( d_Fr ));
CUDA_SAFE_CALL( hipFree( d_Bc ));
CUDA_SAFE_CALL( hipFree( d_Br ));
CUDA_SAFE_CALL( hipFree( d_range));
CUDA_SAFE_CALL( hipFree( d_tags));
CUDA_SAFE_CALL( hipFree( d_pivots ));
CUDA_SAFE_CALL( hipFree( (void *)d_terminatef));
CUDA_SAFE_CALL( hipFree( (void *)d_terminateb));
hipEventDestroy(sccTimeStart);
hipEventDestroy(sccTimeStop);
#ifdef _DEBUG
hipEventDestroy(bTimeStart);
hipEventDestroy(bTimeStop);
hipEventDestroy(trim1TimeStart);
hipEventDestroy(trim1TimeStop);
hipEventDestroy(trim2TimeStart);
hipEventDestroy(trim2TimeStop);
hipEventDestroy(pivotTimeStart);
hipEventDestroy(pivotTimeStop);
hipEventDestroy(updateTimeStart);
hipEventDestroy(updateTimeStop);
hipEventDestroy(wccTimeStart);
hipEventDestroy(wccTimeStop);
#endif
return;
}
void detectSCC(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, uint32_t * Pr, bool t1, bool t2){
//Set the device which exclusively used by this program
hipSetDevice(7);
//printf("RSize %d\n", RSize);
float sccTime=0;
hipEvent_t sccTimeStart, sccTimeStop;
hipEventCreate(&sccTimeStart);
hipEventCreate(&sccTimeStop);
hipEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_Pr = NULL;
uint32_t** d_dpivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
uint32_t* range = new uint32_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize + 1;
uint32_t partitionCount = 10;
uint32_t *HostArray[partitionCount];
CUDA_SAFE_CALL(hipMalloc((void**)&d_dpivots, partitionCount * sizeof(uint32_t *)));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(hipMalloc((void**)&HostArray[i], max_pivot_count * sizeof(uint32_t)));
CUDA_SAFE_CALL(hipMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(hipMemcpy(d_dpivots, HostArray, partitionCount * sizeof(uint32_t *), hipMemcpyHostToDevice));
hipError_t e1, e2, e3, e4, e5, e6, e7, e8, e9;
CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminateb, sizeof(bool) ));
CUDA_SAFE_CALL( e9 = hipMalloc( (void**) &d_Pr, (RSize + 2) * sizeof(uint32_t) ));
if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation ||
e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation ||
e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation ||
e7 == hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation ||
e9 == hipErrorMemoryAllocation) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Pr, Pr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
int dim = ceill(sqrt(RSize / BLOCKSIZE));
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
hipEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
hipEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
hipEventCreate(&bTimeStart);
hipEventCreate(&bTimeStop);
hipEventCreate(&pivotTimeStart);
hipEventCreate(&pivotTimeStop);
hipEventCreate(&trim1TimeStart);
hipEventCreate(&trim1TimeStop);
hipEventCreate(&trim2TimeStart);
hipEventCreate(&trim2TimeStop);
hipEventCreate(&updateTimeStart);
hipEventCreate(&updateTimeStop);
hipEventCreate(&wccTimeStart);
hipEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim2TimeStart, 0);
#endif
if(t2)
hipLaunchKernelGGL(( trim2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
hipEventRecord(trim2TimeStop, 0);
hipEventSynchronize(trim2TimeStop);
hipEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
bool *d_auxRange = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_auxRange, sizeof(bool) * (RSize + 1)));
CUDA_SAFE_CALL(hipMemset(d_auxRange, false, sizeof(bool) * (RSize + 1)));
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(hipMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(hipMemcpy(d_dpivots, HostArray, partitionCount * sizeof(uint32_t *), hipMemcpyHostToDevice));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( pollForPivotsLocal), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Fr, d_Br, d_Pr, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
hipLaunchKernelGGL(( selectPivotsLocal), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Pr, d_auxRange);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdLocal), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
hipLaunchKernelGGL(( bwdLocal), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdLocal), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwdLocal), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( updateLocal), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
//printf("$$$$$%d\n", terminatef);
if (terminatef)
break; //only way out
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
CUDA_SAFE_CALL( hipMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), hipMemcpyDeviceToHost ));
printf("LOCAL SCC's IDENTIFIED! NODES WITH SAME RANGE VALUES BELONG TO THE SAME SCC!!\n");
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
set<int> Fs[RSize + 1], Bs[RSize + 1];
// Compute forward reachability and backward reachability
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
hipLaunchKernelGGL(( resetTag), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, i);
hipDeviceSynchronize();
//printf("Processing %d\n", i);
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
//printf("Processed %d\n", i);
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
//printf("Inserting %d in Fs of %d\n", j, i);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
//printf("Inserting %d in Bs of %d\n", j, i);
}
}
//printf("Node %d, FsSize %d, BsSize %d\n", i, (int)Fs[i].size(), (int)Bs[i].size());
}
hipLaunchKernelGGL(( resetTag), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, RSize + 2);
printf("Fs AND Bs ARE POPULATED!!\n");
uint32_t *d_Rm = NULL;
CUDA_SAFE_CALL( hipMalloc((void **)&d_Rm, sizeof(uint32_t) * partitionCount));
uint32_t itr = 0;
printf("STARTING MERGE!\n");
//<----------Merging Phase----------------------------------
bool terminatebb = false;
bool volatile *d_terminatebb = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_terminatebb, sizeof(bool) ));
unsigned char * _devCount;
while(!terminatebb)
{
hipGetSymbolAddress((void **)&_devCount, devCount);
hipMemset(_devCount, 0, sizeof(int));
itr++;
printf("Iterations %d\n", itr);
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatebb, true, sizeof(bool) ));
bool *d_Occ = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_Occ, (RSize + 1) * sizeof(bool)));
CUDA_SAFE_CALL(hipMemset((void*)d_Occ, false, (RSize + 1) * sizeof(bool)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( computeInDegree), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_Pr, d_Br, d_Bc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( computeOutDegree), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_Pr, d_Fr, d_Fc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( (hipMemset((void *)d_Rm, 0, sizeof(uint32_t) * partitionCount)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( getMaxRange), dim3(grid), dim3(threads), 0, 0, d_range, d_Pr, d_Rm, RSize, d_tags, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
hipLaunchKernelGGL(( shiftRange), dim3(grid), dim3(threads), 0, 0, d_range, d_Pr, d_Rm, RSize, d_tags);
hipLaunchKernelGGL(( identifyTransEdges), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_Pr, d_Occ);
hipDeviceSynchronize(); //Required?
//printf("Identified Trans-edges!\n");
int dsize;
hipMemcpyFromSymbol(&dsize, devCount, sizeof(int));
if (dsize >= CSize)
{
printf("No space!\n");
}
vector<Ele> results(dsize);
//printf("dsize: %d\n", dsize);
hipMemcpyFromSymbol(&(results[0]), devData, dsize * sizeof(Ele));
/*for(int i = 0; i < dsize; i++)
printf("transedge[%d]: <%d, %d>\n", i, results[i].u1, results[i].u2);*/
// Trans-edges are present in results -> <u1, u2>
CUDA_SAFE_CALL( hipMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), hipMemcpyDeviceToHost ));
vector<vector<int> > transSets;
for(int i = 0; i < dsize; i++)
for(int j = i + 1; j < dsize; j++)
{
vector<int> temp1(Bs[results[i].u1].size() + Fs[results[j].u2].size());
vector<int>::iterator it;
it = set_intersection(Bs[results[i].u1].begin(), Bs[results[i].u1].end(), Fs[results[j].u2].begin(), Fs[results[j].u2].end(), temp1.begin());
temp1.resize(it - temp1.begin());
vector<int> temp2(Bs[results[j].u1].size() + Fs[results[i].u2].size());
it = set_intersection(Bs[results[j].u1].begin(), Bs[results[j].u1].end(), Fs[results[i].u2].begin(), Fs[results[i].u2].end(), temp2.begin());
temp2.resize(it - temp2.begin());
/*printf("BS U1: ");
for(set<int>::iterator it = Bs[results[j].u1].begin(); it != Bs[results[j].u1].end(); it++)
printf("%d ", *it);
printf("\n");
printf("FS U2: ");
for(set<int>::iterator it = Fs[results[i].u2].begin(); it != Fs[results[i].u2].end(); it++)
printf("%d ", *it);
printf("\n");
printf("temp2: ");
for(int k = 0; k < temp2.size(); k++)
printf("%d ", temp2[k]);
printf("\n");*/
temp1.insert(temp1.end(), temp2.begin(), temp2.end());
if((int)temp1.size() > 0)
transSets.push_back(temp1);
}
bool ok = true;
int ssSize = (int)transSets.size();
/*for(int i = 0; i < ssSize; i++)
{
printf("TRANS SET: ");
for(int j = 0; j < (int)transSets[i].size(); j++)
printf("%d ", transSets[i][j]);
printf("\n");
}*/
do
{
ok = true;
for(int i = 0; i < ssSize; i++)
{
uint32_t mxRange = 0;
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(mxRange < range[transSets[i][k]])
mxRange = range[transSets[i][k]];
}
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(range[transSets[i][k]] != mxRange)
{
range[transSets[i][k]] = mxRange;
ok = false;
}
}
}
}while(!ok);
CUDA_SAFE_CALL( hipMemcpy(d_range, range, sizeof(uint32_t) * (RSize + 1), hipMemcpyHostToDevice ));
hipLaunchKernelGGL(( updatePr), dim3(grid), dim3(threads), 0, 0, d_Pr, RSize, d_terminatebb, d_tags);
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
hipLaunchKernelGGL(( resetTag), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, i);
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
//printf("Inserting %d in Fs of %d\n", j, i);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
//printf("Inserting %d in Bs of %d\n", j, i);
}
}
}
CUDA_SAFE_CALL( hipMemcpy( &terminatebb, (const void *)d_terminatebb, sizeof(bool), hipMemcpyDeviceToHost ));
//printf("terminatebb: %d\n", terminatebb);
}
printf("MERGING DONE! ^_^\n");
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), hipMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
map<uint32_t, bool> seen;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
{
numberOf1Sccs++;
//printf("TRIM1: %d\n", i);
}
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(seen.find(range[i]) == seen.end())
{
numberOfPivotSccs++;
seen[range[i]] = true;
//printf("RANGE of %d: %d\n", range[i], i);
}
//printf("NumberOf1SccsS: %d\n", numberOf1Sccs);
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
hipEventRecord(sccTimeStop, 0);
hipEventSynchronize(sccTimeStop);
hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f\n", sccTime );
CUDA_SAFE_CALL( hipFree( d_Fc ));
CUDA_SAFE_CALL( hipFree( d_Fr ));
CUDA_SAFE_CALL( hipFree( d_Bc ));
CUDA_SAFE_CALL( hipFree( d_Br ));
CUDA_SAFE_CALL( hipFree( d_range));
CUDA_SAFE_CALL( hipFree( d_tags));
CUDA_SAFE_CALL( hipFree( d_pivots ));
CUDA_SAFE_CALL( hipFree( d_auxRange));
CUDA_SAFE_CALL( hipFree( d_Rm));
CUDA_SAFE_CALL( hipFree( (void *)d_terminatef));
CUDA_SAFE_CALL( hipFree( (void *)d_terminateb));
CUDA_SAFE_CALL( hipFree( (void *)d_terminatebb));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(hipFree(HostArray[i]));
}
CUDA_SAFE_CALL(hipFree(d_dpivots));
hipEventDestroy(sccTimeStart);
hipEventDestroy(sccTimeStop);
#ifdef _DEBUG
hipEventDestroy(bTimeStart);
hipEventDestroy(bTimeStop);
hipEventDestroy(trim1TimeStart);
hipEventDestroy(trim1TimeStop);
hipEventDestroy(trim2TimeStart);
hipEventDestroy(trim2TimeStop);
hipEventDestroy(pivotTimeStart);
hipEventDestroy(pivotTimeStop);
hipEventDestroy(updateTimeStart);
hipEventDestroy(updateTimeStop);
hipEventDestroy(wccTimeStart);
hipEventDestroy(wccTimeStop);
#endif
return;
}
| 7b69b032277bcc5ef174c7093556f654b43724c2.cu | #include "scc.h"
#include<map>
#include<set>
#include<algorithm>
#include<vector>
#include "scc_kernels.h"
using namespace std;
// A single cross-partition ("trans") edge <u1, u2> recorded on the device by
// identifyTransEdges and later copied back to the host for the merge phase.
typedef struct {
uint32_t u1, u2;
} Ele;
// Device-global, fixed-capacity append buffer for trans-edges, plus its
// cursor. devCount is advanced atomically by VecPushBack; note it can grow
// past the 10001-entry capacity when the buffer overflows (writes are
// suppressed, but the count keeps climbing), so host readers must clamp it.
__device__ Ele devData[10001];
__device__ int devCount = 0;
// Append one element to the global trans-edge buffer (devData).
// Returns the slot index on success, or -1 when the buffer is full.
// Thread-safe across the grid: the slot is reserved with a single atomicAdd
// on devCount before the element is stored.
__device__ int VecPushBack(Ele &e) {
    const int slot = atomicAdd(&devCount, 1);
    if (slot >= 10001)
        return -1;          // capacity exhausted; devCount may now exceed 10001
    devData[slot] = e;
    return slot;
}
// One thread per vertex (1-based). For every unprocessed vertex not marked in
// Occ, scan its forward CSR adjacency and record every edge that crosses into
// the partner partition (the partition whose id differs only in the lowest
// bit of Pr) as a trans-edge via VecPushBack. `range` is unused here but kept
// for launch-signature compatibility with the other kernels.
__global__ void identifyTransEdges(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, uint32_t *Pr, bool *Occ){
    const uint32_t v = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (v > num_rows || isRangeSet(tags[v]) || Occ[v])
        return;
    // Partner partition id: flipping bit 0 maps 2k <-> 2k+1, which is exactly
    // the original's (odd ? myPr-1 : myPr+1).
    const uint32_t partnerPr = Pr[v] ^ 1u;
    const uint32_t first = Fr[v];
    const uint32_t last  = Fr[v + 1];
    for (uint32_t idx = first; idx < last; ++idx) {
        const uint32_t w = Fc[idx];
        if (!isRangeSet(tags[w]) && Pr[w] == partnerPr && !Occ[w]) {
            //printf("ROW: %d, INDEX: %d\n", v, w);
            Ele e = { v, w };
            VecPushBack(e);
        }
    }
}
// ---------------------------------------------------------------------------
// File-local helpers for wHong.
// ---------------------------------------------------------------------------

// Build a grid of BLOCKSIZE-thread blocks covering `totalThreads` threads,
// folding into two dimensions when a 1-D grid would exceed MaxXDimOfGrid.
// Fixes two defects of the original inline computation:
//   * it used floor division (RSize / BLOCKSIZE) before the sqrt, which can
//     under-provision blocks (leaving tail vertices unprocessed) whenever the
//     true block count sits just above a perfect square;
//   * it could produce a zero-sized dimension for tiny inputs.
// 64-bit arithmetic also guards against RSize * warpSize overflowing 32 bits.
static dim3 makeGrid(uint64_t totalThreads) {
    uint64_t blocks = (totalThreads + BLOCKSIZE - 1) / (uint64_t)BLOCKSIZE;
    if (blocks == 0)
        blocks = 1; // never launch an empty grid
    dim3 g(1, 1, 1);
    if (blocks > (uint64_t)MaxXDimOfGrid) {
        uint32_t dim = (uint32_t)ceill(sqrtl((long double)blocks));
        g.x = dim;
        g.y = dim;
    } else {
        g.x = (uint32_t)blocks;
    }
    return g;
}

// Dispatch the warp-size-templated forward-reachability kernel.
// warpSize must be one of {1, 2, 4, 8, 16, 32}; other values are a no-op.
static void launchFwdWarp(int warpSize, dim3 grid, dim3 threads,
                          uint32_t *d_Fc, uint32_t *d_Fr, uint32_t *d_range,
                          uint8_t *d_tags, uint32_t RSize,
                          bool volatile *d_terminatef) {
    switch (warpSize) {
        case 1:  fwd_warp<1><<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break;
        case 2:  fwd_warp<2><<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break;
        case 4:  fwd_warp<4><<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break;
        case 8:  fwd_warp<8><<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break;
        case 16: fwd_warp<16><<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break;
        case 32: fwd_warp<32><<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break;
    }
}

// Dispatch the warp-size-templated backward-reachability kernel (see above).
static void launchBwdWarp(int warpSize, dim3 grid, dim3 threads,
                          uint32_t *d_Bc, uint32_t *d_Br, uint32_t *d_range,
                          uint8_t *d_tags, uint32_t RSize,
                          bool volatile *d_terminateb) {
    switch (warpSize) {
        case 1:  bwd_warp<1><<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break;
        case 2:  bwd_warp<2><<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break;
        case 4:  bwd_warp<4><<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break;
        case 8:  bwd_warp<8><<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break;
        case 16: bwd_warp<16><<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break;
        case 32: bwd_warp<32><<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break;
    }
}

// Run the trim-1 kernel to a fixed point: repeatedly peel vertices whose
// in- or out-degree (among live vertices) is zero — each is a size-1 SCC.
static void runTrim1(dim3 grid, dim3 threads,
                     uint32_t *d_range, uint8_t *d_tags,
                     uint32_t *d_Fc, uint32_t *d_Fr,
                     uint32_t *d_Bc, uint32_t *d_Br,
                     uint32_t RSize, bool volatile *d_terminatef,
                     int &Trimm_iterations) {
    bool terminatef = false;
    do {
        Trimm_iterations++;
        CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
        trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
        CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
    } while (!terminatef);
}

// Warp-mapped variant of Hong et al.'s GPU SCC decomposition.
//   CSize    : number of edges (length of Fc/Bc)
//   RSize    : number of vertices; CSR row arrays Fr/Br have RSize+2 entries
//              and vertices are 1-based
//   Fc/Fr    : forward-edge CSR;  Bc/Br : backward-edge CSR
//   t1, t2   : enable trim-1 / trim-2 preprocessing
//   warpSize : virtual warp width (1..32, power of two) for the first
//              (giant-SCC) BFS phase; later phases use thread-per-vertex
// Prints the number of SCCs and the elapsed time. All device and host
// allocations made here are released before returning.
void wHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2, int warpSize){
    // This program assumes exclusive use of device 7.
    cudaSetDevice(7);
    float sccTime = 0;
    cudaEvent_t sccTimeStart, sccTimeStop;
    cudaEventCreate(&sccTimeStart);
    cudaEventCreate(&sccTimeStop);
    cudaEventRecord(sccTimeStart, 0);
    //-----------GPU initialization---------------------------->
    uint32_t* d_Fr = NULL;
    uint32_t* d_Br = NULL;
    uint32_t* d_Fc = NULL;
    uint32_t* d_Bc = NULL;
    uint32_t* d_pivots = NULL;
    uint32_t* d_range = NULL;
    uint8_t* d_tags = NULL;
    bool volatile* d_terminatef = NULL;
    bool terminatef = false;
    bool volatile* d_terminateb = NULL;
    bool terminateb = false;
    int FWD_iterations = 0;
    int BWD_iterations = 0;
    uint32_t iterations = 0;
    int Trimm_iterations = 0;
    const uint32_t max_pivot_count = RSize;
    cudaError_t e1, e2, e3, e4, e5, e6, e7, e8, e9;
    CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
    CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
    CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) ));
    CUDA_SAFE_CALL( e9 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) ));
    if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation ||
        e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation ||
        e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation ||
        e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation || e9 == cudaErrorMemoryAllocation) {
        throw "Error: Not enough memory on GPU\n";
    }
    // Host copy of the per-vertex tag bytes. Allocated only after the device
    // allocations are checked so the throw above cannot leak it; released
    // with delete[] before returning (the original leaked it).
    uint8_t* tags = new uint8_t[RSize + 1];
    CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
    CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
    CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
    CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
    CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
    CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
    // Warp-per-vertex grid for fwd_warp/bwd_warp (warpSize threads per vertex)
    dim3 gridfb = makeGrid((uint64_t)RSize * (uint64_t)warpSize);
    // Thread-per-vertex grid for all other kernels
    dim3 grid = makeGrid(RSize);
    dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
    float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
    cudaEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
    cudaEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
    cudaEventCreate(&bTimeStart);
    cudaEventCreate(&bTimeStop);
    cudaEventCreate(&pivotTimeStart);
    cudaEventCreate(&pivotTimeStop);
    cudaEventCreate(&trim1TimeStart);
    cudaEventCreate(&trim1TimeStop);
    cudaEventCreate(&trim2TimeStart);
    cudaEventCreate(&trim2TimeStop);
    cudaEventCreate(&updateTimeStart);
    cudaEventCreate(&updateTimeStop);
    cudaEventCreate(&wccTimeStart);
    cudaEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
    cudaEventRecord(trim1TimeStart, 0);
#endif
    //-----------Trimming (size-1 SCCs)------------------------>
    if(t1)
        runTrim1(grid, threads, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef, Trimm_iterations);
#ifdef _DEBUG
    cudaEventRecord(trim1TimeStop, 0);
    cudaEventSynchronize(trim1TimeStop);
    cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
    trim1Time += temp;
#endif
    //-----------Choose the first pivot (likely giant SCC)----->
#ifdef _DEBUG
    cudaEventRecord(pivotTimeStart, 0);
#endif
    CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, sizeof(uint32_t) ));
    pollForFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots, d_Fr, d_Br);
    selectFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots);
#ifdef _DEBUG
    cudaEventRecord(pivotTimeStop, 0);
    cudaEventSynchronize(pivotTimeStop);
    cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
    pivotTime += temp;
#endif
#ifdef _DEBUG
    cudaEventRecord(bTimeStart, 0);
#endif
    // First BFS phase: warp-mapped forward and backward closure from the
    // first pivot. Run both directions until one converges, then finish the
    // other alone.
    do{
        FWD_iterations++;
        BWD_iterations++;
        CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
        CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
        launchFwdWarp(warpSize, gridfb, threads, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
        launchBwdWarp(warpSize, gridfb, threads, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
        CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
        CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
    }while(!terminatef && !terminateb);
    while(!terminatef){//Forward reachability
        FWD_iterations++;
        CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
        launchFwdWarp(warpSize, gridfb, threads, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
        CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
    }
    while(!terminateb){//Backward reachability
        BWD_iterations++;
        CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
        launchBwdWarp(warpSize, gridfb, threads, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
        CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
    }
#ifdef _DEBUG
    cudaEventRecord(bTimeStop, 0);
    cudaEventSynchronize(bTimeStop);
    cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
    bTime += temp;
#endif
#ifdef _DEBUG
    cudaEventRecord(updateTimeStart, 0);
#endif
    // Split vertices by (visited-fwd, visited-bwd) and retire the first SCC.
    // The termination flag is not inspected after this particular call.
    update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
    cudaEventRecord(updateTimeStop, 0);
    cudaEventSynchronize(updateTimeStop);
    cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
    updateTime += temp;
#endif
#ifdef _DEBUG
    cudaEventRecord(trim1TimeStart, 0);
#endif
    //-----------Trimming again after the giant SCC------------>
    if(t1)
        runTrim1(grid, threads, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef, Trimm_iterations);
#ifdef _DEBUG
    cudaEventRecord(trim1TimeStop, 0);
    cudaEventSynchronize(trim1TimeStop);
    cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
    trim1Time += temp;
#endif
#ifdef _DEBUG
    cudaEventRecord(trim2TimeStart, 0);
#endif
    if(t2)
        trim2<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
    cudaEventRecord(trim2TimeStop, 0);
    cudaEventSynchronize(trim2TimeStop);
    cudaEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
    trim2Time += temp;
#endif
#ifdef _DEBUG
    cudaEventRecord(trim1TimeStart, 0);
#endif
    //-----------Trimming once more after trim-2--------------->
    if(t1)
        runTrim1(grid, threads, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef, Trimm_iterations);
#ifdef _DEBUG
    cudaEventRecord(trim1TimeStop, 0);
    cudaEventSynchronize(trim1TimeStop);
    cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
    trim1Time += temp;
#endif
#ifdef _DEBUG
    cudaEventRecord(wccTimeStart, 0);
#endif
    // WCC decomposition: give each live vertex a unique range, then propagate
    // the minimum over forward edges until both propagation passes converge.
    assignUniqueRange<<<grid, threads>>>(d_range, d_tags, RSize);
    do{
        CUDA_SAFE_CALL( cudaMemset((void *)d_terminatef, true, sizeof(bool) ));
        propagateRange1<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
        CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
        CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
        propagateRange2<<<grid, threads>>>( d_range, d_tags, RSize, d_terminateb);
        CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
    }while(!terminatef || !terminateb);
#ifdef _DEBUG
    cudaEventRecord(wccTimeStop, 0);
    cudaEventSynchronize(wccTimeStop);
    cudaEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
    wccTime += temp;
#endif
    //-----------Main algorithm (thread-per-vertex FB)--------->
    while ( true ) {
        iterations++;
        //-----------Choose pivots (one per live range)-------->
#ifdef _DEBUG
        cudaEventRecord(pivotTimeStart, 0);
#endif
        CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
        pollForPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
        selectPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
        cudaEventRecord(pivotTimeStop, 0);
        cudaEventSynchronize(pivotTimeStop);
        cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
        pivotTime += temp;
#endif
#ifdef _DEBUG
        cudaEventRecord(bTimeStart, 0);
#endif
        do{//Forward and Backward reachability
            FWD_iterations++;
            BWD_iterations++;
            CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
            CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
            fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
            bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
            CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
            CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
        }while(!terminatef && !terminateb);
        while(!terminatef){//Forward reachability
            FWD_iterations++;
            CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
            fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
            CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
        }
        while(!terminateb){//Backward reachability
            BWD_iterations++;
            CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
            bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
            CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
        }
#ifdef _DEBUG
        cudaEventRecord(bTimeStop, 0);
        cudaEventSynchronize(bTimeStop);
        cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
        bTime += temp;
#endif
#ifdef _DEBUG
        cudaEventRecord(updateTimeStart, 0);
#endif
        CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
        update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
        CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
        if (terminatef)
            break; //only way out (note: the update timer is not stopped on this path)
#ifdef _DEBUG
        cudaEventRecord(updateTimeStop, 0);
        cudaEventSynchronize(updateTimeStop);
        cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
        updateTime += temp;
#endif
    }
    //<----------Main algorithm---------------------------------
    // SCC extraction: count size-1 (trim-1), size-2 (trim-2) and pivot SCCs.
    CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
    uint32_t numberOf1Sccs = 0;
    uint32_t numberOf2Sccs = 0;
    uint32_t numberOfPivotSccs = 0;
    uint32_t numberOfSccs = 0;
    for(uint32_t i = 1; i <= RSize; i++)
        if(isTrim1(tags[i]))
            numberOf1Sccs++;
        else if(isTrim2(tags[i]))
            numberOf2Sccs++;
        else if(isPivot(tags[i]))
            numberOfPivotSccs++;
    numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
    cudaEventRecord(sccTimeStop, 0);
    cudaEventSynchronize(sccTimeStop);
    cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
    //printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
    printf("bfs time (ms), %.3lf\n", bTime);
    printf("trim_1 time (ms), %.3lf\n", trim1Time);
    printf("trim_2 time (ms), %.3lf\n", trim2Time);
    printf("pivot time (ms), %.3lf\n", pivotTime);
    printf("update time (ms), %.3lf\n", updateTime);
    printf("wcc time (ms), %.3lf\n", wccTime);
#endif
    printf("\nNumber Of Sccs : %d", numberOfSccs);
    printf("\nTime (ms): %.3lf\n", sccTime );
    //-----------Cleanup--------------------------------------->
    CUDA_SAFE_CALL( cudaFree( d_Fc ));
    CUDA_SAFE_CALL( cudaFree( d_Fr ));
    CUDA_SAFE_CALL( cudaFree( d_Bc ));
    CUDA_SAFE_CALL( cudaFree( d_Br ));
    CUDA_SAFE_CALL( cudaFree( d_range));
    CUDA_SAFE_CALL( cudaFree( d_tags));
    CUDA_SAFE_CALL( cudaFree( d_pivots ));
    CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef));
    CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb));
    delete[] tags; // fix: the original leaked this host buffer
    cudaEventDestroy(sccTimeStart);
    cudaEventDestroy(sccTimeStop);
#ifdef _DEBUG
    cudaEventDestroy(bTimeStart);
    cudaEventDestroy(bTimeStop);
    cudaEventDestroy(trim1TimeStart);
    cudaEventDestroy(trim1TimeStop);
    cudaEventDestroy(trim2TimeStart);
    cudaEventDestroy(trim2TimeStop);
    cudaEventDestroy(pivotTimeStart);
    cudaEventDestroy(pivotTimeStop);
    cudaEventDestroy(updateTimeStart);
    cudaEventDestroy(updateTimeStop);
    cudaEventDestroy(wccTimeStart);
    cudaEventDestroy(wccTimeStop);
#endif
    return;
}
void vHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2){
//Set the device which exclusively used by this program
cudaSetDevice(7);
float sccTime=0;
cudaEvent_t sccTimeStart, sccTimeStop;
cudaEventCreate(&sccTimeStart);
cudaEventCreate(&sccTimeStop);
cudaEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize;
cudaError_t e1, e2, e3, e4, e5, e6, e7, e8;
CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) ));
if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation ||
e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation ||
e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation ||
e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation ) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
int dim = ceill(sqrt(RSize / BLOCKSIZE));
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
cudaEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
cudaEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
cudaEventCreate(&bTimeStart);
cudaEventCreate(&bTimeStop);
cudaEventCreate(&pivotTimeStart);
cudaEventCreate(&pivotTimeStop);
cudaEventCreate(&trim1TimeStart);
cudaEventCreate(&trim1TimeStop);
cudaEventCreate(&trim2TimeStart);
cudaEventCreate(&trim2TimeStop);
cudaEventCreate(&updateTimeStart);
cudaEventCreate(&updateTimeStop);
cudaEventCreate(&wccTimeStart);
cudaEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, sizeof(uint32_t) ));
pollForFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots, d_Fr, d_Br);
selectFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim2TimeStart, 0);
#endif
if(t2)
trim2<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
cudaEventRecord(trim2TimeStop, 0);
cudaEventSynchronize(trim2TimeStop);
cudaEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(wccTimeStart, 0);
#endif
//Now WCC decomposition
assignUniqueRange<<<grid, threads>>>(d_range, d_tags, RSize);
do{
CUDA_SAFE_CALL( cudaMemset((void *)d_terminatef, true, sizeof(bool) ));
propagateRange1<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
propagateRange2<<<grid, threads>>>( d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef || !terminateb);
#ifdef _DEBUG
cudaEventRecord(wccTimeStop, 0);
cudaEventSynchronize(wccTimeStop);
cudaEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
wccTime+=temp;
#endif
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
pollForPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
selectPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
numberOf1Sccs++;
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(isPivot(tags[i]))
numberOfPivotSccs++;
printf("numberOf1Sccs: %d\n", numberOf1Sccs);
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
cudaEventRecord(sccTimeStop, 0);
cudaEventSynchronize(sccTimeStop);
cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf("bfs time (ms), %.3lf\n", bTime);
printf("trim_1 time (ms), %.3lf\n", trim1Time);
printf("trim_2 time (ms), %.3lf\n", trim2Time);
printf("pivot time (ms), %.3lf\n", pivotTime);
printf("update time (ms), %.3lf\n", updateTime);
printf("wcc time (ms), %.3lf\n", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime (ms): %f\n", sccTime );
CUDA_SAFE_CALL( cudaFree( d_Fc ));
CUDA_SAFE_CALL( cudaFree( d_Fr ));
CUDA_SAFE_CALL( cudaFree( d_Bc ));
CUDA_SAFE_CALL( cudaFree( d_Br ));
CUDA_SAFE_CALL( cudaFree( d_range));
CUDA_SAFE_CALL( cudaFree( d_tags));
CUDA_SAFE_CALL( cudaFree( d_pivots ));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb));
cudaEventDestroy(sccTimeStart);
cudaEventDestroy(sccTimeStop);
#ifdef _DEBUG
cudaEventDestroy(bTimeStart);
cudaEventDestroy(bTimeStop);
cudaEventDestroy(trim1TimeStart);
cudaEventDestroy(trim1TimeStop);
cudaEventDestroy(trim2TimeStart);
cudaEventDestroy(trim2TimeStop);
cudaEventDestroy(pivotTimeStart);
cudaEventDestroy(pivotTimeStop);
cudaEventDestroy(updateTimeStart);
cudaEventDestroy(updateTimeStop);
cudaEventDestroy(wccTimeStart);
cudaEventDestroy(wccTimeStop);
#endif
return;
}
// detectSCC: find the strongly connected components of a directed graph.
//
// The graph is given in CSR form: Fc/Fr are the forward (out-edge) column/row
// arrays, Bc/Br the backward (in-edge) arrays; CSize is the number of column
// entries, RSize the number of vertices (vertex ids are 1-based).
// Pr holds a per-vertex partition id consumed by the "*Local" kernels.
// t1 / t2 enable the trim-1 / trim-2 preprocessing steps.
//
// Phases: trimming -> per-partition (local) FW-BW decomposition ->
// host-side reachability-set construction (Fs/Bs) -> iterative merging of
// local SCCs across partitions via trans-edges -> SCC counting.
// The result (SCC count and elapsed time) is printed to stdout.
void detectSCC(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, uint32_t * Pr, bool t1, bool t2){
//Set the device which exclusively used by this program
//NOTE(review): device ordinal 7 is hard-coded; this fails on hosts with fewer GPUs — consider making it configurable.
cudaSetDevice(7);
float sccTime=0;
cudaEvent_t sccTimeStart, sccTimeStop;
cudaEventCreate(&sccTimeStart);
cudaEventCreate(&sccTimeStop);
cudaEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_Pr = NULL;
uint32_t** d_dpivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
uint32_t* range = new uint32_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize + 1;
uint32_t partitionCount = 10;
// FIX: std::vector instead of a variable-length array of pointers
// (`uint32_t *HostArray[partitionCount]` is a GCC extension, not standard C++).
vector<uint32_t*> HostArray(partitionCount);
CUDA_SAFE_CALL(cudaMalloc((void**)&d_dpivots, partitionCount * sizeof(uint32_t *)));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(cudaMalloc((void**)&HostArray[i], max_pivot_count * sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(cudaMemcpy(d_dpivots, HostArray.data(), partitionCount * sizeof(uint32_t *), cudaMemcpyHostToDevice));
// FIX: the original reused e5 for both d_range and d_tags, silently
// discarding d_range's allocation status; d_tags now uses its own e10.
cudaError_t e1, e2, e3, e4, e5, e6, e7, e8, e9, e10;
CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e10 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) ));
CUDA_SAFE_CALL( e9 = cudaMalloc( (void**) &d_Pr, (RSize + 2) * sizeof(uint32_t) ));
if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation ||
e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation ||
e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation ||
e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation ||
e9 == cudaErrorMemoryAllocation || e10 == cudaErrorMemoryAllocation) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Pr, Pr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
// FIX: ceil-divide before taking the square root. The original computed
// sqrt(RSize / BLOCKSIZE) with truncating integer division, which can
// undersize the 2-D grid so that some vertices get no thread.
int dim = ceill(sqrt((double)((RSize + BLOCKSIZE - 1) / BLOCKSIZE)));
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
cudaEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
cudaEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
cudaEventCreate(&bTimeStart);
cudaEventCreate(&bTimeStop);
cudaEventCreate(&pivotTimeStart);
cudaEventCreate(&pivotTimeStop);
cudaEventCreate(&trim1TimeStart);
cudaEventCreate(&trim1TimeStop);
cudaEventCreate(&trim2TimeStart);
cudaEventCreate(&trim2TimeStop);
cudaEventCreate(&updateTimeStart);
cudaEventCreate(&updateTimeStop);
cudaEventCreate(&wccTimeStart);
cudaEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
// Trim-1: repeatedly peel vertices that the trim1 kernel marks as trivial
// SCCs, until a pass makes no change (d_terminatef stays true).
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim2TimeStart, 0);
#endif
if(t2)
trim2<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
cudaEventRecord(trim2TimeStop, 0);
cudaEventSynchronize(trim2TimeStop);
cudaEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming (again, after trim2)----------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
bool *d_auxRange = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_auxRange, sizeof(bool) * (RSize + 1)));
CUDA_SAFE_CALL(cudaMemset(d_auxRange, false, sizeof(bool) * (RSize + 1)));
//-----------Main algorithm-------------------------------->
// FW-BW decomposition restricted to each partition (the "*Local" kernels):
// select pivots per partition, expand forward/backward closures, update.
while ( true ) {
iterations++;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
// Reset the per-partition pivot tables before each selection round.
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(cudaMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(cudaMemcpy(d_dpivots, HostArray.data(), partitionCount * sizeof(uint32_t *), cudaMemcpyHostToDevice));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
pollForPivotsLocal<<<grid, threads>>>( d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Fr, d_Br, d_Pr, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
selectPivotsLocal<<<grid, threads>>>( d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Pr, d_auxRange);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwdLocal<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
bwdLocal<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwdLocal<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwdLocal<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
updateLocal<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
CUDA_SAFE_CALL( cudaMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
printf("LOCAL SCC's IDENTIFIED! NODES WITH SAME RANGE VALUES BELONG TO THE SAME SCC!!\n");
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
// FIX: std::vector instead of runtime-sized arrays of std::set
// (`set<int> Fs[RSize + 1]` is a non-standard VLA of non-POD objects and can
// overflow the stack for large graphs).
vector< set<int> > Fs(RSize + 1), Bs(RSize + 1);
// Compute forward reachability (Fs) and backward reachability (Bs) per vertex.
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
resetTag<<<grid, threads>>>(d_range, d_tags, RSize, i);
cudaDeviceSynchronize();
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
}
}
}
resetTag<<<grid, threads>>>(d_range, d_tags, RSize, RSize + 2);
printf("Fs AND Bs ARE POPULATED!!\n");
uint32_t *d_Rm = NULL;
CUDA_SAFE_CALL( cudaMalloc((void **)&d_Rm, sizeof(uint32_t) * partitionCount));
uint32_t itr = 0;
printf("STARTING MERGE!\n");
//<----------Merging Phase----------------------------------
bool terminatebb = false;
bool volatile *d_terminatebb = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_terminatebb, sizeof(bool) ));
// FIX: allocate d_Occ once, outside the merge loop. The original did a
// cudaMalloc per iteration and never freed it — a GPU-memory leak that grew
// with every merge round.
bool *d_Occ = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_Occ, (RSize + 1) * sizeof(bool)));
unsigned char * _devCount;
while(!terminatebb)
{
cudaGetSymbolAddress((void **)&_devCount, devCount);
cudaMemset(_devCount, 0, sizeof(int));
itr++;
printf("Iterations %d\n", itr);
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatebb, true, sizeof(bool) ));
CUDA_SAFE_CALL(cudaMemset((void*)d_Occ, false, (RSize + 1) * sizeof(bool)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
computeInDegree<<<grid, threads>>>(d_tags, RSize, d_Pr, d_Br, d_Bc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
computeOutDegree<<<grid, threads>>>(d_tags, RSize, d_Pr, d_Fr, d_Fc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( (cudaMemset((void *)d_Rm, 0, sizeof(uint32_t) * partitionCount)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
getMaxRange<<<grid, threads>>>(d_range, d_Pr, d_Rm, RSize, d_tags, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
shiftRange<<<grid, threads>>>(d_range, d_Pr, d_Rm, RSize, d_tags);
identifyTransEdges<<<grid, threads>>>(d_Fc, d_Fr, d_range, d_tags, RSize, d_Pr, d_Occ);
cudaDeviceSynchronize(); // ensure devCount/devData are final before reading them back
int dsize;
cudaMemcpyFromSymbol(&dsize, devCount, sizeof(int));
if (dsize >= CSize)
{
printf("No space!\n");
// FIX: clamp to the buffer capacity so the copy below cannot read past devData.
dsize = CSize;
}
vector<Ele> results(dsize);
// FIX: guard the copy — &results[0] on an empty vector is undefined behaviour.
if (dsize > 0)
cudaMemcpyFromSymbol(&(results[0]), devData, dsize * sizeof(Ele));
// Trans-edges are present in results -> <u1, u2>
CUDA_SAFE_CALL( cudaMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
// For every pair of trans-edges, intersect backward/forward reach sets to
// find vertex groups that must share one SCC across partitions.
vector<vector<int> > transSets;
for(int i = 0; i < dsize; i++)
for(int j = i + 1; j < dsize; j++)
{
vector<int> temp1(Bs[results[i].u1].size() + Fs[results[j].u2].size());
vector<int>::iterator it;
it = set_intersection(Bs[results[i].u1].begin(), Bs[results[i].u1].end(), Fs[results[j].u2].begin(), Fs[results[j].u2].end(), temp1.begin());
temp1.resize(it - temp1.begin());
vector<int> temp2(Bs[results[j].u1].size() + Fs[results[i].u2].size());
it = set_intersection(Bs[results[j].u1].begin(), Bs[results[j].u1].end(), Fs[results[i].u2].begin(), Fs[results[i].u2].end(), temp2.begin());
temp2.resize(it - temp2.begin());
temp1.insert(temp1.end(), temp2.begin(), temp2.end());
if((int)temp1.size() > 0)
transSets.push_back(temp1);
}
// Propagate the maximum range value within each trans-set until fixpoint,
// unifying the SCC ids of merged components.
bool ok = true;
int ssSize = (int)transSets.size();
do
{
ok = true;
for(int i = 0; i < ssSize; i++)
{
uint32_t mxRange = 0;
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(mxRange < range[transSets[i][k]])
mxRange = range[transSets[i][k]];
}
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(range[transSets[i][k]] != mxRange)
{
range[transSets[i][k]] = mxRange;
ok = false;
}
}
}
}while(!ok);
CUDA_SAFE_CALL( cudaMemcpy(d_range, range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyHostToDevice ));
updatePr<<<grid, threads>>>(d_Pr, RSize, d_terminatebb, d_tags);
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
// Refresh Fs/Bs for vertices whose range is not yet fixed.
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
resetTag<<<grid, threads>>>(d_range, d_tags, RSize, i);
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
}
}
}
CUDA_SAFE_CALL( cudaMemcpy( &terminatebb, (const void *)d_terminatebb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
printf("MERGING DONE! ^_^\n");
//<----------Main algorithm---------------------------------
//SCC extraction: trim-1/trim-2 vertices are singleton/pair SCCs; the rest are
//counted once per distinct range value.
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
map<uint32_t, bool> seen;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
{
numberOf1Sccs++;
}
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(seen.find(range[i]) == seen.end())
{
numberOfPivotSccs++;
seen[range[i]] = true;
}
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
cudaEventRecord(sccTimeStop, 0);
cudaEventSynchronize(sccTimeStop);
cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f\n", sccTime );
CUDA_SAFE_CALL( cudaFree( d_Fc ));
CUDA_SAFE_CALL( cudaFree( d_Fr ));
CUDA_SAFE_CALL( cudaFree( d_Bc ));
CUDA_SAFE_CALL( cudaFree( d_Br ));
CUDA_SAFE_CALL( cudaFree( d_range));
CUDA_SAFE_CALL( cudaFree( d_tags));
CUDA_SAFE_CALL( cudaFree( d_pivots ));
CUDA_SAFE_CALL( cudaFree( d_auxRange));
CUDA_SAFE_CALL( cudaFree( d_Rm));
// FIX: d_Pr and d_Occ were leaked by the original version.
CUDA_SAFE_CALL( cudaFree( d_Pr ));
CUDA_SAFE_CALL( cudaFree( d_Occ ));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminatebb));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(cudaFree(HostArray[i]));
}
CUDA_SAFE_CALL(cudaFree(d_dpivots));
// FIX: release host-side scratch arrays (leaked by the original).
delete[] tags;
delete[] range;
cudaEventDestroy(sccTimeStart);
cudaEventDestroy(sccTimeStop);
#ifdef _DEBUG
cudaEventDestroy(bTimeStart);
cudaEventDestroy(bTimeStop);
cudaEventDestroy(trim1TimeStart);
cudaEventDestroy(trim1TimeStop);
cudaEventDestroy(trim2TimeStart);
cudaEventDestroy(trim2TimeStop);
cudaEventDestroy(pivotTimeStart);
cudaEventDestroy(pivotTimeStop);
cudaEventDestroy(updateTimeStart);
cudaEventDestroy(updateTimeStop);
cudaEventDestroy(wccTimeStart);
cudaEventDestroy(wccTimeStop);
#endif
return;
}
|
9b4b9048cac23f99a9c6b2c769c546450010ddea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel2DYp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Thread-block shapes (BLOCKX, BLOCKY) swept by the benchmark in main():
// four square blocks followed by sixteen 1-D column blocks of 64..1024 threads.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Square problem sizes (XSIZE, YSIZE) benchmarked; argv[1] selects how many
// of these rows main() iterates over.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver (hipify-generated): for each matrix size and each thread
// block shape, launches kernel2DYp 1000 times and prints the elapsed time in
// microseconds as [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1] = number of rows of matrices_ to sweep.
int main(int argc, char **argv) {
hipSetDevice(0);
// FIX: guard against a missing argument (original dereferenced argv[1] blindly).
if (argc < 2) {
fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
return 1;
}
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
// BUG FIX: the original allocated XSIZE*YSIZE *bytes*; the buffers hold
// doubles, so allocate XSIZE*YSIZE elements.
size_t bytes = (size_t)XSIZE * YSIZE * sizeof(double);
double *dataOutput = NULL;
hipMalloc(&dataOutput, bytes);
double *dataInput = NULL;
hipMalloc(&dataInput, bytes);
double *boundaryTop = NULL;
hipMalloc(&boundaryTop, bytes);
double *boundaryBottom = NULL;
hipMalloc(&boundaryBottom, bytes);
const double *weights = NULL;
hipMalloc(&weights, bytes);
// Scalar kernel parameters (fixed at 1 by the generator).
const int numSten = 1;
const int numStenTop = 1;
const int numStenBottom = 1;
const int nxLocal = 1;
const int nyLocal = 1;
const int BLOCK_Y = 1;
const int nx = 1;
const int nyTile = 1;
// Round the problem size up to a multiple of the block shape.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0) {
iXSIZE++;
}
while (iYSIZE % BLOCKY != 0) {
iYSIZE++;
}
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0); // no-op free: forces context initialisation before timing
hipLaunchKernelGGL(kernel2DYp, gridBlock, threadBlock, 0, 0, dataOutput, dataInput, boundaryTop, boundaryBottom, weights, numSten, numStenTop, numStenBottom, nxLocal, nyLocal, BLOCK_Y, nx, nyTile);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kernel2DYp, gridBlock, threadBlock, 0, 0, dataOutput, dataInput, boundaryTop, boundaryBottom, weights, numSten, numStenTop, numStenBottom, nxLocal, nyLocal, BLOCK_Y, nx, nyTile);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kernel2DYp, gridBlock, threadBlock, 0, 0, dataOutput, dataInput, boundaryTop, boundaryBottom, weights, numSten, numStenTop, numStenBottom, nxLocal, nyLocal, BLOCK_Y, nx, nyTile);
}
// BUG FIX: kernel launches are asynchronous; synchronise before stopping the
// clock so the measurement covers kernel execution, not just launch overhead.
hipDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
// BUG FIX: free the device buffers — the original leaked all five on every
// one of the 20 x matrix_len sweep iterations.
hipFree(dataOutput);
hipFree(dataInput);
hipFree(boundaryTop);
hipFree(boundaryBottom);
hipFree((void*)weights);
}
}
} | 9b4b9048cac23f99a9c6b2c769c546450010ddea.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel2DYp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *dataOutput = NULL;
cudaMalloc(&dataOutput, XSIZE*YSIZE);
double *dataInput = NULL;
cudaMalloc(&dataInput, XSIZE*YSIZE);
double *boundaryTop = NULL;
cudaMalloc(&boundaryTop, XSIZE*YSIZE);
double *boundaryBottom = NULL;
cudaMalloc(&boundaryBottom, XSIZE*YSIZE);
const double *weights = NULL;
cudaMalloc(&weights, XSIZE*YSIZE);
const int numSten = 1;
const int numStenTop = 1;
const int numStenBottom = 1;
const int nxLocal = 1;
const int nyLocal = 1;
const int BLOCK_Y = 1;
const int nx = 1;
const int nyTile = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel2DYp<<<gridBlock,threadBlock>>>(dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_Y,nx,nyTile);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel2DYp<<<gridBlock,threadBlock>>>(dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_Y,nx,nyTile);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel2DYp<<<gridBlock,threadBlock>>>(dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_Y,nx,nyTile);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
23262320b63699fb514e7b62c27ca1671a88a959.hip | // !!! This is a file automatically generated by hipify!!!
//Calculo de la FFT 2D
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <hipfft.h>
#define RENGLONES 3
#define COLUMNAS 6
// Demo: 2-D complex-to-complex FFT of a RENGLONES x COLUMNAS matrix using
// hipFFT. The input is filled with consecutive values (imaginary part zero),
// transformed on the device, and both input and spectrum are printed.
int main()
{
	int i,j;
	// Transform dimensions / embeddings handed to hipfftPlanMany
	// (given here in {COLUMNAS, RENGLONES} order).
	int n[2] = {COLUMNAS,RENGLONES};
	int inembed[2] = {COLUMNAS,RENGLONES};
	int onembed[2] = {COLUMNAS,RENGLONES};
	cuFloatComplex *h_xn;   // host input x[n]
	cuFloatComplex *h_Xk;   // host output X[k]
	hipfftComplex *in,*out; // device buffers
	// Allocate h_xn on the host
	h_xn = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS);
	// Allocate h_Xk on the host
	h_Xk = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS);
	// Fill x[n] with the values 1..RENGLONES*COLUMNAS (row-major), imag = 0
	for(i=0;i<RENGLONES;i++)
	{
		for(j=0;j<COLUMNAS;j++)
		{
			//h_xn[i] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
			h_xn[(i*COLUMNAS)+j] = make_cuFloatComplex((float)(((i*COLUMNAS)+j) + 1),(float)(0.0));
		}
	}
	// Print the input samples x[n]
	printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
	for(i=0;i<RENGLONES;i++)
	{
		for(j=0;j<COLUMNAS;j++)
		{
			printf(" (%f) + (%f) ",cuCrealf(h_xn[(i*COLUMNAS)+j]),cuCimagf(h_xn[(i*COLUMNAS)+j]));
		}
		printf("\n");
	}
	// Allocate "in" on the device
	hipMalloc((void**)&in,sizeof(hipfftComplex)*RENGLONES*COLUMNAS);
	// Allocate "out" on the device
	hipMalloc((void**)&out,sizeof(hipfftComplex)*RENGLONES*COLUMNAS);
	// Copy h_xn >>> in
	hipMemcpy(in,h_xn,sizeof(cuFloatComplex)*RENGLONES*COLUMNAS,hipMemcpyHostToDevice);
	//CUFFT plan
	hipfftHandle plan;
	// NOTE(review): the last argument (batch) is COLUMNAS, i.e. COLUMNAS
	// separate 2-D transforms, yet only one RENGLONES*COLUMNAS buffer is
	// allocated. A single 2-D FFT would normally use batch = 1 — confirm
	// the intended behaviour. No return status is checked anywhere below.
	hipfftPlanMany(&plan,2,n,inembed,COLUMNAS,1,onembed,COLUMNAS,1,HIPFFT_C2C,COLUMNAS);
	// Run the forward FFT
	hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
	// Copy out >>> h_Xk (this blocking copy also synchronizes with the FFT)
	hipMemcpy(h_Xk,out,sizeof(hipfftComplex)*RENGLONES*COLUMNAS,hipMemcpyDeviceToHost);
	// Print the output values X[k]
	printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
	for(i=0;i<RENGLONES;i++)
	{
		for(j=0;j<COLUMNAS;j++)
		{
			printf(" (%f) + (%f) ",cuCrealf(h_Xk[(i*COLUMNAS)+j]),cuCimagf(h_Xk[(i*COLUMNAS)+j]));
		}
		printf("\n");
	}
	// Destroy the plan
	hipfftDestroy(plan);
	// Free host and device memory
	free(h_xn);
	free(h_Xk);
	hipFree(in);
	hipFree(out);
}
| 23262320b63699fb514e7b62c27ca1671a88a959.cu | //Calculo de la FFT 2D
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#define RENGLONES 3
#define COLUMNAS 6
// Demo: 2-D complex-to-complex FFT of a RENGLONES x COLUMNAS matrix using
// cuFFT. The input is filled with consecutive values (imaginary part zero),
// transformed on the device, and both input and spectrum are printed.
int main()
{
	int i,j;
	// Transform dimensions / embeddings handed to cufftPlanMany
	// (given here in {COLUMNAS, RENGLONES} order).
	int n[2] = {COLUMNAS,RENGLONES};
	int inembed[2] = {COLUMNAS,RENGLONES};
	int onembed[2] = {COLUMNAS,RENGLONES};
	cuFloatComplex *h_xn;   // host input x[n]
	cuFloatComplex *h_Xk;   // host output X[k]
	cufftComplex *in,*out;  // device buffers
	// Allocate h_xn on the host
	h_xn = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS);
	// Allocate h_Xk on the host
	h_Xk = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS);
	// Fill x[n] with the values 1..RENGLONES*COLUMNAS (row-major), imag = 0
	for(i=0;i<RENGLONES;i++)
	{
		for(j=0;j<COLUMNAS;j++)
		{
			//h_xn[i] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
			h_xn[(i*COLUMNAS)+j] = make_cuFloatComplex((float)(((i*COLUMNAS)+j) + 1),(float)(0.0));
		}
	}
	// Print the input samples x[n]
	printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
	for(i=0;i<RENGLONES;i++)
	{
		for(j=0;j<COLUMNAS;j++)
		{
			printf(" (%f) + (%f) ",cuCrealf(h_xn[(i*COLUMNAS)+j]),cuCimagf(h_xn[(i*COLUMNAS)+j]));
		}
		printf("\n");
	}
	// Allocate "in" on the device
	cudaMalloc((void**)&in,sizeof(cufftComplex)*RENGLONES*COLUMNAS);
	// Allocate "out" on the device
	cudaMalloc((void**)&out,sizeof(cufftComplex)*RENGLONES*COLUMNAS);
	// Copy h_xn >>> in
	cudaMemcpy(in,h_xn,sizeof(cuFloatComplex)*RENGLONES*COLUMNAS,cudaMemcpyHostToDevice);
	//CUFFT plan
	cufftHandle plan;
	// NOTE(review): the last argument (batch) is COLUMNAS, i.e. COLUMNAS
	// separate 2-D transforms, yet only one RENGLONES*COLUMNAS buffer is
	// allocated. A single 2-D FFT would normally use batch = 1 — confirm
	// the intended behaviour. No return status is checked anywhere below.
	cufftPlanMany(&plan,2,n,inembed,COLUMNAS,1,onembed,COLUMNAS,1,CUFFT_C2C,COLUMNAS);
	// Run the forward FFT
	cufftExecC2C(plan,in,out,CUFFT_FORWARD);
	// Copy out >>> h_Xk (this blocking copy also synchronizes with the FFT)
	cudaMemcpy(h_Xk,out,sizeof(cufftComplex)*RENGLONES*COLUMNAS,cudaMemcpyDeviceToHost);
	// Print the output values X[k]
	printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
	for(i=0;i<RENGLONES;i++)
	{
		for(j=0;j<COLUMNAS;j++)
		{
			printf(" (%f) + (%f) ",cuCrealf(h_Xk[(i*COLUMNAS)+j]),cuCimagf(h_Xk[(i*COLUMNAS)+j]));
		}
		printf("\n");
	}
	// Destroy the plan
	cufftDestroy(plan);
	// Free host and device memory
	free(h_xn);
	free(h_Xk);
	cudaFree(in);
	cudaFree(out);
}
|
b5362de8fe1feb7c1ffab3c042869094177bb4f4.hip | // !!! This is a file automatically generated by hipify!!!
#define _SIZE_T_DEFINED
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
#include "SegmentDefs.cu"
#include "VisionMath.cu"
extern "C"
{
//index for enforce connectivity
const int dx4[4] = {-1, 0, 1, 0};
const int dy4[4] = { 0, -1, 0, 1};
//--- now kernels that do the job...
// Seed every SLIC cluster center from the single pixel at the middle of its
// grid cell. Launch layout: gridDim.x rows of cells, blockDim.x cells per row
// (one thread per cluster). nWidth/nHeight are the image dimensions.
__global__ void kInitClusterCenters( float4* floatBuffer, int nWidth, int nHeight, SLICClusterCenter* vSLICCenterList )
{
	const int cellW = nWidth / blockDim.x;
	const int cellH = nHeight / gridDim.x;
	const int centerIdx = blockIdx.x * blockDim.x + threadIdx.x;
	// Top-left pixel of this thread's cell.
	const int cellOrigin = blockIdx.x * cellH * nWidth + threadIdx.x * cellW;
	// Geometric center of the cell in image coordinates.
	float2 seedXY;
	seedXY.x = threadIdx.x * cellW + (float)cellW / 2.0;
	seedXY.y = blockIdx.x * cellH + (float)cellH / 2.0;
	//use a single point to init center
	const int seedOffset = cellOrigin + (cellH / 2) * nWidth + cellW / 2;
	SLICClusterCenter& center = vSLICCenterList[centerIdx];
	center.lab = floatBuffer[seedOffset];
	center.xy = seedXY;
	center.nPoints = 0;
}
// One SLIC k-means assignment iteration: each thread handles one pixel of
// cluster cell blockIdx.y and assigns it to the nearest of the 3x3
// neighbouring cluster centers using d = d_lab + invWeight * d_xy.
// If bLabelImg is set, the winning cluster index is written to maskBuffer.
// Launch layout: gridDim.y = number of cluster cells, gridDim.x vertical
// slices per cell; blockDim.x = cell width, blockDim.y = rows per slice.
__global__ void kIterateKmeans( int* maskBuffer, float4* floatBuffer,
	int nWidth, int nHeight, int nSegs, int nClusterIdxStride,
	SLICClusterCenter* vSLICCenterList, int listSize,
	bool bLabelImg, float weight)
{
	//for reading cluster centers
	__shared__ float4 fShareLab[3][3];
	__shared__ float2 fShareXY[3][3];
	//pixel index
	// NOTE(review): pixelUpdateList / pixelUpdateIdx are written below but
	// never read after the final __syncthreads() — they look like a leftover
	// of a removed reduction step; confirm before relying on them.
	__shared__ SLICClusterCenter pixelUpdateList[MAX_BLOCK_SIZE];
	__shared__ float2 pixelUpdateIdx[MAX_BLOCK_SIZE];
	// blockIdx.y enumerates cluster cells in row-major order.
	int clusterIdx=blockIdx.y;
	int blockCol=clusterIdx%nClusterIdxStride;
	int blockRow=clusterIdx/nClusterIdxStride;
	//int upperBlockHeight=blockDim.y*gridDim.x;
	int lowerBlockHeight=blockDim.y;
	int blockWidth=blockDim.x;
	// Cluster cells are square: vertical extent equals the cell width.
	int upperBlockHeight=blockWidth;
	int innerBlockHeightIdx=lowerBlockHeight*blockIdx.x+threadIdx.y;
	// SLIC compactness: spatial distance scaled by (M / cell size)^2.
	float M=weight;
	float invWeight=1/((blockWidth/M)*(blockWidth/M));
	int offsetBlock = (blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight)*nWidth+blockCol*blockWidth;
	int offset=offsetBlock+threadIdx.x+threadIdx.y*nWidth;
	// Clamp the 3x3 center neighbourhood at the borders of the cluster grid.
	int rBegin=(blockRow>0)?0:1;
	int rEnd=(blockRow+1>(gridDim.y/nClusterIdxStride-1))?1:2;
	int cBegin=(blockCol>0)?0:1;
	int cEnd=(blockCol+1>(nClusterIdxStride-1))?1:2;
	// The first 3x3 threads stage the neighbouring centers in shared memory.
	if (threadIdx.x<3 && threadIdx.y<3) {
		if (threadIdx.x>=cBegin && threadIdx.x<=cEnd && threadIdx.y>=rBegin && threadIdx.y<=rEnd) {
			int cmprIdx=(blockRow+threadIdx.y-1)*nClusterIdxStride+(blockCol+threadIdx.x-1);
			fShareLab[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].lab;
			fShareXY[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].xy;
		}
	}
	__syncthreads();
	// Threads past the (square) cell height or past the image have no pixel.
	if (innerBlockHeightIdx>=blockWidth)
		return;
	if (offset>=nWidth*nHeight)
		return;
	// finding the nearest center for current pixel
	float fY=blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight+threadIdx.y;
	float fX=blockCol*blockWidth+threadIdx.x;
	if (fY<nHeight && fX<nWidth)
	{
		float4 fPoint=floatBuffer[offset];
		float minDis=9999;   // sentinel larger than any expected SLIC distance
		int nearestCenter=-1;
		int nearestR, nearestC;
		for (int r=rBegin;r<=rEnd;r++)
		{
			for (int c=cBegin;c<=cEnd;c++)
			{
				int cmprIdx=(blockRow+r-1)*nClusterIdxStride+(blockCol+c-1);
				//compute SLIC distance
				// Squared color distance in LAB space.
				float fDab=(fPoint.x-fShareLab[r][c].x)*(fPoint.x-fShareLab[r][c].x)
					+(fPoint.y-fShareLab[r][c].y)*(fPoint.y-fShareLab[r][c].y)
					+(fPoint.z-fShareLab[r][c].z)*(fPoint.z-fShareLab[r][c].z);
				//fDab=sqrt(fDab);
				// Squared spatial distance.
				float fDxy=(fX-fShareXY[r][c].x)*(fX-fShareXY[r][c].x)
					+(fY-fShareXY[r][c].y)*(fY-fShareXY[r][c].y);
				//fDxy=sqrt(fDxy);
				float fDis=fDab+invWeight*fDxy;
				if (fDis<minDis)
				{
					minDis=fDis;
					nearestCenter=cmprIdx;
					nearestR=r;
					nearestC=c;
				}
			}
		}
		if (nearestCenter>-1) {
			int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
			if(pixelIdx < MAX_BLOCK_SIZE) {
				pixelUpdateList[pixelIdx].lab=fPoint;
				pixelUpdateList[pixelIdx].xy.x=fX;
				pixelUpdateList[pixelIdx].xy.y=fY;
				pixelUpdateIdx[pixelIdx].x=nearestC;
				pixelUpdateIdx[pixelIdx].y=nearestR;
			}
			if (bLabelImg)
				maskBuffer[offset]=nearestCenter;
		}
	}
	else {
		int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
		if(pixelIdx < MAX_BLOCK_SIZE) {
			pixelUpdateIdx[pixelIdx].x=-1;
			pixelUpdateIdx[pixelIdx].y=-1;
		}
	}
	// NOTE(review): some threads may have returned above, so not all threads
	// of the block reach this barrier — __syncthreads() under divergent
	// control flow is undefined behaviour; confirm the launch config avoids it.
	__syncthreads();
}
// Recompute each cluster center as the mean LAB color and mean position of
// the pixels currently labelled with it in maskBuffer. One thread per
// cluster; each thread serially scans a window of +/- one cell around its
// current center position.
__global__ void kUpdateClusterCenters( float4* floatBuffer,int* maskBuffer, int nWidth, int nHeight, int nSegs, SLICClusterCenter* vSLICCenterList, int listSize)
{
	int blockWidth=nWidth/blockDim.x;
	int blockHeight=nHeight/gridDim.x;
	int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
	// NOTE(review): offsetBlock is computed but never used below.
	int offsetBlock = threadIdx.x * blockWidth+ blockIdx.x * blockHeight * nWidth;
	float2 crntXY=vSLICCenterList[clusterIdx].xy;
	float4 avLab;   // running LAB sum, later divided by nPoints
	float2 avXY;    // running position sum, later divided by nPoints
	int nPoints=0;
	avLab.x=0;
	avLab.y=0;
	avLab.z=0;
	avXY.x=0;
	avXY.y=0;
	// Search window: one cell in every direction around the current center,
	// clamped to the image bounds.
	int yBegin=0 < (crntXY.y - blockHeight) ? (crntXY.y - blockHeight) : 0;
	int yEnd= nHeight > (crntXY.y + blockHeight) ? (crntXY.y + blockHeight) : (nHeight-1);
	int xBegin=0 < (crntXY.x - blockWidth) ? (crntXY.x - blockWidth) : 0;
	int xEnd= nWidth > (crntXY.x + blockWidth) ? (crntXY.x + blockWidth) : (nWidth-1);
	//update to cluster centers
	for (int i = yBegin; i < yEnd ; i++)
	{
		for (int j = xBegin; j < xEnd; j++)
		{
			int offset=j + i * nWidth;
			float4 fPixel=floatBuffer[offset];
			int pIdx=maskBuffer[offset];
			// Accumulate only pixels currently assigned to this cluster.
			if (pIdx==clusterIdx)
			{
				avLab.x+=fPixel.x;
				avLab.y+=fPixel.y;
				avLab.z+=fPixel.z;
				avXY.x+=j;
				avXY.y+=i;
				nPoints++;
			}
		}
	}
	// Empty cluster: keep the previous center untouched.
	if(nPoints == 0)
		return;
	avLab.x/=nPoints;
	avLab.y/=nPoints;
	avLab.z/=nPoints;
	avXY.x/=nPoints;
	avXY.y/=nPoints;
	vSLICCenterList[clusterIdx].lab=avLab;
	vSLICCenterList[clusterIdx].xy=avXY;
	vSLICCenterList[clusterIdx].nPoints=nPoints;
}
//=======================================================
// create descriptors
///------------------------------------------------------
/// Add how edges around look like...
// Append a small edge-response signature for one segment to its descriptor
// row: sample the edge image at the segment centroid and at the four
// diagonal neighbours +/- 5 pixels away, storing each sample followed by
// its absolute value in descriptor slots [5..14].
__device__ void Desc_get_hists_stats_for_each_segment (float2 xy, int id, float* edge_im , float* feat_desc, int dim_desc, int width){
	const int base = id * dim_desc + 5;   // first free slot in this row
	const int ngh = 5;                    // diagonal sampling radius (pixels)
	const int cx = (int)xy.x;
	const int cy = (int)xy.y;
	// Sample offsets: centre first, then the four diagonal neighbours.
	const int dx[5] = { 0,  ngh,  ngh, -ngh, -ngh };
	const int dy[5] = { 0,  ngh, -ngh,  ngh, -ngh };
	for (int k = 0; k < 5; ++k) {
		const float sample = edge_im[(cx + dx[k]) + (cy + dy[k]) * width];
		feat_desc[base + 2 * k]     = sample;
		feat_desc[base + 2 * k + 1] = abs(sample);
	}
}
// m_kernel_desc.Run(devSLICCCenter.DevicePointer, Owner.features_xy, Owner.features_desc , Owner.nSegs , 2 , Owner.dim_feat_desc); /// fill image with average color
// Write the position feature (x, y, point count) and the color descriptor
// (LAB plus change magnitude vs. the previous descriptor) for every segment.
// One thread per segment on a 2-D grid of 1-D blocks.
__global__ void Desc (SLICClusterCenter* vSLICCenterList , float* feat_xy , float* feat_desc , int size , int dim_xy , int dim_desc , int width , int height){//, float* edge_im){
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id<size){
		float2 xy = vSLICCenterList[id].xy;
		float4 lab = vSLICCenterList[id].lab;
		// Raw pixel coordinates (commented-out variants normalised to [-1,1]).
		feat_xy[id*dim_xy+0] = xy.x;// (xy.x-width/2) / (width/2);
		feat_xy[id*dim_xy+1] = xy.y;// (xy.y-height/2) / (height/2);
		feat_xy[id*dim_xy+2] = (int)vSLICCenterList[id].nPoints;// 0.2;// id;
		//--- old desc
		// Keep the previous descriptor so slot 3 can record how much it moved.
		float desc_prev[3];
		desc_prev[0] = feat_desc[id*dim_desc+0];
		desc_prev[1] = feat_desc[id*dim_desc+1];
		desc_prev[2] = feat_desc[id*dim_desc+2];
		feat_desc[id*dim_desc+0] = lab.x/1;
		feat_desc[id*dim_desc+1] = lab.y/1;
		feat_desc[id*dim_desc+2] = lab.z/1;
		// Distance between the old and new LAB descriptor (helper from VisionMath.cu).
		feat_desc[id*dim_desc+3] = Dist_between_two_vec(&desc_prev[0] , feat_desc+id*dim_desc , 3);
	}
}
// Element-wise copy of an int buffer into a float buffer: A[i] = (float)B[i].
// One thread per element on a 2-D grid of 1-D blocks; out-of-range threads
// fall through the bounds guard.
__global__ void Copy_intMat2Float(float* A , int* B , int size){
	const int idx = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= size)
		return;
	A[idx] = static_cast<float>(B[idx]);
}
//=======================================================
// From image to float4 buffer
///------------------------------------------------------
// Convert a single-channel (grayscale) image to XYZ color stored in float4
// (w left unset). R, G and B are all the same gray value; the coefficients
// are kept explicit to mirror kRgb2XYZ.
__global__ void kBw2XYZ(float* im , float4* outputImg , int size)
{
	const int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id >= size)
		return;
	const float gray = im[id];
	float4 px;
	px.x = gray*0.412453 +gray*0.357580 +gray*0.180423;
	px.y = gray*0.212671 +gray*0.715160 +gray*0.072169;
	px.z = gray*0.019334 +gray*0.119193 +gray*0.950227;
	outputImg[id] = px;
}
// Convert planar RGB input images to XYZ color packed into a float4 buffer
// (w left unset). One thread per pixel; extra threads exit at the guard.
__global__ void kRgb2XYZ(float* imR , float* imG , float* imB , float4* outputImg , int size)
{
	const int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id >= size)
		return;
	const float r = imR[id];
	const float g = imG[id];
	const float b = imB[id];
	// Standard RGB -> XYZ matrix (linear RGB assumed).
	float4 px;
	px.x = r*0.412453 +g*0.357580 +b*0.180423;
	px.y = r*0.212671 +g*0.715160 +b*0.072169;
	px.z = r*0.019334 +g*0.119193 +b*0.950227;
	outputImg[id] = px;
}
// Convert planar RGB input images to CIE L*a*b* (via XYZ, D65 white point),
// packed into a float4 buffer (w left unset). One thread per pixel.
__global__ void kRgb2LAB(float* imR , float* imG , float* imB , float4* outputImg , int size)
{
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id < size) {
		float _b= imB[id];
		float _g= imG[id];
		float _r= imR[id];
		// RGB -> XYZ (linear RGB assumed).
		float x=_r*0.412453 +_g*0.357580 +_b*0.180423;
		float y=_r*0.212671 +_g*0.715160 +_b*0.072169;
		float z=_r*0.019334 +_g*0.119193 +_b*0.950227;
		float l,a,b;
		// Normalize by the D65 reference white (Xn=0.950456, Yn=1, Zn=1.088754).
		x/=0.950456;
		float y3=exp(log(y)/3.0);   // cbrt(y), reused for f(y) and for L
		z/=1.088754;
		// CIELAB transfer function f(t) = t^(1/3) for t > 0.008856,
		// otherwise the linear segment 7.787*t + 16/116.
		x = x>0.008856 ? exp(log(x)/3.0) : (7.787*x+0.13793);
		y = y>0.008856 ? y3 : 7.787*y+0.13793;
		// Bug fix: the true branch previously computed z /= cbrt(z), i.e.
		// z^(2/3), instead of cbrt(z), skewing the b* channel for bright pixels.
		z = z>0.008856 ? exp(log(z)/3.0) : (7.787*z+0.13793);
		// NOTE(review): L is gated on the already-transformed y, not the
		// original Y/Yn value — matches the original code; confirm intent.
		l = y>0.008856 ? (116.0*y3-16.0) : 903.3*y;
		a=(x-y)*500.0;
		b=(y-z)*200.0;
		float4 fPixel;
		fPixel.x=l;
		fPixel.y=a;
		fPixel.z=b;
		outputImg[id]=fPixel;
	}
}
}
| b5362de8fe1feb7c1ffab3c042869094177bb4f4.cu | #define _SIZE_T_DEFINED
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
#include "SegmentDefs.cu"
#include "VisionMath.cu"
extern "C"
{
//index for enforce connectivity
const int dx4[4] = {-1, 0, 1, 0};
const int dy4[4] = { 0, -1, 0, 1};
//--- now kernels that do the job...
// Seed every SLIC cluster center from the single pixel at the middle of its
// grid cell. Launch layout: gridDim.x rows of cells, blockDim.x cells per row
// (one thread per cluster). nWidth/nHeight are the image dimensions.
__global__ void kInitClusterCenters( float4* floatBuffer, int nWidth, int nHeight, SLICClusterCenter* vSLICCenterList )
{
	const int cellW = nWidth / blockDim.x;
	const int cellH = nHeight / gridDim.x;
	const int centerIdx = blockIdx.x * blockDim.x + threadIdx.x;
	// Top-left pixel of this thread's cell.
	const int cellOrigin = blockIdx.x * cellH * nWidth + threadIdx.x * cellW;
	// Geometric center of the cell in image coordinates.
	float2 seedXY;
	seedXY.x = threadIdx.x * cellW + (float)cellW / 2.0;
	seedXY.y = blockIdx.x * cellH + (float)cellH / 2.0;
	//use a single point to init center
	const int seedOffset = cellOrigin + (cellH / 2) * nWidth + cellW / 2;
	SLICClusterCenter& center = vSLICCenterList[centerIdx];
	center.lab = floatBuffer[seedOffset];
	center.xy = seedXY;
	center.nPoints = 0;
}
// One SLIC k-means assignment iteration: each thread handles one pixel of
// cluster cell blockIdx.y and assigns it to the nearest of the 3x3
// neighbouring cluster centers using d = d_lab + invWeight * d_xy.
// If bLabelImg is set, the winning cluster index is written to maskBuffer.
// Launch layout: gridDim.y = number of cluster cells, gridDim.x vertical
// slices per cell; blockDim.x = cell width, blockDim.y = rows per slice.
__global__ void kIterateKmeans( int* maskBuffer, float4* floatBuffer,
	int nWidth, int nHeight, int nSegs, int nClusterIdxStride,
	SLICClusterCenter* vSLICCenterList, int listSize,
	bool bLabelImg, float weight)
{
	//for reading cluster centers
	__shared__ float4 fShareLab[3][3];
	__shared__ float2 fShareXY[3][3];
	//pixel index
	// NOTE(review): pixelUpdateList / pixelUpdateIdx are written below but
	// never read after the final __syncthreads() — they look like a leftover
	// of a removed reduction step; confirm before relying on them.
	__shared__ SLICClusterCenter pixelUpdateList[MAX_BLOCK_SIZE];
	__shared__ float2 pixelUpdateIdx[MAX_BLOCK_SIZE];
	// blockIdx.y enumerates cluster cells in row-major order.
	int clusterIdx=blockIdx.y;
	int blockCol=clusterIdx%nClusterIdxStride;
	int blockRow=clusterIdx/nClusterIdxStride;
	//int upperBlockHeight=blockDim.y*gridDim.x;
	int lowerBlockHeight=blockDim.y;
	int blockWidth=blockDim.x;
	// Cluster cells are square: vertical extent equals the cell width.
	int upperBlockHeight=blockWidth;
	int innerBlockHeightIdx=lowerBlockHeight*blockIdx.x+threadIdx.y;
	// SLIC compactness: spatial distance scaled by (M / cell size)^2.
	float M=weight;
	float invWeight=1/((blockWidth/M)*(blockWidth/M));
	int offsetBlock = (blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight)*nWidth+blockCol*blockWidth;
	int offset=offsetBlock+threadIdx.x+threadIdx.y*nWidth;
	// Clamp the 3x3 center neighbourhood at the borders of the cluster grid.
	int rBegin=(blockRow>0)?0:1;
	int rEnd=(blockRow+1>(gridDim.y/nClusterIdxStride-1))?1:2;
	int cBegin=(blockCol>0)?0:1;
	int cEnd=(blockCol+1>(nClusterIdxStride-1))?1:2;
	// The first 3x3 threads stage the neighbouring centers in shared memory.
	if (threadIdx.x<3 && threadIdx.y<3) {
		if (threadIdx.x>=cBegin && threadIdx.x<=cEnd && threadIdx.y>=rBegin && threadIdx.y<=rEnd) {
			int cmprIdx=(blockRow+threadIdx.y-1)*nClusterIdxStride+(blockCol+threadIdx.x-1);
			fShareLab[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].lab;
			fShareXY[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].xy;
		}
	}
	__syncthreads();
	// Threads past the (square) cell height or past the image have no pixel.
	if (innerBlockHeightIdx>=blockWidth)
		return;
	if (offset>=nWidth*nHeight)
		return;
	// finding the nearest center for current pixel
	float fY=blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight+threadIdx.y;
	float fX=blockCol*blockWidth+threadIdx.x;
	if (fY<nHeight && fX<nWidth)
	{
		float4 fPoint=floatBuffer[offset];
		float minDis=9999;   // sentinel larger than any expected SLIC distance
		int nearestCenter=-1;
		int nearestR, nearestC;
		for (int r=rBegin;r<=rEnd;r++)
		{
			for (int c=cBegin;c<=cEnd;c++)
			{
				int cmprIdx=(blockRow+r-1)*nClusterIdxStride+(blockCol+c-1);
				//compute SLIC distance
				// Squared color distance in LAB space.
				float fDab=(fPoint.x-fShareLab[r][c].x)*(fPoint.x-fShareLab[r][c].x)
					+(fPoint.y-fShareLab[r][c].y)*(fPoint.y-fShareLab[r][c].y)
					+(fPoint.z-fShareLab[r][c].z)*(fPoint.z-fShareLab[r][c].z);
				//fDab=sqrt(fDab);
				// Squared spatial distance.
				float fDxy=(fX-fShareXY[r][c].x)*(fX-fShareXY[r][c].x)
					+(fY-fShareXY[r][c].y)*(fY-fShareXY[r][c].y);
				//fDxy=sqrt(fDxy);
				float fDis=fDab+invWeight*fDxy;
				if (fDis<minDis)
				{
					minDis=fDis;
					nearestCenter=cmprIdx;
					nearestR=r;
					nearestC=c;
				}
			}
		}
		if (nearestCenter>-1) {
			int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
			if(pixelIdx < MAX_BLOCK_SIZE) {
				pixelUpdateList[pixelIdx].lab=fPoint;
				pixelUpdateList[pixelIdx].xy.x=fX;
				pixelUpdateList[pixelIdx].xy.y=fY;
				pixelUpdateIdx[pixelIdx].x=nearestC;
				pixelUpdateIdx[pixelIdx].y=nearestR;
			}
			if (bLabelImg)
				maskBuffer[offset]=nearestCenter;
		}
	}
	else {
		int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
		if(pixelIdx < MAX_BLOCK_SIZE) {
			pixelUpdateIdx[pixelIdx].x=-1;
			pixelUpdateIdx[pixelIdx].y=-1;
		}
	}
	// NOTE(review): some threads may have returned above, so not all threads
	// of the block reach this barrier — __syncthreads() under divergent
	// control flow is undefined behaviour; confirm the launch config avoids it.
	__syncthreads();
}
// Recompute each cluster center as the mean LAB color and mean position of
// the pixels currently labelled with it in maskBuffer. One thread per
// cluster; each thread serially scans a window of +/- one cell around its
// current center position.
__global__ void kUpdateClusterCenters( float4* floatBuffer,int* maskBuffer, int nWidth, int nHeight, int nSegs, SLICClusterCenter* vSLICCenterList, int listSize)
{
	int blockWidth=nWidth/blockDim.x;
	int blockHeight=nHeight/gridDim.x;
	int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
	// NOTE(review): offsetBlock is computed but never used below.
	int offsetBlock = threadIdx.x * blockWidth+ blockIdx.x * blockHeight * nWidth;
	float2 crntXY=vSLICCenterList[clusterIdx].xy;
	float4 avLab;   // running LAB sum, later divided by nPoints
	float2 avXY;    // running position sum, later divided by nPoints
	int nPoints=0;
	avLab.x=0;
	avLab.y=0;
	avLab.z=0;
	avXY.x=0;
	avXY.y=0;
	// Search window: one cell in every direction around the current center,
	// clamped to the image bounds.
	int yBegin=0 < (crntXY.y - blockHeight) ? (crntXY.y - blockHeight) : 0;
	int yEnd= nHeight > (crntXY.y + blockHeight) ? (crntXY.y + blockHeight) : (nHeight-1);
	int xBegin=0 < (crntXY.x - blockWidth) ? (crntXY.x - blockWidth) : 0;
	int xEnd= nWidth > (crntXY.x + blockWidth) ? (crntXY.x + blockWidth) : (nWidth-1);
	//update to cluster centers
	for (int i = yBegin; i < yEnd ; i++)
	{
		for (int j = xBegin; j < xEnd; j++)
		{
			int offset=j + i * nWidth;
			float4 fPixel=floatBuffer[offset];
			int pIdx=maskBuffer[offset];
			// Accumulate only pixels currently assigned to this cluster.
			if (pIdx==clusterIdx)
			{
				avLab.x+=fPixel.x;
				avLab.y+=fPixel.y;
				avLab.z+=fPixel.z;
				avXY.x+=j;
				avXY.y+=i;
				nPoints++;
			}
		}
	}
	// Empty cluster: keep the previous center untouched.
	if(nPoints == 0)
		return;
	avLab.x/=nPoints;
	avLab.y/=nPoints;
	avLab.z/=nPoints;
	avXY.x/=nPoints;
	avXY.y/=nPoints;
	vSLICCenterList[clusterIdx].lab=avLab;
	vSLICCenterList[clusterIdx].xy=avXY;
	vSLICCenterList[clusterIdx].nPoints=nPoints;
}
//=======================================================
// create descriptors
///------------------------------------------------------
/// Add how edges around look like...
// Append a small edge-response signature for one segment to its descriptor
// row: sample the edge image at the segment centroid and at the four
// diagonal neighbours +/- 5 pixels away, storing each sample followed by
// its absolute value in descriptor slots [5..14].
__device__ void Desc_get_hists_stats_for_each_segment (float2 xy, int id, float* edge_im , float* feat_desc, int dim_desc, int width){
	const int base = id * dim_desc + 5;   // first free slot in this row
	const int ngh = 5;                    // diagonal sampling radius (pixels)
	const int cx = (int)xy.x;
	const int cy = (int)xy.y;
	// Sample offsets: centre first, then the four diagonal neighbours.
	const int dx[5] = { 0,  ngh,  ngh, -ngh, -ngh };
	const int dy[5] = { 0,  ngh, -ngh,  ngh, -ngh };
	for (int k = 0; k < 5; ++k) {
		const float sample = edge_im[(cx + dx[k]) + (cy + dy[k]) * width];
		feat_desc[base + 2 * k]     = sample;
		feat_desc[base + 2 * k + 1] = abs(sample);
	}
}
// m_kernel_desc.Run(devSLICCCenter.DevicePointer, Owner.features_xy, Owner.features_desc , Owner.nSegs , 2 , Owner.dim_feat_desc); /// fill image with average color
// Write the position feature (x, y, point count) and the color descriptor
// (LAB plus change magnitude vs. the previous descriptor) for every segment.
// One thread per segment on a 2-D grid of 1-D blocks.
__global__ void Desc (SLICClusterCenter* vSLICCenterList , float* feat_xy , float* feat_desc , int size , int dim_xy , int dim_desc , int width , int height){//, float* edge_im){
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id<size){
		float2 xy = vSLICCenterList[id].xy;
		float4 lab = vSLICCenterList[id].lab;
		// Raw pixel coordinates (commented-out variants normalised to [-1,1]).
		feat_xy[id*dim_xy+0] = xy.x;// (xy.x-width/2) / (width/2);
		feat_xy[id*dim_xy+1] = xy.y;// (xy.y-height/2) / (height/2);
		feat_xy[id*dim_xy+2] = (int)vSLICCenterList[id].nPoints;// 0.2;// id;
		//--- old desc
		// Keep the previous descriptor so slot 3 can record how much it moved.
		float desc_prev[3];
		desc_prev[0] = feat_desc[id*dim_desc+0];
		desc_prev[1] = feat_desc[id*dim_desc+1];
		desc_prev[2] = feat_desc[id*dim_desc+2];
		feat_desc[id*dim_desc+0] = lab.x/1;
		feat_desc[id*dim_desc+1] = lab.y/1;
		feat_desc[id*dim_desc+2] = lab.z/1;
		// Distance between the old and new LAB descriptor (helper from VisionMath.cu).
		feat_desc[id*dim_desc+3] = Dist_between_two_vec(&desc_prev[0] , feat_desc+id*dim_desc , 3);
	}
}
// Element-wise copy of an int buffer into a float buffer: A[i] = (float)B[i].
// One thread per element on a 2-D grid of 1-D blocks; out-of-range threads
// fall through the bounds guard.
__global__ void Copy_intMat2Float(float* A , int* B , int size){
	const int idx = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= size)
		return;
	A[idx] = static_cast<float>(B[idx]);
}
//=======================================================
// From image to float4 buffer
///------------------------------------------------------
// Convert a single-channel (grayscale) image to XYZ color stored in float4
// (w left unset). R, G and B are all the same gray value; the coefficients
// are kept explicit to mirror kRgb2XYZ.
__global__ void kBw2XYZ(float* im , float4* outputImg , int size)
{
	const int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id >= size)
		return;
	const float gray = im[id];
	float4 px;
	px.x = gray*0.412453 +gray*0.357580 +gray*0.180423;
	px.y = gray*0.212671 +gray*0.715160 +gray*0.072169;
	px.z = gray*0.019334 +gray*0.119193 +gray*0.950227;
	outputImg[id] = px;
}
// Convert planar RGB input images to XYZ color packed into a float4 buffer
// (w left unset). One thread per pixel; extra threads exit at the guard.
__global__ void kRgb2XYZ(float* imR , float* imG , float* imB , float4* outputImg , int size)
{
	const int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id >= size)
		return;
	const float r = imR[id];
	const float g = imG[id];
	const float b = imB[id];
	// Standard RGB -> XYZ matrix (linear RGB assumed).
	float4 px;
	px.x = r*0.412453 +g*0.357580 +b*0.180423;
	px.y = r*0.212671 +g*0.715160 +b*0.072169;
	px.z = r*0.019334 +g*0.119193 +b*0.950227;
	outputImg[id] = px;
}
// Convert planar RGB input images to CIE L*a*b* (via XYZ, D65 white point),
// packed into a float4 buffer (w left unset). One thread per pixel.
__global__ void kRgb2LAB(float* imR , float* imG , float* imB , float4* outputImg , int size)
{
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id < size) {
		float _b= imB[id];
		float _g= imG[id];
		float _r= imR[id];
		// RGB -> XYZ (linear RGB assumed).
		float x=_r*0.412453 +_g*0.357580 +_b*0.180423;
		float y=_r*0.212671 +_g*0.715160 +_b*0.072169;
		float z=_r*0.019334 +_g*0.119193 +_b*0.950227;
		float l,a,b;
		// Normalize by the D65 reference white (Xn=0.950456, Yn=1, Zn=1.088754).
		x/=0.950456;
		float y3=exp(log(y)/3.0);   // cbrt(y), reused for f(y) and for L
		z/=1.088754;
		// CIELAB transfer function f(t) = t^(1/3) for t > 0.008856,
		// otherwise the linear segment 7.787*t + 16/116.
		x = x>0.008856 ? exp(log(x)/3.0) : (7.787*x+0.13793);
		y = y>0.008856 ? y3 : 7.787*y+0.13793;
		// Bug fix: the true branch previously computed z /= cbrt(z), i.e.
		// z^(2/3), instead of cbrt(z), skewing the b* channel for bright pixels.
		z = z>0.008856 ? exp(log(z)/3.0) : (7.787*z+0.13793);
		// NOTE(review): L is gated on the already-transformed y, not the
		// original Y/Yn value — matches the original code; confirm intent.
		l = y>0.008856 ? (116.0*y3-16.0) : 903.3*y;
		a=(x-y)*500.0;
		b=(y-z)*200.0;
		float4 fPixel;
		fPixel.x=l;
		fPixel.y=a;
		fPixel.z=b;
		outputImg[id]=fPixel;
	}
}
|
2668a920c85eb5a3ba2184085dfe624391fbfb91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cuda_mesh_operations.hpp>
#include <cuda_util.cuh>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <timing_struct.hpp>
#include <iomanip>
using namespace thrust;
namespace ab {
// Flat global thread index for a 1-D launch.
__device__ int thread_offset(){
	return blockIdx.x * blockDim.x + threadIdx.x;
}
// Total number of threads in a 1-D launch (grid-stride step size).
__device__ int thread_stride(){
	return blockDim.x * gridDim.x;
}
// Replaces *a with sqrtf(*a) on the device (single-value utility kernel).
__global__ void cuda_sqrtf(float* a) {
	*a = sqrtf(*a);
}
// Empty kernel; presumably used for warm-up / timing — TODO confirm.
__global__ void kernel_train() {
}
// Normalize every float3 in-place, one element at a time via a grid-stride loop.
__global__ void kernel_normalize_vectors(float3* vec,unsigned size){
	const int step = thread_stride();
	for (int idx = thread_offset(); idx < size; idx += step) {
		vec[idx] = normalized(vec[idx]);
	}
}
// Divide each float3 component-wise by the matching float divisor,
// implemented as multiplication by the reciprocal (grid-stride loop).
__global__ void kernel_divide(float3* vec,float* div,unsigned vec_size){
	const int step = thread_stride();
	for (int idx = thread_offset(); idx < vec_size; idx += step) {
		const float inv = 1.f/div[idx];
		vec[idx].x *= inv;
		vec[idx].y *= inv;
		vec[idx].z *= inv;
	}
}
// Divide each float3 component-wise by the matching int divisor,
// implemented as multiplication by the reciprocal (grid-stride loop).
__global__ void kernel_divide(float3* vec, int* div, unsigned vec_size) {
	const int step = thread_stride();
	for (int idx = thread_offset(); idx < vec_size; idx += step) {
		const float inv = 1.f/static_cast<float>(div[idx]);
		vec[idx].x *= inv;
		vec[idx].y *= inv;
		vec[idx].z *= inv;
	}
}
// Scatter-style vertex normals: each face computes one normal from its first
// corner (assuming a planar polygon) and atomically adds it to the normal of
// every vertex in the face. normals[] must be zeroed beforehand and
// normalized afterwards.
// NOTE(review): the loop runs to face_count-1 and reads faces[i+1], so
// faces[] is expected to contain face_count offsets including an end
// sentinel (otherwise the last face is skipped) — confirm with the caller.
__global__ void kernel_calculate_normals_scatter(float3* positions,int* faces,int* face_indices, float3* normals, int face_count) {
	int stride = thread_stride();
	int offset = thread_offset();
	for (int i = offset; i < face_count-1; i += stride) {
		int base_index = faces[i];
		int next_face = faces[i+1];
		// a = last vertex of the face, b = first, c = second.
		float3 point_a = positions[face_indices[next_face-1]];
		float3 point_b = positions[face_indices[base_index]];
		float3 point_c = positions[face_indices[base_index+1]];
		float3 edge_vector_ab = point_b-point_a;
		float3 edge_vector_bc = point_c-point_b;
		float3 normal{ 0.f,0.f,0.f };
		//assume planar polygon
		normal += normalized(cross3df(edge_vector_ab, edge_vector_bc));
		//add to every vertice in the face
		for (int j = 0;j< next_face-base_index;++j){
			float3* vn = &normals[face_indices[base_index+j]];
			// Per-component atomics: several faces update the same vertex concurrently.
			atomicAdd(&vn->x, normal.x);
			atomicAdd(&vn->y, normal.y);
			atomicAdd(&vn->z, normal.z);
		}
	}
}
//does it only once
// Compute one normal per loop (face) from its first three vertices, assuming
// planar, consistently wound polygons. Border loops are skipped and their
// normals entry is left untouched.
__global__ void kernel_calculate_face_normals_gather(Vertex* vertices, ReducedHalfEdge* half_edges, Loop* loops, float3* normals, unsigned loops_count) {
	int stride = thread_stride();
	int offset = thread_offset();
	for (int i = offset; i < loops_count; i += stride) {
		if (loops[i].is_border) {
			continue;
		}
		// Walk the first three half-edges of the loop to get corners a, b, c.
		ReducedHalfEdge halfedge_a = half_edges[loops[i].he];
		ReducedHalfEdge halfedge_b = half_edges[halfedge_a.next];
		ReducedHalfEdge halfedge_c = half_edges[halfedge_b.next];
		float3 a = vertices[halfedge_a.origin].position;
		float3 b = vertices[halfedge_b.origin].position;
		float3 c = vertices[halfedge_c.origin].position;
		// Bug fix: the face normal is cross(b-a, c-b). The previous code
		// computed normalized(cross3df(b-a, c) - b) — the "- b" sat outside
		// the cross product's second argument, which is not a surface normal
		// (matches the edge-vector formula used in kernel_calculate_normals_gather).
		normals[i] = normalized(cross3df(b - a, c - b));
	}
}
// Gather-style vertex normals from precomputed face normals: for each vertex,
// circulate its half-edge fan, sum the normals of all adjacent non-border
// loops, then normalize the sum.
__global__ void kernel_calculate_normals_gather_from_loops(Vertex* vertices, ReducedHalfEdge* half_edges, Loop* loops, float3* face_normals, float3* normals, unsigned vertice_count) {
	int stride = thread_stride();
	int offset = thread_offset();
	for (int i = offset; i < vertice_count; i += stride) {
		const auto& vert = vertices[i];
		// Orphaned vertex without an incident half-edge: leave its normal as is.
		if (vert.he == -1) {
			continue;
		}
		float3 normal{ 0.f,0.f,0.f };
		int he = vert.he;
		do {//for every neighbor
			const ReducedHalfEdge& halfedge = half_edges[he];
			//skip boundary loops
			if (loops[halfedge.loop].is_border) {
				he = half_edges[halfedge.inv].next;
				continue;
			}
			normal += face_normals[halfedge.loop];
			// Step to the next outgoing half-edge around the vertex.
			he = half_edges[halfedge.inv].next;
		} while (he != vert.he);
		normals[i] = normalized(normal);
	}
}
//recalculates normals for every face
// Gather-style vertex normals computed directly from positions: for each
// vertex, accumulate the normalized cross product of its two incident edges
// in every adjacent non-border face, then normalize the sum.
// NOTE(review): the bound parameter is named loops_count but the loop indexes
// vertices[i] and normals[i] — presumably the vertex count; confirm at the
// call site.
__global__ void kernel_calculate_normals_gather(Vertex* vertices, HalfEdge* half_edges,Loop* loops, float3* normals, unsigned loops_count) {
	int stride = thread_stride();
	int offset = thread_offset();
	//calculate normals
	for (int i = offset; i < loops_count; i+=stride) {
		auto& vert = vertices[i];
		// Orphaned vertex without an incident half-edge: leave its normal as is.
		if (vert.he == -1) {
			continue;
		}
		float3 normal{ 0.f,0.f,0.f };
		int he = vert.he;
		do {//for every neighbor
			HalfEdge& halfedge = half_edges[he];
			//skip boundary loops
			if (loops[halfedge.loop].is_border) {
				he = half_edges[halfedge.inv].next;
				continue;
			}
			// c = vertex at the far end of this half-edge, a = previous corner.
			float3 point_c = vertices[half_edges[halfedge.inv].origin].position;
			float3 point_a = vertices[half_edges[halfedge.prev].origin].position;
			//float3 point_b = vert.position;
			float3 edge_vector_ab = vert.position - point_a;
			float3 edge_vector_bc = point_c - vert.position;
			normal += normalized(cross3df(edge_vector_ab, edge_vector_bc));
			he = half_edges[halfedge.inv].next;
		} while (he != vert.he);
		normals[i] = normalized(normal);
	}
}
//Computes the one-ring centroid of every vertex: the average of the positions
//of all directly connected neighbors, gathered by circulating the outgoing
//half-edges. Orphaned vertices (he == -1) are skipped.
__global__ void kernel_calculate_ring_centroids_gather(Vertex* vertices, HalfEdge* half_edges, float3* centroids, unsigned vertice_count) {
    int step = thread_stride();
    for (int v = thread_offset(); v < vertice_count; v += step) {
        auto& vertex = vertices[v];
        //orphaned vertex: no ring to average, leave centroids[v] untouched
        if (vertex.he == -1) {
            continue;
        }
        float3 sum;
        sum.x = 0.f;
        sum.y = 0.f;
        sum.z = 0.f;
        unsigned ring_size = 0;
        int edge = vertex.he;
        for (;;) {//walk the one-ring once
            HalfEdge& out_edge = half_edges[edge];
            HalfEdge& in_edge = half_edges[out_edge.inv];
            sum += vertices[in_edge.origin].position;
            ++ring_size;
            edge = in_edge.next;
            if (edge == vertex.he) {
                break;
            }
        }
        sum.x /= ring_size;
        sum.y /= ring_size;
        sum.z /= ring_size;
        centroids[v] = sum;
    }
}
//Scatter variant of the ring-centroid computation: every face adds, for each
//of its corners, the position of the NEXT corner to that corner's centroid
//accumulator, counting contributions in duped_neighbor_counts (shared edges
//are counted once per adjacent face). A later division finalizes the average.
__global__ void kernel_calculate_ring_centroids_scatter_no_borders(float3* positions, int* faces, int* face_indices, int* face_sizes, float3* centroids, int* duped_neighbor_counts, int face_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i < face_count; i += stride) {
        int base_index = faces[i];
        int face_size = face_sizes[i];
        //circulate trough the face and add it to the centroids
        for (int j = 0; j < face_size; ++j) {
            float3 next = positions[face_indices[base_index + ((j+1) % face_size)]];
            //float3 prev = positions[face_indices[base_index + ((j-1) % face_size)]];
            float3* centroid = centroids+face_indices[base_index+j];
            int* neighbor_count = duped_neighbor_counts+face_indices[base_index+j];
            //bugfix: restore '&centroid->x' — the '&cent' prefix had been
            //mangled into a cent-sign character by encoding corruption,
            //which does not compile
            atomicAdd(&centroid->x, next.x);
            atomicAdd(&centroid->y, next.y);
            atomicAdd(&centroid->z, next.z);
            atomicAdd(neighbor_count, 1);
        }
    }
}
//Scatter variant of the ring-centroid computation over a deduplicated edge
//list: each edge adds either endpoint's position to the other endpoint's
//accumulator and bumps both neighbor counts. Entries with first == -1 are
//padding left over from the unique() compaction and are ignored.
__global__ void kernel_calculate_ring_centroids_scatter(float3* positions, pair<int,int>* edges, float3* centroids, int* neighbor_counts, int edge_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i < edge_count; i += stride) {
        pair<int, int> edge = edges[i];
        if (edge.first > -1) {
            float3* centroid_a = centroids + edge.first;
            float3* centroid_b = centroids + edge.second;
            float3 pa = positions[edge.first];
            float3 pb = positions[edge.second];
            //bugfix: restore '&centroid_a->x' / '&centroid_b->x' — the '&cent'
            //prefix had been mangled into a cent-sign character by encoding
            //corruption, which does not compile
            atomicAdd(&centroid_a->x, pb.x);
            atomicAdd(&centroid_a->y, pb.y);
            atomicAdd(&centroid_a->z, pb.z);
            atomicAdd(&centroid_b->x, pa.x);
            atomicAdd(&centroid_b->y, pa.y);
            atomicAdd(&centroid_b->z, pa.z);
            atomicAdd(neighbor_counts + edge.first, 1);
            atomicAdd(neighbor_counts + edge.second, 1);
        }
    }
}
//Emits one canonical vertex pair (smaller index first) for every edge of
//every face, so that duplicates contributed by adjacent faces can later be
//removed with sort + unique. faces[] holds face start offsets; faces[i+1]
//marks the end of face i, hence the i+1 < face_count loop bound.
__global__ void kernel_find_edges(pair<int,int>* pairs, int* faces, int* face_indices,int face_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i+1<face_count; i += stride) {
        int begin = faces[i];
        int end = faces[i + 1];
        for (int j = begin; j < end; ++j) {
            int a = face_indices[j];
            //the last corner of the face wraps around to the first one
            int b = (j + 1 == end) ? face_indices[begin] : face_indices[j + 1];
            //store in canonical (min, max) order
            pairs[j] = (a > b) ? pair<int, int>(b, a) : pair<int, int>(a, b);
        }
    }
}
//Lexicographic ordering on (first, second) for integer pairs; usable from
//both host and device code.
struct PairLessThan {
    __device__ __host__ bool operator()(const pair<int, int>& a, const pair<int, int>& b) {
        if (a.first != b.first) {
            return a.first < b.first;
        }
        return a.second < b.second;
    }
};
//Two-pass per-vertex normals on the GPU: pass 1 writes one normal per face
//loop, pass 2 gathers them around every vertex. Fills mesh->normals and
//records upload / per-kernel / download timings in `timing`.
//NOTE(review): the name suggests the face-normal kernel is known-broken;
//this wrapper only fixes its own buffer sizing, not the kernel math.
void normals_he_cuda_twopass_broken(HalfedgeMesh* mesh, int threads, int blocks, timing_struct& timing) {
    mesh->normals.resize(mesh->vertices.size()); //prepare vector for normals
    create_reduced_halfedges(*mesh);
    //threads == 0 means "auto": derive launch config from occupancy
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_normals_gather);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    start = std::chrono::steady_clock::now(); //upload time
    thrust::device_vector<ReducedHalfEdge> half_edges(mesh->reduced_half_edges.size());
    thrust::device_vector<Vertex> vertices(mesh->vertices.size());
    thrust::device_vector<Loop> loops(mesh->loops.size());
    thrust::device_vector<float3> normals(mesh->vertices.size());
    thrust::device_vector<float3> face_normals(loops.size(), { 0.f,0.f,0.f });
    //bugfix: the copy size must use sizeof(ReducedHalfEdge); it previously
    //used sizeof(HalfEdge), mis-sizing the reduced half-edge transfer
    hipMemcpyAsync(half_edges.data().get(), mesh->reduced_half_edges.data(), mesh->reduced_half_edges.size() * sizeof(ReducedHalfEdge), hipMemcpyHostToDevice);
    hipMemcpyAsync(vertices.data().get(), mesh->vertices.data(), mesh->vertices.size() * sizeof(Vertex), hipMemcpyHostToDevice);
    hipMemcpyAsync(loops.data().get(), mesh->loops.data(), mesh->loops.size() * sizeof(Loop), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    //setup timer
    hipEvent_t cu_start, cu_stop;
    hipEventCreate(&cu_start);
    hipEventCreate(&cu_stop);
    //kernel launch
    pstart = std::chrono::steady_clock::now();
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_calculate_face_normals_gather), dim3(blocks), dim3(threads), 0, 0, vertices.data().get(),
    half_edges.data().get(), loops.data().get(), face_normals.data().get(), loops.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    //hipDeviceSynchronize();
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_calculate_normals_gather_from_loops), dim3(blocks), dim3(threads), 0, 0, vertices.data().get(),
    half_edges.data().get(), loops.data().get(), face_normals.data().get(),normals.data().get(), vertices.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    //hipDeviceSynchronize();
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_b = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    hipEventDestroy(cu_start);
    hipEventDestroy(cu_stop);
    start = std::chrono::steady_clock::now();//download time
    thrust::copy(normals.begin(), normals.end(), mesh->normals.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
//Single-pass per-vertex normals from a halfedge mesh on the GPU.
//Fills mesh->normals and records upload / kernel / download timings.
//threads == 0 selects an occupancy-based launch configuration.
void normals_he_cuda(HalfedgeMesh* mesh, int threads,int blocks, timing_struct& timing) {
    mesh->normals.resize(mesh->vertices.size()); //prepare vector for normals
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_normals_gather);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    start = std::chrono::steady_clock::now(); //upload time
    thrust::device_vector<HalfEdge> half_edges(mesh->half_edges.size());
    thrust::device_vector<Vertex> vertices(mesh->vertices.size());
    thrust::device_vector<Loop> loops(mesh->loops.size());
    thrust::device_vector<float3> normals(mesh->vertices.size());
    hipMemcpyAsync(half_edges.data().get(), mesh->half_edges.data(), mesh->half_edges.size() * sizeof(HalfEdge), hipMemcpyHostToDevice);
    hipMemcpyAsync(vertices.data().get(), mesh->vertices.data(), mesh->vertices.size() * sizeof(Vertex), hipMemcpyHostToDevice);
    hipMemcpyAsync(loops.data().get(), mesh->loops.data(), mesh->loops.size() * sizeof(Loop), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    //setup timer
    hipEvent_t cu_start, cu_stop;
    hipEventCreate(&cu_start);
    hipEventCreate(&cu_stop);
    //kernel launch; note the count passed is vertices.size() — the kernel
    //iterates vertices even though its parameter is named loops_count
    pstart = std::chrono::steady_clock::now();
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_calculate_normals_gather), dim3(blocks),dim3(threads), 0, 0, vertices.data().get(),
    half_edges.data().get(),loops.data().get(), normals.data().get(), vertices.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    //hipDeviceSynchronize();
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    hipEventDestroy(cu_start);
    hipEventDestroy(cu_stop);
    start = std::chrono::steady_clock::now();//download time
    thrust::copy(normals.begin(), normals.end(), mesh->normals.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
/// normals from a simple mesh
//Per-vertex normals from an indexed face list (SimpleMesh) on the GPU:
//a scatter kernel atomically accumulates face normals per vertex, then a
//second kernel normalizes the sums. Fills mesh->normals and records
//upload / kernel-a / kernel-b / download timings.
void normals_sm_cuda(SimpleMesh* mesh,int threads,int blocks, timing_struct& timing) {
    mesh->normals.resize(mesh->positions.size());
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_normals_scatter);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    start = std::chrono::steady_clock::now(); //upload time
    thrust::device_vector<float3> positions(mesh->positions.size());
    thrust::device_vector<int> faces(mesh->face_starts.size());
    thrust::device_vector<int> face_indices(mesh->faces.size());
    thrust::device_vector<float3> normals(mesh->positions.size());
    hipMemcpyAsync(positions.data().get(), mesh->positions.data(), mesh->positions.size() * sizeof(float3), hipMemcpyHostToDevice);
    hipMemcpyAsync(faces.data().get(), mesh->face_starts.data(), mesh->face_starts.size() * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpyAsync(face_indices.data().get(), mesh->faces.data(), mesh->faces.size() * sizeof(int), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    hipEvent_t cu_start, cu_stop;
    hipEventCreate(&cu_start);
    hipEventCreate(&cu_stop);
    //run kernel (scatter: accumulate unnormalized face normals per vertex)
    pstart = std::chrono::steady_clock::now();
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_calculate_normals_scatter), dim3(blocks), dim3(threads), 0, 0, positions.data().get(), faces.data().get(), face_indices.data().get(), normals.data().get(), faces.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    //run secound kernel (normalize the accumulated sums in place)
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_normalize_vectors), dim3(blocks), dim3(threads), 0, 0, normals.data().get(),normals.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_b = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    hipEventDestroy(cu_start);
    hipEventDestroy(cu_stop);
    start = std::chrono::steady_clock::now();
    //printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));
    thrust::copy(normals.begin(), normals.end(), mesh->normals.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
//One-ring centroids of every vertex of a halfedge mesh, computed on the GPU
//with a single gather kernel. Results are written into centroids_array;
//upload / kernel / download timings are recorded in `timing`.
void centroids_he_cuda(HalfedgeMesh* mesh, attribute_vector<float3>& centroids_array, int threads,int blocks, timing_struct& timing) {
    centroids_array.resize(mesh->vertices.size());
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_ring_centroids_gather);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    start = std::chrono::steady_clock::now();
    thrust::device_vector<HalfEdge> half_edges(mesh->half_edges.size());
    thrust::device_vector<Vertex> vertices(mesh->vertices.size());
    //NOTE(review): loops is uploaded but not passed to the gather kernel —
    //presumably kept for symmetry with the other wrappers; confirm it is needed
    thrust::device_vector<Loop> loops(mesh->loops.size());
    thrust::device_vector<float3> centroids(mesh->vertices.size(), { 0,0,0 });
    hipMemcpyAsync(half_edges.data().get(), mesh->half_edges.data(), mesh->half_edges.size() * sizeof(HalfEdge), hipMemcpyHostToDevice);
    hipMemcpyAsync(vertices.data().get(), mesh->vertices.data(), mesh->vertices.size() * sizeof(Vertex), hipMemcpyHostToDevice);
    hipMemcpyAsync(loops.data().get(), mesh->loops.data(), mesh->loops.size() * sizeof(Loop), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    //create events
    hipEvent_t cu_start, cu_stop;
    hipEventCreate(&cu_start);
    hipEventCreate(&cu_stop);
    //launch kernel
    pstart = std::chrono::steady_clock::now();
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_calculate_ring_centroids_gather), dim3(blocks), dim3(threads), 0, 0, vertices.data().get(), half_edges.data().get(), centroids.data().get(), vertices.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    hipEventDestroy(cu_start);
    hipEventDestroy(cu_stop);
    //read back
    start = std::chrono::steady_clock::now();
    thrust::copy(centroids.begin(), centroids.end(), centroids_array.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
//One-ring centroids from an indexed face list (SimpleMesh): builds a
//canonical edge list on the GPU, deduplicates it with sort + unique, then
//scatters endpoint positions into per-vertex accumulators and divides by the
//neighbor counts. Results go to centroids_array; all phases are timed.
void centroids_sm_cuda(SimpleMesh* mesh, attribute_vector<float3>& centroids_array, int threads,int blocks, timing_struct& timing) {
    centroids_array.resize(mesh->positions.size());
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_ring_centroids_scatter);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    //Data Upload Phase
    start = std::chrono::steady_clock::now();
    thrust::device_vector<float3> positions(mesh->positions.size());
    thrust::device_vector<int> faces(mesh->face_starts.size());
    thrust::device_vector<int> faces_indices(mesh->faces.size());
    thrust::device_vector<float3> centroids(mesh->positions.size(), { 0,0,0 });
    thrust::device_vector<int> neighbor_count(mesh->positions.size(), 0);
    //(-1,-1) marks unused slots so the scatter kernel can skip them
    thrust::device_vector<pair<int, int>> edges(faces_indices.size() - 1, pair<int, int>(-1, -1));//max size == edgecount <= face_indices - 1
    hipMemcpyAsync(positions.data().get(), mesh->positions.data(), mesh->positions.size() * sizeof(float3), hipMemcpyHostToDevice);
    hipMemcpyAsync(faces.data().get(), mesh->face_starts.data(), mesh->face_starts.size() * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpyAsync(faces_indices.data().get(), mesh->faces.data(), mesh->faces.size() * sizeof(int), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    //Processing phase
    hipEvent_t cu_start, cu_stop;
    hipEventCreate(&cu_start);
    hipEventCreate(&cu_stop);
    //prepare edge list
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_find_edges), dim3(blocks),dim3(threads), 0, 0, edges.data().get(), faces.data().get(), faces_indices.data().get(), faces.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    timing.kernel_execution_time_prepare = cuda_elapsed_time(cu_start, cu_stop);
    start = std::chrono::steady_clock::now();
    //sorts the pairs as raw 64-bit keys so duplicates become adjacent;
    //NOTE(review): assumes sizeof(pair<int,int>) == sizeof(size_t) with no
    //padding — confirm on the target platform
    thrust::sort(thrust::device,reinterpret_cast<size_t*>(edges.data().get()), reinterpret_cast<size_t*>(edges.data().get()+edges.size()));
    stop = std::chrono::steady_clock::now();
    timing.sorting_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    start = std::chrono::steady_clock::now();
    auto last = thrust::unique(thrust::device,edges.begin(), edges.end());
    //edges.resize(last-edges.begin());
    stop = std::chrono::steady_clock::now();
    timing.unique_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    //run kernel; only the first (last - edges.begin()) entries are valid edges
    pstart = std::chrono::steady_clock::now();
    hipEventRecord(cu_start);
    //kernel_calculate_ring_centroids_scatter_no_borders<<<blocks, threads>>>(positions.data().get(), faces.data().get(),
    //	faces_indices.data().get(), faces_sizes.data().get(), centroids.data().get(),neighbor_count.data().get(), faces.size());
    hipLaunchKernelGGL(( kernel_calculate_ring_centroids_scatter), dim3(blocks), dim3(threads), 0, 0, positions.data().get(),edges.data().get(), centroids.data().get(),neighbor_count.data().get(), last - edges.begin());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    //divide accumulated sums by the neighbor counts to get the averages
    hipEventRecord(cu_start);
    hipLaunchKernelGGL(( kernel_divide), dim3(blocks), dim3(threads), 0, 0, centroids.data().get(), neighbor_count.data().get(), centroids.size());
    hipEventRecord(cu_stop);
    hipEventSynchronize(cu_stop);
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_b = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    hipEventDestroy(cu_start);
    hipEventDestroy(cu_stop);
    //copy back phase
    start = std::chrono::steady_clock::now();
    thrust::copy(centroids.begin(), centroids.end(), centroids_array.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
//causes the cuda driver to load to prevent loads on mesuring
void prepare_device() {
    //warm-up launch: forces driver/context initialization so later
    //measurements do not include it
    hipLaunchKernelGGL(( kernel_train), dim3(1),dim3(256), 0, 0, );
    hipDeviceSynchronize();
}
}
| 2668a920c85eb5a3ba2184085dfe624391fbfb91.cu | #include <cuda_mesh_operations.hpp>
#include <cuda_util.cuh>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <timing_struct.hpp>
#include <iomanip>
using namespace thrust;
namespace ab {
//global linear index of the calling thread (1D launch)
__device__ int thread_offset(){
    return threadIdx.x + blockIdx.x * blockDim.x;
}
//total number of threads in the grid; used as the grid-stride loop step
__device__ int thread_stride(){
    return gridDim.x * blockDim.x;
}
//single-value helper kernel: replaces *a with sqrtf(*a) on the device
//(intended for a 1-thread launch)
__global__ void cuda_sqrtf(float* a) {
    *a = sqrtf(*a);
}
__global__ void kernel_train() {
    //intentionally empty: launched once by prepare_device() to trigger lazy
    //driver/context initialization before timing runs
}
//normalizes every vector of the array in place (grid-stride loop)
__global__ void kernel_normalize_vectors(float3* vec,unsigned size){
    int step = thread_stride();
    for (int idx = thread_offset(); idx < size; idx += step) {
        vec[idx] = normalized(vec[idx]);
    }
}
//in-place componentwise division: vec[i] /= div[i]
//(implemented as multiplication by the reciprocal, like the original)
__global__ void kernel_divide(float3* vec,float* div,unsigned vec_size){
    int step = thread_stride();
    for (int idx = thread_offset(); idx < vec_size; idx += step) {
        float scale = 1.f/div[idx];
        float3& v = vec[idx];
        v.x *= scale;
        v.y *= scale;
        v.z *= scale;
    }
}
//in-place componentwise division by an integer count per element
//(e.g. centroid sums divided by neighbor counts)
__global__ void kernel_divide(float3* vec, int* div, unsigned vec_size) {
    int step = thread_stride();
    for (int idx = thread_offset(); idx < vec_size; idx += step) {
        float scale = 1.f/static_cast<float>(div[idx]);
        float3& v = vec[idx];
        v.x *= scale;
        v.y *= scale;
        v.z *= scale;
    }
}
//Scatter normals for an indexed face list: each face computes one normal
//from its first three vertices (planar-polygon assumption) and atomically
//adds it to every vertex of the face; kernel_normalize_vectors must run
//afterwards. faces[] holds start offsets, faces[i+1] ends face i — hence
//the face_count-1 loop bound.
__global__ void kernel_calculate_normals_scatter(float3* positions,int* faces,int* face_indices, float3* normals, int face_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i < face_count-1; i += stride) {
        int base_index = faces[i];
        int next_face = faces[i+1];
        //last, first and second corner of the face
        float3 point_a = positions[face_indices[next_face-1]];
        float3 point_b = positions[face_indices[base_index]];
        float3 point_c = positions[face_indices[base_index+1]];
        float3 edge_vector_ab = point_b-point_a;
        float3 edge_vector_bc = point_c-point_b;
        float3 normal{ 0.f,0.f,0.f };
        //assume planar polygon
        normal += normalized(cross3df(edge_vector_ab, edge_vector_bc));
        //add to every vertice in the face
        for (int j = 0;j< next_face-base_index;++j){
            float3* vn = &normals[face_indices[base_index+j]];
            atomicAdd(&vn->x, normal.x);
            atomicAdd(&vn->y, normal.y);
            atomicAdd(&vn->z, normal.z);
        }
    }
}
//does it only once
//Pass 1 of the two-pass normal computation: writes one face normal per loop,
//computed from the first three vertices of the face (assumes planar polygon).
//Border loops are skipped and their slot in normals[] is left untouched.
__global__ void kernel_calculate_face_normals_gather(Vertex* vertices, ReducedHalfEdge* half_edges, Loop* loops, float3* normals, unsigned loops_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i < loops_count; i += stride) {
        if (loops[i].is_border) {
            continue;
        }
        ReducedHalfEdge halfedge_a = half_edges[loops[i].he];
        ReducedHalfEdge halfedge_b = half_edges[halfedge_a.next];
        ReducedHalfEdge halfedge_c = half_edges[halfedge_b.next];
        float3 a = vertices[halfedge_a.origin].position;
        float3 b = vertices[halfedge_b.origin].position;
        float3 c = vertices[halfedge_c.origin].position;
        //bugfix: the face normal is cross(b - a, c - b); previously the second
        //subtraction was applied OUTSIDE the cross product
        //(normalized(cross3df(b - a, c) - b)), yielding a wrong normal.
        normals[i] = normalized(cross3df(b - a, c - b));
    }
}
//Pass 2 of the two-pass normal computation: for every vertex, sums the
//precomputed face normals of all incident non-border loops and normalizes
//the sum. Orphaned vertices (he == -1) are skipped.
__global__ void kernel_calculate_normals_gather_from_loops(Vertex* vertices, ReducedHalfEdge* half_edges, Loop* loops, float3* face_normals, float3* normals, unsigned vertice_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i < vertice_count; i += stride) {
        const auto& vert = vertices[i];
        if (vert.he == -1) {
            //orphaned vertex: no incident faces, leave normals[i] untouched
            continue;
        }
        float3 normal{ 0.f,0.f,0.f };
        int he = vert.he;
        do {//for every neighbor
            const ReducedHalfEdge& halfedge = half_edges[he];
            //skip boundary loops
            if (loops[halfedge.loop].is_border) {
                he = half_edges[halfedge.inv].next;
                continue;
            }
            normal += face_normals[halfedge.loop];
            //advance to the next outgoing half-edge around this vertex
            he = half_edges[halfedge.inv].next;
        } while (he != vert.he);
        normals[i] = normalized(normal);
    }
}
//recalculates normals for every face
//Single-pass gather: for every vertex, accumulates the normalized corner
//normal (cross of the two edges incident at the vertex) of each non-border
//face around it, then normalizes the sum.
//consistency fix: the count parameter was misnamed loops_count although the
//kernel iterates vertices and callers pass vertices.size(); renamed to
//vertice_count to match the sibling kernels (positional call sites unaffected).
__global__ void kernel_calculate_normals_gather(Vertex* vertices, HalfEdge* half_edges,Loop* loops, float3* normals, unsigned vertice_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    //calculate normals
    for (int i = offset; i < vertice_count; i+=stride) {
        auto& vert = vertices[i];
        if (vert.he == -1) {
            //orphaned vertex: leave normals[i] untouched
            continue;
        }
        float3 normal{ 0.f,0.f,0.f };
        int he = vert.he;
        do {//for every neighbor
            HalfEdge& halfedge = half_edges[he];
            //skip boundary loops
            if (loops[halfedge.loop].is_border) {
                he = half_edges[halfedge.inv].next;
                continue;
            }
            float3 point_c = vertices[half_edges[halfedge.inv].origin].position;
            float3 point_a = vertices[half_edges[halfedge.prev].origin].position;
            //float3 point_b = vert.position;
            //corner normal at this vertex: cross of the two incident edge vectors
            float3 edge_vector_ab = vert.position - point_a;
            float3 edge_vector_bc = point_c - vert.position;
            normal += normalized(cross3df(edge_vector_ab, edge_vector_bc));
            he = half_edges[halfedge.inv].next;
        } while (he != vert.he);
        normals[i] = normalized(normal);
    }
}
//Computes the one-ring centroid of every vertex: the average of the positions
//of all directly connected neighbors, gathered by circulating the outgoing
//half-edges. Orphaned vertices (he == -1) are skipped.
__global__ void kernel_calculate_ring_centroids_gather(Vertex* vertices, HalfEdge* half_edges, float3* centroids, unsigned vertice_count) {
    int step = thread_stride();
    for (int v = thread_offset(); v < vertice_count; v += step) {
        auto& vertex = vertices[v];
        //orphaned vertex: no ring to average, leave centroids[v] untouched
        if (vertex.he == -1) {
            continue;
        }
        float3 sum;
        sum.x = 0.f;
        sum.y = 0.f;
        sum.z = 0.f;
        unsigned ring_size = 0;
        int edge = vertex.he;
        for (;;) {//walk the one-ring once
            HalfEdge& out_edge = half_edges[edge];
            HalfEdge& in_edge = half_edges[out_edge.inv];
            sum += vertices[in_edge.origin].position;
            ++ring_size;
            edge = in_edge.next;
            if (edge == vertex.he) {
                break;
            }
        }
        sum.x /= ring_size;
        sum.y /= ring_size;
        sum.z /= ring_size;
        centroids[v] = sum;
    }
}
//Scatter variant of the ring-centroid computation: every face adds, for each
//of its corners, the position of the NEXT corner to that corner's centroid
//accumulator, counting contributions in duped_neighbor_counts (shared edges
//are counted once per adjacent face). A later division finalizes the average.
__global__ void kernel_calculate_ring_centroids_scatter_no_borders(float3* positions, int* faces, int* face_indices, int* face_sizes, float3* centroids, int* duped_neighbor_counts, int face_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i < face_count; i += stride) {
        int base_index = faces[i];
        int face_size = face_sizes[i];
        //circulate trough the face and add it to the centroids
        for (int j = 0; j < face_size; ++j) {
            float3 next = positions[face_indices[base_index + ((j+1) % face_size)]];
            //float3 prev = positions[face_indices[base_index + ((j-1) % face_size)]];
            float3* centroid = centroids+face_indices[base_index+j];
            int* neighbor_count = duped_neighbor_counts+face_indices[base_index+j];
            //bugfix: restore '&centroid->x' — the '&cent' prefix had been
            //mangled into a cent-sign character by encoding corruption,
            //which does not compile
            atomicAdd(&centroid->x, next.x);
            atomicAdd(&centroid->y, next.y);
            atomicAdd(&centroid->z, next.z);
            atomicAdd(neighbor_count, 1);
        }
    }
}
//Scatter variant of the ring-centroid computation over a deduplicated edge
//list: each edge adds either endpoint's position to the other endpoint's
//accumulator and bumps both neighbor counts. Entries with first == -1 are
//padding left over from the unique() compaction and are ignored.
__global__ void kernel_calculate_ring_centroids_scatter(float3* positions, pair<int,int>* edges, float3* centroids, int* neighbor_counts, int edge_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i < edge_count; i += stride) {
        pair<int, int> edge = edges[i];
        if (edge.first > -1) {
            float3* centroid_a = centroids + edge.first;
            float3* centroid_b = centroids + edge.second;
            float3 pa = positions[edge.first];
            float3 pb = positions[edge.second];
            //bugfix: restore '&centroid_a->x' / '&centroid_b->x' — the '&cent'
            //prefix had been mangled into a cent-sign character by encoding
            //corruption, which does not compile
            atomicAdd(&centroid_a->x, pb.x);
            atomicAdd(&centroid_a->y, pb.y);
            atomicAdd(&centroid_a->z, pb.z);
            atomicAdd(&centroid_b->x, pa.x);
            atomicAdd(&centroid_b->y, pa.y);
            atomicAdd(&centroid_b->z, pa.z);
            atomicAdd(neighbor_counts + edge.first, 1);
            atomicAdd(neighbor_counts + edge.second, 1);
        }
    }
}
//Emits one canonical vertex pair (smaller index first) for every edge of
//every face, so that duplicates contributed by adjacent faces can later be
//removed with sort + unique. faces[] holds face start offsets; faces[i+1]
//marks the end of face i, hence the i+1 < face_count loop bound.
__global__ void kernel_find_edges(pair<int,int>* pairs, int* faces, int* face_indices,int face_count) {
    int stride = thread_stride();
    int offset = thread_offset();
    for (int i = offset; i+1<face_count; i += stride) {
        int begin = faces[i];
        int end = faces[i + 1];
        for (int j = begin; j < end; ++j) {
            int a = face_indices[j];
            //the last corner of the face wraps around to the first one
            int b = (j + 1 == end) ? face_indices[begin] : face_indices[j + 1];
            //store in canonical (min, max) order
            pairs[j] = (a > b) ? pair<int, int>(b, a) : pair<int, int>(a, b);
        }
    }
}
//Lexicographic ordering on (first, second) for integer pairs; usable from
//both host and device code.
struct PairLessThan {
    __device__ __host__ bool operator()(const pair<int, int>& a, const pair<int, int>& b) {
        if (a.first != b.first) {
            return a.first < b.first;
        }
        return a.second < b.second;
    }
};
//Two-pass per-vertex normals on the GPU: pass 1 writes one normal per face
//loop, pass 2 gathers them around every vertex. Fills mesh->normals and
//records upload / per-kernel / download timings in `timing`.
//NOTE(review): the name suggests the face-normal kernel is known-broken;
//this wrapper only fixes its own buffer sizing, not the kernel math.
void normals_he_cuda_twopass_broken(HalfedgeMesh* mesh, int threads, int blocks, timing_struct& timing) {
    mesh->normals.resize(mesh->vertices.size()); //prepare vector for normals
    create_reduced_halfedges(*mesh);
    //threads == 0 means "auto": derive launch config from occupancy
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_normals_gather);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    start = std::chrono::steady_clock::now(); //upload time
    thrust::device_vector<ReducedHalfEdge> half_edges(mesh->reduced_half_edges.size());
    thrust::device_vector<Vertex> vertices(mesh->vertices.size());
    thrust::device_vector<Loop> loops(mesh->loops.size());
    thrust::device_vector<float3> normals(mesh->vertices.size());
    thrust::device_vector<float3> face_normals(loops.size(), { 0.f,0.f,0.f });
    //bugfix: the copy size must use sizeof(ReducedHalfEdge); it previously
    //used sizeof(HalfEdge), mis-sizing the reduced half-edge transfer
    cudaMemcpyAsync(half_edges.data().get(), mesh->reduced_half_edges.data(), mesh->reduced_half_edges.size() * sizeof(ReducedHalfEdge), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(vertices.data().get(), mesh->vertices.data(), mesh->vertices.size() * sizeof(Vertex), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(loops.data().get(), mesh->loops.data(), mesh->loops.size() * sizeof(Loop), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    //setup timer
    cudaEvent_t cu_start, cu_stop;
    cudaEventCreate(&cu_start);
    cudaEventCreate(&cu_stop);
    //kernel launch
    pstart = std::chrono::steady_clock::now();
    cudaEventRecord(cu_start);
    kernel_calculate_face_normals_gather<<<blocks, threads>>>(vertices.data().get(),
    half_edges.data().get(), loops.data().get(), face_normals.data().get(), loops.size());
    cudaEventRecord(cu_stop);
    cudaEventSynchronize(cu_stop);
    //cudaDeviceSynchronize();
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    cudaEventRecord(cu_start);
    kernel_calculate_normals_gather_from_loops<<<blocks, threads>>>(vertices.data().get(),
    half_edges.data().get(), loops.data().get(), face_normals.data().get(),normals.data().get(), vertices.size());
    cudaEventRecord(cu_stop);
    cudaEventSynchronize(cu_stop);
    //cudaDeviceSynchronize();
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_b = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    cudaEventDestroy(cu_start);
    cudaEventDestroy(cu_stop);
    start = std::chrono::steady_clock::now();//download time
    thrust::copy(normals.begin(), normals.end(), mesh->normals.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
//Single-pass per-vertex normals from a halfedge mesh on the GPU.
//Fills mesh->normals and records upload / kernel / download timings.
//threads == 0 selects an occupancy-based launch configuration.
void normals_he_cuda(HalfedgeMesh* mesh, int threads,int blocks, timing_struct& timing) {
    mesh->normals.resize(mesh->vertices.size()); //prepare vector for normals
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_normals_gather);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    start = std::chrono::steady_clock::now(); //upload time
    thrust::device_vector<HalfEdge> half_edges(mesh->half_edges.size());
    thrust::device_vector<Vertex> vertices(mesh->vertices.size());
    thrust::device_vector<Loop> loops(mesh->loops.size());
    thrust::device_vector<float3> normals(mesh->vertices.size());
    cudaMemcpyAsync(half_edges.data().get(), mesh->half_edges.data(), mesh->half_edges.size() * sizeof(HalfEdge), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(vertices.data().get(), mesh->vertices.data(), mesh->vertices.size() * sizeof(Vertex), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(loops.data().get(), mesh->loops.data(), mesh->loops.size() * sizeof(Loop), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    //setup timer
    cudaEvent_t cu_start, cu_stop;
    cudaEventCreate(&cu_start);
    cudaEventCreate(&cu_stop);
    //kernel launch; note the count passed is vertices.size() — the kernel
    //iterates vertices even though its parameter is named loops_count
    pstart = std::chrono::steady_clock::now();
    cudaEventRecord(cu_start);
    kernel_calculate_normals_gather<<<blocks,threads>>>(vertices.data().get(),
    half_edges.data().get(),loops.data().get(), normals.data().get(), vertices.size());
    cudaEventRecord(cu_stop);
    cudaEventSynchronize(cu_stop);
    //cudaDeviceSynchronize();
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    cudaEventDestroy(cu_start);
    cudaEventDestroy(cu_stop);
    start = std::chrono::steady_clock::now();//download time
    thrust::copy(normals.begin(), normals.end(), mesh->normals.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
/// normals from a simple mesh
//Per-vertex normals from an indexed face list (SimpleMesh) on the GPU:
//a scatter kernel atomically accumulates face normals per vertex, then a
//second kernel normalizes the sums. Fills mesh->normals and records
//upload / kernel-a / kernel-b / download timings.
void normals_sm_cuda(SimpleMesh* mesh,int threads,int blocks, timing_struct& timing) {
    mesh->normals.resize(mesh->positions.size());
    if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_normals_scatter);
    timing.block_size = threads;
    timing.grid_size = blocks;
    std::chrono::steady_clock::time_point start, stop, pstart, pstop;
    start = std::chrono::steady_clock::now(); //upload time
    thrust::device_vector<float3> positions(mesh->positions.size());
    thrust::device_vector<int> faces(mesh->face_starts.size());
    thrust::device_vector<int> face_indices(mesh->faces.size());
    thrust::device_vector<float3> normals(mesh->positions.size());
    cudaMemcpyAsync(positions.data().get(), mesh->positions.data(), mesh->positions.size() * sizeof(float3), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(faces.data().get(), mesh->face_starts.data(), mesh->face_starts.size() * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(face_indices.data().get(), mesh->faces.data(), mesh->faces.size() * sizeof(int), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    stop = std::chrono::steady_clock::now();
    timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    cudaEvent_t cu_start, cu_stop;
    cudaEventCreate(&cu_start);
    cudaEventCreate(&cu_stop);
    //run kernel (scatter: accumulate unnormalized face normals per vertex)
    pstart = std::chrono::steady_clock::now();
    cudaEventRecord(cu_start);
    kernel_calculate_normals_scatter<<<blocks, threads>>>(positions.data().get(), faces.data().get(), face_indices.data().get(), normals.data().get(), faces.size());
    cudaEventRecord(cu_stop);
    cudaEventSynchronize(cu_stop);
    timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
    //run secound kernel (normalize the accumulated sums in place)
    cudaEventRecord(cu_start);
    kernel_normalize_vectors<<<blocks, threads>>>(normals.data().get(),normals.size());
    cudaEventRecord(cu_stop);
    cudaEventSynchronize(cu_stop);
    pstop = std::chrono::steady_clock::now();
    timing.kernel_execution_time_b = cuda_elapsed_time(cu_start, cu_stop);
    timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
    cudaEventDestroy(cu_start);
    cudaEventDestroy(cu_stop);
    start = std::chrono::steady_clock::now();
    //printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
    thrust::copy(normals.begin(), normals.end(), mesh->normals.begin());
    stop = std::chrono::steady_clock::now();
    timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
// Computes the one-ring centroid of every vertex of a half-edge mesh on the
// GPU (gather approach) and stores the result in 'centroids_array', recording
// per-phase timings in 'timing'. If 'threads' is 0 a launch configuration is
// chosen automatically; otherwise the caller's blocks/threads are used.
//
// Fix: the original also allocated a device_vector<Loop> and uploaded
// mesh->loops, but the gather kernel never reads the loops. The dead upload
// wasted PCIe bandwidth and inflated the measured data_upload_time, so it has
// been removed.
void centroids_he_cuda(HalfedgeMesh* mesh, attribute_vector<float3>& centroids_array, int threads,int blocks, timing_struct& timing) {
centroids_array.resize(mesh->vertices.size());
if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_ring_centroids_gather);
timing.block_size = threads;
timing.grid_size = blocks;
std::chrono::steady_clock::time_point start, stop, pstart, pstop;
// Upload phase: only the connectivity the gather kernel actually reads.
start = std::chrono::steady_clock::now();
thrust::device_vector<HalfEdge> half_edges(mesh->half_edges.size());
thrust::device_vector<Vertex> vertices(mesh->vertices.size());
thrust::device_vector<float3> centroids(mesh->vertices.size(), { 0,0,0 });
cudaMemcpyAsync(half_edges.data().get(), mesh->half_edges.data(), mesh->half_edges.size() * sizeof(HalfEdge), cudaMemcpyHostToDevice);
cudaMemcpyAsync(vertices.data().get(), mesh->vertices.data(), mesh->vertices.size() * sizeof(Vertex), cudaMemcpyHostToDevice);
// Ensure uploads completed before stopping the upload timer.
cudaDeviceSynchronize();
stop = std::chrono::steady_clock::now();
timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
//create events
cudaEvent_t cu_start, cu_stop;
cudaEventCreate(&cu_start);
cudaEventCreate(&cu_stop);
//launch kernel
pstart = std::chrono::steady_clock::now();
cudaEventRecord(cu_start);
kernel_calculate_ring_centroids_gather<<<blocks, threads>>>(vertices.data().get(), half_edges.data().get(), centroids.data().get(), vertices.size());
cudaEventRecord(cu_stop);
cudaEventSynchronize(cu_stop);
pstop = std::chrono::steady_clock::now();
timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
cudaEventDestroy(cu_start);
cudaEventDestroy(cu_stop);
//read back
start = std::chrono::steady_clock::now();
thrust::copy(centroids.begin(), centroids.end(), centroids_array.begin());
stop = std::chrono::steady_clock::now();
timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
// Computes the one-ring centroid of every vertex of a SimpleMesh on the GPU
// (scatter approach): builds an edge list from the faces, deduplicates it,
// scatters position contributions along edges, then divides by the neighbor
// count. Per-phase timings are recorded in 'timing'.
void centroids_sm_cuda(SimpleMesh* mesh, attribute_vector<float3>& centroids_array, int threads,int blocks, timing_struct& timing) {
centroids_array.resize(mesh->positions.size());
if (threads == 0) optimal_configuration(blocks, threads, kernel_calculate_ring_centroids_scatter);
timing.block_size = threads;
timing.grid_size = blocks;
std::chrono::steady_clock::time_point start, stop, pstart, pstop;
//Data Upload Phase
start = std::chrono::steady_clock::now();
thrust::device_vector<float3> positions(mesh->positions.size());
thrust::device_vector<int> faces(mesh->face_starts.size());
thrust::device_vector<int> faces_indices(mesh->faces.size());
thrust::device_vector<float3> centroids(mesh->positions.size(), { 0,0,0 });
thrust::device_vector<int> neighbor_count(mesh->positions.size(), 0);
// Edge slots are pre-filled with the sentinel (-1,-1) so unused slots sort to
// a known place.
thrust::device_vector<pair<int, int>> edges(faces_indices.size() - 1, pair<int, int>(-1, -1));//max size == edgecount <= face_indices - 1
cudaMemcpyAsync(positions.data().get(), mesh->positions.data(), mesh->positions.size() * sizeof(float3), cudaMemcpyHostToDevice);
cudaMemcpyAsync(faces.data().get(), mesh->face_starts.data(), mesh->face_starts.size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpyAsync(faces_indices.data().get(), mesh->faces.data(), mesh->faces.size() * sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
stop = std::chrono::steady_clock::now();
timing.data_upload_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
//Processing phase
cudaEvent_t cu_start, cu_stop;
cudaEventCreate(&cu_start);
cudaEventCreate(&cu_stop);
//prepare edge list
cudaEventRecord(cu_start);
kernel_find_edges<<<blocks,threads>>>(edges.data().get(), faces.data().get(), faces_indices.data().get(), faces.size());
cudaEventRecord(cu_stop);
cudaEventSynchronize(cu_stop);
timing.kernel_execution_time_prepare = cuda_elapsed_time(cu_start, cu_stop);
start = std::chrono::steady_clock::now();
// Sorts the pair<int,int> edges by reinterpreting each pair as a single
// size_t key (faster than sorting pairs). NOTE(review): this assumes
// sizeof(pair<int,int>) == sizeof(size_t) and a layout/endianness where the
// combined key ordering is acceptable -- verify on the target platform.
thrust::sort(thrust::device,reinterpret_cast<size_t*>(edges.data().get()), reinterpret_cast<size_t*>(edges.data().get()+edges.size()));
stop = std::chrono::steady_clock::now();
timing.sorting_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
start = std::chrono::steady_clock::now();
// Deduplicate; 'last' marks the end of the unique range (the vector itself
// is not shrunk). The unique range may still contain one sentinel entry,
// presumably ignored by the scatter kernel -- confirm.
auto last = thrust::unique(thrust::device,edges.begin(), edges.end());
//edges.resize(last-edges.begin());
stop = std::chrono::steady_clock::now();
timing.unique_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
//run kernel
pstart = std::chrono::steady_clock::now();
cudaEventRecord(cu_start);
//kernel_calculate_ring_centroids_scatter_no_borders<<<blocks, threads>>>(positions.data().get(), faces.data().get(),
//	faces_indices.data().get(), faces_sizes.data().get(), centroids.data().get(),neighbor_count.data().get(), faces.size());
kernel_calculate_ring_centroids_scatter<<<blocks, threads>>>(positions.data().get(),edges.data().get(), centroids.data().get(),neighbor_count.data().get(), last - edges.begin());
cudaEventRecord(cu_stop);
cudaEventSynchronize(cu_stop);
timing.kernel_execution_time_a = cuda_elapsed_time(cu_start, cu_stop);
//divide
// Convert the accumulated sums into centroids by dividing by the number of
// contributing neighbors.
cudaEventRecord(cu_start);
kernel_divide<<<blocks, threads>>>(centroids.data().get(), neighbor_count.data().get(), centroids.size());
cudaEventRecord(cu_stop);
cudaEventSynchronize(cu_stop);
pstop = std::chrono::steady_clock::now();
timing.kernel_execution_time_b = cuda_elapsed_time(cu_start, cu_stop);
timing.processing_time = std::chrono::duration_cast<std::chrono::nanoseconds>(pstop - pstart).count();
cudaEventDestroy(cu_start);
cudaEventDestroy(cu_stop);
//copy back phase
start = std::chrono::steady_clock::now();
thrust::copy(centroids.begin(), centroids.end(), centroids_array.begin());
stop = std::chrono::steady_clock::now();
timing.data_download_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
//causes the cuda driver to load to prevent loads on mesuring
void prepare_device() {
// Warm-up launch: forces CUDA context/driver initialization so later timed
// runs do not pay the one-time startup cost.
kernel_train<<<1,256>>>();
// Block until the warm-up kernel has finished before any measurement starts.
cudaDeviceSynchronize();
}
}
|
b3a24d2db70d35a5d1b17e4716c44af9b95cedcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "SequenceVisitor.cuh"
#include "SciFiRawBankDecoderV4.cuh"
DEFINE_EMPTY_SET_ARGUMENTS_SIZE(scifi_raw_bank_decoder_v4_t)
// Sequence step for the SciFi raw-bank decoder (v4), hipified variant:
// configures the kernel launch (one block per selected event, 256 threads),
// binds the device buffers/constants and invokes the decoder.
template<>
void SequenceVisitor::visit<scifi_raw_bank_decoder_v4_t>(
scifi_raw_bank_decoder_v4_t& state,
const scifi_raw_bank_decoder_v4_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
hipStream_t& cuda_stream,
hipEvent_t& cuda_generic_event)
{
// One block per selected event; launch on the sequence's stream.
state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(256), cuda_stream);
state.set_arguments(
arguments.offset<dev_scifi_raw_input>(),
arguments.offset<dev_scifi_raw_input_offsets>(),
arguments.offset<dev_scifi_hit_count>(),
arguments.offset<dev_scifi_hits>(),
arguments.offset<dev_event_list>(),
constants.dev_scifi_geometry,
constants.dev_inv_clus_res);
state.invoke();
}
| b3a24d2db70d35a5d1b17e4716c44af9b95cedcf.cu | #include "SequenceVisitor.cuh"
#include "SciFiRawBankDecoderV4.cuh"
DEFINE_EMPTY_SET_ARGUMENTS_SIZE(scifi_raw_bank_decoder_v4_t)
// Sequence step for the SciFi raw-bank decoder (v4): configures the kernel
// launch (one block per selected event, 256 threads), binds the device
// buffers/constants and invokes the decoder.
template<>
void SequenceVisitor::visit<scifi_raw_bank_decoder_v4_t>(
scifi_raw_bank_decoder_v4_t& state,
const scifi_raw_bank_decoder_v4_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
cudaStream_t& cuda_stream,
cudaEvent_t& cuda_generic_event)
{
// One block per selected event; launch on the sequence's stream.
state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(256), cuda_stream);
state.set_arguments(
arguments.offset<dev_scifi_raw_input>(),
arguments.offset<dev_scifi_raw_input_offsets>(),
arguments.offset<dev_scifi_hit_count>(),
arguments.offset<dev_scifi_hits>(),
arguments.offset<dev_event_list>(),
constants.dev_scifi_geometry,
constants.dev_inv_clus_res);
state.invoke();
}
|
e9e12312c971c8489528c420f3f695a8db8408c1.hip | // !!! This is a file automatically generated by hipify!!!
/*! \file StackResampler.cu
\brief Contains definition of CUDA implementation of a class for spline resampling of irregularly sampled columns in the z-direction
\author Jesper Andersson
\version 1.0b, March, 2016.
*/
//
// StackResampler.cu
//
// Jesper Andersson, FMRIB Image Analysis Group
//
// Copyright (C) 2016 University of Oxford
//
#include <cstdlib>
#include <string>
#include <vector>
#include <cmath>
#include <hip/hip_runtime.h>
#include <thrust/system_error.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/fill.h>
#pragma push
#pragma diag_suppress = code_is_unreachable
#include "newmat.h"
#include "newimage/newimage.h"
#include "EddyHelperClasses.h"
#include "CudaVolume.h"
#pragma pop
#include "StackResampler.h"
#include "EddyKernels.h"
#include "EddyMatrixKernels.h"
#include "EddyFunctors.h"
namespace EDDY {
const dim3 StackResampler::_threads_per_block_WtW_StS = dim3(16,16);
// Spline-resamples a stack of irregularly z-sampled slices, regularized by a
// prediction volume: for each xz-plane it builds per-column irregular spline
// matrices (Wir), prediction weights, and combines the data term with a
// weighted prediction term before solving for the spline coefficients.
// 'lambda' scales the bending-energy penalty (StS). Results end up in _resvol.
StackResampler::StackResampler(const EDDY::CudaVolume& stack,
const EDDY::CudaVolume& zcoord,
const EDDY::CudaVolume& pred,
const EDDY::CudaVolume& mask,
double lambda) EddyTry : _resvol(stack,false), _mask(stack,false)
{
// One linear system per x-column, of size = number of slices.
unsigned int matsz = stack.Size(2);
unsigned int nmat = stack.Size(0);
// Scratch buffers, allocated once and reused for every y (xz-plane).
thrust::device_vector<float> StS_matrix(sqr(matsz));
thrust::device_vector<float> empty_StS_matrix(sqr(matsz));
thrust::device_vector<float> gpu_W_matrix(sqr(matsz));
thrust::device_vector<float> gpu_sorted_zcoord(zcoord.Size());
thrust::device_vector<float> gpu_Wir_matrices(nmat*sqr(matsz));
thrust::device_vector<float> gpu_weights(nmat*matsz);
thrust::device_vector<float> gpu_diagw_p_vectors(nmat*matsz);
thrust::device_vector<float> gpu_diagw_W_matrices(nmat*sqr(matsz));
thrust::device_vector<float> gpu_Wirty_vectors(nmat*matsz);
thrust::device_vector<float> gpu_dWtdp_vectors(nmat*matsz);
thrust::device_vector<float> gpu_WtW_matrices(nmat*sqr(matsz));
thrust::device_vector<float> gpu_dWtdW_matrices(nmat*sqr(matsz));
thrust::device_vector<float> gpu_sum_vectors(nmat*matsz);
thrust::device_vector<float> gpu_sum_matrices(nmat*sqr(matsz));
thrust::device_vector<float> gpu_c_hat_vectors(nmat*matsz);
thrust::device_vector<float> gpu_y_hat_vectors(nmat*matsz);
// Regularization matrix (lambda-scaled) and a zero one for the
// prediction term, which gets no extra penalty.
get_StS(matsz,lambda,StS_matrix);
get_StS(matsz,0.0,empty_StS_matrix);
get_regular_W(matsz,gpu_W_matrix);
make_mask(mask,zcoord,true,_mask);
sort_zcoords(zcoord,true,gpu_sorted_zcoord);
// Process one xz-plane (fixed y) per iteration.
for (unsigned int j=0; j<stack.Size(1); j++) {
make_Wir_matrices(zcoord,j,true,gpu_Wir_matrices);
make_weight_vectors(gpu_sorted_zcoord,stack.Size(0),stack.Size(2),j,true,gpu_weights);
make_diagw_p_vectors(pred,gpu_weights,j,true,gpu_diagw_p_vectors);
make_diagw_W_matrices(gpu_weights,gpu_W_matrix,matsz,nmat,true,gpu_diagw_W_matrices);
make_Wir_t_y_vectors(stack,gpu_Wir_matrices,j,true,gpu_Wirty_vectors);
make_dwWt_dwp_vectors(gpu_diagw_W_matrices,gpu_diagw_p_vectors,matsz,nmat,true,gpu_dWtdp_vectors);
make_WtW_StS_matrices(gpu_Wir_matrices,matsz,nmat,StS_matrix,true,gpu_WtW_matrices);
make_WtW_StS_matrices(gpu_diagw_W_matrices,matsz,nmat,empty_StS_matrix,true,gpu_dWtdW_matrices);
// Sum data-term and prediction-term contributions (RHS vectors and
// normal matrices) elementwise on the device.
thrust::transform(gpu_Wirty_vectors.begin(),gpu_Wirty_vectors.end(),gpu_dWtdp_vectors.begin(),gpu_sum_vectors.begin(),thrust::plus<float>());
thrust::transform(gpu_WtW_matrices.begin(),gpu_WtW_matrices.end(),gpu_dWtdW_matrices.begin(),gpu_sum_matrices.begin(),thrust::plus<float>());
// Solve the per-column systems and evaluate the splines at regular z.
solve_for_c_hat(gpu_sum_matrices,gpu_sum_vectors,matsz,nmat,true,gpu_c_hat_vectors);
make_y_hat_vectors(gpu_W_matrix,gpu_c_hat_vectors,matsz,nmat,true,gpu_y_hat_vectors);
transfer_y_hat_to_volume(gpu_y_hat_vectors,j,true,_resvol);
}
} EddyCatch
// Resamples a stack of irregularly z-sampled slices without a prediction:
// either regularized spline resampling (per-column linear systems) or simple
// column-wise linear interpolation, depending on 'interp'. Results end up in
// _resvol; 'lambda' is only used by the spline branch.
StackResampler::StackResampler(const EDDY::CudaVolume& stack,
const EDDY::CudaVolume& zcoord,
const EDDY::CudaVolume& mask,
NEWIMAGE::interpolation interp,
double lambda) EddyTry : _resvol(stack,false), _mask(stack,false)
{
make_mask(mask,zcoord,true,_mask);
if (interp == NEWIMAGE::spline) {
// One linear system per x-column, of size = number of slices.
unsigned int matsz = stack.Size(2);
unsigned int nmat = stack.Size(0);
thrust::device_vector<float> gpu_StS_matrix(sqr(matsz));
thrust::device_vector<float> gpu_W_matrix(sqr(matsz));
thrust::device_vector<float> gpu_Wir_matrices(nmat*sqr(matsz));
thrust::device_vector<float> gpu_WtW_StS_matrices(nmat*sqr(matsz));
thrust::device_vector<float> gpu_Wirty_vectors(nmat*matsz);
thrust::device_vector<float> gpu_c_hat_vectors(nmat*matsz);
thrust::device_vector<float> gpu_y_hat_vectors(nmat*matsz);
get_StS(matsz,lambda,gpu_StS_matrix);
get_regular_W(matsz,gpu_W_matrix);
// Process one xz-plane (fixed y) per iteration: build the irregular
// spline systems, solve, and evaluate at the regular grid.
for (unsigned int j=0; j<stack.Size(1); j++) {
make_Wir_matrices(zcoord,j,true,gpu_Wir_matrices);
make_WtW_StS_matrices(gpu_Wir_matrices,matsz,nmat,gpu_StS_matrix,true,gpu_WtW_StS_matrices);
make_Wir_t_y_vectors(stack,gpu_Wir_matrices,j,true,gpu_Wirty_vectors);
solve_for_c_hat(gpu_WtW_StS_matrices,gpu_Wirty_vectors,matsz,nmat,true,gpu_c_hat_vectors);
make_y_hat_vectors(gpu_W_matrix,gpu_c_hat_vectors,matsz,nmat,true,gpu_y_hat_vectors);
transfer_y_hat_to_volume(gpu_y_hat_vectors,j,true,_resvol);
}
}
else if (interp == NEWIMAGE::trilinear) {
// Linear branch: sort each column's (z, intensity) samples, then do 1D
// linear interpolation along z for the whole volume at once.
thrust::device_vector<float> gpu_sorted_zcoord(zcoord.Size());
thrust::device_vector<float> gpu_sorted_intensities(zcoord.Size());
thrust::device_vector<float> gpu_interpolated_columns(zcoord.Size(),0.0);
sort_zcoords_and_intensities(zcoord,stack,true,gpu_sorted_zcoord,gpu_sorted_intensities);
linear_interpolate_columns(gpu_sorted_zcoord,gpu_sorted_intensities,zcoord.Size(0),zcoord.Size(1),zcoord.Size(2),true,gpu_interpolated_columns);
transfer_interpolated_columns_to_volume(gpu_interpolated_columns,true,_resvol);
}
else throw EddyException("StackResampler::StackResampler: Invalid interpolation method");
} EddyCatch
// Fills 'StS' (sz x sz, stored per rfindx ordering) with the lambda-scaled
// regularization matrix. Built on the host and copied to the device in one go.
// The stencil per row is (1, -4, 6, -4, 1)*lambda; the first/last two rows
// wrap around to the opposite end (circulant boundary handling).
void StackResampler::get_StS(unsigned int sz, double lambda, thrust::device_vector<float>& StS) const EddyTry
{
float six = lambda * 6.0; float minusfour = - lambda * 4.0; float one = lambda;
thrust::host_vector<float> hStS(StS.size(),0.0);
// First two rows: stencil entries that fall off the left edge wrap to the
// right-hand columns.
hStS[rfindx(0,0,sz)] = six; hStS[rfindx(0,1,sz)] = minusfour; hStS[rfindx(0,2,sz)] = one; hStS[rfindx(0,(sz-2),sz)] = one; hStS[rfindx(0,(sz-1),sz)] = minusfour;
hStS[rfindx(1,0,sz)] = minusfour; hStS[rfindx(1,1,sz)] = six; hStS[rfindx(1,2,sz)] = minusfour; hStS[rfindx(1,3,sz)] = one; hStS[rfindx(1,(sz-1),sz)] = one;
// Interior rows: plain penta-diagonal stencil.
for ( unsigned int i=2; i<(sz-2); i++) {
hStS[rfindx(i,i-2,sz)] = one;
hStS[rfindx(i,i-1,sz)] = minusfour;
hStS[rfindx(i,i,sz)] = six;
hStS[rfindx(i,i+1,sz)] = minusfour;
hStS[rfindx(i,i+2,sz)] = one;
}
// Last two rows: entries that fall off the right edge wrap to the left.
hStS[rfindx((sz-2),0,sz)] = one; hStS[rfindx((sz-2),(sz-4),sz)] = one; hStS[rfindx((sz-2),(sz-3),sz)] = minusfour; hStS[rfindx((sz-2),(sz-2),sz)] = six; hStS[rfindx((sz-2),(sz-1),sz)] = minusfour;
hStS[rfindx((sz-1),0,sz)] = minusfour; hStS[rfindx((sz-1),1,sz)] = one; hStS[rfindx((sz-1),(sz-3),sz)] = one; hStS[rfindx((sz-1),(sz-2),sz)] = minusfour; hStS[rfindx((sz-1),(sz-1),sz)] = six;
// Single host->device transfer.
StS = hStS;
return;
} EddyCatch
// Fills 'W' (sz x sz, stored per rfindx ordering) with the spline weight
// matrix for regularly spaced sample points: tridiagonal with 1/6 on the
// off-diagonals, 4/6 on the interior diagonal and 5/6 in the two corners.
// Assembled on the host, then uploaded to the device in a single transfer.
void StackResampler::get_regular_W(unsigned int sz, thrust::device_vector<float>& W) const EddyTry
{
thrust::host_vector<float> host_W(W.size(),0.0);
const float off = 1.0/6.0;   // off-diagonal weight
const float mid = 4.0/6.0;   // interior diagonal weight
const float corner = 5.0/6.0; // first/last diagonal weight
// First row.
host_W[rfindx(0,0,sz)] = corner;
host_W[rfindx(0,1,sz)] = off;
// Interior rows.
for (unsigned int r=1; r<(sz-1); r++) {
host_W[rfindx(r,r-1,sz)] = off;
host_W[rfindx(r,r,sz)] = mid;
host_W[rfindx(r,r+1,sz)] = off;
}
// Last row.
host_W[rfindx((sz-1),(sz-2),sz)] = off;
host_W[rfindx((sz-1),(sz-1),sz)] = corner;
// Single host->device transfer.
W = host_W;
} EddyCatch
// Builds the output-space mask 'omask' from the input mask and the per-sample
// z-coordinates. Launches one block per y-line with one thread per x. If
// 'sync' is true the launch is checked/synchronized via CudaSync.
void StackResampler::make_mask(const EDDY::CudaVolume& inmask,
const EDDY::CudaVolume& zcoord,
bool sync,
EDDY::CudaVolume& omask) EddyTry
{
// Start from an all-zero mask; the kernel sets the valid voxels.
omask = 0.0;
int nblocks = inmask.Size(1);
int tpb = inmask.Size(0);
hipLaunchKernelGGL(( EddyKernels::make_mask_from_stack), dim3(nblocks),dim3(tpb), 0, 0, inmask.GetPtr(),zcoord.GetPtr(),inmask.Size(0),
inmask.Size(1),inmask.Size(2),omask.GetPtr());
if (sync) EddyKernels::CudaSync("EddyKernels::make_mask_from_stack");
} EddyCatch
// Copies the z-coordinates of 'zcoord' into the flat vector 'szcoord', one
// column (z-profile) per (x,y), and sorts only those columns that were found
// to be out of order. The transfer kernel flags unsorted columns so the
// (expensive) per-column sort runs on as few columns as possible.
void StackResampler::sort_zcoords(const EDDY::CudaVolume& zcoord,
bool sync,
thrust::device_vector<float>& szcoord) const EddyTry
{
// One flag per (x,y) column: non-zero means that column needs sorting.
thrust::device_vector<unsigned int> ns_flags(zcoord.Size(0)*zcoord.Size(1),0);
hipLaunchKernelGGL(( EddyKernels::TransferAndCheckSorting), dim3(zcoord.Size(1)),dim3(zcoord.Size(0)), 0, 0, zcoord.GetPtr(),zcoord.Size(0),
zcoord.Size(1),zcoord.Size(2),
thrust::raw_pointer_cast(szcoord.data()),
thrust::raw_pointer_cast(ns_flags.data()));
EddyKernels::CudaSync("EddyKernels::TransferAndCheckSorting");
// Count how many columns need sorting.
unsigned int nnsort = thrust::reduce(ns_flags.begin(),ns_flags.end(),0,EDDY::Sum<unsigned int,unsigned int>());
if (nnsort) {
// Gather the indices of the unsorted columns on the host.
thrust::host_vector<unsigned int> host_ns_flags = ns_flags;
thrust::host_vector<unsigned int> host_nsort_indx(nnsort,0);
for (unsigned int i=0, n=0; i<zcoord.Size(0)*zcoord.Size(1); i++) {
if (host_ns_flags[i]) { host_nsort_indx[n] = i; n++; }
}
thrust::device_vector<unsigned int> device_nsort_indx = host_nsort_indx;
// One warp-sized block chunk per 32 columns to sort.
int nb = (nnsort / 32) + 1;
hipLaunchKernelGGL(( EddyKernels::SortVectors), dim3(nb),dim3(32), 0, 0, thrust::raw_pointer_cast(device_nsort_indx.data()),nnsort,
zcoord.Size(2),thrust::raw_pointer_cast(szcoord.data()),NULL);
if (sync) EddyKernels::CudaSync("EddyKernels::SortVectors");
}
} EddyCatch
// Like sort_zcoords(), but also carries the intensity values along: copies
// z-coordinates and data into the flat vectors 'szcoord'/'sdata' (one column
// per (x,y)) and co-sorts both arrays for any column whose z-values were out
// of order, keeping each (z, intensity) pair together.
void StackResampler::sort_zcoords_and_intensities(const EDDY::CudaVolume& zcoord,
const EDDY::CudaVolume& data,
bool sync,
thrust::device_vector<float>& szcoord,
thrust::device_vector<float>& sdata) const EddyTry
{
// One flag per (x,y) column: non-zero means that column needs sorting.
thrust::device_vector<unsigned int> ns_flags(zcoord.Size(0)*zcoord.Size(1),0);
hipLaunchKernelGGL(( EddyKernels::TransferAndCheckSorting), dim3(zcoord.Size(1)),dim3(zcoord.Size(0)), 0, 0, zcoord.GetPtr(),zcoord.Size(0),
zcoord.Size(1),zcoord.Size(2),
thrust::raw_pointer_cast(szcoord.data()),
thrust::raw_pointer_cast(ns_flags.data()));
EddyKernels::CudaSync("EddyKernels::TransferAndCheckSorting");
unsigned int nnsort = thrust::reduce(ns_flags.begin(),ns_flags.end(),0,EDDY::Sum<unsigned int,unsigned int>());
// Copy the intensities into their flat column layout as well.
hipLaunchKernelGGL(( EddyKernels::TransferVolumeToVectors), dim3(data.Size(1)),dim3(data.Size(0)), 0, 0, data.GetPtr(),data.Size(0),
data.Size(1),data.Size(2),
thrust::raw_pointer_cast(sdata.data()));
if (nnsort) {
// Gather the indices of the unsorted columns on the host.
thrust::host_vector<unsigned int> host_ns_flags = ns_flags;
thrust::host_vector<unsigned int> host_nsort_indx(nnsort,0);
for (unsigned int i=0, n=0; i<zcoord.Size(0)*zcoord.Size(1); i++) {
if (host_ns_flags[i]) { host_nsort_indx[n] = i; n++; }
}
thrust::device_vector<unsigned int> device_nsort_indx = host_nsort_indx;
int nb = (nnsort / 32) + 1;
// Passing sdata as the second payload makes SortVectors reorder the
// intensities together with the z-coordinates.
hipLaunchKernelGGL(( EddyKernels::SortVectors), dim3(nb),dim3(32), 0, 0, thrust::raw_pointer_cast(device_nsort_indx.data()),nnsort,
zcoord.Size(2),thrust::raw_pointer_cast(szcoord.data()),
thrust::raw_pointer_cast(sdata.data()));
if (sync) EddyKernels::CudaSync("EddyKernels::SortVectors");
}
} EddyCatch
// Runs 1D linear interpolation along z for every (x,y) column: 'zcoord'/'val'
// hold sorted sample positions and values (flat, one column per (x,y)); the
// regular-grid results are written to 'ival'. One block per y, one thread per x.
void StackResampler::linear_interpolate_columns(const thrust::device_vector<float>& zcoord,
const thrust::device_vector<float>& val,
unsigned int xsz,
unsigned int ysz,
unsigned int zsz,
bool sync,
thrust::device_vector<float>& ival) const EddyTry
{
hipLaunchKernelGGL(( EddyKernels::LinearInterpolate), dim3(ysz),dim3(xsz), 0, 0, thrust::raw_pointer_cast(zcoord.data()),
thrust::raw_pointer_cast(val.data()),zsz,
thrust::raw_pointer_cast(ival.data()));
if (sync) EddyKernels::CudaSync("EddyKernels::LinearInterpolate");
} EddyCatch
// Scatters the flat per-column vector 'zcols' (one z-profile per (x,y)) back
// into the volume 'vol'. One block per y-line, one thread per x.
void StackResampler::transfer_interpolated_columns_to_volume(const thrust::device_vector<float>& zcols,
bool sync,
EDDY::CudaVolume& vol) EddyTry
{
hipLaunchKernelGGL(( EddyKernels::TransferColumnsToVolume), dim3(vol.Size(1)),dim3(vol.Size(0)), 0, 0, thrust::raw_pointer_cast(zcols.data()),
vol.Size(0),vol.Size(1),
vol.Size(2),vol.GetPtr());
if (sync) EddyKernels::CudaSync("EddyKernels::TransferColumnsToVolume");
} EddyCatch
// Computes per-sample prediction weights for the xz-plane 'xzp' from the
// sorted z-coordinates; one weight vector of length 'zsz' per x-column is
// written to 'weights'. One block per x, one thread per z.
void StackResampler::make_weight_vectors(const thrust::device_vector<float>& zcoord,
unsigned int xsz,
unsigned int zsz,
unsigned int xzp,
bool sync,
thrust::device_vector<float>& weights) const EddyTry
{
hipLaunchKernelGGL(( EddyKernels::MakeWeights), dim3(xsz),dim3(zsz), 0, 0, thrust::raw_pointer_cast(zcoord.data()),xsz,
zsz,xzp,thrust::raw_pointer_cast(weights.data()));
if (sync) EddyKernels::CudaSync("EddyKernels::MakeWeights");
} EddyCatch
// Writes the flat weight vector 'wvec' (one column per x) into y-slice 'j' of
// the weight volume 'wvol'. One block per x, one thread per z.
void StackResampler::insert_weights(const thrust::device_vector<float>& wvec,
unsigned int j,
bool sync,
EDDY::CudaVolume& wvol) const EddyTry
{
hipLaunchKernelGGL(( EddyKernels::InsertWeights), dim3(wvol.Size(0)),dim3(wvol.Size(2)), 0, 0, thrust::raw_pointer_cast(wvec.data()),j,wvol.Size(0),
wvol.Size(1),wvol.Size(2),wvol.GetPtr());
if (sync) EddyKernels::CudaSync("EddyKernels::InsertWeights");
} EddyCatch
// Forms diag(w)*p for the xz-plane 'xzp': elementwise product of the
// prediction columns of 'pred' with the weight vectors 'wgts', written to
// 'wp'. One block per x, one thread per z.
void StackResampler::make_diagw_p_vectors(const EDDY::CudaVolume& pred,
const thrust::device_vector<float>& wgts,
unsigned int xzp,
bool sync,
thrust::device_vector<float>& wp) const EddyTry
{
hipLaunchKernelGGL(( EddyKernels::MakeDiagwpVecs), dim3(pred.Size(0)),dim3(pred.Size(2)), 0, 0, pred.GetPtr(),thrust::raw_pointer_cast(wgts.data()),
pred.Size(0),pred.Size(1),pred.Size(2),xzp,
thrust::raw_pointer_cast(wp.data()));
if (sync) EddyKernels::CudaSync("EddyKernels::MakeDiagwpVecs");
} EddyCatch
// Forms diag(w)*W for every column: scales the rows of the (shared) regular
// spline matrix 'W' by the per-column weight vectors 'wgts', producing 'nmat'
// stacked matsz x matsz matrices in 'diagwW'. One block per matrix.
void StackResampler::make_diagw_W_matrices(const thrust::device_vector<float>& wgts,
const thrust::device_vector<float>& W,
unsigned int matsz,
unsigned int nmat,
bool sync,
thrust::device_vector<float>& diagwW) const EddyTry
{
hipLaunchKernelGGL(( EddyMatrixKernels::DiagwA), dim3(nmat),dim3(matsz), 0, 0, thrust::raw_pointer_cast(wgts.data()),
thrust::raw_pointer_cast(W.data()),
matsz,matsz,nmat,
thrust::raw_pointer_cast(diagwW.data()));
if (sync) EddyKernels::CudaSync("EddyMatrixKernels::DiagwA");
} EddyCatch
// Computes (diag(w)W)^T * (diag(w)p) for every column: one matrix-transpose-
// times-vector product per stacked matrix, results written to 'dWtdp'.
// One block per matrix/vector pair.
void StackResampler::make_dwWt_dwp_vectors(const thrust::device_vector<float>& dW,
const thrust::device_vector<float>& dp,
unsigned int matsz,
unsigned int nmat,
bool sync,
thrust::device_vector<float>& dWtdp) const EddyTry
{
hipLaunchKernelGGL(( EddyMatrixKernels::Atb), dim3(nmat),dim3(matsz), 0, 0, thrust::raw_pointer_cast(dW.data()),
thrust::raw_pointer_cast(dp.data()),
matsz,matsz,nmat,nmat,
thrust::raw_pointer_cast(dWtdp.data()));
if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Atb");
return;
} EddyCatch
// Builds the irregular spline design matrices Wir for the xz-plane 'xzp':
// one matrix per x-column, evaluated at that column's actual z-coordinates.
// One block per column, _threads_per_block_Wir threads per block.
void StackResampler::make_Wir_matrices(const EDDY::CudaVolume& zcoord,
unsigned int xzp,
bool sync,
thrust::device_vector<float>& Wir) const EddyTry
{
int tpb = _threads_per_block_Wir;
hipLaunchKernelGGL(( EddyMatrixKernels::Wir), dim3(zcoord.Size(0)),dim3(tpb), 0, 0, zcoord.GetPtr(),zcoord.Size(0),zcoord.Size(1),
zcoord.Size(2),zcoord.Size(0),xzp,
thrust::raw_pointer_cast(Wir.data()));
if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Wir");
} EddyCatch
// Computes Wir^T * y for every x-column of the xz-plane 'xzp', where y is the
// corresponding data column of 'y'. Results (one vector per column) are
// written to 'Wirty'. One block per column.
void StackResampler::make_Wir_t_y_vectors(const EDDY::CudaVolume& y,
const thrust::device_vector<float>& Wir,
unsigned int xzp,
bool sync,
thrust::device_vector<float>& Wirty) const EddyTry
{
int tpb = _threads_per_block_Wirty;
hipLaunchKernelGGL(( EddyMatrixKernels::Wirty), dim3(y.Size(0)),dim3(tpb), 0, 0, y.GetPtr(),thrust::raw_pointer_cast(Wir.data()),
y.Size(0),y.Size(1),y.Size(2),y.Size(0),xzp,
thrust::raw_pointer_cast(Wirty.data()));
if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Wirty");
} EddyCatch
// Computes W^T*W + StS for every stacked matrix in 'Wir': the normal matrix
// of each per-column system plus the (shared) regularization matrix 'StS'.
// One block per matrix, 2D thread block.
void StackResampler::make_WtW_StS_matrices(const thrust::device_vector<float>& Wir,
unsigned int mn,
unsigned int nmat,
const thrust::device_vector<float>& StS,
bool sync,
thrust::device_vector<float>& WtW) const EddyTry
{
dim3 block = _threads_per_block_WtW_StS;
hipLaunchKernelGGL(( EddyMatrixKernels::KtK), dim3(nmat),dim3(block), 0, 0, thrust::raw_pointer_cast(Wir.data()),mn,mn,nmat,
thrust::raw_pointer_cast(StS.data()),1.0,true,
thrust::raw_pointer_cast(WtW.data()));
if (sync) EddyKernels::CudaSync("KtK_Kernels::KtK");
return;
} EddyCatch
// Solves (WtW) c = Wty for every stacked n x n system via QR factorization:
// first factorizes each matrix into Q^T and R, then back-substitutes.
// One block per system; the QR kernel needs 2*n floats of shared memory.
void StackResampler::solve_for_c_hat(
const thrust::device_vector<float>& WtW,
const thrust::device_vector<float>& Wty,
unsigned int n,
unsigned int nmat,
bool sync,
thrust::device_vector<float>& chat) const EddyTry
{
// Per-system factorization scratch space.
thrust::device_vector<float> Qt(nmat*n*n);
thrust::device_vector<float> R(nmat*n*n);
size_t sh_mem_sz = 2*n*sizeof(float);
int tpb = _threads_per_block_QR;
hipLaunchKernelGGL(( EddyMatrixKernels::QR), dim3(nmat),dim3(tpb),sh_mem_sz, 0, thrust::raw_pointer_cast(WtW.data()),n,n,nmat,
thrust::raw_pointer_cast(Qt.data()),
thrust::raw_pointer_cast(R.data()));
if (sync) EddyKernels::CudaSync("QR_Kernels::QR");
tpb = _threads_per_block_Solve;
hipLaunchKernelGGL(( EddyMatrixKernels::Solve), dim3(nmat),dim3(tpb), 0, 0, thrust::raw_pointer_cast(Qt.data()),
thrust::raw_pointer_cast(R.data()),
thrust::raw_pointer_cast(Wty.data()),n,n,nmat,
thrust::raw_pointer_cast(chat.data()));
if (sync) EddyKernels::CudaSync("QR_Kernels::Solve");
return;
} EddyCatch
// Evaluates the fitted splines on the regular grid: y_hat = W * c_hat for each
// of the 'nvec' coefficient vectors, using the single shared regular-grid
// spline matrix 'W'. One block per vector.
void StackResampler::make_y_hat_vectors(
const thrust::device_vector<float>& W,
const thrust::device_vector<float>& chat,
unsigned int mn,
unsigned int nvec,
bool sync,
thrust::device_vector<float>& yhat) const EddyTry
{
int tpb = _threads_per_block_yhat;
hipLaunchKernelGGL(( EddyMatrixKernels::Ab), dim3(nvec),dim3(tpb), 0, 0, thrust::raw_pointer_cast(W.data()),
thrust::raw_pointer_cast(chat.data()),
mn,mn,1,nvec,thrust::raw_pointer_cast(yhat.data()));
if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Ab");
return;
} EddyCatch
// Writes the resampled column vectors 'yhat' into y-slice 'xzp' of 'ovol'.
// The grid is sized by ceil-dividing the x-extent by the block size.
void StackResampler::transfer_y_hat_to_volume(
const thrust::device_vector<float>& yhat,
unsigned int xzp,
bool sync,
EDDY::CudaVolume& ovol) const EddyTry
{
int tpb = _threads_per_block_transfer;
// Ceiling division so a partial final block covers the remainder.
int nblocks = (ovol.Size(0)%tpb) ? ovol.Size(0) / tpb + 1 : ovol.Size(0) / tpb;
hipLaunchKernelGGL(( EddyKernels::transfer_y_hat_to_volume), dim3(nblocks),dim3(tpb), 0, 0, thrust::raw_pointer_cast(yhat.data()),ovol.Size(0),
ovol.Size(1),ovol.Size(2),xzp,ovol.GetPtr());
if (sync) EddyKernels::CudaSync("EddyMatrixKernels::transfer_y_hat_to_volume");
return;
} EddyCatch
// Debug helper: copies the m x n matrix stored at element offset 'offs' of the
// flat device vector 'mats' back to the host (layout per rfindx ordering) and
// writes it as an ASCII file named "<fname>.txt".
void StackResampler::write_matrix(const thrust::device_vector<float>& mats,
unsigned int offs,
unsigned int m,
unsigned int n,
const std::string& fname) const EddyTry
{
// Single device->host copy of exactly the requested m*n elements.
thrust::host_vector<float> host_mat(mats.begin() + offs, mats.begin() + offs + m*n);
// Repack into a NEWMAT matrix (1-based indexing) for the ASCII writer.
NEWMAT::Matrix out(m,n);
for (unsigned int r=0; r<m; r++) {
for (unsigned int c=0; c<n; c++) {
out(r+1,c+1) = host_mat[rfindx(r,c,m)];
}
}
MISCMATHS::write_ascii_matrix(fname+".txt",out);
} EddyCatch
// Debug helper: writes each of the 'nmat' stacked m x n matrices in 'mats' to
// its own ASCII file named "<basefname>_NNN.txt" (zero-padded index).
void StackResampler::write_matrices(const thrust::device_vector<float>& mats,
unsigned int nmat,
unsigned int m,
unsigned int n,
const std::string& basefname) const EddyTry
{
char fname[256];
for (unsigned int f=0; f<nmat; f++) {
// snprintf instead of sprintf: a long basefname can no longer overflow
// the fixed buffer (the name is truncated instead of corrupting memory).
snprintf(fname,sizeof(fname),"%s_%03d",basefname.c_str(),f);
write_matrix(mats,f*m*n,m,n,std::string(fname));
}
} EddyCatch
// Debug helper: dumps every intermediate of the prediction-regularized
// resampling for the single column at (x,y) -- input volumes as column
// vectors, plus the per-column matrices/vectors extracted from the flat
// device buffers -- each to its own ASCII file "<bfname>_<suffix>".
void StackResampler::write_debug_info_for_pred_resampling(unsigned int x,
unsigned int y,
const std::string& bfname,
const EDDY::CudaVolume& z,
const EDDY::CudaVolume& g,
const EDDY::CudaVolume& p,
const thrust::device_vector<float>& sz,
const thrust::device_vector<float>& W,
const thrust::device_vector<float>& Wir,
const thrust::device_vector<float>& w,
const thrust::device_vector<float>& wp,
const thrust::device_vector<float>& wW,
const thrust::device_vector<float>& Wirtg,
const thrust::device_vector<float>& wWtwp,
const thrust::device_vector<float>& WirtWir,
const thrust::device_vector<float>& wWtwW,
const thrust::device_vector<float>& sum_vec,
const thrust::device_vector<float>& sum_mat,
const thrust::device_vector<float>& c_hat,
const thrust::device_vector<float>& y_hat) const EddyTry
{
unsigned int xs = z.Size(0);
unsigned int ys = z.Size(1);
unsigned int zs = z.Size(2);
// Extract the (x,y) z-column of each input volume and write it as a vector.
NEWIMAGE::volume<float> tmpvol = z.GetVolume();
NEWMAT::ColumnVector tmpvec(zs);
for (unsigned int k=0; k<zs; k++) tmpvec(k+1) = tmpvol(x,y,k);
std::string tmpfname = bfname + "_z";
MISCMATHS::write_ascii_matrix(tmpfname,tmpvec);
tmpvol = g.GetVolume();
for (unsigned int k=0; k<zs; k++) tmpvec(k+1) = tmpvol(x,y,k);
tmpfname = bfname + "_g";
MISCMATHS::write_ascii_matrix(tmpfname,tmpvec);
tmpvol = p.GetVolume();
for (unsigned int k=0; k<zs; k++) tmpvec(k+1) = tmpvol(x,y,k);
tmpfname = bfname + "_p";
MISCMATHS::write_ascii_matrix(tmpfname,tmpvec);
// Flat device buffers: vectors are indexed per column (offset x*zs, or
// (y*xs+x)*zs for the whole-volume sorted z), matrices at offset x*zs*zs.
tmpfname = bfname + "_sz";
write_matrix(sz,(y*xs+x)*zs,zs,1,tmpfname);
tmpfname = bfname + "_W";
write_matrix(W,0,zs,zs,tmpfname);
tmpfname = bfname + "_Wir";
write_matrix(Wir,x*zs*zs,zs,zs,tmpfname);
tmpfname = bfname + "_wgt";
write_matrix(w,x*zs,zs,1,tmpfname);
tmpfname = bfname + "_wp";
write_matrix(wp,x*zs,zs,1,tmpfname);
tmpfname = bfname + "_wW";
write_matrix(wW,x*zs*zs,zs,zs,tmpfname);
tmpfname = bfname + "_Wirtg";
write_matrix(Wirtg,x*zs,zs,1,tmpfname);
tmpfname = bfname + "_wWtwp";
write_matrix(wWtwp,x*zs,zs,1,tmpfname);
tmpfname = bfname + "_WirtWir";
write_matrix(WirtWir,x*zs*zs,zs,zs,tmpfname);
tmpfname = bfname + "_wWtwW";
write_matrix(wWtwW,x*zs*zs,zs,zs,tmpfname);
tmpfname = bfname + "_sum_vec";
write_matrix(sum_vec,x*zs,zs,1,tmpfname);
tmpfname = bfname + "_sum_mat";
write_matrix(sum_mat,x*zs*zs,zs,zs,tmpfname);
tmpfname = bfname + "_c_hat";
write_matrix(c_hat,x*zs,zs,1,tmpfname);
tmpfname = bfname + "_y_hat";
write_matrix(y_hat,x*zs,zs,1,tmpfname);
} EddyCatch
}
| e9e12312c971c8489528c420f3f695a8db8408c1.cu | /*! \file StackResampler.cu
\brief Contains definition of CUDA implementation of a class for spline resampling of irregularly sampled columns in the z-direction
\author Jesper Andersson
\version 1.0b, March, 2016.
*/
//
// StackResampler.cu
//
// Jesper Andersson, FMRIB Image Analysis Group
//
// Copyright (C) 2016 University of Oxford
//
#include <cstdlib>
#include <string>
#include <vector>
#include <cmath>
#include <cuda.h>
#include <thrust/system_error.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/fill.h>
#pragma push
#pragma diag_suppress = code_is_unreachable
#include "newmat.h"
#include "newimage/newimage.h"
#include "EddyHelperClasses.h"
#include "CudaVolume.h"
#pragma pop
#include "StackResampler.h"
#include "EddyKernels.h"
#include "EddyMatrixKernels.h"
#include "EddyFunctors.h"
namespace EDDY {
const dim3 StackResampler::_threads_per_block_WtW_StS = dim3(16,16);
// Resamples a z-irregular slice stack onto a regular z grid, one cubic
// B-spline fit per (x,y)-column, blending observed data with a prediction.
// For each xz-plane j it assembles, per x-column, the normal equations
//   (Wir'Wir + lambda*S'S + (wW)'(wW)) c = Wir'y + (wW)'(w.*p)
// and solves them with a batched QR, then evaluates y_hat = W*c and writes
// the result into _resvol. The mask is remapped onto the regular grid too.
// NOTE(review): the weight/prediction blending semantics depend on the
// EddyKernels::MakeWeights kernel body, which is not visible here.
StackResampler::StackResampler(const EDDY::CudaVolume&  stack,
                               const EDDY::CudaVolume&  zcoord,
                               const EDDY::CudaVolume&  pred,
                               const EDDY::CudaVolume&  mask,
                               double                   lambda) EddyTry : _resvol(stack,false), _mask(stack,false)
{
  unsigned int matsz = stack.Size(2);   // one spline coefficient per slice
  unsigned int nmat = stack.Size(0);    // one linear system per x-column
  // Scratch buffers shared across all xz-planes (allocated once).
  thrust::device_vector<float> StS_matrix(sqr(matsz));
  thrust::device_vector<float> empty_StS_matrix(sqr(matsz));
  thrust::device_vector<float> gpu_W_matrix(sqr(matsz));
  thrust::device_vector<float> gpu_sorted_zcoord(zcoord.Size());
  thrust::device_vector<float> gpu_Wir_matrices(nmat*sqr(matsz));
  thrust::device_vector<float> gpu_weights(nmat*matsz);
  thrust::device_vector<float> gpu_diagw_p_vectors(nmat*matsz);
  thrust::device_vector<float> gpu_diagw_W_matrices(nmat*sqr(matsz));
  thrust::device_vector<float> gpu_Wirty_vectors(nmat*matsz);
  thrust::device_vector<float> gpu_dWtdp_vectors(nmat*matsz);
  thrust::device_vector<float> gpu_WtW_matrices(nmat*sqr(matsz));
  thrust::device_vector<float> gpu_dWtdW_matrices(nmat*sqr(matsz));
  thrust::device_vector<float> gpu_sum_vectors(nmat*matsz);
  thrust::device_vector<float> gpu_sum_matrices(nmat*sqr(matsz));
  thrust::device_vector<float> gpu_c_hat_vectors(nmat*matsz);
  thrust::device_vector<float> gpu_y_hat_vectors(nmat*matsz);
  get_StS(matsz,lambda,StS_matrix);        // regularisation term
  get_StS(matsz,0.0,empty_StS_matrix);     // zero matrix (no regularisation)
  get_regular_W(matsz,gpu_W_matrix);       // spline basis on the regular grid
  make_mask(mask,zcoord,true,_mask);
  sort_zcoords(zcoord,true,gpu_sorted_zcoord);
  for (unsigned int j=0; j<stack.Size(1); j++) {  // one xz-plane at a time
    make_Wir_matrices(zcoord,j,true,gpu_Wir_matrices);
    make_weight_vectors(gpu_sorted_zcoord,stack.Size(0),stack.Size(2),j,true,gpu_weights);
    make_diagw_p_vectors(pred,gpu_weights,j,true,gpu_diagw_p_vectors);
    make_diagw_W_matrices(gpu_weights,gpu_W_matrix,matsz,nmat,true,gpu_diagw_W_matrices);
    make_Wir_t_y_vectors(stack,gpu_Wir_matrices,j,true,gpu_Wirty_vectors);
    make_dwWt_dwp_vectors(gpu_diagw_W_matrices,gpu_diagw_p_vectors,matsz,nmat,true,gpu_dWtdp_vectors);
    make_WtW_StS_matrices(gpu_Wir_matrices,matsz,nmat,StS_matrix,true,gpu_WtW_matrices);
    make_WtW_StS_matrices(gpu_diagw_W_matrices,matsz,nmat,empty_StS_matrix,true,gpu_dWtdW_matrices);
    // Right-hand side and system matrix are the data + prediction terms added.
    thrust::transform(gpu_Wirty_vectors.begin(),gpu_Wirty_vectors.end(),gpu_dWtdp_vectors.begin(),gpu_sum_vectors.begin(),thrust::plus<float>());
    thrust::transform(gpu_WtW_matrices.begin(),gpu_WtW_matrices.end(),gpu_dWtdW_matrices.begin(),gpu_sum_matrices.begin(),thrust::plus<float>());
    solve_for_c_hat(gpu_sum_matrices,gpu_sum_vectors,matsz,nmat,true,gpu_c_hat_vectors);
    make_y_hat_vectors(gpu_W_matrix,gpu_c_hat_vectors,matsz,nmat,true,gpu_y_hat_vectors);
    transfer_y_hat_to_volume(gpu_y_hat_vectors,j,true,_resvol);
  }
} EddyCatch
// Resamples a z-irregular slice stack onto a regular grid without a
// prediction. Two paths: regularised per-column cubic B-spline fit
// (interp == spline, penalty weight lambda) or column-wise linear
// interpolation after sorting the samples in z (interp == trilinear).
// Throws for any other interpolation method.
StackResampler::StackResampler(const EDDY::CudaVolume&  stack,
                               const EDDY::CudaVolume&  zcoord,
                               const EDDY::CudaVolume&  mask,
                               NEWIMAGE::interpolation  interp,
                               double                   lambda) EddyTry : _resvol(stack,false), _mask(stack,false)
{
  make_mask(mask,zcoord,true,_mask);
  if (interp == NEWIMAGE::spline) {
    unsigned int matsz = stack.Size(2);
    unsigned int nmat = stack.Size(0);
    thrust::device_vector<float> gpu_StS_matrix(sqr(matsz));
    thrust::device_vector<float> gpu_W_matrix(sqr(matsz));
    thrust::device_vector<float> gpu_Wir_matrices(nmat*sqr(matsz));
    thrust::device_vector<float> gpu_WtW_StS_matrices(nmat*sqr(matsz));
    thrust::device_vector<float> gpu_Wirty_vectors(nmat*matsz);
    thrust::device_vector<float> gpu_c_hat_vectors(nmat*matsz);
    thrust::device_vector<float> gpu_y_hat_vectors(nmat*matsz);
    get_StS(matsz,lambda,gpu_StS_matrix);
    get_regular_W(matsz,gpu_W_matrix);
    for (unsigned int j=0; j<stack.Size(1); j++) {  // one xz-plane at a time
      // Solve (Wir'Wir + lambda*S'S) c = Wir'y per x-column, then y_hat = W*c.
      make_Wir_matrices(zcoord,j,true,gpu_Wir_matrices);
      make_WtW_StS_matrices(gpu_Wir_matrices,matsz,nmat,gpu_StS_matrix,true,gpu_WtW_StS_matrices);
      make_Wir_t_y_vectors(stack,gpu_Wir_matrices,j,true,gpu_Wirty_vectors);
      solve_for_c_hat(gpu_WtW_StS_matrices,gpu_Wirty_vectors,matsz,nmat,true,gpu_c_hat_vectors);
      make_y_hat_vectors(gpu_W_matrix,gpu_c_hat_vectors,matsz,nmat,true,gpu_y_hat_vectors);
      transfer_y_hat_to_volume(gpu_y_hat_vectors,j,true,_resvol);
    }
  }
  else if (interp == NEWIMAGE::trilinear) {
    thrust::device_vector<float> gpu_sorted_zcoord(zcoord.Size());
    thrust::device_vector<float> gpu_sorted_intensities(zcoord.Size());
    thrust::device_vector<float> gpu_interpolated_columns(zcoord.Size(),0.0);
    sort_zcoords_and_intensities(zcoord,stack,true,gpu_sorted_zcoord,gpu_sorted_intensities);
    linear_interpolate_columns(gpu_sorted_zcoord,gpu_sorted_intensities,zcoord.Size(0),zcoord.Size(1),zcoord.Size(2),true,gpu_interpolated_columns);
    transfer_interpolated_columns_to_volume(gpu_interpolated_columns,true,_resvol);
  }
  else throw EddyException("StackResampler::StackResampler: Invalid interpolation method");
} EddyCatch
// Builds the lambda-scaled penalty matrix S'S (sz x sz, layout per rfindx)
// on the host and copies it to the device. Interior rows carry the
// pentadiagonal stencil lambda*[1 -4 6 -4 1]; the first/last two rows also
// set wrap-around corner entries (circular boundary handling).
// NOTE(review): the stencil matches a squared second-difference penalty —
// confirm against the accompanying theory/docs.
void StackResampler::get_StS(unsigned int sz, double lambda, thrust::device_vector<float>& StS) const EddyTry
{
  float six = lambda * 6.0; float minusfour = - lambda * 4.0; float one = lambda;
  thrust::host_vector<float> hStS(StS.size(),0.0);
  // First two rows (with wrap-around entries in the last columns).
  hStS[rfindx(0,0,sz)] = six; hStS[rfindx(0,1,sz)] = minusfour; hStS[rfindx(0,2,sz)] = one; hStS[rfindx(0,(sz-2),sz)] = one; hStS[rfindx(0,(sz-1),sz)] = minusfour;
  hStS[rfindx(1,0,sz)] = minusfour; hStS[rfindx(1,1,sz)] = six; hStS[rfindx(1,2,sz)] = minusfour; hStS[rfindx(1,3,sz)] = one; hStS[rfindx(1,(sz-1),sz)] = one;
  // Interior pentadiagonal rows.
  for ( unsigned int i=2; i<(sz-2); i++) {
    hStS[rfindx(i,i-2,sz)] = one;
    hStS[rfindx(i,i-1,sz)] = minusfour;
    hStS[rfindx(i,i,sz)] = six;
    hStS[rfindx(i,i+1,sz)] = minusfour;
    hStS[rfindx(i,i+2,sz)] = one;
  }
  // Last two rows (with wrap-around entries in the first columns).
  hStS[rfindx((sz-2),0,sz)] = one; hStS[rfindx((sz-2),(sz-4),sz)] = one; hStS[rfindx((sz-2),(sz-3),sz)] = minusfour; hStS[rfindx((sz-2),(sz-2),sz)] = six; hStS[rfindx((sz-2),(sz-1),sz)] = minusfour;
  hStS[rfindx((sz-1),0,sz)] = minusfour; hStS[rfindx((sz-1),1,sz)] = one; hStS[rfindx((sz-1),(sz-3),sz)] = one; hStS[rfindx((sz-1),(sz-2),sz)] = minusfour; hStS[rfindx((sz-1),(sz-1),sz)] = six;
  StS = hStS;
  return;
} EddyCatch
// Builds the cubic B-spline design matrix for a regular (unit-spaced) grid on
// the host and copies it to the device. Every row holds the [1 4 1]/6 stencil
// around the diagonal; the first and last rows fold the off-grid neighbour
// into the diagonal, giving 5/6 there.
void StackResampler::get_regular_W(unsigned int sz, thrust::device_vector<float>& W) const EddyTry
{
  thrust::host_vector<float> host_W(W.size(),0.0);
  for ( unsigned int r=0; r<sz; r++) {
    if (r > 0) host_W[rfindx(r,r-1,sz)] = 1.0/6.0;
    host_W[rfindx(r,r,sz)] = (r == 0 || r+1 == sz) ? 5.0/6.0 : 4.0/6.0;
    if (r+1 < sz) host_W[rfindx(r,r+1,sz)] = 1.0/6.0;
  }
  W = host_W;
  return;
} EddyCatch
// Maps the input mask onto the regular z grid given the per-voxel z
// coordinates: omask is zero-filled, then one kernel block per y-line
// (one thread per x) sets the output mask. sync adds a device sync with
// error reporting after the launch.
void StackResampler::make_mask(const EDDY::CudaVolume&  inmask,
                               const EDDY::CudaVolume&  zcoord,
                               bool                     sync,
                               EDDY::CudaVolume&        omask) EddyTry
{
  omask = 0.0;
  int nblocks = inmask.Size(1);   // one block per y
  int tpb = inmask.Size(0);       // one thread per x
  EddyKernels::make_mask_from_stack<<<nblocks,tpb>>>(inmask.GetPtr(),zcoord.GetPtr(),inmask.Size(0),
                                                     inmask.Size(1),inmask.Size(2),omask.GetPtr());
  if (sync) EddyKernels::CudaSync("EddyKernels::make_mask_from_stack");
} EddyCatch
// Copies the z coordinates into per-column vectors (szcoord) and flags any
// column that is not already sorted in z; only the flagged columns are then
// explicitly sorted (32 columns per block in the sort kernel).
void StackResampler::sort_zcoords(const EDDY::CudaVolume&        zcoord,
                                  bool                           sync,
                                  thrust::device_vector<float>&  szcoord) const EddyTry
{
  // One flag per (x,y) column; set if the column needs sorting.
  thrust::device_vector<unsigned int> ns_flags(zcoord.Size(0)*zcoord.Size(1),0);
  EddyKernels::TransferAndCheckSorting<<<zcoord.Size(1),zcoord.Size(0)>>>(zcoord.GetPtr(),zcoord.Size(0),
                                                                          zcoord.Size(1),zcoord.Size(2),
                                                                          thrust::raw_pointer_cast(szcoord.data()),
                                                                          thrust::raw_pointer_cast(ns_flags.data()));
  EddyKernels::CudaSync("EddyKernels::TransferAndCheckSorting");
  unsigned int nnsort = thrust::reduce(ns_flags.begin(),ns_flags.end(),0,EDDY::Sum<unsigned int,unsigned int>());
  if (nnsort) { // at least one column needs an explicit sort
    // Gather the indices of the unsorted columns on the host.
    thrust::host_vector<unsigned int> host_ns_flags = ns_flags;
    thrust::host_vector<unsigned int> host_nsort_indx(nnsort,0);
    for (unsigned int i=0, n=0; i<zcoord.Size(0)*zcoord.Size(1); i++) {
      if (host_ns_flags[i]) { host_nsort_indx[n] = i; n++; }
    }
    thrust::device_vector<unsigned int> device_nsort_indx = host_nsort_indx;
    int nb = (nnsort / 32) + 1;
    EddyKernels::SortVectors<<<nb,32>>>(thrust::raw_pointer_cast(device_nsort_indx.data()),nnsort,
                                        zcoord.Size(2),thrust::raw_pointer_cast(szcoord.data()),NULL);
    if (sync) EddyKernels::CudaSync("EddyKernels::SortVectors");
  }
} EddyCatch
// Same as sort_zcoords, but also transfers the intensity volume into
// per-column vectors (sdata) and co-sorts them with the z coordinates so
// (szcoord[i], sdata[i]) pairs stay matched.
void StackResampler::sort_zcoords_and_intensities(const EDDY::CudaVolume&        zcoord,
                                                  const EDDY::CudaVolume&        data,
                                                  bool                           sync,
                                                  thrust::device_vector<float>&  szcoord,
                                                  thrust::device_vector<float>&  sdata) const EddyTry
{
  thrust::device_vector<unsigned int> ns_flags(zcoord.Size(0)*zcoord.Size(1),0);
  EddyKernels::TransferAndCheckSorting<<<zcoord.Size(1),zcoord.Size(0)>>>(zcoord.GetPtr(),zcoord.Size(0),
                                                                          zcoord.Size(1),zcoord.Size(2),
                                                                          thrust::raw_pointer_cast(szcoord.data()),
                                                                          thrust::raw_pointer_cast(ns_flags.data()));
  EddyKernels::CudaSync("EddyKernels::TransferAndCheckSorting");
  unsigned int nnsort = thrust::reduce(ns_flags.begin(),ns_flags.end(),0,EDDY::Sum<unsigned int,unsigned int>());
  // Intensities are copied unconditionally; they are only reordered below if
  // some column was found to be unsorted.
  EddyKernels::TransferVolumeToVectors<<<data.Size(1),data.Size(0)>>>(data.GetPtr(),data.Size(0),
                                                                      data.Size(1),data.Size(2),
                                                                      thrust::raw_pointer_cast(sdata.data()));
  if (nnsort) {
    thrust::host_vector<unsigned int> host_ns_flags = ns_flags;
    thrust::host_vector<unsigned int> host_nsort_indx(nnsort,0);
    for (unsigned int i=0, n=0; i<zcoord.Size(0)*zcoord.Size(1); i++) {
      if (host_ns_flags[i]) { host_nsort_indx[n] = i; n++; }
    }
    thrust::device_vector<unsigned int> device_nsort_indx = host_nsort_indx;
    int nb = (nnsort / 32) + 1;
    // Passing sdata as the second payload makes SortVectors reorder it
    // together with szcoord.
    EddyKernels::SortVectors<<<nb,32>>>(thrust::raw_pointer_cast(device_nsort_indx.data()),nnsort,
                                        zcoord.Size(2),thrust::raw_pointer_cast(szcoord.data()),
                                        thrust::raw_pointer_cast(sdata.data()));
    if (sync) EddyKernels::CudaSync("EddyKernels::SortVectors");
  }
} EddyCatch
// Linearly interpolates each (sorted) z column of val at the regular grid
// positions; one block per y-line, one thread per x-column.
void StackResampler::linear_interpolate_columns(const thrust::device_vector<float>&  zcoord,
                                                const thrust::device_vector<float>&  val,
                                                unsigned int                         xsz,
                                                unsigned int                         ysz,
                                                unsigned int                         zsz,
                                                bool                                 sync,
                                                thrust::device_vector<float>&        ival) const EddyTry
{
  EddyKernels::LinearInterpolate<<<ysz,xsz>>>(thrust::raw_pointer_cast(zcoord.data()),
                                              thrust::raw_pointer_cast(val.data()),zsz,
                                              thrust::raw_pointer_cast(ival.data()));
  if (sync) EddyKernels::CudaSync("EddyKernels::LinearInterpolate");
} EddyCatch
// Scatters the per-column interpolated values back into the (x,y,z) layout
// of a CudaVolume; one block per y-line, one thread per x-column.
void StackResampler::transfer_interpolated_columns_to_volume(const thrust::device_vector<float>& zcols,
                                                             bool                                sync,
                                                             EDDY::CudaVolume&                   vol) EddyTry
{
  EddyKernels::TransferColumnsToVolume<<<vol.Size(1),vol.Size(0)>>>(thrust::raw_pointer_cast(zcols.data()),
                                                                    vol.Size(0),vol.Size(1),
                                                                    vol.Size(2),vol.GetPtr());
  if (sync) EddyKernels::CudaSync("EddyKernels::TransferColumnsToVolume");
} EddyCatch
// Computes one weight vector per x-column of xz-plane xzp from the sorted z
// coordinates; one block per x-column, one thread per slice.
// NOTE(review): the weighting scheme itself lives in EddyKernels::MakeWeights,
// not visible here.
void StackResampler::make_weight_vectors(const thrust::device_vector<float>& zcoord,
                                         unsigned int                        xsz,
                                         unsigned int                        zsz,
                                         unsigned int                        xzp,
                                         bool                                sync,
                                         thrust::device_vector<float>&       weights) const EddyTry
{
  EddyKernels::MakeWeights<<<xsz,zsz>>>(thrust::raw_pointer_cast(zcoord.data()),xsz,
                                        zsz,xzp,thrust::raw_pointer_cast(weights.data()));
  if (sync) EddyKernels::CudaSync("EddyKernels::MakeWeights");
} EddyCatch
// Writes the weight vectors for xz-plane j into the matching plane of a
// weight volume (debug/inspection helper).
void StackResampler::insert_weights(const thrust::device_vector<float>& wvec,
                                    unsigned int                        j,
                                    bool                                sync,
                                    EDDY::CudaVolume&                   wvol) const EddyTry
{
  EddyKernels::InsertWeights<<<wvol.Size(0),wvol.Size(2)>>>(thrust::raw_pointer_cast(wvec.data()),j,wvol.Size(0),
                                                            wvol.Size(1),wvol.Size(2),wvol.GetPtr());
  if (sync) EddyKernels::CudaSync("EddyKernels::InsertWeights");
} EddyCatch
// Builds diag(w)*p vectors for xz-plane xzp: each prediction column scaled
// elementwise by its weight vector.
void StackResampler::make_diagw_p_vectors(const EDDY::CudaVolume&              pred,
                                          const thrust::device_vector<float>&  wgts,
                                          unsigned int                         xzp,
                                          bool                                 sync,
                                          thrust::device_vector<float>&        wp) const EddyTry
{
  EddyKernels::MakeDiagwpVecs<<<pred.Size(0),pred.Size(2)>>>(pred.GetPtr(),thrust::raw_pointer_cast(wgts.data()),
                                                             pred.Size(0),pred.Size(1),pred.Size(2),xzp,
                                                             thrust::raw_pointer_cast(wp.data()));
  if (sync) EddyKernels::CudaSync("EddyKernels::MakeDiagwpVecs");
} EddyCatch
// Builds nmat copies of diag(w)*W: the regular spline matrix W row-scaled by
// each column's weight vector.
void StackResampler::make_diagw_W_matrices(const thrust::device_vector<float>& wgts,
                                           const thrust::device_vector<float>& W,
                                           unsigned int                        matsz,
                                           unsigned int                        nmat,
                                           bool                                sync,
                                           thrust::device_vector<float>&       diagwW) const EddyTry
{
  EddyMatrixKernels::DiagwA<<<nmat,matsz>>>(thrust::raw_pointer_cast(wgts.data()),
                                            thrust::raw_pointer_cast(W.data()),
                                            matsz,matsz,nmat,
                                            thrust::raw_pointer_cast(diagwW.data()));
  if (sync) EddyKernels::CudaSync("EddyMatrixKernels::DiagwA");
} EddyCatch
// Computes (diag(w)W)' * (diag(w)p) for each of the nmat systems — the
// prediction term of the normal-equation right-hand side.
void StackResampler::make_dwWt_dwp_vectors(const thrust::device_vector<float>& dW,
                                           const thrust::device_vector<float>& dp,
                                           unsigned int                        matsz,
                                           unsigned int                        nmat,
                                           bool                                sync,
                                           thrust::device_vector<float>&       dWtdp) const EddyTry
{
  EddyMatrixKernels::Atb<<<nmat,matsz>>>(thrust::raw_pointer_cast(dW.data()),
                                         thrust::raw_pointer_cast(dp.data()),
                                         matsz,matsz,nmat,nmat,
                                         thrust::raw_pointer_cast(dWtdp.data()));
  if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Atb");
  return;
} EddyCatch
// Builds the irregular-grid spline matrices Wir (one per x-column of
// xz-plane xzp) from the per-voxel z coordinates.
void StackResampler::make_Wir_matrices(const EDDY::CudaVolume&        zcoord,
                                       unsigned int                   xzp,
                                       bool                           sync,
                                       thrust::device_vector<float>&  Wir) const EddyTry
{
  int tpb = _threads_per_block_Wir;
  EddyMatrixKernels::Wir<<<zcoord.Size(0),tpb>>>(zcoord.GetPtr(),zcoord.Size(0),zcoord.Size(1),
                                                 zcoord.Size(2),zcoord.Size(0),xzp,
                                                 thrust::raw_pointer_cast(Wir.data()));
  if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Wir");
} EddyCatch
// Computes Wir' * y for each x-column of xz-plane xzp — the data term of
// the normal-equation right-hand side.
void StackResampler::make_Wir_t_y_vectors(const EDDY::CudaVolume&              y,
                                          const thrust::device_vector<float>&  Wir,
                                          unsigned int                         xzp,
                                          bool                                 sync,
                                          thrust::device_vector<float>&        Wirty) const EddyTry
{
  int tpb = _threads_per_block_Wirty;
  EddyMatrixKernels::Wirty<<<y.Size(0),tpb>>>(y.GetPtr(),thrust::raw_pointer_cast(Wir.data()),
                                              y.Size(0),y.Size(1),y.Size(2),y.Size(0),xzp,
                                              thrust::raw_pointer_cast(Wirty.data()));
  if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Wirty");
} EddyCatch
// Computes Wir'Wir + StS for each of the nmat systems (mn x mn matrices);
// pass a zero StS to get plain Wir'Wir.
void StackResampler::make_WtW_StS_matrices(const thrust::device_vector<float>&  Wir,
                                           unsigned int                         mn,
                                           unsigned int                         nmat,
                                           const thrust::device_vector<float>&  StS,
                                           bool                                 sync,
                                           thrust::device_vector<float>&        WtW) const EddyTry
{
  dim3 block = _threads_per_block_WtW_StS;
  EddyMatrixKernels::KtK<<<nmat,block>>>(thrust::raw_pointer_cast(Wir.data()),mn,mn,nmat,
                                         thrust::raw_pointer_cast(StS.data()),1.0,true,
                                         thrust::raw_pointer_cast(WtW.data()));
  if (sync) EddyKernels::CudaSync("KtK_Kernels::KtK");
  return;
} EddyCatch
// Solves the nmat linear systems WtW * chat = Wty via a batched QR
// factorisation (WtW = Q R, then back-substitution). The QR kernel uses
// 2*n floats of dynamic shared memory per block.
void StackResampler::solve_for_c_hat(// Input
                                     const thrust::device_vector<float>& WtW,   // system matrices (n x n each)
                                     const thrust::device_vector<float>& Wty,   // right-hand sides
                                     unsigned int                        n,
                                     unsigned int                        nmat,
                                     bool                                sync,
                                     // Output
                                     thrust::device_vector<float>&       chat) const EddyTry
{
  thrust::device_vector<float> Qt(nmat*n*n);   // transposed Q factors
  thrust::device_vector<float> R(nmat*n*n);    // R factors
  size_t sh_mem_sz = 2*n*sizeof(float);
  int tpb = _threads_per_block_QR;
  EddyMatrixKernels::QR<<<nmat,tpb,sh_mem_sz>>>(thrust::raw_pointer_cast(WtW.data()),n,n,nmat,
                                                thrust::raw_pointer_cast(Qt.data()),
                                                thrust::raw_pointer_cast(R.data()));
  if (sync) EddyKernels::CudaSync("QR_Kernels::QR");
  tpb = _threads_per_block_Solve;
  EddyMatrixKernels::Solve<<<nmat,tpb>>>(thrust::raw_pointer_cast(Qt.data()),
                                         thrust::raw_pointer_cast(R.data()),
                                         thrust::raw_pointer_cast(Wty.data()),n,n,nmat,
                                         thrust::raw_pointer_cast(chat.data()));
  if (sync) EddyKernels::CudaSync("QR_Kernels::Solve");
  return;
} EddyCatch
// Evaluates the fitted splines on the regular grid: yhat = W * chat for each
// of the nvec coefficient vectors.
void StackResampler::make_y_hat_vectors(// Input
                                        const thrust::device_vector<float>& W,     // regular spline matrix (mn x mn)
                                        const thrust::device_vector<float>& chat,  // spline coefficients
                                        unsigned int                        mn,
                                        unsigned int                        nvec,
                                        bool                                sync,
                                        // Output
                                        thrust::device_vector<float>&       yhat) const EddyTry
{
  int tpb = _threads_per_block_yhat;
  EddyMatrixKernels::Ab<<<nvec,tpb>>>(thrust::raw_pointer_cast(W.data()),
                                      thrust::raw_pointer_cast(chat.data()),
                                      mn,mn,1,nvec,thrust::raw_pointer_cast(yhat.data()));
  if (sync) EddyKernels::CudaSync("EddyMatrixKernels::Ab");
  return;
} EddyCatch
// Writes the resampled columns yhat into xz-plane xzp of ovol; grid is
// sized to cover all x-columns with _threads_per_block_transfer threads each.
void StackResampler::transfer_y_hat_to_volume(// Input
                                              const thrust::device_vector<float>& yhat,
                                              unsigned int                        xzp,
                                              bool                                sync,
                                              // Output
                                              EDDY::CudaVolume&                   ovol) const EddyTry
{
  int tpb = _threads_per_block_transfer;
  int nblocks = (ovol.Size(0)%tpb) ? ovol.Size(0) / tpb + 1 : ovol.Size(0) / tpb;  // ceil-div
  EddyKernels::transfer_y_hat_to_volume<<<nblocks,tpb>>>(thrust::raw_pointer_cast(yhat.data()),ovol.Size(0),
                                                         ovol.Size(1),ovol.Size(2),xzp,ovol.GetPtr());
  if (sync) EddyKernels::CudaSync("EddyMatrixKernels::transfer_y_hat_to_volume");
  return;
} EddyCatch
// Debug helper: copies one m x n matrix (stored starting at element offs,
// element layout defined by rfindx) from the device vector to the host and
// writes it as <fname>.txt. NEWMAT matrices are 1-based, hence the +1 indices.
void StackResampler::write_matrix(const thrust::device_vector<float>& mats,
                                  unsigned int                        offs,
                                  unsigned int                        m,
                                  unsigned int                        n,
                                  const std::string&                  fname) const EddyTry
{
  thrust::device_vector<float>::const_iterator first = mats.begin() + offs;
  thrust::device_vector<float>::const_iterator last = mats.begin() + offs + m*n;
  thrust::host_vector<float> mat(first,last);   // device -> host copy
  NEWMAT::Matrix newmat(m,n);
  for (unsigned int i=0; i<m; i++) {
    for (unsigned int j=0; j<n; j++) {
      newmat(i+1,j+1) = mat[rfindx(i,j,m)];
    }
  }
  MISCMATHS::write_ascii_matrix(fname+std::string(".txt"),newmat);
} EddyCatch
// Debug helper: writes each of the nmat stacked m x n matrices in mats to
// its own ASCII file named <basefname>_000, <basefname>_001, ...
void StackResampler::write_matrices(const thrust::device_vector<float>& mats,
                                    unsigned int                        nmat,
                                    unsigned int                        m,
                                    unsigned int                        n,
                                    const std::string&                  basefname) const EddyTry
{
  char fname[256];
  for (unsigned int f=0; f<nmat; f++) {
    // snprintf guards against overflowing the fixed-size buffer when
    // basefname is long; the previous sprintf would silently overrun it.
    snprintf(fname,sizeof(fname),"%s_%03d",basefname.c_str(),f);
    write_matrix(mats,f*m*n,m,n,std::string(fname));
  }
} EddyCatch
// Debug helper: dumps every intermediate of the prediction-based spline
// resampling for one (x,y) column as ASCII files named <bfname>_<tag>.
// Per-column vectors are zs long; per-column matrices are zs x zs, stacked
// along x (hence the x*zs and x*zs*zs offsets).
void StackResampler::write_debug_info_for_pred_resampling(unsigned int x,
                                                          unsigned int y,
                                                          const std::string& bfname,
                                                          const EDDY::CudaVolume& z,
                                                          const EDDY::CudaVolume& g,
                                                          const EDDY::CudaVolume& p,
                                                          const thrust::device_vector<float>& sz,
                                                          const thrust::device_vector<float>& W,
                                                          const thrust::device_vector<float>& Wir,
                                                          const thrust::device_vector<float>& w,
                                                          const thrust::device_vector<float>& wp,
                                                          const thrust::device_vector<float>& wW,
                                                          const thrust::device_vector<float>& Wirtg,
                                                          const thrust::device_vector<float>& wWtwp,
                                                          const thrust::device_vector<float>& WirtWir,
                                                          const thrust::device_vector<float>& wWtwW,
                                                          const thrust::device_vector<float>& sum_vec,
                                                          const thrust::device_vector<float>& sum_mat,
                                                          const thrust::device_vector<float>& c_hat,
                                                          const thrust::device_vector<float>& y_hat) const EddyTry
{
  unsigned int xs = z.Size(0);
  unsigned int ys = z.Size(1);  // NOTE(review): set but never used below
  unsigned int zs = z.Size(2);
  // Column extracts from the three volumes (z coords, data, prediction).
  NEWIMAGE::volume<float> tmpvol = z.GetVolume();
  NEWMAT::ColumnVector tmpvec(zs);
  for (unsigned int k=0; k<zs; k++) tmpvec(k+1) = tmpvol(x,y,k);
  std::string tmpfname = bfname + "_z";
  MISCMATHS::write_ascii_matrix(tmpfname,tmpvec);
  tmpvol = g.GetVolume();
  for (unsigned int k=0; k<zs; k++) tmpvec(k+1) = tmpvol(x,y,k);
  tmpfname = bfname + "_g";
  MISCMATHS::write_ascii_matrix(tmpfname,tmpvec);
  tmpvol = p.GetVolume();
  for (unsigned int k=0; k<zs; k++) tmpvec(k+1) = tmpvol(x,y,k);
  tmpfname = bfname + "_p";
  MISCMATHS::write_ascii_matrix(tmpfname,tmpvec);
  // Device-side intermediates for this column.
  tmpfname = bfname + "_sz";
  write_matrix(sz,(y*xs+x)*zs,zs,1,tmpfname);   // sorted z coords (stacked over x and y)
  tmpfname = bfname + "_W";
  write_matrix(W,0,zs,zs,tmpfname);             // shared regular spline matrix
  tmpfname = bfname + "_Wir";
  write_matrix(Wir,x*zs*zs,zs,zs,tmpfname);
  tmpfname = bfname + "_wgt";
  write_matrix(w,x*zs,zs,1,tmpfname);
  tmpfname = bfname + "_wp";
  write_matrix(wp,x*zs,zs,1,tmpfname);
  tmpfname = bfname + "_wW";
  write_matrix(wW,x*zs*zs,zs,zs,tmpfname);
  tmpfname = bfname + "_Wirtg";
  write_matrix(Wirtg,x*zs,zs,1,tmpfname);
  tmpfname = bfname + "_wWtwp";
  write_matrix(wWtwp,x*zs,zs,1,tmpfname);
  tmpfname = bfname + "_WirtWir";
  write_matrix(WirtWir,x*zs*zs,zs,zs,tmpfname);
  tmpfname = bfname + "_wWtwW";
  write_matrix(wWtwW,x*zs*zs,zs,zs,tmpfname);
  tmpfname = bfname + "_sum_vec";
  write_matrix(sum_vec,x*zs,zs,1,tmpfname);
  tmpfname = bfname + "_sum_mat";
  write_matrix(sum_mat,x*zs*zs,zs,zs,tmpfname);
  tmpfname = bfname + "_c_hat";
  write_matrix(c_hat,x*zs,zs,1,tmpfname);
  tmpfname = bfname + "_y_hat";
  write_matrix(y_hat,x*zs,zs,1,tmpfname);
} EddyCatch
}
|
17503bd2adc7224d904e4eaffe42b4611bbb3910.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {

// Elementwise subtraction kernels generated by the CUDADEV_* macros:
//   subtract_const_r: y = x - k (backward: gy passes through unchanged)
//   subtract_const_l: y = k - x (backward: gy negated)
//   subtract_scalar_{r,l}: per-minibatch scalar operand, using __fsub_rn
//   subtract: elementwise a - b forward
CUDADEV_KERNEL_FW_X_CONST(subtract_const_r, px[i] - k);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_r, pgy[i]);

CUDADEV_KERNEL_FW_X_SCALAR_R(subtract_scalar_r, ::__fsub_rn);

CUDADEV_KERNEL_FW_X_CONST(subtract_const_l, k - px[i]);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_l, -pgy[i]);

CUDADEV_KERNEL_FW_X_SCALAR_L(subtract_scalar_l, ::__fsub_rn);

CUDADEV_KERNEL_FW_AB(subtract, ::__fsub_rn);

// Backward of c = a - b: accumulates +gy into pga and -gy into pgb.
// blockIdx.y indexes the minibatch; mba/mbb scale the batch offset for
// a and b (a zero value collapses the offset, so gradients for a
// broadcast operand accumulate into a single sample — presumably
// mba/mbb are 0 or 1; confirm against the caller).
__global__ void subtract_bw_dev(
    const float *, const float *, const float *, const float *pgy,
    std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
    float *pga, float *pgb) {
  const std::uint32_t i = IDX;
  const std::uint32_t shift = blockIdx.y * size;
  if (i < size) {
    const float gy = pgy[i + shift];
    ::atomicAdd(pga + i + mba * shift, gy);
    ::atomicAdd(pgb + i + mbb * shift, -gy);
  }
}

}  // namespace
namespace primitiv {
namespace devices {

// Host-side CUDADevice member functions (generated by the CUDADEV_* macros)
// that launch the subtraction kernels defined in the anonymous namespace above.
CUDADEV_FW_X_CONST(subtract_const_r);
CUDADEV_BW_X_CONST(subtract_const_r);

CUDADEV_FW_X_CONST(subtract_const_l);
CUDADEV_BW_X_CONST(subtract_const_l);

CUDADEV_FW_X_SCALAR(subtract_scalar_r);

CUDADEV_FW_X_SCALAR(subtract_scalar_l);

CUDADEV_FW_AB(subtract);
CUDADEV_BW_AB(subtract);

}  // namespace devices
}  // namespace primitiv
| 17503bd2adc7224d904e4eaffe42b4611bbb3910.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {

// Elementwise subtraction kernels generated by the CUDADEV_* macros:
//   subtract_const_r: y = x - k (backward: gy passes through unchanged)
//   subtract_const_l: y = k - x (backward: gy negated)
//   subtract_scalar_{r,l}: per-minibatch scalar operand, using __fsub_rn
//   subtract: elementwise a - b forward
CUDADEV_KERNEL_FW_X_CONST(subtract_const_r, px[i] - k);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_r, pgy[i]);

CUDADEV_KERNEL_FW_X_SCALAR_R(subtract_scalar_r, ::__fsub_rn);

CUDADEV_KERNEL_FW_X_CONST(subtract_const_l, k - px[i]);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_l, -pgy[i]);

CUDADEV_KERNEL_FW_X_SCALAR_L(subtract_scalar_l, ::__fsub_rn);

CUDADEV_KERNEL_FW_AB(subtract, ::__fsub_rn);

// Backward of c = a - b: accumulates +gy into pga and -gy into pgb.
// blockIdx.y indexes the minibatch; mba/mbb scale the batch offset for
// a and b (a zero value collapses the offset, so gradients for a
// broadcast operand accumulate into a single sample — presumably
// mba/mbb are 0 or 1; confirm against the caller).
__global__ void subtract_bw_dev(
    const float *, const float *, const float *, const float *pgy,
    std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
    float *pga, float *pgb) {
  const std::uint32_t i = IDX;
  const std::uint32_t shift = blockIdx.y * size;
  if (i < size) {
    const float gy = pgy[i + shift];
    ::atomicAdd(pga + i + mba * shift, gy);
    ::atomicAdd(pgb + i + mbb * shift, -gy);
  }
}

}  // namespace
namespace primitiv {
namespace devices {

// Host-side CUDADevice member functions (generated by the CUDADEV_* macros)
// that launch the subtraction kernels defined in the anonymous namespace above.
CUDADEV_FW_X_CONST(subtract_const_r);
CUDADEV_BW_X_CONST(subtract_const_r);

CUDADEV_FW_X_CONST(subtract_const_l);
CUDADEV_BW_X_CONST(subtract_const_l);

CUDADEV_FW_X_SCALAR(subtract_scalar_r);

CUDADEV_FW_X_SCALAR(subtract_scalar_l);

CUDADEV_FW_AB(subtract);
CUDADEV_BW_AB(subtract);

}  // namespace devices
}  // namespace primitiv
|
f2665fb16d67feac26cf04eff78f0b8d117e0efe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common/inc/helper_math.h"
#include "cudahdr.h"
#include <stdio.h>
#include <vector>
// Threads per block assumed by every 1-D kernel launch in this file.
// NOTE(review): despite the name, this is threads-per-block, not
// blocks-per-thread.
const int blockperthread = 512;

// Forward declaration of the host-side wrapper for addKernel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Elementwise c[i] = a[i] + b[i].
// NOTE(review): no bounds guard (the signature carries no size), and the
// index hard-codes blockperthread instead of using blockDim.x — correct only
// when launched with exactly blockperthread threads per block and a grid that
// exactly covers the data.
__global__ void addKernel(int *c, const int *a, const int *b)
{
	int i = threadIdx.x + blockIdx.x * blockperthread;
	c[i] = a[i] + b[i];
}
// 3-row (row-major) matrix times vector: r.i = dot(v, m[i]); w is forced to 1.
__host__ __device__
float4 mul(const float4 *m, const float4 &v)
{
	float4 r;
	r.x = dot(v, m[0]);
	r.y = dot(v, m[1]);
	r.z = dot(v, m[2]);
	r.w = 1.0f;
	return r;
}
// Vector/matrix overload.
// NOTE(review): this body is identical to mul(const float4*, const float4&)
// — both compute dot(v, m[i]). If vector-times-matrix (transposed) semantics
// were intended for this argument order, they are not implemented; confirm
// which convention the colour matrices use before relying on it.
__host__ __device__
float4 mul(const float4 &v, const float4 *m)
{
	float4 r;
	r.x = dot(v, m[0]);
	r.y = dot(v, m[1]);
	r.z = dot(v, m[2]);
	r.w = 1.0f;
	return r;
}
// Hable (Uncharted 2) filmic tone-mapping curve, applied per channel
// (including w) with the standard A..F coefficients.
__host__ __device__ float4 hable(float4 x)
{
	const float A = 0.15, B = 0.50, C = 0.10, D = 0.20, E = 0.02, F = 0.30;
	float4 numer = x * (A*x + (C*B)) + (D*E);
	float4 denom = x * (A*x + B) + (D*F);
	return (numer / denom) - E / F;
}
// Hable tone mapping of scaled linear RGB, normalised so that an input of
// 11.2 (the white point used here) maps to 1.0.
__host__ __device__ float4 HDRToneMapping(float4 rgb, float LuminanceScale)
{
	float4 HABLE_DIV = hable(make_float4(11.2,11.2,11.2,11.2));
	float4 rgba = hable(rgb* LuminanceScale) / HABLE_DIV;
	return rgba;
}
// Converts RGB between colour primaries via the conversion matrix in
// TransPrimaries, clamping negative results to zero. Identity when source
// and display primaries already match.
__host__ __device__ float4 transformPrimaries(float4 rgb, int primaries,int disprimaries, float4*TransPrimaries)
{
	if (primaries == disprimaries)
	{
		return rgb;
	}
	float4 mapped = mul(rgb, TransPrimaries);
	return fmaxf(mapped, make_float4(0,0,0 ,0));
}
// Tone-maps linear light for the display. HDR source (16 = PQ, 18 = HLG)
// onto an SDR display (1 = BT.709, 4 = gamma 2.2) goes through the Hable
// operator; every other mismatched combination is a plain luminance scale.
// Identity when source and display transfers match.
__host__ __device__ float4 toneMapping(float4 rgb, int transfer,int distransfer, float LuminanceScale)
{
	if (distransfer == transfer)
	{
		return rgb;
	}
	if (distransfer == 1 || distransfer == 4)
	{
		if (transfer == 16 || transfer == 18)
		{
			return HDRToneMapping(rgb, LuminanceScale);
		}
		else
		{
			return rgb * LuminanceScale;
		}
	}
	else
	{
		return rgb * LuminanceScale;
	}
}
//
// Inverse HLG (ARIB STD-B67) transfer: maps a non-linear HLG signal in [0,1]
// to linear light normalised so that a full-range signal maps to 12 — the
// lower half of the range is a square law, the upper half the inverse of
// the logarithmic segment.
__host__ __device__ float inverse_HLG(float x)
{
	const float B67_a = 0.17883277;
	const float B67_b = 0.28466892;
	const float B67_c = 0.55991073;
	const float B67_inv_r2 = 4.0;
	return (x <= 0.5) ? x * x * B67_inv_r2
	                  : exp((x - B67_c) / B67_a) + B67_b;
}
//
// HLG (ARIB STD-B67) OETF: linear light Lc (1.0 = nominal peak) to the
// non-linear HLG signal. Negative input clamps to zero; below 1/12 the
// curve is a square root, above it the logarithmic segment.
__host__ __device__ float LineToHLG(float Lc)
{
	const double a = 0.17883277;
	const double b = 0.28466892;
	const double c = 0.55991073;
	if (Lc < 0.0) return 0.0;
	if (Lc <= 1.0 / 12.0) return sqrt(3.0 * Lc);
	return a * log(12.0 * Lc - b) + c;
}
// SMPTE ST 2084 (PQ) EOTF, per channel: non-linear PQ signal in [0,1] to
// linear light in absolute units (the final *10000 scales to the 10000-nit
// reference range).
__host__ __device__ float4 ST2084TOLinear(float4 rgb)
{
	const float ST2084_m1 = 2610.0 / (4096.0 * 4);
	const float ST2084_m2 = (2523.0 / 4096.0) * 128.0;
	const float ST2084_c1 = 3424.0 / 4096.0;
	const float ST2084_c2 = (2413.0 / 4096.0) * 32.0;
	const float ST2084_c3 = (2392.0 / 4096.0) * 32.0;
	rgb = fpowf(fmaxf(rgb, make_float4(0,0,0, 0)), 1.0 / ST2084_m2);
	rgb = fmaxf(rgb - ST2084_c1, make_float4(0,0,0,0)) / (ST2084_c2 - ST2084_c3 * rgb);
	rgb = fpowf(rgb, 1.0 / ST2084_m1);
	return rgb * 10000;
}
//
// Applies the inverse HLG OETF channel-wise (alpha untouched): non-linear
// HLG signal to scene-linear light.
__host__ __device__ float4 HLGTOSenceLinear(float4 rgb)
{
	return make_float4(inverse_HLG(rgb.x),
	                   inverse_HLG(rgb.y),
	                   inverse_HLG(rgb.z),
	                   rgb.w);
}
// display
// HLG signal to display-linear light: inverse OETF per channel, then the
// HLG OOTF — luminance from BT.2020 luma weights, gained by alpha_gain
// (2000) and raised to the system-gamma-minus-one exponent (1.2 - 1.0).
__host__ __device__ float4 HLGTOLinear(float4 rgb)
{
	const float alpha_gain = 2000;
	rgb.x = inverse_HLG(rgb.x);
	rgb.y = inverse_HLG(rgb.y);
	rgb.z = inverse_HLG(rgb.z);
	// OOTF: scale all channels by ys^(gamma-1), ys = weighted luminance.
	float3 ootf_2020 = make_float3(0.2627, 0.6780, 0.0593);
	float ootf_ys = alpha_gain * dot(ootf_2020, make_float3(rgb.x, rgb.y, rgb.z));
	return rgb * powf(ootf_ys, 1.200 - 1.0);
}
// BT.709-style decode to linear: pure power law with exponent 1/0.45.
__host__ __device__ float4 BT709TOLinear(float4 rgb)
{
	return fpowf(rgb, 1.0 / 0.45);
}
// BT.470M / approximate-sRGB decode to linear: pure gamma 2.2.
__host__ __device__ float4 BT470M_SRGB_TOLinear(float4 rgb)
{
	return fpowf(rgb, 2.2);
}
// BT.470BG decode to linear: pure gamma 2.8.
__host__ __device__ float4 BT470BGTOLinear(float4 rgb)
{
	return fpowf(rgb, 2.8);
}
// Linear to approximate sRGB: pure gamma-2.2 encode (not the piecewise
// sRGB curve).
__host__ __device__ float4 LineTOSRGB(float4 rgb)
{
	return fpowf(rgb, 1.0 / 2.2);
}
// SMPTE ST 2084 (PQ) inverse EOTF, per channel: linear light in absolute
// units (10000 = reference peak) to the non-linear PQ signal in [0,1].
__host__ __device__ float4 LineTOST2084(float4 rgb)
{
	const float ST2084_m1 = 2610.0 / (4096.0 * 4);
	const float ST2084_m2 = (2523.0 / 4096.0) * 128.0;
	const float ST2084_c1 = 3424.0 / 4096.0;
	const float ST2084_c2 = (2413.0 / 4096.0) * 32.0;
	const float ST2084_c3 = (2392.0 / 4096.0) * 32.0;
	rgb = fpowf(rgb / 10000, ST2084_m1);
	rgb = (ST2084_c1 + ST2084_c2 * rgb) / (1 + ST2084_c3 * rgb);
	rgb = fpowf(rgb, ST2084_m2);
	return rgb;
}
// PQ-coded (already linearised upstream — TODO confirm against callers)
// RGB to an HLG signal: rescale out of the luminance scale and the
// 10000-nit/1000-nit ratio, clip to 1.0, then apply the HLG OETF.
// Alpha passes through.
__host__ __device__ float4 PQToHLG(float4 rgb, float LuminanceScale)
{
	float r = rgb.x / LuminanceScale / 10000.0 * 10;
	float g = rgb.y / LuminanceScale / 10000.0 * 10;
	float b = rgb.z / LuminanceScale / 10000.0 * 10;
	r = r > 1.0 ? 1.0 : r;
	g = g > 1.0 ? 1.0 : g;
	b = b > 1.0 ? 1.0 : b;
	float a = rgb.w;
	// NOTE(review): the OOTF-compensated path below is disabled (if (0)) —
	// only the plain per-channel OETF branch ever runs.
	if (0)
	{
		float3 ootf_2020 = make_float3(0.2627, 0.6780, 0.0593);
		float ootf_ys = dot(ootf_2020, make_float3(rgb.x, rgb.y,rgb.z));
		ootf_ys = powf(ootf_ys, (1.0 - 1.2) / 1.200);
		float hlgr = r * ootf_ys;
		float hlgg = g * ootf_ys;
		float hlgb = b * ootf_ys;
		float hlga = a;
		hlgr = LineToHLG(hlgr);
		hlgg = LineToHLG(hlgg);
		hlgb = LineToHLG(hlgb);
		return make_float4(hlgr, hlgg, hlgb, hlga);
	}
	else
	{
		r = LineToHLG(r);
		g = LineToHLG(g);
		b = LineToHLG(b);
		return make_float4(r, g, b, a);
	}
}
// SDR RGB to an HLG signal: scale by 0.265*2 (presumably placing SDR white
// at the HLG reference level — TODO confirm the derivation), undo the
// luminance scale, clip to 1.0 and apply the HLG OETF. Alpha passes through.
__host__ __device__ float4 SDRToHLG(float4 rgb, float LuminanceScale)
{
	float a = rgb.w;
	rgb.x = 0.265 *rgb.x * 2/ LuminanceScale;
	rgb.y = 0.265 *rgb.y * 2 / LuminanceScale;
	rgb.z = 0.265 *rgb.z * 2 / LuminanceScale;
	float ootf_ys = 1;   // no OOTF compensation applied here
	float r = rgb.x > 1.0 ? 1.0 : rgb.x;
	float g = rgb.y > 1.0 ? 1.0 : rgb.y;
	float b = rgb.z > 1.0 ? 1.0 : rgb.z;
	float hlgr = r * ootf_ys;
	float hlgg = g * ootf_ys;
	float hlgb = b * ootf_ys;
	hlgr = LineToHLG(hlgr);
	hlgg = LineToHLG(hlgg);
	hlgb = LineToHLG(hlgb);
	float hlga = a;
	return make_float4(hlgr, hlgg, hlgb, hlga);
}
// Converts to an HLG (STD-B67) signal for an HLG display: pass-through when
// the source already is HLG, PQ sources via PQToHLG, everything else via
// SDRToHLG.
__host__ __device__ float4 LinerToSTDB67(float4 hlg, int transfer, int distransfer, float LuminanceScale)
{
	if (transfer == distransfer)
	{
		return hlg;
	}
	else
	{
		if (transfer == 16)  // PQ source
		{
			return PQToHLG(hlg, LuminanceScale);
		}
		else                 // SDR (or other) source
		{
			return SDRToHLG(hlg, LuminanceScale);
		}
	}
}
// Encodes linear display light with the display's transfer function.
// When the source and display transfers match, the pixel was never
// linearised and is passed through unchanged.
__host__ __device__ float4 linearToDisplay(float4 rgb, int transfer, int distransfer, float LuminanceScale)
{
	if (distransfer == transfer)
	{
		return rgb;
	}
	switch (distransfer)
	{
	case 16: // SMPTE ST 2084 (PQ)
		return LineTOST2084(rgb);
	case 18: // ARIB STD-B67 (HLG)
		return LinerToSTDB67(rgb, transfer, distransfer, LuminanceScale);
	case 1:  // BT.709
	case 4:  // gamma 2.2
		return fpowf(rgb, 1.0 / 2.2);
	default:
		return rgb;
	}
}
// Linearises source RGB according to its transfer code (codes as handled
// by the per-case decoders below). Pass-through when the source already
// matches the display transfer, is already linear, or is unrecognised.
__host__ __device__ float4 sourcetolinekernel(float4 rgb, int transfer, int distransfer)
{
	if (transfer == distransfer)
	{
		return rgb;
	}
	switch (transfer)
	{
	case 8:  // already linear
		return rgb;
	case 16: // SMPTE ST 2084 (PQ)
		return ST2084TOLinear(rgb);
	case 18: // ARIB STD-B67 (HLG)
		return HLGTOLinear(rgb);
	case 1:  // BT.709
		return BT709TOLinear(rgb);
	case 4:  // BT.470M / gamma 2.2
		return BT470M_SRGB_TOLinear(rgb);
	case 5:  // BT.470BG / gamma 2.8
		return BT470BGTOLinear(rgb);
	default:
		return rgb;
	}
}
// Full colour pipeline for one pixel: linearise the source transfer,
// convert primaries, tone-map for the display, then encode with the display
// transfer. Alpha is preserved around the colour processing.
__host__ __device__ float4 Render2RGBA(float4 rgb, int transfer, int distransfer, int primary, int disprimary, float LuminanceScale, float4 *TransPrimaries)
{
	float a = rgb.w;
	rgb.w = 0;   // keep alpha out of the colour math
	rgb = sourcetolinekernel(rgb, transfer, distransfer);
	rgb = transformPrimaries(rgb, primary, disprimary, TransPrimaries);
	rgb = toneMapping(rgb, transfer, distransfer, LuminanceScale);
	rgb = linearToDisplay(rgb, transfer, distransfer, LuminanceScale);
	rgb.w = a;
	return rgb;
}
// RGB to YUV via the supplied conversion matrix, each component clamped to
// [0,1]. NOTE(review): the returned alpha is hard-coded to 1.0, discarding
// the input alpha — confirm that is intended.
__host__ __device__ float4 RGBA2yuv(float4 rgb, float4 *rgb2yuv)
{
	float4 yuva = mul(rgb, rgb2yuv);
	float y = clamp(yuva.x, 0.0, 1.0);
	float u = clamp(yuva.y, 0.0, 1.0);
	float v = clamp(yuva.z, 0.0, 1.0);
	return make_float4(y, u, v, 1.0);
}
// YUV to RGB via the supplied conversion matrix, each component clamped to
// [0,1]; returned alpha is 1.0.
// NOTE(review): the parameter yuv2rgb shadows the function name — legal, but
// consider renaming for readability.
__host__ __device__ float4 yuv2rgb(float4 yuv, float4 *yuv2rgb)
{
	float4 yuva = mul(yuv, yuv2rgb);
	float r = clamp(yuva.x, 0.0, 1.0);
	float g = clamp(yuva.y, 0.0, 1.0);
	float b = clamp(yuva.z, 0.0, 1.0);
	return make_float4(r, g, b, 1.0);
}
// One thread per pixel: converts planar 10-bit YUV 4:2:0 to RGBA10.
// Chroma is subsampled 2x2, hence the /2 on the uv plane coordinates.
// A white-balance matrix is applied in YUV space before the YUV->RGB matrix.
// NOTE(review): no bounds guard against width*height — correct only when the
// grid exactly covers the image.
__global__ void yuv420p10torgbakernel(unsigned short *dst, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float4 *matrix,const float4 *white, int width,int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	int xpos = index % width;
	int ypos = index / width ;
	xpos = xpos / 2;   // chroma plane coords (half resolution)
	ypos = ypos / 2;
	int uvindex = xpos + ypos * width / 2;
	// Normalise 10-bit codes to [0,1].
	float y0 = y[index]/1023.0;
	float u0 = u[uvindex] / 1023.0;
	float v0 = v[uvindex] / 1023.0;
	float4 rgba = mul(make_float4(y0,u0,v0,1), white);
	rgba = fmaxf(mul(rgba, matrix), make_float4(0,0,0,0));
	float r0 = clamp(rgba.x, 0.0, 1.0);;
	float g0 = clamp(rgba.y, 0.0, 1.0);;
	float b0 = clamp(rgba.z, 0.0, 1.0);
	dst[index * 4 + 0] = (unsigned short)(r0 *1023);
	dst[index * 4 + 1] = (unsigned short)(g0 * 1023);
	dst[index * 4 + 2] = (unsigned short)(b0 * 1023);
	dst[index * 4 + 3] = 1 * 1023;   // opaque alpha
}
// One thread per pixel: converts 10-bit YUV (separate planes) to RGB10 using
// the row-major 3x4 colour matrix; codes normalised over the 0..1023 range.
// NOTE(review): ypos = index / (width*height) differs from the 420p kernel
// (which uses index / width) and yields 0 for all in-frame pixels, and
// uvindex uses height/2 where the 420p kernel uses width/2 — confirm the
// intended plane layout before relying on this kernel. No alpha is written.
__global__ void yuvnv12p10torgbakernel(unsigned short *rgba, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float *matrix, int width, int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	int xpos = index % width;
	int ypos = index / (width *height);
	xpos = xpos / 2;
	ypos = ypos / 2;
	int uvindex = xpos + ypos * height / 2;
	float y0 = y[index] / 1023.0;
	float u0 = u[uvindex] / 1023.0;
	float v0 = v[uvindex] / 1023.0;
	float r0 = matrix[0] * y0 + matrix[1] * u0 + matrix[2] * v0 + matrix[3];
	float g0 = matrix[4] * y0 + matrix[5] * u0 + matrix[6] * v0 + matrix[7];
	float b0 = matrix[8] * y0 + matrix[9] * u0 + matrix[10] * v0 + matrix[11];
	r0 = clamp(r0, 0.0, 1.0);
	g0 = clamp(g0, 0.0, 1.0);
	b0 = clamp(b0, 0.0, 1.0);
	rgba[index * 4 + 0] = r0 * 1023;
	rgba[index * 4 + 1] = g0 * 1023;
	// BUG FIX: the blue channel previously stored the raw V sample (v0)
	// instead of the converted, clamped blue value.
	rgba[index * 4 + 2] = b0 * 1023;
}
// One thread per pixel: converts planar 10-bit YUV 4:4:4 (no chroma
// subsampling, so uvindex == index) to RGB10 using the row-major 3x4 colour
// matrix. No alpha is written; no bounds guard (grid must exactly cover the
// image).
__global__ void yuv444p10torgbkernel(unsigned short *rgba, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float *matrix, int width, int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	int uvindex = index;
	float y0 = y[index] / 1023.0;
	float u0 = u[uvindex] / 1023.0;
	float v0 = v[uvindex] / 1023.0;
	float r0 = matrix[0] * y0 + matrix[1] * u0 + matrix[2] * v0 + matrix[3];
	float g0 = matrix[4] * y0 + matrix[5] * u0 + matrix[6] * v0 + matrix[7];
	float b0 = matrix[8] * y0 + matrix[9] * u0 + matrix[10] * v0 + matrix[11];
	r0 = clamp(r0, 0.0, 1.0);
	g0 = clamp(g0, 0.0, 1.0);
	b0 = clamp(b0, 0.0, 1.0);
	rgba[index * 4 + 0] = r0 * 1023;
	rgba[index * 4 + 1] = g0 * 1023;
	// BUG FIX: the blue channel previously stored the raw V sample (v0)
	// instead of the converted, clamped blue value.
	rgba[index * 4 + 2] = b0 * 1023;
}
// One thread per pixel: converts planar 10-bit YUV 4:2:2 (chroma halved
// horizontally only) to RGB10 using the row-major 3x4 colour matrix.
// NOTE(review): uvindex uses ypos * height as the row stride where the 420p
// kernel uses a width-derived stride — confirm the chroma plane pitch.
// No alpha is written.
__global__ void yuv422p10torgbakernel(unsigned short *rgba, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float *matrix, int width, int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	int xpos = index % width;
	int ypos = index / width;
	xpos = xpos / 2;   // horizontal chroma subsampling only
	int uvindex = xpos + ypos * height;
	float y0 = y[index] / 1023.0;
	float u0 = u[uvindex] / 1023.0;
	float v0 = v[uvindex] / 1023.0;
	float r0 = matrix[0] * y0 + matrix[1] * u0 + matrix[2] * v0 + matrix[3];
	float g0 = matrix[4] * y0 + matrix[5] * u0 + matrix[6] * v0 + matrix[7];
	float b0 = matrix[8] * y0 + matrix[9] * u0 + matrix[10] * v0 + matrix[11];
	r0 = clamp(r0, 0.0, 1.0);
	g0 = clamp(g0, 0.0, 1.0);
	b0 = clamp(b0, 0.0, 1.0);
	rgba[index * 4 + 0] = r0 * 1023;
	rgba[index * 4 + 1] = g0 * 1023;
	// BUG FIX: the blue channel previously stored the raw V sample (v0)
	// instead of the converted, clamped blue value.
	rgba[index * 4 + 2] = b0 * 1023;
}
// Placeholder kernel: despite the name, this currently performs a plain
// elementwise add (float operand truncated on the int store), not an actual
// RGB -> YUV 4:2:0 conversion.
__global__ void rgbtoyuv420p10(int *c, const int *a, const float *b)
{
	int idx = blockIdx.x * blockperthread + threadIdx.x;
	c[idx] = a[idx] + b[idx];
}
// Placeholder kernel: despite the name, this currently performs a plain
// elementwise add (float operand truncated on the int store), not an actual
// RGB -> YUV 4:4:4 conversion.
__global__ void rgbtoyuv444p10(int *c, const int *a, const float *b)
{
	int idx = blockIdx.x * blockperthread + threadIdx.x;
	c[idx] = a[idx] + b[idx];
}
// Nearest-neighbor resize of a 10-bit RGBA surface (4 shorts per pixel).
// One thread per destination pixel; `scale` maps dst coords back to src.
__global__ void rgba10resize(unsigned short *dst, const unsigned short *src, int srcwidth,int srcheight,int dstwidth,int dstheight,int xstart,int ystart, float scale)
{
	int i = threadIdx.x + blockIdx.x * blockperthread;
	if (i >= dstwidth * dstheight)  // launch grid is rounded up; guard the tail
		return;
	int xpos = i % dstwidth;
	int ypos = i / dstwidth;
	float xposorg = xpos * scale;
	float yposorg = ypos * scale;
	int xx = xposorg;
	int yy = yposorg;
	if (xx >= srcwidth || yy >= srcheight)  // scale may overshoot the source
		return;
	// Was integer division by 1023, which truncated every channel to 0 or 1
	// before re-scaling; divide in float to keep the full 10-bit value.
	float r = src[yy * srcwidth * 4 + xx * 4 + 0] / 1023.0f;
	float g = src[yy * srcwidth * 4 + xx * 4 + 1] / 1023.0f;
	float b = src[yy * srcwidth * 4 + xx * 4 + 2] / 1023.0f;
	float a = src[yy * srcwidth * 4 + xx * 4 + 3] / 1023.0f;
	// NOTE(review): xstart/ystart are folded additively into the flat index,
	// which only positions the block correctly for xstart == ystart == 0 —
	// confirm intended placement semantics for nonzero offsets.
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 0] = r * 1023;
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 1] = g * 1023;
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 2] = b * 1023;
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 3] = a * 1023;
}
// Resizes a 10-bit RGBA surface (nearest neighbor) while applying the full
// color-management chain (Render2RGBA) and converting to 10-bit YUV 4:2:0
// with an interleaved UV plane (NV12-style). One thread per destination pixel.
// dsty: Y plane (dstwidth*dstheight shorts); dstuv: interleaved UV plane.
// xstart/ystart are accepted but unused here.
__global__ void rgba10resize2YUV42010(unsigned short *dsty, unsigned short *dstuv, const unsigned short *src, int srcwidth, int srcheight,
int dstwidth, int dstheight, int xstart, int ystart, float scale, int transfer, int distransfer, int primary, int disprimary,
float LuminanceScale, float4 *TransPrimaries,float4 *rgb2yuv)
{
int i = threadIdx.x + blockIdx.x * blockperthread;
// Launch grid is rounded up to whole blocks; drop the tail threads.
if (i >= dstwidth * dstheight)
{
return;
}
int xpos = i % dstwidth;
int ypos = i / dstwidth;
// Nearest-neighbor source coordinates (truncating float -> int).
float xposorg = xpos * scale;
float yposorg = ypos * scale;
int xx = xposorg;
int yy = yposorg;
if (xx >= srcwidth || yy >= srcheight)
{
return;
}
float r = src[yy * srcwidth * 4 + xx * 4 + 0] / 1023.0;
float g = src[yy * srcwidth * 4 + xx * 4 + 1] / 1023.0;
float b = src[yy * srcwidth * 4 + xx * 4 + 2] / 1023.0;
float4 rgba = make_float4(r, g, b, 1);
// Transfer/primaries/tone-mapping chain, then RGB -> YUV.
rgba = Render2RGBA(rgba, transfer, distransfer, primary, disprimary, LuminanceScale, TransPrimaries);
float4 yuva = RGBA2yuv(rgba, rgb2yuv);
dsty[i] = clamp(yuva.x, 0.0, 1.0) * 1023;
if (1)
{
//float a = src[yy * srcwidth * 4 + xx * 4 + 3] / 1023.0;
// 4:2:0 chroma: only even (x, y) positions emit a UV pair. For even ypos,
// ypos * dstwidth / 2 == (ypos/2) * dstwidth, i.e. the interleaved UV row.
if (xpos % 2 == 0 && ypos % 2 == 0)
{
dstuv[ypos * dstwidth / 2 + xpos + 0] = clamp(yuva.y, 0.0, 1.0) * 1023;
dstuv[ypos * dstwidth / 2 + xpos + 1] = clamp(yuva.z, 0.0, 1.0) * 1023;
}
}
}
// Host pipeline: planar 10-bit YUV 4:2:0 -> device RGBA -> resized,
// color-managed 10-bit NV12 written back into `NV12`.
// NV12 must hold dstwidth*dstheight*3/2 unsigned shorts.
// yuv2rgb / white / TransferPirmary / rgb2yuv are 16-float (4x4) matrices.
// NOTE(review): this calls hipDeviceReset() on exit, which destroys the whole
// device context — unsafe if the caller holds other live GPU allocations.
void yuv420p10tonv12p10resize(unsigned short *NV12, const unsigned short *y,
const unsigned short *u, const unsigned short *v, const float *yuv2rgb, const float *white, float * TransferPirmary, const float *rgb2yuv, int srcwidth, int srcheight,
int dstwidth, int dstheight, int transfer, int distranfer, int primary, int disprimary, float luama)
{
int nGpu = 0;
hipError_t res = hipGetDeviceCount(&nGpu);
if (res != hipSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return;
}
// Device properties are collected but not otherwise used.
std::vector< hipDeviceProp_t> vProp;
for (int i = 0; i < nGpu; i++)
{
hipDeviceProp_t deviceprop;
res = hipGetDeviceProperties(&deviceprop, i);
if (res == hipSuccess)
{
vProp.push_back(deviceprop);
}
}
hipError_t cudaStatus;
unsigned short * ydevice = 0;
unsigned short *udevice = 0;
unsigned short *vdevice = 0;
unsigned short *rgbdevice = 0;    // intermediate RGBA surface (src size)
unsigned short *nv1210device = 0; // output NV12 buffer (dst size)
float4 *yuv2rgbmatrixdevice = 0;
float4 *whitedevice = 0;
float4 *rgb2yuvmatrixdevice = 0;
float4 *transferprimarymatrixdevice = 0;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&yuv2rgbmatrixdevice, 4 * sizeof(float4));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&transferprimarymatrixdevice, 4 * sizeof(float4));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&rgb2yuvmatrixdevice, 4 * sizeof(float4));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&whitedevice, 4 * sizeof(float4));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&rgbdevice, srcwidth *srcheight * 4 * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&nv1210device, dstwidth *dstheight * 3/2 * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&ydevice, srcwidth *srcheight * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&udevice, srcwidth *srcheight / 4 * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&vdevice, srcwidth *srcheight / 4 * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(yuv2rgbmatrixdevice, yuv2rgb, 16 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(rgb2yuvmatrixdevice, rgb2yuv, 16 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(transferprimarymatrixdevice, TransferPirmary, 16 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(whitedevice, white, 16 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(ydevice, y, srcwidth *srcheight * sizeof(unsigned short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(udevice, u, srcwidth *srcheight / 4 * sizeof(unsigned short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(vdevice, v, srcwidth *srcheight / 4 * sizeof(unsigned short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Stage 1: expand the source YUV 4:2:0 frame to RGBA, one thread per pixel.
int nsize = srcwidth * srcheight;
unsigned int grid = (nsize + blockperthread - 1) / blockperthread;
dim3 ngrid = { grid };
unsigned int perthread = blockperthread;
dim3 nthread = { perthread };
// Launch a kernel on the GPU with one thread for each element.
yuv420p10torgbakernel << <grid, perthread >> > (rgbdevice, ydevice, udevice, vdevice, yuv2rgbmatrixdevice, whitedevice, srcwidth, srcheight);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Stage 2: resize + color-manage the RGBA surface directly into NV12
// (Y plane followed by the interleaved UV plane).
nsize = dstwidth * dstheight;
grid = (nsize + blockperthread - 1) / blockperthread;
ngrid = { grid };
unsigned short *dsty = nv1210device;
unsigned short *dstuv = nv1210device + dstwidth * dstheight;
float scale = 1.0 * srcwidth / dstwidth;
rgba10resize2YUV42010 << <grid, perthread >> > (dsty, dstuv,rgbdevice, srcwidth,srcheight,dstwidth,dstheight,0,0,scale,transfer,distranfer,
primary, disprimary,luama,transferprimarymatrixdevice, rgb2yuvmatrixdevice);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(NV12, nv1210device, dstheight *dstwidth * 3 / 2* sizeof(unsigned short), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Cleanup path shared by success and failure (hipFree(0) is a no-op).
Error:
hipFree(rgbdevice);
hipFree(nv1210device);
hipFree(ydevice);
hipFree(udevice);
hipFree(vdevice);
hipFree(yuv2rgbmatrixdevice);
hipFree(rgb2yuvmatrixdevice);
hipFree(whitedevice);
hipFree(transferprimarymatrixdevice);
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return;
}
}
// Host wrapper: converts a planar 10-bit YUV 4:2:0 frame to 10-bit RGBA on
// GPU 0 and copies the result into `rgba` (width*height*4 unsigned shorts).
// `matrix` and `white` are 16-float (4x4) matrices uploaded to the device.
// NOTE(review): hipDeviceReset() at the end tears down the device context —
// unsafe if the caller has other live GPU state.
void yuv420p10torgba(unsigned short *rgba, const unsigned short *y,
const unsigned short *u, const unsigned short *v, const float *matrix, const float *white,int width, int height)
{
int nGpu = 0;
hipError_t res = hipGetDeviceCount(&nGpu);
if (res != hipSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return ;
}
// Device properties are collected but not otherwise used.
std::vector< hipDeviceProp_t> vProp;
for (int i = 0; i < nGpu; i++)
{
hipDeviceProp_t deviceprop;
res = hipGetDeviceProperties(&deviceprop, i);
if (res == hipSuccess)
{
vProp.push_back(deviceprop);
}
}
hipError_t cudaStatus;
unsigned short * ydevice = 0;
unsigned short *udevice = 0;
unsigned short *vdevice = 0;
unsigned short *rgbdevice = 0;
float4 *matrixdevice = 0;
float4 *whitedevice = 0;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&matrixdevice,4 * sizeof(float4));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&whitedevice, 4 * sizeof(float4));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&rgbdevice, width *height *4* sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&ydevice, width *height * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// 4:2:0 chroma planes are a quarter of the luma plane.
cudaStatus = hipMalloc((void**)&udevice, width *height/4 * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&vdevice, width *height / 4 * sizeof(unsigned short));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(matrixdevice, matrix, 16 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(whitedevice, white, 16 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(ydevice, y, width *height * sizeof(unsigned short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(udevice, u, width *height / 4 * sizeof(unsigned short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(vdevice, v, width *height / 4 * sizeof(unsigned short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// One thread per luma pixel; grid rounded up to whole blocks.
int nsize = width * height;
unsigned int grid = (nsize + blockperthread - 1) / blockperthread;
dim3 ngrid = { grid};
unsigned int perthread = blockperthread;
dim3 nthread = { perthread };
// Launch a kernel on the GPU with one thread for each element.
yuv420p10torgbakernel << <grid, perthread >> > (rgbdevice,ydevice,udevice,vdevice, matrixdevice,whitedevice, width, height);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(rgba, rgbdevice, width *height * 4 * sizeof(unsigned short), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Cleanup path shared by success and failure (hipFree(0) is a no-op).
Error:
hipFree(rgbdevice);
hipFree(ydevice);
hipFree(udevice);
hipFree(vdevice);
hipFree(matrixdevice);
hipFree(whitedevice);
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return ;
}
}
#ifdef EXE
const long arraySize = 3840 * 2160;
// Self-test entry point (EXE builds only): runs addWithCuda over arraySize
// elements and verifies c[i] == a[i] + b[i] on the host.
int main()
{
	int nGpu = 0;
	hipError_t res = hipGetDeviceCount(&nGpu);
	if (res != hipSuccess)
	{
		fprintf(stderr, "addWithCuda failed!");
		return 1;
	}
	// Device properties are collected but not otherwise used.
	std::vector< hipDeviceProp_t> vProp;
	for (int i = 0; i < nGpu; i++)
	{
		hipDeviceProp_t deviceprop;
		res = hipGetDeviceProperties(&deviceprop, i);
		if (res == hipSuccess)
		{
			vProp.push_back(deviceprop);
		}
	}
	int *a = new int[arraySize];
	int *b = new int[arraySize];
	int *c = new int[arraySize];
	for (int i = 0; i < arraySize; i++)
	{
		a[i] = 0;
		b[i] = 1;
		c[i] = 0;
	}
	// Add vectors in parallel.
	hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "addWithCuda failed!");
		delete[] a;  // fix: host buffers were leaked on every exit path
		delete[] b;
		delete[] c;
		return 1;
	}
	for (int i = 0; i < arraySize; i++)
	{
		if (c[i] != a[i] + b[i])
		{
			fprintf(stderr, "addWithCuda error----id is:%d!", i);
		}
	}
	delete[] a;  // fix: host buffers were leaked on every exit path
	delete[] b;
	delete[] c;
	// hipDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = hipDeviceReset();
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "hipDeviceReset failed!");
		return 1;
	}
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Copies a and b (size ints each) to the device, runs addKernel, and copies
// the elementwise sum back into c. Returns the first error encountered.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
	int *dev_a = 0;
	int *dev_b = 0;
	int *dev_c = 0;
	hipError_t cudaStatus;
	// Fix: the grid was sized from the global `arraySize` instead of the
	// `size` parameter, and the block dimension was a literal 512 decoupled
	// from blockperthread (which addKernel hardcodes as its stride).
	// addKernel has no tail guard, so pad the device buffers up to a whole
	// number of blocks to keep its writes in-bounds when size is not a
	// multiple of blockperthread.
	unsigned int nblocks = (size + blockperthread - 1) / blockperthread;
	unsigned int padded = nblocks * blockperthread;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = hipSetDevice(0);
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
		goto Error;
	}
	// Allocate GPU buffers for three vectors (two input, one output) .
	cudaStatus = hipMalloc((void**)&dev_c, padded * sizeof(int));
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "hipMalloc failed!");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&dev_a, padded * sizeof(int));
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "hipMalloc failed!");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&dev_b, padded * sizeof(int));
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "hipMalloc failed!");
		goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "hipMemcpy failed!");
		goto Error;
	}
	cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "hipMemcpy failed!");
		goto Error;
	}
	{
		dim3 ngrid = { nblocks, 1, 1 };
		// Block size must equal blockperthread: the kernel's index math uses it.
		dim3 nthread = { (unsigned int)blockperthread, 1, 1 };
		// Launch a kernel on the GPU with one thread for each element.
		addKernel << <ngrid, nthread >> > (dev_c, dev_a, dev_b);
	}
	// Check for any errors launching the kernel
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
		goto Error;
	}
	// hipDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
		goto Error;
	}
	// Copy output vector from GPU buffer to host memory.
	cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
		goto Error;
	}
Error:
	hipFree(dev_c);
	hipFree(dev_a);
	hipFree(dev_b);
	return cudaStatus;
}
#endif | f2665fb16d67feac26cf04eff78f0b8d117e0efe.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common/inc/helper_math.h"
#include "cudahdr.h"
#include <stdio.h>
#include <vector>
const int blockperthread = 512;
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Elementwise vector add: c[i] = a[i] + b[i]. The stride is hardcoded to
// blockperthread, so the launch must use exactly that many threads per block.
__global__ void addKernel(int *c, const int *a, const int *b)
{
	int idx = blockIdx.x * blockperthread + threadIdx.x;
	c[idx] = a[idx] + b[idx];
}
// Applies the row-major matrix (rows m[0..2]) to v; the w component is
// forced to 1.0f.
__host__ __device__
float4 mul(const float4 *m, const float4 &v)
{
	float4 out = make_float4(dot(v, m[0]), dot(v, m[1]), dot(v, m[2]), 1.0f);
	return out;
}
// Overload with swapped argument order; identical math to mul(m, v):
// row-major matrix applied to v, w forced to 1.0f.
__host__ __device__
float4 mul(const float4 &v, const float4 *m)
{
	float4 out = make_float4(dot(v, m[0]), dot(v, m[1]), dot(v, m[2]), 1.0f);
	return out;
}
// Hable (Uncharted 2) filmic tone-mapping curve, applied componentwise.
__host__ __device__ float4 hable(float4 x)
{
	const float A = 0.15, B = 0.50, C = 0.10, D = 0.20, E = 0.02, F = 0.30;
	float4 num = x * (A*x + (C*B)) + (D*E);
	float4 den = x * (A*x + B) + (D*F);
	return num / den - E / F;
}
// Hable filmic tone mapping normalized so that a linear value of 11.2
// (the curve's white point) maps to 1.0.
__host__ __device__ float4 HDRToneMapping(float4 rgb, float LuminanceScale)
{
	float4 whitePoint = hable(make_float4(11.2, 11.2, 11.2, 11.2));
	return hable(rgb * LuminanceScale) / whitePoint;
}
// Converts between color primaries via the TransPrimaries matrix, clamping
// negative components to zero; identity when source and display primaries match.
__host__ __device__ float4 transformPrimaries(float4 rgb, int primaries,int disprimaries, float4*TransPrimaries)
{
	if (primaries == disprimaries)
		return rgb;
	float4 mapped = mul(rgb, TransPrimaries);
	return fmaxf(mapped, make_float4(0, 0, 0, 0));
}
// Maps linear light from the source transfer into the display's range.
// An HDR source (16 = PQ, 18 = HLG) shown on an SDR display (1 or 4) gets the
// filmic curve; every other mismatch is a plain luminance scale.
__host__ __device__ float4 toneMapping(float4 rgb, int transfer,int distransfer, float LuminanceScale)
{
	if (distransfer == transfer)
		return rgb;
	bool sdrTarget = (distransfer == 1 || distransfer == 4);
	bool hdrSource = (transfer == 16 || transfer == 18);
	if (sdrTarget && hdrSource)
		return HDRToneMapping(rgb, LuminanceScale);
	return rgb * LuminanceScale;
}
// HLG inverse OETF: converts a nonlinear HLG signal value to scene-linear
// light (ITU-R BT.2100 constants).
__host__ __device__ float inverse_HLG(float x)
{
	const float B67_a = 0.17883277;
	const float B67_b = 0.28466892;
	const float B67_c = 0.55991073;
	const float B67_inv_r2 = 4.0;
	return (x <= 0.5) ? x * x * B67_inv_r2
	                  : exp((x - B67_c) / B67_a) + B67_b;
}
// HLG OETF: converts scene-linear light to the nonlinear HLG signal
// (ITU-R BT.2100 constants); negative input clamps to 0.
__host__ __device__ float LineToHLG(float Lc)
{
	const double a = 0.17883277;
	const double b = 0.28466892;
	const double c = 0.55991073;
	if (0.0 > Lc)
		return 0.0;
	if (Lc <= 1.0 / 12.0)
		return sqrt(3.0 * Lc);
	return a * log(12.0 * Lc - b) + c;
}
// SMPTE ST 2084 (PQ) EOTF: nonlinear signal -> linear light scaled to
// 0..10000 (nits), applied componentwise.
__host__ __device__ float4 ST2084TOLinear(float4 rgb)
{
	const float ST2084_m1 = 2610.0 / (4096.0 * 4);
	const float ST2084_m2 = (2523.0 / 4096.0) * 128.0;
	const float ST2084_c1 = 3424.0 / 4096.0;
	const float ST2084_c2 = (2413.0 / 4096.0) * 32.0;
	const float ST2084_c3 = (2392.0 / 4096.0) * 32.0;
	const float4 zero = make_float4(0, 0, 0, 0);
	float4 p = fpowf(fmaxf(rgb, zero), 1.0 / ST2084_m2);
	float4 lin = fmaxf(p - ST2084_c1, zero) / (ST2084_c2 - ST2084_c3 * p);
	lin = fpowf(lin, 1.0 / ST2084_m1);
	return lin * 10000;
}
// HLG signal -> scene-referred linear light, per channel (alpha untouched).
__host__ __device__ float4 HLGTOSenceLinear(float4 rgb)
{
	return make_float4(inverse_HLG(rgb.x),
	                   inverse_HLG(rgb.y),
	                   inverse_HLG(rgb.z),
	                   rgb.w);
}
// HLG signal -> display-referred linear light: inverse OETF per channel,
// then the HLG OOTF (system gamma 1.2) with a 2000-nit peak gain.
__host__ __device__ float4 HLGTOLinear(float4 rgb)
{
	const float alpha_gain = 2000;
	rgb.x = inverse_HLG(rgb.x);
	rgb.y = inverse_HLG(rgb.y);
	rgb.z = inverse_HLG(rgb.z);
	// BT.2020 luma weights drive the OOTF's luminance term.
	float3 lumaWeights = make_float3(0.2627, 0.6780, 0.0593);
	float ys = alpha_gain * dot(lumaWeights, make_float3(rgb.x, rgb.y, rgb.z));
	return rgb * powf(ys, 1.200 - 1.0);
}
// BT.709-style pure-power approximation: signal -> linear (gamma 1/0.45).
__host__ __device__ float4 BT709TOLinear(float4 rgb)
{
	const float gamma = 1.0 / 0.45;
	return fpowf(rgb, gamma);
}
// BT.470M / sRGB-approximation: signal -> linear via a pure gamma of 2.2.
__host__ __device__ float4 BT470M_SRGB_TOLinear(float4 rgb)
{
	const float gamma = 2.2;
	return fpowf(rgb, gamma);
}
// BT.470BG: signal -> linear via a pure gamma of 2.8.
__host__ __device__ float4 BT470BGTOLinear(float4 rgb)
{
	const float gamma = 2.8;
	return fpowf(rgb, gamma);
}
// Linear -> gamma-2.2 display signal (inverse of BT470M_SRGB_TOLinear).
__host__ __device__ float4 LineTOSRGB(float4 rgb)
{
	const float invGamma = 1.0 / 2.2;
	return fpowf(rgb, invGamma);
}
// SMPTE ST 2084 (PQ) inverse EOTF: linear light in 0..10000 (nits) -> PQ
// signal, applied componentwise.
__host__ __device__ float4 LineTOST2084(float4 rgb)
{
	const float ST2084_m1 = 2610.0 / (4096.0 * 4);
	const float ST2084_m2 = (2523.0 / 4096.0) * 128.0;
	const float ST2084_c1 = 3424.0 / 4096.0;
	const float ST2084_c2 = (2413.0 / 4096.0) * 32.0;
	const float ST2084_c3 = (2392.0 / 4096.0) * 32.0;
	float4 y = fpowf(rgb / 10000, ST2084_m1);
	float4 num = ST2084_c1 + ST2084_c2 * y;
	float4 den = 1 + ST2084_c3 * y;
	return fpowf(num / den, ST2084_m2);
}
// Converts PQ display-referred linear light to an HLG signal: undo the
// luminance scale, normalize 0..10000 nits into the HLG 0..1 range (x10
// headroom factor), clip at 1.0, then apply the HLG OETF. Alpha passes through.
__host__ __device__ float4 PQToHLG(float4 rgb, float LuminanceScale)
{
float r = rgb.x / LuminanceScale / 10000.0 * 10;
float g = rgb.y / LuminanceScale / 10000.0 * 10;
float b = rgb.z / LuminanceScale / 10000.0 * 10;
r = r > 1.0 ? 1.0 : r;
g = g > 1.0 ? 1.0 : g;
b = b > 1.0 ? 1.0 : b;
float a = rgb.w;
// The values above are display-referred light.
// Inverse OOTF: would convert display light back to scene light.
// Dead branch: the inverse-OOTF path is disabled; the OETF is applied to the
// display-referred values directly.
if (0)
{
float3 ootf_2020 = make_float3(0.2627, 0.6780, 0.0593);
float ootf_ys = dot(ootf_2020, make_float3(rgb.x, rgb.y,rgb.z));
ootf_ys = powf(ootf_ys, (1.0 - 1.2) / 1.200);
float hlgr = r * ootf_ys;
float hlgg = g * ootf_ys;
float hlgb = b * ootf_ys;
float hlga = a;
hlgr = LineToHLG(hlgr);
hlgg = LineToHLG(hlgg);
hlgb = LineToHLG(hlgb);
return make_float4(hlgr, hlgg, hlgb, hlga);
}
else
{
r = LineToHLG(r);
g = LineToHLG(g);
b = LineToHLG(b);
return make_float4(r, g, b, a);
}
}
// SDR linear light -> HLG signal: scale into the HLG range (0.265
// reference-white factor with x2 headroom, divided by the luminance scale),
// clip at 1.0, then apply the HLG OETF. Alpha passes through.
__host__ __device__ float4 SDRToHLG(float4 rgb, float LuminanceScale)
{
	float alpha = rgb.w;
	float r = 0.265 * rgb.x * 2 / LuminanceScale;
	float g = 0.265 * rgb.y * 2 / LuminanceScale;
	float b = 0.265 * rgb.z * 2 / LuminanceScale;
	if (r > 1.0) r = 1.0;
	if (g > 1.0) g = 1.0;
	if (b > 1.0) b = 1.0;
	return make_float4(LineToHLG(r), LineToHLG(g), LineToHLG(b), alpha);
}
// Routes a frame to the HLG (ARIB STD-B67) encoder when source and display
// transfers differ: PQ sources (16) go through PQToHLG, everything else is
// treated as SDR via SDRToHLG; identity when they already match.
__host__ __device__ float4 LinerToSTDB67(float4 hlg, int transfer, int distransfer, float LuminanceScale)
{
	if (transfer == distransfer)
		return hlg;
	return (transfer == 16) ? PQToHLG(hlg, LuminanceScale)
	                        : SDRToHLG(hlg, LuminanceScale);
}
// Encodes linear light with the display's transfer curve:
// 16 -> PQ, 18 -> HLG, 1/4 -> gamma 2.2; identity when the source transfer
// already matches the display or the code is unrecognized.
__host__ __device__ float4 linearToDisplay(float4 rgb, int transfer, int distransfer, float LuminanceScale)
{
	if (distransfer == transfer)
		return rgb;
	switch (distransfer)
	{
	case 16:
		return LineTOST2084(rgb);
	case 18:
		return LinerToSTDB67(rgb, transfer, distransfer, LuminanceScale);
	case 1:
	case 4:
		return fpowf(rgb, 1.0 / 2.2);
	default:
		return rgb;
	}
}
// Decodes the source transfer curve into linear light. Codes map to the
// converters below: 8 = already linear, 16 = PQ, 18 = HLG, 1 = BT.709,
// 4 = gamma 2.2, 5 = gamma 2.8; identity when source and display transfers
// match or the code is unrecognized.
__host__ __device__ float4 sourcetolinekernel(float4 rgb, int transfer, int distransfer)
{
	if (transfer == distransfer)
		return rgb;
	switch (transfer)
	{
	case 8:
		return rgb;
	case 16:
		return ST2084TOLinear(rgb);
	case 18:
		return HLGTOLinear(rgb);
	case 1:
		return BT709TOLinear(rgb);
	case 4:
		return BT470M_SRGB_TOLinear(rgb);
	case 5:
		return BT470BGTOLinear(rgb);
	default:
		return rgb;
	}
}
// Full color-management chain: source transfer -> linear light, primaries
// conversion, tone mapping, then encode with the display transfer.
// Alpha is preserved unchanged across the chain.
__host__ __device__ float4 Render2RGBA(float4 rgb, int transfer, int distransfer, int primary, int disprimary, float LuminanceScale, float4 *TransPrimaries)
{
	float alpha = rgb.w;
	rgb.w = 0;
	float4 c = sourcetolinekernel(rgb, transfer, distransfer);
	c = transformPrimaries(c, primary, disprimary, TransPrimaries);
	c = toneMapping(c, transfer, distransfer, LuminanceScale);
	c = linearToDisplay(c, transfer, distransfer, LuminanceScale);
	c.w = alpha;
	return c;
}
// RGB -> YUV via the supplied matrix; each component clamped to [0,1],
// alpha forced to 1.
__host__ __device__ float4 RGBA2yuv(float4 rgb, float4 *rgb2yuv)
{
	float4 t = mul(rgb, rgb2yuv);
	return make_float4(clamp(t.x, 0.0, 1.0),
	                   clamp(t.y, 0.0, 1.0),
	                   clamp(t.z, 0.0, 1.0),
	                   1.0);
}
// YUV -> RGB via the supplied matrix; each component clamped to [0,1],
// alpha forced to 1.
__host__ __device__ float4 yuv2rgb(float4 yuv, float4 *yuv2rgb)
{
	float4 t = mul(yuv, yuv2rgb);
	return make_float4(clamp(t.x, 0.0, 1.0),
	                   clamp(t.y, 0.0, 1.0),
	                   clamp(t.z, 0.0, 1.0),
	                   1.0);
}
// Converts planar 10-bit YUV 4:2:0 to 10-bit RGBA (4 shorts per pixel).
// `white` is applied to the (y,u,v,1) vector first, then `matrix` maps to
// RGB; negatives clamped. One thread per luma sample.
__global__ void yuv420p10torgbakernel(unsigned short *dst, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float4 *matrix,const float4 *white, int width,int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	if (index >= width * height)  // launch grid is rounded up; fix: guard the tail against OOB
		return;
	int xpos = index % width;
	int ypos = index / width;
	xpos = xpos / 2;
	ypos = ypos / 2;
	int uvindex = xpos + ypos * width / 2;  // chroma planes are (width/2) x (height/2)
	float y0 = y[index] / 1023.0;
	float u0 = u[uvindex] / 1023.0;
	float v0 = v[uvindex] / 1023.0;
	float4 rgba = mul(make_float4(y0, u0, v0, 1), white);
	rgba = fmaxf(mul(rgba, matrix), make_float4(0, 0, 0, 0));
	float r0 = clamp(rgba.x, 0.0, 1.0);
	float g0 = clamp(rgba.y, 0.0, 1.0);
	float b0 = clamp(rgba.z, 0.0, 1.0);
	dst[index * 4 + 0] = (unsigned short)(r0 * 1023);
	dst[index * 4 + 1] = (unsigned short)(g0 * 1023);
	dst[index * 4 + 2] = (unsigned short)(b0 * 1023);
	dst[index * 4 + 3] = 1 * 1023;  // opaque alpha
}
// Converts planar 10-bit YUV with 4:2:0 subsampling to 10-bit RGBA.
// One thread per luma sample; `matrix` is a row-major 3x4 YUV->RGB matrix.
// Assumes u/v planes are (width/2) x (height/2), mirroring
// yuv420p10torgbakernel — TODO confirm for the NV12-named caller.
__global__ void yuvnv12p10torgbakernel(unsigned short *rgba, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float *matrix, int width, int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	if (index >= width * height)  // launch grid is rounded up; guard the tail
		return;
	int xpos = index % width;
	int ypos = index / width;  // was index / (width*height): row was always 0
	xpos = xpos / 2;
	ypos = ypos / 2;
	int uvindex = xpos + ypos * width / 2;  // chroma row stride is width/2 (was height/2)
	float y0 = y[index] / 1023.0f;
	float u0 = u[uvindex] / 1023.0f;
	float v0 = v[uvindex] / 1023.0f;
	float r0 = matrix[0] * y0 + matrix[1] * u0 + matrix[2] * v0 + matrix[3];
	float g0 = matrix[4] * y0 + matrix[5] * u0 + matrix[6] * v0 + matrix[7];
	float b0 = matrix[8] * y0 + matrix[9] * u0 + matrix[10] * v0 + matrix[11];
	r0 = clamp(r0, 0.0, 1.0);
	g0 = clamp(g0, 0.0, 1.0);
	b0 = clamp(b0, 0.0, 1.0);
	rgba[index * 4 + 0] = r0 * 1023;
	rgba[index * 4 + 1] = g0 * 1023;
	rgba[index * 4 + 2] = b0 * 1023;  // was v0: raw chroma leaked into the blue channel
	rgba[index * 4 + 3] = 1023;       // opaque alpha (previously left uninitialized)
}
// Converts planar 10-bit YUV 4:4:4 (no chroma subsampling) to 10-bit RGBA.
// One thread per pixel; `matrix` is a row-major 3x4 YUV->RGB matrix.
__global__ void yuv444p10torgbkernel(unsigned short *rgba, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float *matrix, int width, int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	if (index >= width * height)  // launch grid is rounded up; guard the tail
		return;
	int uvindex = index;  // 4:4:4 — chroma planes are full resolution
	float y0 = y[index] / 1023.0f;
	float u0 = u[uvindex] / 1023.0f;
	float v0 = v[uvindex] / 1023.0f;
	float r0 = matrix[0] * y0 + matrix[1] * u0 + matrix[2] * v0 + matrix[3];
	float g0 = matrix[4] * y0 + matrix[5] * u0 + matrix[6] * v0 + matrix[7];
	float b0 = matrix[8] * y0 + matrix[9] * u0 + matrix[10] * v0 + matrix[11];
	r0 = clamp(r0, 0.0, 1.0);
	g0 = clamp(g0, 0.0, 1.0);
	b0 = clamp(b0, 0.0, 1.0);
	rgba[index * 4 + 0] = r0 * 1023;
	rgba[index * 4 + 1] = g0 * 1023;
	rgba[index * 4 + 2] = b0 * 1023;  // was v0: raw chroma leaked into the blue channel
	rgba[index * 4 + 3] = 1023;       // opaque alpha (previously left uninitialized)
}
// Converts planar 10-bit YUV 4:2:2 to 10-bit RGBA. Chroma is subsampled
// horizontally only, so each chroma row holds width/2 samples.
__global__ void yuv422p10torgbakernel(unsigned short *rgba, const unsigned short *y, const unsigned short *u, const unsigned short *v, const float *matrix, int width, int height)
{
	int index = threadIdx.x + blockIdx.x * blockperthread;
	if (index >= width * height)  // launch grid is rounded up; guard the tail
		return;
	int xpos = index % width;
	int ypos = index / width;
	xpos = xpos / 2;
	int uvindex = xpos + ypos * width / 2;  // was ypos * height: wrong chroma row stride
	float y0 = y[index] / 1023.0f;
	float u0 = u[uvindex] / 1023.0f;
	float v0 = v[uvindex] / 1023.0f;
	float r0 = matrix[0] * y0 + matrix[1] * u0 + matrix[2] * v0 + matrix[3];
	float g0 = matrix[4] * y0 + matrix[5] * u0 + matrix[6] * v0 + matrix[7];
	float b0 = matrix[8] * y0 + matrix[9] * u0 + matrix[10] * v0 + matrix[11];
	r0 = clamp(r0, 0.0, 1.0);
	g0 = clamp(g0, 0.0, 1.0);
	b0 = clamp(b0, 0.0, 1.0);
	rgba[index * 4 + 0] = r0 * 1023;
	rgba[index * 4 + 1] = g0 * 1023;
	rgba[index * 4 + 2] = b0 * 1023;  // was v0: raw chroma leaked into the blue channel
	rgba[index * 4 + 3] = 1023;       // opaque alpha (previously left uninitialized)
}
// Placeholder kernel: despite the name, this currently performs a plain
// elementwise add (float operand truncated on the int store), not an actual
// RGB -> YUV 4:2:0 conversion.
__global__ void rgbtoyuv420p10(int *c, const int *a, const float *b)
{
	int idx = blockIdx.x * blockperthread + threadIdx.x;
	c[idx] = a[idx] + b[idx];
}
// Placeholder kernel: despite the name, this currently performs a plain
// elementwise add (float operand truncated on the int store), not an actual
// RGB -> YUV 4:4:4 conversion.
__global__ void rgbtoyuv444p10(int *c, const int *a, const float *b)
{
	int idx = blockIdx.x * blockperthread + threadIdx.x;
	c[idx] = a[idx] + b[idx];
}
// Nearest-neighbor resize of a 10-bit RGBA surface (4 shorts per pixel).
// One thread per destination pixel; `scale` maps dst coords back to src.
__global__ void rgba10resize(unsigned short *dst, const unsigned short *src, int srcwidth,int srcheight,int dstwidth,int dstheight,int xstart,int ystart, float scale)
{
	int i = threadIdx.x + blockIdx.x * blockperthread;
	if (i >= dstwidth * dstheight)  // launch grid is rounded up; guard the tail
		return;
	int xpos = i % dstwidth;
	int ypos = i / dstwidth;
	float xposorg = xpos * scale;
	float yposorg = ypos * scale;
	int xx = xposorg;
	int yy = yposorg;
	if (xx >= srcwidth || yy >= srcheight)  // scale may overshoot the source
		return;
	// Was integer division by 1023, which truncated every channel to 0 or 1
	// before re-scaling; divide in float to keep the full 10-bit value.
	float r = src[yy * srcwidth * 4 + xx * 4 + 0] / 1023.0f;
	float g = src[yy * srcwidth * 4 + xx * 4 + 1] / 1023.0f;
	float b = src[yy * srcwidth * 4 + xx * 4 + 2] / 1023.0f;
	float a = src[yy * srcwidth * 4 + xx * 4 + 3] / 1023.0f;
	// NOTE(review): xstart/ystart are folded additively into the flat index,
	// which only positions the block correctly for xstart == ystart == 0 —
	// confirm intended placement semantics for nonzero offsets.
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 0] = r * 1023;
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 1] = g * 1023;
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 2] = b * 1023;
	dst[ystart * dstwidth * 4 + xstart * 4 + i * 4 + 3] = a * 1023;
}
// Resizes a 10-bit RGBA surface (nearest neighbor) while applying the full
// color-management chain (Render2RGBA) and converting to 10-bit YUV 4:2:0
// with an interleaved UV plane (NV12-style). One thread per destination pixel.
// dsty: Y plane (dstwidth*dstheight shorts); dstuv: interleaved UV plane.
// xstart/ystart are accepted but unused here.
__global__ void rgba10resize2YUV42010(unsigned short *dsty, unsigned short *dstuv, const unsigned short *src, int srcwidth, int srcheight,
int dstwidth, int dstheight, int xstart, int ystart, float scale, int transfer, int distransfer, int primary, int disprimary,
float LuminanceScale, float4 *TransPrimaries,float4 *rgb2yuv)
{
int i = threadIdx.x + blockIdx.x * blockperthread;
// Launch grid is rounded up to whole blocks; drop the tail threads.
if (i >= dstwidth * dstheight)
{
return;
}
int xpos = i % dstwidth;
int ypos = i / dstwidth;
// Nearest-neighbor source coordinates (truncating float -> int).
float xposorg = xpos * scale;
float yposorg = ypos * scale;
int xx = xposorg;
int yy = yposorg;
if (xx >= srcwidth || yy >= srcheight)
{
return;
}
float r = src[yy * srcwidth * 4 + xx * 4 + 0] / 1023.0;
float g = src[yy * srcwidth * 4 + xx * 4 + 1] / 1023.0;
float b = src[yy * srcwidth * 4 + xx * 4 + 2] / 1023.0;
float4 rgba = make_float4(r, g, b, 1);
// Transfer/primaries/tone-mapping chain, then RGB -> YUV.
rgba = Render2RGBA(rgba, transfer, distransfer, primary, disprimary, LuminanceScale, TransPrimaries);
float4 yuva = RGBA2yuv(rgba, rgb2yuv);
dsty[i] = clamp(yuva.x, 0.0, 1.0) * 1023;
if (1)
{
//float a = src[yy * srcwidth * 4 + xx * 4 + 3] / 1023.0;
// 4:2:0 chroma: only even (x, y) positions emit a UV pair. For even ypos,
// ypos * dstwidth / 2 == (ypos/2) * dstwidth, i.e. the interleaved UV row.
if (xpos % 2 == 0 && ypos % 2 == 0)
{
dstuv[ypos * dstwidth / 2 + xpos + 0] = clamp(yuva.y, 0.0, 1.0) * 1023;
dstuv[ypos * dstwidth / 2 + xpos + 1] = clamp(yuva.z, 0.0, 1.0) * 1023;
}
}
}
// Convert a 10-bit planar YUV 4:2:0 frame to 10-bit NV12 while resizing it
// from srcwidth x srcheight to dstwidth x dstheight on the GPU (device 0).
// Pipeline: upload planes + 4x4 colour matrices -> yuv420p10torgbakernel
// (YUV -> RGBA at source resolution) -> rgba10resize2YUV42010 (resample and
// convert RGBA -> NV12). NV12 receives dstwidth*dstheight luma samples
// followed by the interleaved UV plane (dstwidth*dstheight/2 samples).
// NOTE(review): errors are only reported on stderr; the caller gets no
// return status. cudaDeviceReset() at the end tears down the whole device
// context for this process -- confirm no other CUDA state must survive.
void yuv420p10tonv12p10resize(unsigned short *NV12, const unsigned short *y,
    const unsigned short *u, const unsigned short *v, const float *yuv2rgb, const float *white, float * TransferPirmary, const float *rgb2yuv, int srcwidth, int srcheight,
    int dstwidth, int dstheight, int transfer, int distranfer, int primary, int disprimary, float luama)
{
    // Bail out early if no CUDA device is available.
    int nGpu = 0;
    cudaError_t res = cudaGetDeviceCount(&nGpu);
    if (res != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return;
    }
    // Device properties are queried but not used afterwards.
    std::vector< cudaDeviceProp> vProp;
    for (int i = 0; i < nGpu; i++)
    {
        cudaDeviceProp deviceprop;
        res = cudaGetDeviceProperties(&deviceprop, i);
        if (res == cudaSuccess)
        {
            vProp.push_back(deviceprop);
        }
    }
    cudaError_t cudaStatus;
    // Device buffers; zero-initialised so cudaFree at Error: is always safe.
    unsigned short * ydevice = 0;
    unsigned short *udevice = 0;
    unsigned short *vdevice = 0;
    unsigned short *rgbdevice = 0;
    unsigned short *nv1210device = 0;
    float4 *yuv2rgbmatrixdevice = 0;
    float4 *whitedevice = 0;
    float4 *rgb2yuvmatrixdevice = 0;
    float4 *transferprimarymatrixdevice = 0;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Each colour matrix is 4 float4 rows (= 16 floats) on the device.
    cudaStatus = cudaMalloc((void**)&yuv2rgbmatrixdevice, 4 * sizeof(float4));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&transferprimarymatrixdevice, 4 * sizeof(float4));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&rgb2yuvmatrixdevice, 4 * sizeof(float4));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&whitedevice, 4 * sizeof(float4));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Intermediate RGBA buffer: 4 channels per source pixel.
    cudaStatus = cudaMalloc((void**)&rgbdevice, srcwidth *srcheight * 4 * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // NV12 output: Y plane + half-size interleaved UV plane = 3/2 samples/pixel.
    cudaStatus = cudaMalloc((void**)&nv1210device, dstwidth *dstheight * 3/2 * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&ydevice, srcwidth *srcheight * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // 4:2:0 chroma planes are quarter size.
    cudaStatus = cudaMalloc((void**)&udevice, srcwidth *srcheight / 4 * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&vdevice, srcwidth *srcheight / 4 * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Upload the 4x4 matrices (16 floats each) and the three source planes.
    cudaStatus = cudaMemcpy(yuv2rgbmatrixdevice, yuv2rgb, 16 * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(rgb2yuvmatrixdevice, rgb2yuv, 16 * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(transferprimarymatrixdevice, TransferPirmary, 16 * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(whitedevice, white, 16 * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(ydevice, y, srcwidth *srcheight * sizeof(unsigned short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(udevice, u, srcwidth *srcheight / 4 * sizeof(unsigned short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(vdevice, v, srcwidth *srcheight / 4 * sizeof(unsigned short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // First pass: one thread per source pixel, YUV -> RGBA.
    int nsize = srcwidth * srcheight;
    unsigned int grid = (nsize + blockperthread - 1) / blockperthread;
    dim3 ngrid = { grid };
    unsigned int perthread = blockperthread;
    dim3 nthread = { perthread };
    // Launch a kernel on the GPU with one thread for each element.
    yuv420p10torgbakernel << <grid, perthread >> > (rgbdevice, ydevice, udevice, vdevice, yuv2rgbmatrixdevice, whitedevice, srcwidth, srcheight);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Second pass: one thread per destination pixel, resample + RGBA -> NV12.
    nsize = dstwidth * dstheight;
    grid = (nsize + blockperthread - 1) / blockperthread;
    ngrid = { grid };
    // NV12 layout: Y plane first, then the interleaved UV plane.
    unsigned short *dsty = nv1210device;
    unsigned short *dstuv = nv1210device + dstwidth * dstheight;
    // NOTE(review): scale is derived from widths only -- assumes the source
    // and destination have the same aspect ratio; confirm with callers.
    float scale = 1.0 * srcwidth / dstwidth;
    rgba10resize2YUV42010 << <grid, perthread >> > (dsty, dstuv,rgbdevice, srcwidth,srcheight,dstwidth,dstheight,0,0,scale,transfer,distranfer,
    primary, disprimary,luama,transferprimarymatrixdevice, rgb2yuvmatrixdevice);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy the finished NV12 frame back to host memory.
    cudaStatus = cudaMemcpy(NV12, nv1210device, dstheight *dstwidth * 3 / 2* sizeof(unsigned short), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Release every device buffer (cudaFree(0) is a harmless no-op).
    cudaFree(rgbdevice);
    cudaFree(nv1210device);
    cudaFree(ydevice);
    cudaFree(udevice);
    cudaFree(vdevice);
    cudaFree(yuv2rgbmatrixdevice);
    cudaFree(rgb2yuvmatrixdevice);
    cudaFree(whitedevice);
    cudaFree(transferprimarymatrixdevice);
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return;
    }
}
// Convert a 10-bit planar YUV 4:2:0 frame (width x height) to interleaved
// 10-bit RGBA on the GPU. matrix and white are 4x4 float matrices (uploaded
// as 16 floats each); the output buffer rgba receives width*height*4
// unsigned shorts.
// NOTE(review): errors are only reported on stderr; the caller gets no
// return status. cudaDeviceReset() at the end tears down the whole device
// context -- confirm callers do not hold other CUDA state.
void yuv420p10torgba(unsigned short *rgba, const unsigned short *y,
    const unsigned short *u, const unsigned short *v, const float *matrix, const float *white,int width, int height)
{
    // Bail out early if no CUDA device is available.
    int nGpu = 0;
    cudaError_t res = cudaGetDeviceCount(&nGpu);
    if (res != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return ;
    }
    // Device properties are queried but not used afterwards.
    std::vector< cudaDeviceProp> vProp;
    for (int i = 0; i < nGpu; i++)
    {
        cudaDeviceProp deviceprop;
        res = cudaGetDeviceProperties(&deviceprop, i);
        if (res == cudaSuccess)
        {
            vProp.push_back(deviceprop);
        }
    }
    cudaError_t cudaStatus;
    // Device buffers; zero-initialised so cudaFree at Error: is always safe.
    unsigned short * ydevice = 0;
    unsigned short *udevice = 0;
    unsigned short *vdevice = 0;
    unsigned short *rgbdevice = 0;
    float4 *matrixdevice = 0;
    float4 *whitedevice = 0;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Each colour matrix is 4 float4 rows (= 16 floats) on the device.
    cudaStatus = cudaMalloc((void**)&matrixdevice,4 * sizeof(float4));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&whitedevice, 4 * sizeof(float4));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // RGBA output buffer: 4 channels per pixel.
    cudaStatus = cudaMalloc((void**)&rgbdevice, width *height *4* sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&ydevice, width *height * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // 4:2:0 chroma planes are quarter size.
    cudaStatus = cudaMalloc((void**)&udevice, width *height/4 * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&vdevice, width *height / 4 * sizeof(unsigned short));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Upload the matrices and the three source planes.
    cudaStatus = cudaMemcpy(matrixdevice, matrix, 16 * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(whitedevice, white, 16 * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(ydevice, y, width *height * sizeof(unsigned short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(udevice, u, width *height / 4 * sizeof(unsigned short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(vdevice, v, width *height / 4 * sizeof(unsigned short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // One thread per pixel, grid rounded up to cover all of them.
    int nsize = width * height;
    unsigned int grid = (nsize + blockperthread - 1) / blockperthread;
    dim3 ngrid = { grid};
    unsigned int perthread = blockperthread;
    dim3 nthread = { perthread };
    // Launch a kernel on the GPU with one thread for each element.
    yuv420p10torgbakernel << <grid, perthread >> > (rgbdevice,ydevice,udevice,vdevice, matrixdevice,whitedevice, width, height);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy the finished RGBA frame back to host memory.
    cudaStatus = cudaMemcpy(rgba, rgbdevice, width *height * 4 * sizeof(unsigned short), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Release every device buffer (cudaFree(0) is a harmless no-op).
    cudaFree(rgbdevice);
    cudaFree(ydevice);
    cudaFree(udevice);
    cudaFree(vdevice);
    cudaFree(matrixdevice);
    cudaFree(whitedevice);
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return ;
    }
}
#ifdef EXE
const long arraySize = 3840 * 2160;
// Smoke test: fill two arraySize-element vectors, add them on the GPU via
// addWithCuda, and verify c[i] == a[i] + b[i] on the host.
// Returns 0 on success, 1 on any CUDA failure.
int main()
{
    int nGpu = 0;
    cudaError_t res = cudaGetDeviceCount(&nGpu);
    if (res != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    // Device properties are enumerated for parity with the library entry
    // points; the collected values are not used afterwards.
    std::vector< cudaDeviceProp> vProp;
    for (int i = 0; i < nGpu; i++)
    {
        cudaDeviceProp deviceprop;
        res = cudaGetDeviceProperties(&deviceprop, i);
        if (res == cudaSuccess)
        {
            vProp.push_back(deviceprop);
        }
    }
    // std::vector instead of bare new[] so the buffers are released on every
    // return path (the original leaked a, b and c).
    std::vector<int> a(arraySize, 0);
    std::vector<int> b(arraySize, 1);
    std::vector<int> c(arraySize, 0);
    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c.data(), a.data(), b.data(), arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    // Host-side verification of the GPU result.
    for (int i = 0; i < arraySize; i++)
    {
        if (c[i] != a[i] + b[i])
        {
            fprintf(stderr, "addWithCuda error----id is:%d!", i);
        }
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Computes c[i] = a[i] + b[i] for i in [0, size) on device 0.
// Returns the first CUDA error encountered (cudaSuccess on success);
// device buffers are always released before returning.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch configuration. The original sized the grid from the global
    // arraySize constant and launched 512-thread blocks while dividing by
    // blockperthread -- wrong whenever size != arraySize or
    // blockperthread != 512. Derive both from the actual arguments instead.
    // (The braces also keep the earlier gotos from jumping over initialized
    // declarations, which is ill-formed C++.)
    // NOTE(review): assumes addKernel either bounds-checks its index or
    // tolerates the rounded-up tail -- confirm against addKernel's body.
    {
        unsigned int threadsPerBlock = blockperthread;
        dim3 ngrid((size + threadsPerBlock - 1) / threadsPerBlock, 1, 1);
        dim3 nthread(threadsPerBlock, 1, 1);
        // Launch a kernel on the GPU with one thread for each element.
        addKernel<< <ngrid, nthread >> >(dev_c, dev_a, dev_b);
    }
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // cudaFree(0) is a no-op, so freeing unallocated buffers is safe.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
#endif |
c049677addf8eda8186edf17f796e00c74a51ce5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zcompact.cu normal z -> s, Fri Sep 11 18:29:42 2015
@author Stan Tomov
*/
#include "common_magmasparse.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
/*
 * One thread per row of the m x n matrix dA (column-major, leading dim ldda).
 * Columns whose norm exceeds tol AND are still flagged active are packed to
 * the left in order; global thread 0 additionally clears active[] for dropped
 * columns and publishes the surviving column count to *cBlock.
 * The write to active[j] races with other threads' reads, but is benign:
 * it only happens when dnorms[j] <= tol or active[j] is already 0, and in
 * both cases every reader reaches the same keep/drop decision.
 */
__global__ void
scompact_kernel(
    int m, int n,
    float *dA,
    int ldda,
    float *dnorms,
    float tol,
    magma_int_t *active,
    magma_int_t *cBlock)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    int kept = 0;
    if (row < m) {
        float *rowPtr = dA + row;
        for (int j = 0; j < n; ++j) {
            if (dnorms[j] > tol && active[j]) {
                // Column j survives: move it into the next free slot.
                rowPtr[ldda * kept] = rowPtr[ldda * j];
                ++kept;
            }
            else if (row == 0) {
                active[j] = 0;
            }
        }
    }
    // Global thread 0 publishes the compacted column count.
    if (row == 0)
        *cBlock = kept;
}
/*
 * One thread per row of the m x n matrix dA (column-major, leading dim ldda):
 * pack the columns flagged in active[] to the left, preserving their order.
 * active[] is read-only here; dropped columns are simply overwritten.
 */
__global__ void
scompactactive_kernel(
    int m,
    int n,
    float *dA,
    int ldda,
    magma_int_t *active)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m)
        return;
    float *rowPtr = dA + row;
    int dst = 0;
    for (int j = 0; j < n; ++j) {
        if (active[j]) {
            rowPtr[ldda * dst] = rowPtr[ldda * j];
            ++dst;
        }
    }
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms REAL array, dimension N
The norms of the N vectors in dA
@param[in]
tol DOUBLE PRECISION
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_scompact(
    magma_int_t m,
    magma_int_t n,
    magmaFloat_ptr dA,
    magma_int_t ldda,
    magmaFloat_ptr dnorms,
    float tol,
    magmaInt_ptr active,
    magmaInt_ptr cBlock,
    magma_queue_t queue )
{
    // Validate arguments, MAGMA style: info holds the negated index of the
    // first offending argument.
    magma_int_t info = 0;
    if      ( m < 0 )                info = -1;
    else if ( n < 0 )                info = -2;
    else if ( ldda < max(1,m) )      info = -4;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;
    }
    // Empty matrix: nothing to compact.
    if ( m == 0 || n == 0 )
        return info;
    // One thread per row, NB threads per block.
    const dim3 threads( NB );
    const dim3 grid( magma_ceildiv( m, NB ) );
    // active+n is used as device scratch for the resulting column count,
    // which is then copied back into the host/output location cBlock.
    hipLaunchKernelGGL(( scompact_kernel), dim3(grid), dim3(threads), 0, queue ,
        m, n, dA, ldda, dnorms, tol, active, active+n );
    magma_igetvector( 1, active+n, 1, cBlock, 1 );
    return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_scompactActive(
    magma_int_t m,
    magma_int_t n,
    magmaFloat_ptr dA,
    magma_int_t ldda,
    magmaInt_ptr active,
    magma_queue_t queue )
{
    // Validate arguments, MAGMA style: info holds the negated index of the
    // first offending argument.
    magma_int_t info = 0;
    if      ( m < 0 )                info = -1;
    else if ( n < 0 )                info = -2;
    else if ( ldda < max(1,m) )      info = -4;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;
    }
    // Empty matrix: nothing to compact.
    if ( m == 0 || n == 0 )
        return info;
    // One thread per row, NB threads per block.
    const dim3 threads( NB );
    const dim3 grid( magma_ceildiv( m, NB ) );
    hipLaunchKernelGGL(( scompactactive_kernel), dim3(grid), dim3(threads), 0, queue ,
        m, n, dA, ldda, active);
    return info;
}
/* ===================================================================== */
| c049677addf8eda8186edf17f796e00c74a51ce5.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zcompact.cu normal z -> s, Fri Sep 11 18:29:42 2015
@author Stan Tomov
*/
#include "common_magmasparse.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
/*
 * One thread per row of the m x n matrix dA (column-major, leading dim ldda).
 * Columns whose norm exceeds tol AND are still flagged active are packed to
 * the left in order; global thread 0 also clears active[] for dropped columns
 * and publishes the surviving column count to *cBlock.
 * NOTE(review): thread 0 writes active[j]=0 while other threads may read it,
 * but the race is benign -- the short-circuit on dnorms[j] > tol yields the
 * same keep/drop decision either way.
 */
__global__ void
scompact_kernel(
    int m, int n,
    float *dA,
    int ldda,
    float *dnorms,
    float tol,
    magma_int_t *active,
    magma_int_t *cBlock)
{
    // dA is processed across row i (by the current thread)
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int cBlockSize = 0;    // columns kept so far = compacted width
    if ( i < m ) {
        dA += i;    // point at this thread's row
        for(int j = 0; j<n; j++){
            if (dnorms[j] > tol && active[j]){
                // column j survives: copy it into the next free slot
                dA[ldda*cBlockSize] = dA[ldda*j];
                cBlockSize++;
            }
            else if (i==0)
                active[j] = 0;
        }
    }
    // global thread 0 publishes the compacted column count
    if (i==0)
        *cBlock = cBlockSize;
}
/*
 * One thread per row of the m x n matrix dA (column-major, leading dim ldda):
 * pack the columns flagged in active[] to the left, preserving their order.
 * active[] is read-only here; dropped columns are simply overwritten.
 */
__global__ void
scompactactive_kernel(
    int m,
    int n,
    float *dA,
    int ldda,
    magma_int_t *active)
{
    // dA is processed across row i (by the current thread)
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int cBlockSize = 0;    // next free (compacted) column slot
    if ( i < m ) {
        dA += i;    // point at this thread's row
        for(int j = 0; j<n; j++){
            if (active[j]){
                dA[ldda*cBlockSize] = dA[ldda*j];
                cBlockSize++;
            }
        }
    }
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms REAL array, dimension N
The norms of the N vectors in dA
@param[in]
tol DOUBLE PRECISION
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host wrapper: validates arguments, then launches scompact_kernel with one
// thread per row (NB threads per block). active+n is used as device scratch
// for the surviving column count, which is copied back into cBlock.
extern "C" magma_int_t
magma_scompact(
    magma_int_t m,
    magma_int_t n,
    magmaFloat_ptr dA,
    magma_int_t ldda,
    magmaFloat_ptr dnorms,
    float tol,
    magmaInt_ptr active,
    magmaInt_ptr cBlock,
    magma_queue_t queue )
{
    // MAGMA-style argument check: info = negated index of the bad argument.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -4;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;
    }
    // Empty matrix: nothing to compact.
    if ( m == 0 || n == 0 )
        return info;
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    scompact_kernel<<< grid, threads, 0, queue >>>(
            m, n, dA, ldda, dnorms, tol, active, active+n );
    // Fetch the compacted column count computed by the kernel.
    magma_igetvector( 1, active+n, 1, cBlock, 1 );
    return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
// Host wrapper: validates arguments, then launches scompactactive_kernel with
// one thread per row (NB threads per block) to pack the columns flagged in
// active[] to the left.
extern "C" magma_int_t
magma_scompactActive(
    magma_int_t m,
    magma_int_t n,
    magmaFloat_ptr dA,
    magma_int_t ldda,
    magmaInt_ptr active,
    magma_queue_t queue )
{
    // MAGMA-style argument check: info = negated index of the bad argument.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -4;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;
    }
    // Empty matrix: nothing to compact.
    if ( m == 0 || n == 0 )
        return info;
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    scompactactive_kernel<<< grid, threads, 0, queue >>>(
        m, n, dA, ldda, active);
    return info;
}
/* ===================================================================== */
|
8d6d76e0b84dfc11fa64f6fb8cd200522d8ebdd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "matvec.h"
#include "constants.h"
#include "struct.h"
// One thread per rigid node. For node k, reads the cumulative control angle
// d_ctlCumMat[k], the initial offset vector (via getVector from d_difIniMat)
// and the lift vector (from d_lftMat), and writes three values into the
// (3 x rgdNdeNum) output d_duRVMat: the angle derivative term in row 0 and
// the lift vector's x/y components in rows 1 and 2.
__global__ void duRigidVelocityKernel(double *d_duRVMat, double *d_difIniMat,
                                      double *d_ctlCumMat, double *d_lftMat, int rgdNdeNum)
{
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= rgdNdeNum)
        return;
    const double ang = d_ctlCumMat[node];
    const double c = cos(ang);
    const double s = sin(ang);
    vector dif, lft;
    getVector(dif, d_difIniMat, node, rgdNdeNum);
    getVector(lft, d_lftMat, node, rgdNdeNum);
    // Derivative of the rotated offset with respect to the cumulative angle,
    // dotted with the lift vector.
    const double duAng = lft.x * (-s * dif.x - c * dif.y)
                       + lft.y * ( c * dif.x - s * dif.y);
    d_duRVMat[node] = duAng;
    d_duRVMat[rgdNdeNum + node] = lft.x;
    d_duRVMat[2 * rgdNdeNum + node] = lft.y;
}
// Host launcher: one thread per rigid node, BLKDIM threads per block.
// Note (n - 1) / BLKDIM + 1 matches the original's block count, including
// its behaviour of launching one block when rgdNdeNum is 0.
void duRigidVelocity(double *d_duRVMat, double *d_ctlCumMat, double *d_lftMat, fcndata &fcnObj)
{
    const int nodeCount = fcnObj.prm.rgdNdeNum;
    const int blockCount = (nodeCount - 1) / BLKDIM + 1;
    hipLaunchKernelGGL(( duRigidVelocityKernel) , dim3(blockCount), dim3(BLKDIM), 0, 0, d_duRVMat, fcnObj.prm.d_difIniMat,
        d_ctlCumMat, d_lftMat, nodeCount);
}
| 8d6d76e0b84dfc11fa64f6fb8cd200522d8ebdd5.cu | #include <cmath>
#include "matvec.h"
#include "constants.h"
#include "struct.h"
// One thread per rigid node. For node k, reads the cumulative control angle
// d_ctlCumMat[k], the initial offset vector (via getVector from d_difIniMat)
// and the lift vector (from d_lftMat), and writes three values into the
// (3 x rgdNdeNum) output d_duRVMat: the angle derivative term in row 0 and
// the lift vector's x/y components in rows 1 and 2.
__global__ void duRigidVelocityKernel(double *d_duRVMat, double *d_difIniMat,
                                      double *d_ctlCumMat, double *d_lftMat, int rgdNdeNum)
{
    int rgdNdeIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( rgdNdeIdx < rgdNdeNum )
    {
        double angCumVal = d_ctlCumMat[rgdNdeIdx];
        double cosVal = cos(angCumVal);
        double sinVal = sin(angCumVal);
        vector difIniVec, lftVec;
        getVector(difIniVec, d_difIniMat, rgdNdeIdx, rgdNdeNum);
        getVector(lftVec, d_lftMat, rgdNdeIdx, rgdNdeNum);
        // Derivative of the rotated offset w.r.t. the cumulative angle,
        // dotted with the lift vector.
        double duAngVal = lftVec.x * (-sinVal * difIniVec.x - cosVal * difIniVec.y)
                        + lftVec.y * ( cosVal * difIniVec.x - sinVal * difIniVec.y);
        d_duRVMat[ rgdNdeIdx] = duAngVal;
        d_duRVMat[ rgdNdeNum + rgdNdeIdx] = lftVec.x;
        d_duRVMat[2 * rgdNdeNum + rgdNdeIdx] = lftVec.y;
    }
    return;
}
// Host launcher: one thread per rigid node, BLKDIM threads per block;
// (n - 1) / BLKDIM + 1 rounds the block count up.
void duRigidVelocity(double *d_duRVMat, double *d_ctlCumMat, double *d_lftMat, fcndata &fcnObj)
{
    int rgdNdeNum = fcnObj.prm.rgdNdeNum;
    int blkNum = (rgdNdeNum - 1) / BLKDIM + 1;
    duRigidVelocityKernel <<<blkNum, BLKDIM>>> (d_duRVMat, fcnObj.prm.d_difIniMat,
                                                d_ctlCumMat, d_lftMat, rgdNdeNum);
    return;
}
|
1b509e04bd42ed95f7fa92f3db8fd0393ae1f3bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WARPSIZE 32
// One thread per tile of WARPSIZE atoms. For each of the D dimensions the
// thread scans its tile's (up to WARPSIZE) atoms in coords (N x D, row-major
// per atom) and writes the tile's bounding-box center to block_bounds_ctr and
// its extent (max - min) to block_bounds_ext, both T x D.
// NOTE(review): the +/-9999999 sentinels assume coordinates stay within that
// range -- confirm against the simulation's coordinate units.
void __global__ k_find_block_bounds(
    const int N,
    const int D,
    const int T,
    const double *coords,
    double *block_bounds_ctr,
    double *block_bounds_ext) {

    const int tile_idx = blockDim.x*blockIdx.x + threadIdx.x;
    if(tile_idx >= T) {
        return;
    }

    const int base = tile_idx * WARPSIZE;
    for(int d = 0; d < D; d++) {
        double lo = 9999999;
        double hi = -9999999;
        for(int i = 0; i < WARPSIZE; i++) {
            const int atom = base + i;
            if(atom < N) {   // last tile may be partially filled
                const double c = coords[atom*D + d];
                if(c < lo) lo = c;
                if(c > hi) hi = c;
            }
        }
        block_bounds_ctr[tile_idx*D + d] = (hi + lo)/2.0;
        block_bounds_ext[tile_idx*D + d] = hi - lo;
    }
}
| 1b509e04bd42ed95f7fa92f3db8fd0393ae1f3bc.cu |
#define WARPSIZE 32
// One thread per tile of WARPSIZE atoms. For each of the D dimensions the
// thread scans its tile's (up to WARPSIZE) atoms in coords (N x D, row-major
// per atom) and writes the tile's bounding-box center to block_bounds_ctr and
// its extent (max - min) to block_bounds_ext, both T x D.
// NOTE(review): the +/-9999999 sentinels assume coordinates stay within that
// range -- confirm against the simulation's coordinate units.
void __global__ k_find_block_bounds(
    const int N,
    const int D,
    const int T,
    const double *coords,
    double *block_bounds_ctr,
    double *block_bounds_ext) {

    const int tile_idx = blockDim.x*blockIdx.x + threadIdx.x;
    if(tile_idx >= T) {
        return;
    }

    for(int d=0; d < D; d++) {
        double ci_min = 9999999;
        double ci_max = -9999999;
        for(int i=0; i < WARPSIZE; i++) {
            int atom_i_idx = tile_idx*WARPSIZE + i;
            if(atom_i_idx < N) {   // last tile may be partially filled
                double ci = coords[atom_i_idx*D + d];
                ci_min = ci < ci_min ? ci : ci_min;
                ci_max = ci > ci_max ? ci : ci_max;
            }
        }
        block_bounds_ctr[tile_idx*D+d] = (ci_max + ci_min)/2.0;
        block_bounds_ext[tile_idx*D+d] = ci_max - ci_min;
    }
}
|
40156da68f70901939b665ec87be0281c1ac7504.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}[double or complex double]
* Create row, column, value vectors from sparse/dense matrix [row, column, value]=CuMatlab_find(Sparse/Dense(X))
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
// Input Arguments
#define INPUTMATRIX prhs[0]
// Output Arguments
#define ROW_SORT plhs[0]
#define COL_SORT plhs[1]
#define VAL_SORT plhs[2]
extern "C" static void mexCuMatlab_findZ(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be one.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be three.";
if ((nrhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=3)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(INPUTMATRIX);
if ((mxIsChar(INPUTMATRIX))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
if (mxIsGPUArray(INPUTMATRIX)) {
mxGPUArray const *INPUTMATRIXGPU;
int numARows, numAColumns;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
INPUTMATRIXGPU = mxGPUCreateFromMxArray(INPUTMATRIX);
if(mxGPUIsSparse(INPUTMATRIXGPU)==1) {
//if (mxGPUGetClassID(INPUTMATRIXGPU) != mxDOUBLE_CLASS ) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST ARGUMENT) must be double precision.");
// }
const mwSize *dimsGPU;
dimsGPU=mxGPUGetDimensions(INPUTMATRIXGPU);
numARows = (int)dimsGPU[0]; /* gets number of rows of A */
numAColumns = (int)dimsGPU[1]; /* gets number of columns of A */
mwIndex nnz1;
mxArray * tempx = mxGPUCreateMxArrayOnCPU(INPUTMATRIXGPU);
nnz1 = *(mxGetJc(tempx) + numAColumns);
//nnz1=(mwSize)ceil(numARows*numAColumns);
int nnz= (int)nnz1;
mxArray *row_sort =mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
double *pointerrow = (double *)mxGetDoubles(row_sort);
Ir_DataGetSetDXY(tempx , pointerrow, nnz);
mxArray *col_sort =mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
double *pointercol = (double *)mxGetDoubles(col_sort);
Jc_GetSetDXY(tempx , pointercol);
hipDoubleComplex *pointerval = (hipDoubleComplex *)mxGetComplexDoubles(tempx);
size_t pivot_dimensionsrow[1] = {nnz};
size_t pivot_dimensionscolumn[1] = {nnz};
size_t pivot_dimensionsvalue[1] = {nnz};
mxGPUArray *row_sortC = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *xrow_sortC=(double*)mxGPUGetData(row_sortC);
gpuErrchk(hipMemcpy(xrow_sortC, pointerrow, nnz * sizeof(*xrow_sortC), hipMemcpyHostToDevice));
mxGPUArray *col_sortC = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *xcol_sortC=(double*)mxGPUGetData(col_sortC);
gpuErrchk(hipMemcpy(xcol_sortC, pointercol, nnz * sizeof(*xcol_sortC), hipMemcpyHostToDevice));
mxGPUArray *val_sortC = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *xval_sortC=(hipDoubleComplex *)mxGPUGetData(val_sortC);
gpuErrchk(hipMemcpy(xval_sortC, pointerval, nnz * sizeof(*xval_sortC), hipMemcpyHostToDevice));
ROW_SORT = mxGPUCreateMxArrayOnGPU(row_sortC);
COL_SORT = mxGPUCreateMxArrayOnGPU(col_sortC);
VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sortC);
mxGPUDestroyGPUArray(row_sortC);
mxGPUDestroyGPUArray(col_sortC);
mxGPUDestroyGPUArray(val_sortC);
mxGPUDestroyGPUArray(INPUTMATRIXGPU);
mxDestroyArray(tempx);
mxDestroyArray(row_sort);
mxDestroyArray(col_sort);
}
else{
const mwSize *dimsA;
dimsA=mxGPUGetDimensions(INPUTMATRIXGPU);
numARows = (int)dimsA[0]; /* gets number of rows of A */
numAColumns = (int)dimsA[1]; /* gets number of columns of A */
//if (mxGPUGetClassID(INPUTMATRIXGPU) != mxDOUBLE_CLASS) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST ARGUMENT) must be double precision.");
// }
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
hipDoubleComplex *d_A_dense;
d_A_dense = (hipDoubleComplex *)(mxGPUGetDataReadOnly(INPUTMATRIXGPU));
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
cusparseSafeCall(hipsparseZnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
size_t pivot_rowindi[1] = {numARows + 1};
mxGPUArray *RowIndi = mxGPUCreateGPUArray(1, (mwSize*) pivot_rowindi, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int*)mxGPUGetData(RowIndi);
size_t pivot_dimensionsrow[1] = {nnzA};
size_t pivot_dimensionscolumn[1] = {nnzA};
size_t pivot_dimensionsvalue[1] = {nnzA};
mxGPUArray *row_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
mxGPUArray *col_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
mxGPUArray *val_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
int *d_col_sort = (int*)mxGPUGetData(col_sort);
int *d_row_sort = (int*)mxGPUGetData(row_sort);
hipDoubleComplex *d_value_sort = (hipDoubleComplex*)mxGPUGetData(val_sort);
cusparseSafeCall(hipsparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_value_sort, d_A_RowIndices, d_col_sort));
//gpuErrchk(hipFree(d_nnzPerVectorA));
cusparseSafeCall(hipsparseXcsr2coo(handle,
d_A_RowIndices,
nnzA,
numARows,
d_row_sort,
HIPSPARSE_INDEX_BASE_ONE));
//gpuErrchk(hipFree(d_A_RowIndices));
// Sort by rows
int *P = NULL;
void *pBuffer = NULL;
size_t pBufferSizeInBytes = 0;
hipsparseXcoosort_bufferSizeExt(handle, numARows, numAColumns,
nnzA,
d_row_sort,
d_col_sort, &pBufferSizeInBytes);
gpuErrchk(hipMalloc(&pBuffer, sizeof(char)*pBufferSizeInBytes));
gpuErrchk(hipMalloc(&P, sizeof(int)*nnzA));
hipsparseCreateIdentityPermutation(handle, nnzA, P);
cusparseSafeCall(hipsparseXcoosortByColumn(handle, numAColumns, numAColumns,
nnzA,
d_row_sort,
d_col_sort,
P,
pBuffer));
cusparseSafeCall(hipsparseZgthr(handle, nnzA, d_value_sort, d_value_sort, P, HIPSPARSE_INDEX_BASE_ZERO));
ROW_SORT = mxGPUCreateMxArrayOnGPU(row_sort);
COL_SORT = mxGPUCreateMxArrayOnGPU(col_sort);
VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sort);
gpuErrchk(hipFree(pBuffer));
gpuErrchk(hipFree(P));
mxGPUDestroyGPUArray(row_sort);
mxGPUDestroyGPUArray(col_sort);
mxGPUDestroyGPUArray(val_sort);
mxGPUDestroyGPUArray(PerVect);
mxGPUDestroyGPUArray(RowIndi);
mxGPUDestroyGPUArray(INPUTMATRIXGPU);
hipsparseDestroyMatDescr(descrA);
hipsparseDestroy(handle);
} // else
}
else if (!(mxIsGPUArray(INPUTMATRIX))){
// if (mxGetClassID(INPUTMATRIX) != mxDOUBLE_CLASS ) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST ARGUMENT) must be double precision.");
// }
if(mxIsSparse(INPUTMATRIX)) {
const mwSize *dimsCPU;
dimsCPU=mxGetDimensions(INPUTMATRIX);
// int numARows = (int)dimsCPU[0]; /* gets number of rows of A */
int numAColumns = (int)dimsCPU[1]; /* gets number of columns of A */
mwIndex nnz1;
nnz1 = *(mxGetJc(INPUTMATRIX) + numAColumns);
int nnz= (int)nnz1;
//const mwSize ndim = 1;
// mwSize dims[ndim];
// dims[0] = nnz;
ROW_SORT = mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
double *ROWSORT = (double *)mxGetDoubles(ROW_SORT);
Ir_DataDX(INPUTMATRIX, ROWSORT );
COL_SORT = mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
double *COLSORT = (double *)mxGetDoubles(COL_SORT);
Jc_SetDX(INPUTMATRIX, COLSORT);
VAL_SORT = mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxCOMPLEX);
//double *VALSORT = (double *)mxGetPr(VAL_SORT);
//VALSORT =(double *)mxGetPr(INPUTMATRIX);
// VALSORT=0;
//ROWSORT =static_cast<double *> (mxMalloc (nnz * sizeof(double)));
mxComplexDouble * VALSORT= (mxComplexDouble *)mxGetComplexDoubles(VAL_SORT);
VALSORT= (mxComplexDouble *)mxGetComplexDoubles(INPUTMATRIX);
}
else{
int numARows, numAColumns;
numARows = (int)mxGetM(INPUTMATRIX);
numAColumns = (int)mxGetN(INPUTMATRIX);
/* Initialize the MathWorks GPU API. */
mxInitGPU();
hipDoubleComplex *h_A_dense1 ; // The A matrix
h_A_dense1 = (hipDoubleComplex *)mxGetComplexDoubles(INPUTMATRIX);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
//hipDoubleComplex *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns};
mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A_dense = (hipDoubleComplex *)mxGPUGetData(OUTMA);
gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice));
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
cusparseSafeCall(hipsparseZnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
size_t pivot_rowindi[1] = {numARows + 1};
mxGPUArray *RowIndi = mxGPUCreateGPUArray(1, (mwSize*) pivot_rowindi, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int*)mxGPUGetData(RowIndi);
size_t pivot_dimensionsrow[1] = {nnzA};
size_t pivot_dimensionscolumn[1] = {nnzA};
size_t pivot_dimensionsvalue[1] = {nnzA};
mxGPUArray *row_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
mxGPUArray *col_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
mxGPUArray *val_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
int *d_col_sort = (int*)mxGPUGetData(col_sort);
int *d_row_sort = (int*)mxGPUGetData(row_sort);
hipDoubleComplex *d_value_sort = (hipDoubleComplex*)mxGPUGetData(val_sort);
cusparseSafeCall(hipsparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_value_sort, d_A_RowIndices, d_col_sort));
//gpuErrchk(hipFree(d_A_dense));
mxGPUDestroyGPUArray(OUTMA);
cusparseSafeCall(hipsparseXcsr2coo(handle,
d_A_RowIndices,
nnzA,
numARows,
d_row_sort,
HIPSPARSE_INDEX_BASE_ONE));
//gpuErrchk(hipFree(d_A_RowIndices));
// Sort by rows
int *P = NULL;
void *pBuffer = NULL;
size_t pBufferSizeInBytes = 0;
hipsparseXcoosort_bufferSizeExt(handle, numARows, numAColumns,
nnzA,
d_row_sort,
d_col_sort, &pBufferSizeInBytes);
gpuErrchk(hipMalloc(&pBuffer, sizeof(char)*pBufferSizeInBytes));
gpuErrchk(hipMalloc(&P, sizeof(int)*nnzA));
hipsparseCreateIdentityPermutation(handle, nnzA, P);
cusparseSafeCall(hipsparseXcoosortByColumn(handle, numAColumns, numAColumns,
nnzA,
d_row_sort,
d_col_sort,
P,
pBuffer));
cusparseSafeCall(hipsparseZgthr(handle, nnzA, d_value_sort, d_value_sort, P, HIPSPARSE_INDEX_BASE_ZERO));
ROW_SORT = mxGPUCreateMxArrayOnGPU(row_sort);
COL_SORT = mxGPUCreateMxArrayOnGPU(col_sort);
VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sort);
gpuErrchk(hipFree(pBuffer));
gpuErrchk(hipFree(P));
mxGPUDestroyGPUArray(row_sort);
mxGPUDestroyGPUArray(col_sort);
mxGPUDestroyGPUArray(val_sort);
mxGPUDestroyGPUArray(PerVect);
mxGPUDestroyGPUArray(RowIndi);
// mxGPUDestroyGPUArray(INPUTMATRIXGPU);
hipsparseDestroyMatDescr(descrA);
hipsparseDestroy(handle);
}
}
//
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
| 40156da68f70901939b665ec87be0281c1ac7504.cu |
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}[double or complex double]
* Create row, column, value vectors from sparse/dense matrix [row, column, value]=CuMatlab_find(Sparse/Dense(X))
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cstring>
#include <cusparse_v2.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
// Input Arguments
#define INPUTMATRIX prhs[0]
// Output Arguments
#define ROW_SORT plhs[0]
#define COL_SORT plhs[1]
#define VAL_SORT plhs[2]
/*
 * mexCuMatlab_findZ: implements [row, col, val] = CuMatlab_find(X) for a
 * complex double matrix X. Dispatches on the kind of input:
 *   - gpuArray sparse : gather to host CSC, expand Ir/Jc to COO on the host,
 *                       then upload row/col/val back as gpuArrays.
 *   - gpuArray dense  : cuSPARSE dense -> CSR -> COO conversion on the device.
 *   - host sparse     : expand Ir/Jc to COO directly on the host (CPU outputs).
 *   - host dense      : upload to device, then dense -> CSR -> COO as above.
 * Outputs (plhs): ROW_SORT, COL_SORT, VAL_SORT (see the #defines above).
 * Indices use 1-based (MATLAB) numbering in the cuSPARSE paths.
 */
extern "C" static void mexCuMatlab_findZ(int nlhs, mxArray *plhs[],
                 int nrhs, mxArray const *prhs[])
{
    // Bail out early (without raising a MATLAB error) when no CUDA device exists.
    int nDevices;
    cudaError_t errCode = cudaGetDeviceCount(&nDevices);
    if (errCode != cudaSuccess){
        printf("Error! No CUDA devices found! \n");
        return;
    }
    char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be one.";
    char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be three.";
    if ((nrhs!=1)) {
        mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
    }
    if ((nlhs!=3)) {
        mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
    }
    // mxArrayToString returns non-NULL only for char input; it is used solely
    // to echo the offending string in the error message below.
    char *input_buf0;
    input_buf0 = mxArrayToString(INPUTMATRIX);
    if ((mxIsChar(INPUTMATRIX))){
        mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
            "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
    }
    if (mxIsGPUArray(INPUTMATRIX)) {
        mxGPUArray const *INPUTMATRIXGPU;
        int numARows, numAColumns;
        /* Initialize the MathWorks GPU API. */
        mxInitGPU();
        INPUTMATRIXGPU = mxGPUCreateFromMxArray(INPUTMATRIX);
        if(mxGPUIsSparse(INPUTMATRIXGPU)==1) {
            // ----- gpuArray, sparse: gather to host, expand CSC to COO, re-upload.
            const mwSize *dimsGPU;
            dimsGPU=mxGPUGetDimensions(INPUTMATRIXGPU);
            numARows = (int)dimsGPU[0];    /* gets number of rows of A */
            numAColumns = (int)dimsGPU[1]; /* gets number of columns of A */
            mwIndex nnz1;
            mxArray * tempx = mxGPUCreateMxArrayOnCPU(INPUTMATRIXGPU);
            nnz1 = *(mxGetJc(tempx) + numAColumns);  // nnz = Jc[ncols]
            int nnz= (int)nnz1;
            // Host staging vectors; the SPARSEHELPER.h routines expand the
            // compressed Ir/Jc representation into per-entry row/col indices.
            mxArray *row_sort =mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
            double *pointerrow = (double *)mxGetDoubles(row_sort);
            Ir_DataGetSetDXY(tempx , pointerrow, nnz);
            mxArray *col_sort =mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
            double *pointercol = (double *)mxGetDoubles(col_sort);
            Jc_GetSetDXY(tempx , pointercol);
            cuDoubleComplex *pointerval = (cuDoubleComplex *)mxGetComplexDoubles(tempx);
            size_t pivot_dimensionsrow[1] = {(size_t)nnz};
            size_t pivot_dimensionscolumn[1] = {(size_t)nnz};
            size_t pivot_dimensionsvalue[1] = {(size_t)nnz};
            // Upload the three COO vectors as gpuArray outputs.
            mxGPUArray *row_sortC = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            double *xrow_sortC=(double*)mxGPUGetData(row_sortC);
            gpuErrchk(cudaMemcpy(xrow_sortC, pointerrow, nnz * sizeof(*xrow_sortC), cudaMemcpyHostToDevice));
            mxGPUArray *col_sortC = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            double *xcol_sortC=(double*)mxGPUGetData(col_sortC);
            gpuErrchk(cudaMemcpy(xcol_sortC, pointercol, nnz * sizeof(*xcol_sortC), cudaMemcpyHostToDevice));
            mxGPUArray *val_sortC = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
            cuDoubleComplex *xval_sortC=(cuDoubleComplex *)mxGPUGetData(val_sortC);
            gpuErrchk(cudaMemcpy(xval_sortC, pointerval, nnz * sizeof(*xval_sortC), cudaMemcpyHostToDevice));
            ROW_SORT = mxGPUCreateMxArrayOnGPU(row_sortC);
            COL_SORT = mxGPUCreateMxArrayOnGPU(col_sortC);
            VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sortC);
            // Release local handles; the returned mxArrays own their own copies.
            mxGPUDestroyGPUArray(row_sortC);
            mxGPUDestroyGPUArray(col_sortC);
            mxGPUDestroyGPUArray(val_sortC);
            mxGPUDestroyGPUArray(INPUTMATRIXGPU);
            mxDestroyArray(tempx);
            mxDestroyArray(row_sort);
            mxDestroyArray(col_sort);
        }
        else{
            // ----- gpuArray, dense: cuSPARSE dense -> CSR -> COO entirely on device.
            const mwSize *dimsA;
            dimsA=mxGPUGetDimensions(INPUTMATRIXGPU);
            numARows = (int)dimsA[0];    /* gets number of rows of A */
            numAColumns = (int)dimsA[1]; /* gets number of columns of A */
            cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
            cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
            cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
            cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);  // MATLAB is 1-based
            cuDoubleComplex *d_A_dense;
            d_A_dense = (cuDoubleComplex *)(mxGPUGetDataReadOnly(INPUTMATRIXGPU));
            int nnzA = 0;              // --- Number of nonzero elements in dense matrix A
            const int lda = numARows;  // column-major leading dimension
            size_t pivot_pervect[1] = {(size_t)numARows};
            mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
            cusparseSafeCall(cusparseZnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
            size_t pivot_rowindi[1] = {(size_t)(numARows + 1)};
            mxGPUArray *RowIndi = mxGPUCreateGPUArray(1, (mwSize*) pivot_rowindi, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            int *d_A_RowIndices = (int*)mxGPUGetData(RowIndi);
            size_t pivot_dimensionsrow[1] = {(size_t)nnzA};
            size_t pivot_dimensionscolumn[1] = {(size_t)nnzA};
            size_t pivot_dimensionsvalue[1] = {(size_t)nnzA};
            mxGPUArray *row_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            mxGPUArray *col_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            mxGPUArray *val_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
            int *d_col_sort = (int*)mxGPUGetData(col_sort);
            int *d_row_sort = (int*)mxGPUGetData(row_sort);
            cuDoubleComplex *d_value_sort = (cuDoubleComplex*)mxGPUGetData(val_sort);
            cusparseSafeCall(cusparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_value_sort, d_A_RowIndices, d_col_sort));
            // Expand the CSR row pointer into per-entry COO row indices.
            cusparseSafeCall(cusparseXcsr2coo(handle, d_A_RowIndices, nnzA, numARows, d_row_sort, CUSPARSE_INDEX_BASE_ONE));
            // Sort the COO triplets; the permutation P is then applied to the
            // values in place via Zgthr.
            int *P = NULL;
            void *pBuffer = NULL;
            size_t pBufferSizeInBytes = 0;
            cusparseXcoosort_bufferSizeExt(handle, numARows, numAColumns, nnzA, d_row_sort, d_col_sort, &pBufferSizeInBytes);
            gpuErrchk(cudaMalloc(&pBuffer, sizeof(char)*pBufferSizeInBytes));
            gpuErrchk(cudaMalloc(&P, sizeof(int)*nnzA));
            cusparseCreateIdentityPermutation(handle, nnzA, P);
            // NOTE(review): the first size argument is numAColumns where the API
            // expects the row count m; kept as-is to preserve existing behavior --
            // confirm intent (square inputs make the two interchangeable).
            cusparseSafeCall(cusparseXcoosortByColumn(handle, numAColumns, numAColumns, nnzA, d_row_sort, d_col_sort, P, pBuffer));
            cusparseSafeCall(cusparseZgthr(handle, nnzA, d_value_sort, d_value_sort, P, CUSPARSE_INDEX_BASE_ZERO));
            ROW_SORT = mxGPUCreateMxArrayOnGPU(row_sort);
            COL_SORT = mxGPUCreateMxArrayOnGPU(col_sort);
            VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sort);
            gpuErrchk(cudaFree(pBuffer));
            gpuErrchk(cudaFree(P));
            mxGPUDestroyGPUArray(row_sort);
            mxGPUDestroyGPUArray(col_sort);
            mxGPUDestroyGPUArray(val_sort);
            mxGPUDestroyGPUArray(PerVect);
            mxGPUDestroyGPUArray(RowIndi);
            mxGPUDestroyGPUArray(INPUTMATRIXGPU);
            cusparseDestroyMatDescr(descrA);
            cusparseDestroy(handle);
        } // else
    }
    else if (!(mxIsGPUArray(INPUTMATRIX))){
        if(mxIsSparse(INPUTMATRIX)) {
            // ----- host sparse: expand CSC to COO on the host (CPU outputs).
            const mwSize *dimsCPU;
            dimsCPU=mxGetDimensions(INPUTMATRIX);
            int numAColumns = (int)dimsCPU[1]; /* gets number of columns of A */
            mwIndex nnz1;
            nnz1 = *(mxGetJc(INPUTMATRIX) + numAColumns);
            int nnz= (int)nnz1;
            ROW_SORT = mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
            double *ROWSORT = (double *)mxGetDoubles(ROW_SORT);
            Ir_DataDX(INPUTMATRIX, ROWSORT );
            COL_SORT = mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxREAL);
            double *COLSORT = (double *)mxGetDoubles(COL_SORT);
            Jc_SetDX(INPUTMATRIX, COLSORT);
            VAL_SORT = mxCreateNumericMatrix(nnz, 1, mxDOUBLE_CLASS, mxCOMPLEX);
            // FIX: copy the nnz complex values into the output buffer. The
            // previous code only reassigned the local VALSORT pointer to the
            // input's data, leaving VAL_SORT's own storage uninitialized.
            mxComplexDouble * VALSORT= (mxComplexDouble *)mxGetComplexDoubles(VAL_SORT);
            memcpy(VALSORT, mxGetComplexDoubles(INPUTMATRIX), (size_t)nnz * sizeof(mxComplexDouble));
        }
        else{
            // ----- host dense: upload to device, then cuSPARSE dense -> CSR -> COO.
            int numARows, numAColumns;
            numARows = (int)mxGetM(INPUTMATRIX);
            numAColumns = (int)mxGetN(INPUTMATRIX);
            /* Initialize the MathWorks GPU API. */
            mxInitGPU();
            cuDoubleComplex *h_A_dense1 ; // The A matrix
            h_A_dense1 = (cuDoubleComplex *)mxGetComplexDoubles(INPUTMATRIX);
            cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
            cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
            cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
            cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
            // Device copy of the dense input (freed right after dense2csr).
            size_t pivot_dimensionsvalueDA[2] = {(size_t)numARows, (size_t)numAColumns};
            mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
            cuDoubleComplex *d_A_dense = (cuDoubleComplex *)mxGPUGetData(OUTMA);
            gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
            int nnzA = 0;              // --- Number of nonzero elements in dense matrix A
            const int lda = numARows;  // column-major leading dimension
            size_t pivot_pervect[1] = {(size_t)numARows};
            mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
            cusparseSafeCall(cusparseZnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
            size_t pivot_rowindi[1] = {(size_t)(numARows + 1)};
            mxGPUArray *RowIndi = mxGPUCreateGPUArray(1, (mwSize*) pivot_rowindi, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            int *d_A_RowIndices = (int*)mxGPUGetData(RowIndi);
            size_t pivot_dimensionsrow[1] = {(size_t)nnzA};
            size_t pivot_dimensionscolumn[1] = {(size_t)nnzA};
            size_t pivot_dimensionsvalue[1] = {(size_t)nnzA};
            mxGPUArray *row_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            mxGPUArray *col_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
            mxGPUArray *val_sort = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
            int *d_col_sort = (int*)mxGPUGetData(col_sort);
            int *d_row_sort = (int*)mxGPUGetData(row_sort);
            cuDoubleComplex *d_value_sort = (cuDoubleComplex*)mxGPUGetData(val_sort);
            cusparseSafeCall(cusparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_value_sort, d_A_RowIndices, d_col_sort));
            mxGPUDestroyGPUArray(OUTMA);  // dense copy no longer needed
            cusparseSafeCall(cusparseXcsr2coo(handle, d_A_RowIndices, nnzA, numARows, d_row_sort, CUSPARSE_INDEX_BASE_ONE));
            // Sort the COO triplets; the permutation P is then applied to the
            // values in place via Zgthr.
            int *P = NULL;
            void *pBuffer = NULL;
            size_t pBufferSizeInBytes = 0;
            cusparseXcoosort_bufferSizeExt(handle, numARows, numAColumns, nnzA, d_row_sort, d_col_sort, &pBufferSizeInBytes);
            gpuErrchk(cudaMalloc(&pBuffer, sizeof(char)*pBufferSizeInBytes));
            gpuErrchk(cudaMalloc(&P, sizeof(int)*nnzA));
            cusparseCreateIdentityPermutation(handle, nnzA, P);
            // NOTE(review): first size argument is numAColumns where the API
            // expects m (rows); kept as-is to preserve behavior -- confirm intent.
            cusparseSafeCall(cusparseXcoosortByColumn(handle, numAColumns, numAColumns, nnzA, d_row_sort, d_col_sort, P, pBuffer));
            cusparseSafeCall(cusparseZgthr(handle, nnzA, d_value_sort, d_value_sort, P, CUSPARSE_INDEX_BASE_ZERO));
            ROW_SORT = mxGPUCreateMxArrayOnGPU(row_sort);
            COL_SORT = mxGPUCreateMxArrayOnGPU(col_sort);
            VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sort);
            gpuErrchk(cudaFree(pBuffer));
            gpuErrchk(cudaFree(P));
            mxGPUDestroyGPUArray(row_sort);
            mxGPUDestroyGPUArray(col_sort);
            mxGPUDestroyGPUArray(val_sort);
            mxGPUDestroyGPUArray(PerVect);
            mxGPUDestroyGPUArray(RowIndi);
            cusparseDestroyMatDescr(descrA);
            cusparseDestroy(handle);
        }
    }
    else{
        mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
            "Incorrect input arguments! %s\n");
    }
}
|
pr_page_fault_pinned_first_iteration_V100.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////As shown in the second iteration, it does produce L2 cache hits, however, it is much longer. (is it using L2 on the host?)
///////////Meanwhile its L2 cache miss latency is even longer.
///////////Likely appearing sequence of miss: L1 cache hit -> L1 cache miss -> L1 tlb miss -> L2 cache miss -> L2 tlb miss (is there a L3 cache on the host?)
///////////In the first iteration, it seems that the host does not prefetch L1 tlb and L2 tlb on the host side.
///////////However, it seems to prefetch the L2 cache on the host.
//typedef unsigned char byte;
// Fill A with a pointer-chasing pattern: each visited slot stores the index of
// the next element to visit (stride ahead, wrapped at mod). Two chains are
// written in sequence with identical link formulas -- one anchored at index 0
// and one anchored at offset 32 -- both stepping by `stride`.
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
    const long long int anchors[2] = {0, 32};
    for (int c = 0; c < 2; ++c){
        for (long long int idx = anchors[c]; idx < size; idx += stride){
            A[idx] = (int)((idx + stride) % mod);
        }
    }
}
// Untimed warm-up pointer chase: follow `iterations` links through A starting
// at starting_index, then publish the final index into B[0] so the chain
// cannot be optimized away. mark, C, D, clock_rate and data_stride are unused
// here; the signature mirrors the timed variants P_chasing1/P_chasing2.
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
    int idx = starting_index;
    int remaining = iterations;
    while (remaining-- > 0){
        idx = A[idx];
    }
    B[0] = idx;
}
//////////min page size 4kb = 4096b = 32 * 128.
// Pointer chase without per-access timing: traverse `iterations` links in A
// from starting_index and store the end of the chain to B[0] to keep the loop
// live. (Aggregate clock64-based timing was left disabled here; see
// P_chasing2 for the per-access timed variant.) mark, C, D, clock_rate and
// data_stride are unused.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
    int idx = starting_index;
    for (int step = 0; step < iterations; ++step){
        idx = A[idx];
    }
    B[0] = idx;
}
//////////min page size 4kb = 4096b = 32 * 128.
// Per-access timed pointer chase. For each of `iterations` steps this reads
// clock64, loads the next index from A via an inline-PTX global load, stores
// it into shared memory, and reads clock64 again; the per-load cycle counts
// are buffered in shared memory and finally copied to C (visited indices)
// and D (latencies). `iterations` must not exceed 4096 (size of the shared
// buffers). mark, clock_rate and data_stride are unused in this variant.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
	//////shared memory: 0xc000 max (49152 Bytes = 48KB)
	__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
	__shared__ int s_index[1024 * 4];
	//__shared__ int s_index[1];
	int j = starting_index;/////make them in the same page, and miss near in cache lines
	//int j = B[0];
	long long int start_time = 0;//////clock
	long long int end_time = 0;//////clock
	long long int time_interval = 0;//////clock
	//long long int total_time = end_time - start_time;//////clock
	// Disabled plain-C version of the timed loop (kept for reference); the
	// PTX version below avoids extra instructions between the two clock reads.
	/*
	for (int it = 0; it < iterations; it++){
		start_time = clock64();//////clock
		j = A[j];
		//s_index[it] = j;
		end_time=clock64();//////clock
		s_tvalue[it] = end_time - start_time;
	}
	*/
	// Declare PTX scratch registers and precompute the 32-bit shared-memory
	// base address of s_index (t6) once, outside the timed loop.
	asm(".reg .u32 t1;\n\t"
	".reg .u64 t2;\n\t"
	".reg .u32 t3;\n\t"
	".reg .u32 t4;\n\t"
	".reg .u64 t5;\n\t"
	".reg .u32 t6;\n\t"
	".reg .u64 t7;\n\t"
	"cvta.to.shared.u64 t5, %0;\n\t"
	"cvt.u32.u64 t6, t5;\n\t"
	:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
	for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
		// t2 = A + 4*j (global load address); t4 = s_index base + 4*it.
		// The load and shared store sit between the two %clock64 reads so the
		// measured interval covers the global access latency.
		asm("shl.b32 t1, %3, 2;\n\t"
		"cvt.u64.u32 t7, t1;\n\t"
		"add.s64 t2, t7, %4;\n\t"
		"shl.b32 t3, %6, 2;\n\t"
		"add.s32 t4, t3, t6;\n\t"
		"mov.u64 %0, %clock64;\n\t"
		"ld.global.u32 %2, [t2];\n\t"
		"st.shared.u32 [t4], %2;\n\t"
		"mov.u64 %1, %clock64;"
		: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
		time_interval = end_time - start_time;
		//if(it >= 4 * 1024){
		s_tvalue[it] = time_interval;
		//}
	}
	//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
	B[0] = j;
	// Flush the shared-memory sample buffers to global memory for the host.
	for (int it = 0; it < iterations; it++){
		C[it] = s_index[it];
		D[it] = s_tvalue[it];
	}
}
// Single-thread measurement kernel: clamp the chase length to [16, 4096]
// (the shared-memory sample buffers in P_chasing2 hold 4096 entries) and run
// the per-access timed pointer chase starting at index 0. mod is unused here;
// it is part of the harness signature.
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
    long long int reduced_iter =
        (iterations > 4096) ? 4096 :
        (iterations < 16)   ? 16   : iterations;
    P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
    __syncthreads();
}
// Harness: sweeps data_stride (256 ints .. 512K ints, doubling) and working-set
// size mod2 (16K .. 2G, doubling, capped at 2684354560), builds a pointer-chase
// chain in *pinned host memory*, and launches tlb_latency_test with the host
// pointer directly (no device copy) -- so every chase step goes through the
// host-memory path being measured. Per-access indices and cycle counts are
// written to output.txt.
int main(int argc, char **argv)
{
	printf("\n");
	// set device
	hipDeviceProp_t device_prop;
	//int dev_id = findCudaDevice(argc, (const char **) argv);
	int dev_id = 0;
	checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
	int peak_clk = 1;//kHz
	checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
	float clock_rate = (float) peak_clk;  // kHz, used to convert cycles -> ms
	//printf("clock_rate_out_kernel:%f\n", clock_rate);
	if (!device_prop.managedMemory) {
		// This samples requires being run on a device that supports Unified Memory
		fprintf(stderr, "Unified Memory not supported on this device\n");
		exit(EXIT_WAIVED);
	}
	if (device_prop.computeMode == hipComputeModeProhibited)
	{
		// This sample requires being run with a default or process exclusive mode
		fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
		exit(EXIT_WAIVED);
	}
	///////////////////////////////////////////////////////////////////GPU data out
	int *GPU_data_out;  // sink for the chase end value (prevents optimization)
	checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
	FILE * pFile;
	pFile = fopen ("output.txt","w");
	int counter = 0;
	/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
	for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
		//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
		//printf("###################data_stride%d#########################\n", data_stride);
		//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
		for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
			counter++;
			///////////////////////////////////////////////////////////////////CPU data begin
			//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
			long long int mod = mod2;
			if(mod > 2684354560){
				mod = 2684354560;
			}
			long long int data_size = mod;
			if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
				data_size = 4194304;
			}
			//int iterations = data_size / data_stride;
			//int iterations = 1024 * 256 * 8;
			long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
			int *CPU_data_in;
			//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
			checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(int) * data_size, hipHostMallocDefault));//////////using pinned memory
			init_cpu_data(CPU_data_in, data_size, data_stride, mod);
			// The kernel only records up to 4096 samples (shared-memory limit),
			// so host-side buffers are sized with the same clamp.
			long long int reduced_iter = iterations;
			if(reduced_iter > 4096){
				reduced_iter = 4096;
			}else if(reduced_iter < 16){
				reduced_iter = 16;
			}
			int *CPU_data_out_index;
			CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
			long long int *CPU_data_out_time;
			CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
			///////////////////////////////////////////////////////////////////CPU data end
			///////////////////////////////////////////////////////////////////GPU data in
			//int *GPU_data_in;
			//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
			//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
			///////////////////////////////////////////////////////////////////GPU data out
			int *GPU_data_out_index;
			checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
			long long int *GPU_data_out_time;
			checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
			// Note: CPU_data_in (pinned host memory) is passed to the kernel
			// directly -- presumably relying on device-accessible pinned
			// allocations so accesses cross the interconnect; confirm mapping
			// behavior on the target platform.
			hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
			hipDeviceSynchronize();
			hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, hipMemcpyDeviceToHost);
			hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
			fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
			fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
			for (long long int it = 0; it < reduced_iter; it++){
				fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
				//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
				//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
			}
			// Per-configuration teardown before the next sweep step.
			checkCudaErrors(hipFree(GPU_data_out_index));
			checkCudaErrors(hipFree(GPU_data_out_time));
			//checkCudaErrors(hipFree(GPU_data_in));
			//checkCudaErrors(hipFree(CPU_data_in));
			checkCudaErrors(hipHostFree(CPU_data_in));//////using pinned memory
			//free(CPU_data_in);
			free(CPU_data_out_index);
			free(CPU_data_out_time);
		}
		//printf("############################################\n\n");
	}
	checkCudaErrors(hipFree(GPU_data_out));
	//free(CPU_data_out);
	fclose (pFile);
	exit(EXIT_SUCCESS);
}
| pr_page_fault_pinned_first_iteration_V100.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////As shown in the second iteration, it does produce L2 cache hits, however, it is much longer. (is it using L2 on the host?)
///////////Meanwhile its L2 cache miss latency is even longer.
///////////Likely appearing sequence of miss: L1 cache hit -> L1 cache miss -> L1 tlb miss -> L2 cache miss -> L2 tlb miss (is there a L3 cache on the host?)
///////////In the first iteration, it seems that the host does not prefetch L1 tlb and L2 tlb on the host side.
///////////However, it seems to prefetch the L2 cache on the host.
//typedef unsigned char byte;
// Build the pointer-chasing chain on the host: element i stores the index of
// the next element to visit ((i + stride) mod `mod`). The same chain is
// written twice, the second pass starting at offset 32 ints — presumably to
// touch a second position within each cache line; TODO confirm intent.
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
	for (long long int base = 0; base <= 32; base += 32) {
		for (long long int idx = base; idx < size; idx += stride) {
			A[idx] = (int)((idx + stride) % mod);
		}
	}
}
// Untimed pointer chase: follow the chain in A for `iterations` hops starting
// at `starting_index`, then publish the final index through B[0] so the
// dependent loads cannot be optimized away. mark/C/D/clock_rate/data_stride
// are unused here; the signature mirrors the other P_chasing* variants.
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
	int cursor = starting_index;/////make them in the same page, and miss near in cache lines
	int remaining = iterations;
	while (remaining-- > 0) {
		cursor = A[cursor];
	}
	B[0] = cursor;
}
//////////min page size 4kb = 4096b = 32 * 128.
// Untimed pointer chase identical to P_chasing0 but taking a 64-bit hop
// count. Follows j = A[j] for `iterations` hops and publishes the final
// index through B[0] so the dependent load chain stays live.
// mark/C/D/clock_rate/data_stride are unused; the signature mirrors the
// other P_chasing* variants.
// FIX: the loop counter was a 32-bit `int` compared against the long long
// `iterations` bound — for iterations > INT_MAX the counter would overflow
// (undefined behavior) before the loop could terminate. Use a matching
// 64-bit counter.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
	int j = starting_index;/////make them in the same page, and miss near in cache lines
	for (long long int it = 0; it < iterations; it++){
		j = A[j];
	}
	B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
// Timed pointer chase: performs `iterations` dependent loads j = A[j],
// recording for every hop the visited index and the load latency in clock
// cycles, measured with inline PTX around a single global load. Samples are
// staged in shared memory so the timed loop itself does not write global
// memory, then drained to C (indices) and D (cycle counts) at the end.
// B[0] receives the final index to keep the chain live.
// NOTE(review): `iterations` must not exceed 4 * 1024, the capacity of the
// shared staging arrays — the caller (tlb_latency_test) clamps it; confirm
// any new caller does the same.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
	//////shared memory: 0xc000 max (49152 Bytes = 48KB)
	__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
	__shared__ int s_index[1024 * 4];
	//__shared__ int s_index[1];
	int j = starting_index;/////make them in the same page, and miss near in cache lines
	//int j = B[0];
	long long int start_time = 0;//////clock
	long long int end_time = 0;//////clock
	long long int time_interval = 0;//////clock
	//long long int total_time = end_time - start_time;//////clock
	/*
	for (int it = 0; it < iterations; it++){
		start_time = clock64();//////clock
		j = A[j];
		//s_index[it] = j;
		end_time=clock64();//////clock
		s_tvalue[it] = end_time - start_time;
	}
	*/
	// Pre-compute the 32-bit shared-memory address of s_index in register t6
	// once, so the timed loop below only performs cheap address arithmetic.
	asm(".reg .u32 t1;\n\t"
	".reg .u64 t2;\n\t"
	".reg .u32 t3;\n\t"
	".reg .u32 t4;\n\t"
	".reg .u64 t5;\n\t"
	".reg .u32 t6;\n\t"
	".reg .u64 t7;\n\t"
	"cvta.to.shared.u64 t5, %0;\n\t"
	"cvt.u32.u64 t6, t5;\n\t"
	:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
	for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
		// Timed body: compute &A[j] (t2) and &s_index[it] (t4), read
		// %clock64, load j = A[j] from global memory, store it to shared
		// memory, read %clock64 again. The shared-memory store forces the
		// load to complete between the two clock reads.
		asm("shl.b32 t1, %3, 2;\n\t"
		"cvt.u64.u32 t7, t1;\n\t"
		"add.s64 t2, t7, %4;\n\t"
		"shl.b32 t3, %6, 2;\n\t"
		"add.s32 t4, t3, t6;\n\t"
		"mov.u64 %0, %clock64;\n\t"
		"ld.global.u32 %2, [t2];\n\t"
		"st.shared.u32 [t4], %2;\n\t"
		"mov.u64 %1, %clock64;"
		: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
		time_interval = end_time - start_time;
		//if(it >= 4 * 1024){
		s_tvalue[it] = time_interval;
		//}
	}
	//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
	B[0] = j;
	// Drain the staged samples to global memory once timing is finished.
	for (int it = 0; it < iterations; it++){
		C[it] = s_index[it];
		D[it] = s_tvalue[it];
	}
}
// Single-thread latency kernel (launched <<<1, 1>>>): clamps the recorded
// sample count to the shared-memory capacity of P_chasing2 (4 * 1024 slots,
// minimum 16) and runs the timed pointer chase over A, writing per-access
// indices to C and cycle counts to D. `mod` is unused here; it is part of
// the sweep bookkeeping in the host driver.
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
	long long int reduced_iter = iterations;
	reduced_iter = (reduced_iter > 4096) ? 4096 : reduced_iter;
	reduced_iter = (reduced_iter < 16) ? 16 : reduced_iter;
	// Timed chase; partial results land in C (indices) and D (cycles).
	P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);
	__syncthreads();
}
// Host driver for the TLB/page-fault latency microbenchmark.
//
// For every (data_stride, working-set size `mod`) combination it:
//   1. allocates a pinned host buffer and links it into a pointer-chasing
//      chain (init_cpu_data),
//   2. launches tlb_latency_test<<<1, 1>>>, which chases the chain directly
//      over the pinned host allocation and records per-access latencies,
//   3. copies the recorded indices/latencies back and appends them to
//      output.txt.
//
// Fixes: fopen() result is now checked; the kernel launch is checked via
// cudaGetLastError(); cudaDeviceSynchronize() and the cudaMemcpy() calls are
// routed through checkCudaErrors() like every other runtime call here.
int main(int argc, char **argv)
{
	printf("\n");
	// set device
	cudaDeviceProp device_prop;
	//int dev_id = findCudaDevice(argc, (const char **) argv);
	int dev_id = 0;
	checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
	int peak_clk = 1;//kHz
	checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
	float clock_rate = (float) peak_clk;
	//printf("clock_rate_out_kernel:%f\n", clock_rate);
	if (!device_prop.managedMemory) {
		// This sample requires being run on a device that supports Unified Memory
		fprintf(stderr, "Unified Memory not supported on this device\n");
		exit(EXIT_WAIVED);
	}
	if (device_prop.computeMode == cudaComputeModeProhibited)
	{
		// This sample requires being run with a default or process exclusive mode
		fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
		exit(EXIT_WAIVED);
	}
	///////////////////////////////////////////////////////////////////GPU data out
	// Scratch word(s) written by the kernel to keep the chase alive.
	int *GPU_data_out;
	checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
	FILE * pFile;
	pFile = fopen ("output.txt","w");
	if (pFile == NULL){// FIX: fopen() was never checked
		fprintf(stderr, "Cannot open output.txt for writing\n");
		exit(EXIT_FAILURE);
	}
	int counter = 0;
	/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
	for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
		//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
		for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
			counter++;
			///////////////////////////////////////////////////////////////////CPU data begin
			long long int mod = mod2;
			if(mod > 2684354560){
				mod = 2684354560;
			}
			long long int data_size = mod;
			if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
				data_size = 4194304;
			}
			long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
			// Pinned (page-locked) host buffer; the kernel chases it in place.
			int *CPU_data_in;
			checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(int) * data_size, cudaHostAllocDefault));//////////using pinned memory
			init_cpu_data(CPU_data_in, data_size, data_stride, mod);
			// This clamp must mirror the clamp inside tlb_latency_test.
			long long int reduced_iter = iterations;
			if(reduced_iter > 4096){
				reduced_iter = 4096;
			}else if(reduced_iter < 16){
				reduced_iter = 16;
			}
			int *CPU_data_out_index;
			CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
			long long int *CPU_data_out_time;
			CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
			///////////////////////////////////////////////////////////////////GPU data out
			int *GPU_data_out_index;
			checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
			long long int *GPU_data_out_time;
			checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
			tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
			checkCudaErrors(cudaGetLastError());// FIX: surface launch-configuration errors
			checkCudaErrors(cudaDeviceSynchronize());// FIX: surface in-kernel faults
			checkCudaErrors(cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, cudaMemcpyDeviceToHost));
			checkCudaErrors(cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost));
			fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
			fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
			for (long long int it = 0; it < reduced_iter; it++){
				// clock_rate is in kHz, so cycles / kHz = milliseconds.
				fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
			}
			checkCudaErrors(cudaFree(GPU_data_out_index));
			checkCudaErrors(cudaFree(GPU_data_out_time));
			checkCudaErrors(cudaFreeHost(CPU_data_in));//////using pinned memory
			free(CPU_data_out_index);
			free(CPU_data_out_time);
		}
	}
	checkCudaErrors(cudaFree(GPU_data_out));
	fclose (pFile);
	exit(EXIT_SUCCESS);
}
|
22c0d7078f30f1d2841e928dec6d99c17b7aff5a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
#include <omp.h>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <hetero_cmdparser.hpp>
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////
// Check and report the most recent HIP runtime error: prints the file, line,
// error string and current device, then terminates the process.
// (Comments must stay outside the macro: '//' on a backslash-continued line
// would swallow the continuation.)
#define cudaCheckLastError() { \
	hipError_t error = hipGetLastError(); \
	int id; hipGetDevice(&id); \
	if(error != hipSuccess) { \
		printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
			__FILE__,__LINE__, hipGetErrorString(error), id); \
		exit(EXIT_FAILURE); \
	} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Read `size` bytes from `filename` into `pData`. On failure it makes the
// *enclosing* function `return 1`.
// NOTE(review): the open uses filename.c_str() (so callers pass std::string)
// but the same `filename` is then handed to printf's "%s" — passing a
// std::string to a varargs "%s" is undefined behavior; confirm callers.
#define checkReadFile(filename, pData, size) { \
	fstream *fs = new fstream; \
	fs->open(filename.c_str(), ios::in|ios::binary); \
	if (!fs->is_open()) \
	{ \
		printf("Cannot open file '%s' in file '%s' at line %i\n", \
			filename, __FILE__, __LINE__); \
		return 1; \
	} \
	fs->read(reinterpret_cast<char*>(pData), size); \
	fs->close(); \
	delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Write `size` bytes from `pData` to `filename` (a C string here, unlike
// checkReadFile). On failure it makes the *enclosing* function `return 1`.
#define checkWriteFile(filename, pData, size) { \
	fstream *fs = new fstream; \
	fs->open(filename, ios::out|ios::binary); \
	if (!fs->is_open()) \
	{ \
		fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
			filename, __FILE__, __LINE__); \
		return 1; \
	} \
	fs->write(reinterpret_cast<char*>(pData), size); \
	fs->close(); \
	delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static void handle_error(int errcode, const char *str)
{
char msg[MPI_MAX_ERROR_STRING];
int resultlen;
MPI_Error_string(errcode, msg, &resultlen);
fprintf(stderr, "%s: %s\n", str, msg);
MPI_Abort(MPI_COMM_WORLD, 1);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Command-line schema consumed by CommandLineParser (hetero_cmdparser.hpp):
// each entry is "{ short name | long name | default value | description }".
const char* key =
	"{ h |help | | print help message }"
	"{ i |srcFile | | source of the file }"
	"{ halo|halo | | halo size }"
	"{ dimx|dimx | | dimension x }"
	"{ dimy|dimy | | dimension y }"
	"{ dimz|dimz | | dimension z }"
	;
////////////////////////////////////////////////////////////////////////////////////////////////////
// MPI + MPI-IO test driver: splits a dimx*dimy*dimz float volume across a
// hard-coded 2 x 2 x 2 Cartesian grid of ranks. The master precomputes each
// sub-block's extent and sends it out; every rank then reads its own
// sub-block from the source file through an MPI subarray view, dumps it to
// result_XX_XX_XX.raw, and collectively rewrites it into test.raw.
// NOTE(review): dims is fixed at {2,2,2}, so this expects exactly 8 ranks —
// confirm the launcher always provides them.
int main(int argc, char *argv[])
{
	//================================================================================
	// Initialize MPI
	int rank, size;
	char name[MPI_MAX_PROCESSOR_NAME];
	int length;
	int errCode;
	MPI_File fh;
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Get_processor_name(name, &length);
	printf("This is rank %02d, size %02d, of %s\n", rank, size, name);
	MPI_Barrier(MPI_COMM_WORLD);
	//-----------------------------------------------------------------------------------------
	MPI_Comm comm3d; /* Cartesian communicator */
	int dims[3];// = {0, 0}; /* allow MPI to choose grid block dimensions */
	int periodic[3];// = {0, 0}; /* domain is non-periodic */
	int reorder;// = 1; /* allow processes to be re-ranked */
	int coords[3]; /* coordinates of our block in grid */
	// int up, down; /* ranks of processes above and below ours */
	// int left, right; /* ranks of processes to each side of ours */
	//
	int master = 0;
	int worker;
	int numMasters = 1;
	int numWorkers = size;
	// Parsing the arguments
	CommandLineParser cmd(argc, argv, key);
	if(rank==master) cmd.printParams();
	MPI_Barrier(MPI_COMM_WORLD);
	dims[0] = 2;
	dims[1] = 2;
	dims[2] = 2;
	periodic[0] = 0;
	periodic[1] = 0;
	periodic[2] = 0;
	reorder = 1;
	// // Set up Cartesian grid of processors. A new communicator is
	// // created we get our rank within it.
	// MPI_Dims_create(rank, 2, dims); ///This line will not work
	MPI_Cart_create(MPI_COMM_WORLD, 3, dims, periodic, reorder, &comm3d );
	MPI_Cart_get(comm3d, 3, dims, periodic, coords );
	MPI_Comm_rank(comm3d, &rank );
	printf("x %d, y %d, z %d, rank %d\n",coords[0],coords[1],coords[2],rank);
	MPI_Barrier(MPI_COMM_WORLD);
	//-----------------------------------------------------------------------------------------
	// Retrieve the information from cmd
	string srcFile = cmd.get<string>("srcFile", false);
	const int halo = cmd.get<int>("halo", false);
	const int dimx = cmd.get<int>("dimx", false);
	const int dimy = cmd.get<int>("dimy", false);
	const int dimz = cmd.get<int>("dimz", false);
	const int total = dimx*dimy*dimz;
	// float *h_src = new float[total];
	int processIdx_1d = rank;
	int3 processIdx_3d = make_int3(coords[0], coords[1], coords[2]);
	/// Mimic Pack and Unpack MPI
	int3 featureIdx { 0, 0, 0};
	int3 processIdx { 0, 0, 0};
	// Per-rank block size (in elements) along each axis.
	int3 processDim { 256, 256, 256};
	int3 subDataDim {0, 0, 0};
	// Number of blocks per axis, rounded up to cover the whole volume.
	int3 clusterDim {(dimx/processDim.x + ((dimx%processDim.x)?1:0)),
		(dimy/processDim.y + ((dimy%processDim.y)?1:0)),
		(dimz/processDim.z + ((dimz%processDim.z)?1:0))};
	MPI_Barrier(MPI_COMM_WORLD);
	// cout << "Cluster Dimension: " << clusterDim.x << " "
	// << clusterDim.y << " "
	// << clusterDim.z << " "
	// << endl;
	MPI_Barrier(MPI_COMM_WORLD);
	//-----------------------------------------------------------------------------------------
	MPI_Request request;
	// NOTE(review): `status` is declared as MPI_Request, not MPI_Status; it
	// is only referenced by the commented-out MPI_Wait below — confirm before
	// re-enabling that code.
	MPI_Request status;
	//Start packing
	/// Naive approach, copy to another buffer, then send
	int3 index_3d;
	double start = MPI_Wtime();
	int caught = 0;
	// Master walks every block of the cluster grid, measures the clipped
	// extent of that block (blocks on the far boundary may be partial), and
	// ships the extent to the owning rank.
	if(rank==master)
	{
		for(processIdx.z=0; processIdx.z<clusterDim.z; processIdx.z++)
		{
			for(processIdx.y=0; processIdx.y<clusterDim.y; processIdx.y++)
			{
				for(processIdx.x=0; processIdx.x<clusterDim.x; processIdx.x++)
				{
					/// !!! First step: Determine size of buffer
					// Brute-force scan: featureIdx stops early on each axis
					// where the global index hits the volume boundary.
					for(featureIdx.z=0; featureIdx.z<processDim.z; featureIdx.z++)
					{
						for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
						{
							for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
							{
								//3D global index
								index_3d = make_int3(
									processIdx.x*processDim.x+featureIdx.x,
									processIdx.y*processDim.y+featureIdx.y,
									processIdx.z*processDim.z+featureIdx.z);
								if(index_3d.x==dimx) break;
							}
							if(index_3d.y==dimy) break;
						}
						if(index_3d.z==dimz) break;
					}
					subDataDim = make_int3(featureIdx.x, featureIdx.y, featureIdx.z);
					cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << " " << subDataDim.z << endl;
					//Second step: copy subdataSize
					index_3d = make_int3(
						processIdx.x*processDim.x+0,
						processIdx.y*processDim.y+0,
						processIdx.z*processDim.z+0);
					MPI_Datatype mysubarray;
					int starts[3] = {index_3d.z, index_3d.y, index_3d.x}; ///!Order is very important
					int subsizes[3] = {subDataDim.z, subDataDim.y, subDataDim.x}; ///!Order is very important
					int bigsizes[3] = {dimz, dimy, dimx}; ///!Order is very important
					MPI_Type_create_subarray(3, bigsizes, subsizes, starts,
						MPI_ORDER_C, MPI_FLOAT, &mysubarray);
					MPI_Type_commit(&mysubarray);
					// for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
					// {
					// for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
					// {
					// if(featureIdx.x == 0) // First position of first block
					// {
					// //2D global index
					// index_2d = make_int2(
					// processIdx.x*processDim.x+featureIdx.x,
					// processIdx.y*processDim.y+featureIdx.y);
					// if(index_2d.y<dimy)
					// {
					// // cout << "Caught " << ++caught << endl;
					// memcpy(
					// // &tmp[featureIdx.y * processDim.x],
					// &tmp[featureIdx.y * subDataDim.x],
					// &h_src[index_2d.y*dimx + index_2d.x],
					// // processDim.x*sizeof(float));
					// subDataDim.x*sizeof(float));
					// }
					// }
					// }
					// }
					// Row-major rank of the block's owner in the cluster grid.
					processIdx_1d = processIdx.z * clusterDim.y * clusterDim.x +
						processIdx.y * clusterDim.x +
						processIdx.x;
					cout << processIdx_1d << endl;
					/// !!! Send to worker process
					// Send the size of message
					// NOTE(review): the int3 extent (12 bytes) is shipped as
					// a single MPI_LONG_DOUBLE — this relies on
					// sizeof(long double) >= sizeof(int3); confirm on the
					// target platform/MPI.
					MPI_Isend(&subDataDim, 1, MPI_LONG_DOUBLE, processIdx_1d, 0, MPI_COMM_WORLD, &request); //Data need to be long enough
					cout << "Sent" << endl;
					MPI_Type_free(&mysubarray);
				}
			}
		}
	}
	MPI_Barrier(MPI_COMM_WORLD);
	// Every rank (including the master) receives its own sub-block extent.
	// MPI_Recv(p_src, processDim.x*processDim.y, MPI_FLOAT, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	MPI_Recv(&subDataDim, 1, MPI_LONG_DOUBLE, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	// MPI_Irecv(&subDataDim, 1, MPI_LONG_DOUBLE, master, 0, MPI_COMM_WORLD, &request);
	// MPI_Wait(&request, &status);
	//MPI_Recv(p_src, subDataDim.x * subDataDim.y, MPI_FLOAT, master, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	cout << "Receive preamble read from " << rank << endl;
	cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << " " << subDataDim.z << endl;
	MPI_Barrier(MPI_COMM_WORLD);
	// Local buffer holding this rank's sub-block of the volume.
	float *p_src;
	p_src = (float*)malloc(subDataDim.x*subDataDim.y*subDataDim.z*sizeof(float));
	//-----------------------------------------------------------------------------------------
	// Padded per-rank extents including halo (currently only printed/unused).
	int npx = dimx/processDim.x + 2*halo;
	int npy = dimy/processDim.y + 2*halo;
	int npz = dimz/processDim.z + 2*halo;
	// if(rank==0)
	// {
	// cout << "At " << rank << endl;
	// cout << npx << " " << npy << " " << npz << endl;
	// }
	//-----------------------------------------------------------------------------------------
	// // Construct the neighbor communicator
	// int left, right, top, bottom, front, back;
	// MPI_Cart_shift(comm3d, 1, 1, &left, &right);
	// MPI_Cart_shift(comm3d, 2, 1, &top, &bottom);
	// MPI_Cart_shift(comm3d, 0, 1, &front, &back);
	// MPI_Barrier(MPI_COMM_WORLD);
	// fprintf(stderr, "Rank %d has LR neighbours %d %d, FB %d %d, TB %d %d\n",
	// rank, left, right, front, back, top, bottom);
	// MPI_Barrier(MPI_COMM_WORLD);
	//-----------------------------------------------------------------------------------------
	// // create subarrays(exclude halo) to write to file with MPI-IO
	// // data in the local array
	// int sizes[3];
	// sizes[0]=npz; sizes[1]=npx; sizes[2]=npy;
	// int subsizes[3];
	// subsizes[0]=sizes[0]-2*halo; subsizes[1]=sizes[1]-2*halo; subsizes[2]=sizes[2]-2*halo;
	// int starts[3];
	// starts[0]=halo; starts[1]=halo; starts[2]=halo;
	// MPI_Datatype local_array;
	// MPI_Type_create_subarray(3, sizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &local_array);
	// MPI_Type_commit(&local_array);
	//-----------------------------------------------------------------------------------------
	// // data in the global array
	// int gsizes[3];
	// // gsizes[0]=nz; gsizes[1]=nx; gsizes[2]=ny;
	// gsizes[0]=dimz; gsizes[1]=dimx; gsizes[2]=dimy;
	// int gstarts[3];
	// gstarts[0]=subsizes[0]*coords[0]; gstarts[1]=subsizes[1]*coords[1]; gstarts[2]=subsizes[2]*coords[2];
	// MPI_Datatype global_array;
	// MPI_Type_create_subarray(3, gsizes, subsizes, gstarts, MPI_ORDER_C, MPI_FLOAT, &global_array);
	// MPI_Type_commit(&global_array);
	//-----------------------------------------------------------------------------------------
	// /* allocate of halo areas */
	// int halosizex = npy*npz*halo;
	// float *leftRecv = (float *)calloc(3*halosizex,sizeof(float));
	// float *rightRecv = (float *)calloc(3*halosizex,sizeof(float));
	// float *leftSend = (float *)calloc(3*halosizex,sizeof(float));
	// float *rightSend = (float *)calloc(3*halosizex,sizeof(float));
	// int halosizey = npx*npz*halo;
	// float *frontRecv = (float *)calloc(3*halosizey,sizeof(float));
	// float *backRecv = (float *)calloc(3*halosizey,sizeof(float));
	// float *frontSend = (float *)calloc(3*halosizey,sizeof(float));
	// float *backSend = (float *)calloc(3*halosizey,sizeof(float));
	// int halosizez = npy*npx*halo;
	// float *topRecv = (float *)calloc(3*halosizez,sizeof(float));
	// float *bottomRecv = (float *)calloc(3*halosizez,sizeof(float));
	// float *topSend = (float *)calloc(3*halosizez,sizeof(float));
	// float *bottomSend = (float *)calloc(3*halosizez,sizeof(float));
	//-----------------------------------------------------------------------------------------
	//---------------------------------------------------------------------------------
	// NOTE(review): `ch` (strdup), `p_src` and `filename` below are never
	// freed — harmless in this one-shot test, but a leak if this main is
	// ever turned into a reusable routine.
	char *ch = strdup(srcFile.c_str());
	cout << ch << endl;
	MPI_Offset disp;
	disp = sizeof(float)*rank*processDim.x*processDim.y*processDim.z;
	MPI_Datatype etype;
	etype = MPI_FLOAT;
	// Decode this rank's 3D block origin from its linear rank, assuming the
	// hard-coded 2 x 2 x 2 grid (x fastest, then y, then z).
	index_3d = make_int3(
		(rank%(2*2)%2)*processDim.x+0,
		(rank%(2*2)/2)*processDim.y+0,
		(rank/(2*2))*processDim.z+0);
	// index_3d = make_int3(
	// (coords[1])*processDim.x+0,
	// (coords[2])*processDim.y+0,
	// (coords[0])*processDim.z+0);
	// index_2d = make_int2(
	// (rank%2)*subDataDim.x+0,
	// (rank/2)*subDataDim.y+0);
	cout << "Start read from " << rank << endl;
	int bigsizes[3] = {dimz, dimy, dimx}; ///!Order is very important
	int subsizes[3] = {subDataDim.z, subDataDim.y, subDataDim.x}; ///!Order is very important
	int starts[3] = {index_3d.z, index_3d.y, index_3d.x}; ///!Order is very important
	MPI_Barrier(MPI_COMM_WORLD);
	cout << "Start indices \t" << index_3d.x << " \t" << index_3d.y << " \t" << index_3d.z << " \t at " << rank << endl;
	MPI_Barrier(MPI_COMM_WORLD);
	// Subarray view selecting this rank's block inside the global file.
	MPI_Datatype subarray;
	MPI_Type_create_subarray(3, bigsizes, subsizes, starts,
		MPI_ORDER_C, MPI_FLOAT, &subarray);
	MPI_Type_commit(&subarray);
	errCode = MPI_File_open(MPI_COMM_WORLD, ch, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
	cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
	if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
	// MPI_File_set_view(fh, disp, etype, subarray, "native", MPI_INFO_NULL);
	MPI_File_set_view(fh, 0, etype, subarray, "native", MPI_INFO_NULL);
	cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
	MPI_File_read(fh, p_src, subDataDim.x*subDataDim.y*subDataDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
	// MPI_File_read_all(fh, p_src, subDataDim.x*subDataDim.y, MPI_FLOAT, MPI_STATUS_IGNORE); // Process spawn and fail
	// MPI_File_read_ordered(fh, p_src, subDataDim.x*subDataDim.y, MPI_FLOAT, MPI_STATUS_IGNORE);
	// MPI_Type_free(&subarray);
	cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
	MPI_File_close(&fh);
	// MPI_Barrier(MPI_COMM_WORLD); cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
	if(p_src[0] !=0)
		cout << "Caught " << endl;
	cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
	//------------------------------------------------------------------------------
	// fstream fs;
	// // if(rank==0)
	// // {
	// fs.open(srcFile.c_str(), ios::in|ios::binary);
	// // cout << "Start read from " << rank << endl;
	// if (!fs.is_open())
	// {
	// printf("Cannot open file '%s' in file '%s' at line %i\n",
	// srcFile, __FILE__, __LINE__);
	// return 1;
	// }
	// // cout << "File opened from " << rank << endl;
	// // cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << endl;
	// // cout << "Dimension size: " << dimx << " " << dimy << endl;
	// processIdx.x = rank%clusterDim.x;
	// processIdx.y = rank/clusterDim.x;
	// for(featureIdx.y=0; featureIdx.y<subDataDim.y; featureIdx.y++)
	// {
	// for(featureIdx.x=0; featureIdx.x<subDataDim.x; featureIdx.x++)
	// {
	// if(featureIdx.x == 0) // First position of row
	// {
	// //2D global index
	// index_2d = make_int2(
	// processIdx.x*subDataDim.x+featureIdx.x,
	// processIdx.y*subDataDim.y+featureIdx.y);
	// // cout << "Global Index 2d: " << index_2d.x << " " << index_2d.y << endl;
	// if(index_2d.y<dimy) //For handling the boundary problem
	// {
	// fs.seekg((index_2d.y*dimx + index_2d.x)*sizeof(float), ios::beg);
	// fs.read(reinterpret_cast<char*>(&p_src[featureIdx.y * subDataDim.x]), subDataDim.x*sizeof(float));
	// // if(p_src[featureIdx.y * subDataDim.x] !=0)
	// // cout << "Caught " << ++caught << endl;
	// }
	// }
	// }
	// }
	// fs.close();
	//------------------------------------------------------------------------------
	MPI_Barrier(MPI_COMM_WORLD);
	cout << "Finish read from " << rank << endl;
	double elapsed = MPI_Wtime() - start;
	if(rank==master) cout << "Time : " << elapsed << " s " << endl;
	/// Debug, write partially
	MPI_Barrier(MPI_COMM_WORLD);
	// Per-rank debug dump of the sub-block that was just read.
	char *filename = new char[100];
	sprintf(filename, "result_%02d_%02d_%02d.raw", processIdx_3d.x, processIdx_3d.y, processIdx_3d.z);
	printf("%s\n", filename);
	// float *h_tmp;
	// h_tmp = (float*)malloc(subDataDim.x*subDataDim.y*sizeof(float));
	// hipHostRegister(h_tmp, subDataDim.x*subDataDim.y *sizeof(float), hipHostRegisterPortable);
	// hipMemcpy(h_tmp, p_src, subDataDim.x*subDataDim.y*sizeof(float), hipMemcpyDeviceToHost); cudaCheckLastError();
	// checkWriteFile(filename, h_tmp, subDataDim.x*subDataDim.y*sizeof(float));
	// checkWriteFile(filename, p_src, processDim.x*processDim.y*sizeof(float));
	checkWriteFile(filename, p_src, subDataDim.x*subDataDim.y*subDataDim.z*sizeof(float));
	///!!! Write globally
	// Delete the file before using that
	MPI_Barrier(MPI_COMM_WORLD);
	if(rank == master)
	{
		// errCode = MPI_File_delete("test.raw", MPI_INFO_NULL);
		// if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_delete");
	}
	MPI_Barrier(MPI_COMM_WORLD);
	// Collective rewrite of the whole volume into test.raw using the same
	// subarray view that was used for reading.
	errCode = MPI_File_open(MPI_COMM_WORLD, "test.raw", MPI_MODE_RDWR|MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
	if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
	MPI_File_set_view(fh, 0, etype, subarray, "native", MPI_INFO_NULL);
	MPI_Type_free(&subarray);
	cout << "At rank " << rank << endl;
	cout << "Sub problem size will be written: " << subDataDim.x << " "
		<< subDataDim.y << " "
		<< subDataDim.z << endl;
	MPI_File_write_all(fh, p_src, subDataDim.x*subDataDim.y*subDataDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
	MPI_File_close(&fh);
	// check identical
	// if(rank==0)
	// {
	// float *ref = new float[dimx*dimy];
	// float *arr = new float[dimx*dimy];
	// checkReadFile(srcFile, ref, dimx*dimy*sizeof(float));
	// // char file[10];
	// string file = "test.raw";
	// checkReadFile(file, arr, dimx*dimy*sizeof(float));
	// // for(int y=0; y<dimy; y++)
	// // {
	// // for(int x=0; x<dimx; x++)
	// // {
	// // if(
	// // }
	// // }
	// for(int k=0; k<total; k++)
	// {
	// if(ref[k] != arr[k])
	// {
	// cout << "Do not match at " << k << endl;
	// goto cleanup;
	// }
	// }
	// cout << "Matched!!!" << endl;
	// cleanup:
	// free(ref);
	// free(arr);
	// }
	//-----------------------------------------------------------------------------------------
	MPI_Finalize();
	return 0;
} | 22c0d7078f30f1d2841e928dec6d99c17b7aff5a.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
#include <omp.h>
#include <mpi.h>
#include <cuda.h>
#include <assert.h>
#include <hetero_cmdparser.hpp>
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////
// Check and report the most recent CUDA runtime error: prints the file,
// line, error string and current device, then terminates the process.
// (Comments must stay outside the macro: '//' on a backslash-continued line
// would swallow the continuation.)
#define cudaCheckLastError() { \
	cudaError_t error = cudaGetLastError(); \
	int id; cudaGetDevice(&id); \
	if(error != cudaSuccess) { \
		printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
			__FILE__,__LINE__, cudaGetErrorString(error), id); \
		exit(EXIT_FAILURE); \
	} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Read `size` bytes from `filename` into `pData`. On failure it makes the
// *enclosing* function `return 1`.
// NOTE(review): the open uses filename.c_str() (so callers pass std::string)
// but the same `filename` is then handed to printf's "%s" — passing a
// std::string to a varargs "%s" is undefined behavior; confirm callers.
#define checkReadFile(filename, pData, size) { \
	fstream *fs = new fstream; \
	fs->open(filename.c_str(), ios::in|ios::binary); \
	if (!fs->is_open()) \
	{ \
		printf("Cannot open file '%s' in file '%s' at line %i\n", \
			filename, __FILE__, __LINE__); \
		return 1; \
	} \
	fs->read(reinterpret_cast<char*>(pData), size); \
	fs->close(); \
	delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Write `size` bytes from `pData` to `filename` (a C string here, unlike
// checkReadFile). On failure it makes the *enclosing* function `return 1`.
#define checkWriteFile(filename, pData, size) { \
	fstream *fs = new fstream; \
	fs->open(filename, ios::out|ios::binary); \
	if (!fs->is_open()) \
	{ \
		fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
			filename, __FILE__, __LINE__); \
		return 1; \
	} \
	fs->write(reinterpret_cast<char*>(pData), size); \
	fs->close(); \
	delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static void handle_error(int errcode, const char *str)
{
char msg[MPI_MAX_ERROR_STRING];
int resultlen;
MPI_Error_string(errcode, msg, &resultlen);
fprintf(stderr, "%s: %s\n", str, msg);
MPI_Abort(MPI_COMM_WORLD, 1);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Command-line schema consumed by CommandLineParser (hetero_cmdparser.hpp):
// each entry is "{ short name | long name | default value | description }".
const char* key =
	"{ h |help | | print help message }"
	"{ i |srcFile | | source of the file }"
	"{ halo|halo | | halo size }"
	"{ dimx|dimx | | dimension x }"
	"{ dimy|dimy | | dimension y }"
	"{ dimz|dimz | | dimension z }"
	;
////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
//================================================================================
// Initialize MPI
int rank, size;
char name[MPI_MAX_PROCESSOR_NAME];
int length;
int errCode;
MPI_File fh;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(name, &length);
printf("This is rank %02d, size %02d, of %s\n", rank, size, name);
MPI_Barrier(MPI_COMM_WORLD);
//-----------------------------------------------------------------------------------------
MPI_Comm comm3d; /* Cartesian communicator */
int dims[3];// = {0, 0}; /* allow MPI to choose grid block dimensions */
int periodic[3];// = {0, 0}; /* domain is non-periodic */
int reorder;// = 1; /* allow processes to be re-ranked */
int coords[3]; /* coordinates of our block in grid */
// int up, down; /* ranks of processes above and below ours */
// int left, right; /* ranks of processes to each side of ours */
//
int master = 0;
int worker;
int numMasters = 1;
int numWorkers = size;
// Parsing the arguments
CommandLineParser cmd(argc, argv, key);
if(rank==master) cmd.printParams();
MPI_Barrier(MPI_COMM_WORLD);
dims[0] = 2;
dims[1] = 2;
dims[2] = 2;
periodic[0] = 0;
periodic[1] = 0;
periodic[2] = 0;
reorder = 1;
// // Set up Cartesian grid of processors. A new communicator is
// // created we get our rank within it.
// MPI_Dims_create(rank, 2, dims); ///This line will not work
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, periodic, reorder, &comm3d );
MPI_Cart_get(comm3d, 3, dims, periodic, coords );
MPI_Comm_rank(comm3d, &rank );
printf("x %d, y %d, z %d, rank %d\n",coords[0],coords[1],coords[2],rank);
MPI_Barrier(MPI_COMM_WORLD);
//-----------------------------------------------------------------------------------------
// Retrieve the information from cmd
string srcFile = cmd.get<string>("srcFile", false);
const int halo = cmd.get<int>("halo", false);
const int dimx = cmd.get<int>("dimx", false);
const int dimy = cmd.get<int>("dimy", false);
const int dimz = cmd.get<int>("dimz", false);
const int total = dimx*dimy*dimz;
// float *h_src = new float[total];
int processIdx_1d = rank;
int3 processIdx_3d = make_int3(coords[0], coords[1], coords[2]);
/// Mimic Pack and Unpack MPI
int3 featureIdx { 0, 0, 0};
int3 processIdx { 0, 0, 0};
int3 processDim { 256, 256, 256};
int3 subDataDim {0, 0, 0};
int3 clusterDim {(dimx/processDim.x + ((dimx%processDim.x)?1:0)),
(dimy/processDim.y + ((dimy%processDim.y)?1:0)),
(dimz/processDim.z + ((dimz%processDim.z)?1:0))};
MPI_Barrier(MPI_COMM_WORLD);
// cout << "Cluster Dimension: " << clusterDim.x << " "
// << clusterDim.y << " "
// << clusterDim.z << " "
// << endl;
MPI_Barrier(MPI_COMM_WORLD);
//-----------------------------------------------------------------------------------------
MPI_Request request;
MPI_Request status;
//Start packing
/// Naive approach, copy to another buffer, then send
int3 index_3d;
double start = MPI_Wtime();
int caught = 0;
if(rank==master)
{
for(processIdx.z=0; processIdx.z<clusterDim.z; processIdx.z++)
{
for(processIdx.y=0; processIdx.y<clusterDim.y; processIdx.y++)
{
for(processIdx.x=0; processIdx.x<clusterDim.x; processIdx.x++)
{
/// !!! First step: Determine size of buffer
for(featureIdx.z=0; featureIdx.z<processDim.z; featureIdx.z++)
{
for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
{
for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
{
//3D global index
index_3d = make_int3(
processIdx.x*processDim.x+featureIdx.x,
processIdx.y*processDim.y+featureIdx.y,
processIdx.z*processDim.z+featureIdx.z);
if(index_3d.x==dimx) break;
}
if(index_3d.y==dimy) break;
}
if(index_3d.z==dimz) break;
}
subDataDim = make_int3(featureIdx.x, featureIdx.y, featureIdx.z);
cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << " " << subDataDim.z << endl;
//Second step: copy subdataSize
index_3d = make_int3(
processIdx.x*processDim.x+0,
processIdx.y*processDim.y+0,
processIdx.z*processDim.z+0);
MPI_Datatype mysubarray;
int starts[3] = {index_3d.z, index_3d.y, index_3d.x}; ///!Order is very important
int subsizes[3] = {subDataDim.z, subDataDim.y, subDataDim.x}; ///!Order is very important
int bigsizes[3] = {dimz, dimy, dimx}; ///!Order is very important
MPI_Type_create_subarray(3, bigsizes, subsizes, starts,
MPI_ORDER_C, MPI_FLOAT, &mysubarray);
MPI_Type_commit(&mysubarray);
// for(featureIdx.y=0; featureIdx.y<processDim.y; featureIdx.y++)
// {
// for(featureIdx.x=0; featureIdx.x<processDim.x; featureIdx.x++)
// {
// if(featureIdx.x == 0) // First position of first block
// {
// //2D global index
// index_2d = make_int2(
// processIdx.x*processDim.x+featureIdx.x,
// processIdx.y*processDim.y+featureIdx.y);
// if(index_2d.y<dimy)
// {
// // cout << "Caught " << ++caught << endl;
// memcpy(
// // &tmp[featureIdx.y * processDim.x],
// &tmp[featureIdx.y * subDataDim.x],
// &h_src[index_2d.y*dimx + index_2d.x],
// // processDim.x*sizeof(float));
// subDataDim.x*sizeof(float));
// }
// }
// }
// }
processIdx_1d = processIdx.z * clusterDim.y * clusterDim.x +
processIdx.y * clusterDim.x +
processIdx.x;
cout << processIdx_1d << endl;
/// !!! Send to worker process
// Send the size of message
MPI_Isend(&subDataDim, 1, MPI_LONG_DOUBLE, processIdx_1d, 0, MPI_COMM_WORLD, &request); //Data need to be long enough
cout << "Sent" << endl;
MPI_Type_free(&mysubarray);
}
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
// MPI_Recv(p_src, processDim.x*processDim.y, MPI_FLOAT, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&subDataDim, 1, MPI_LONG_DOUBLE, master, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// MPI_Irecv(&subDataDim, 1, MPI_LONG_DOUBLE, master, 0, MPI_COMM_WORLD, &request);
// MPI_Wait(&request, &status);
//MPI_Recv(p_src, subDataDim.x * subDataDim.y, MPI_FLOAT, master, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
cout << "Receive preamble read from " << rank << endl;
cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << " " << subDataDim.z << endl;
MPI_Barrier(MPI_COMM_WORLD);
float *p_src;
p_src = (float*)malloc(subDataDim.x*subDataDim.y*subDataDim.z*sizeof(float));
//-----------------------------------------------------------------------------------------
int npx = dimx/processDim.x + 2*halo;
int npy = dimy/processDim.y + 2*halo;
int npz = dimz/processDim.z + 2*halo;
// if(rank==0)
// {
// cout << "At " << rank << endl;
// cout << npx << " " << npy << " " << npz << endl;
// }
//-----------------------------------------------------------------------------------------
// // Construct the neighbor communicator
// int left, right, top, bottom, front, back;
// MPI_Cart_shift(comm3d, 1, 1, &left, &right);
// MPI_Cart_shift(comm3d, 2, 1, &top, &bottom);
// MPI_Cart_shift(comm3d, 0, 1, &front, &back);
// MPI_Barrier(MPI_COMM_WORLD);
// fprintf(stderr, "Rank %d has LR neighbours %d %d, FB %d %d, TB %d %d\n",
// rank, left, right, front, back, top, bottom);
// MPI_Barrier(MPI_COMM_WORLD);
//-----------------------------------------------------------------------------------------
// // create subarrays(exclude halo) to write to file with MPI-IO
// // data in the local array
// int sizes[3];
// sizes[0]=npz; sizes[1]=npx; sizes[2]=npy;
// int subsizes[3];
// subsizes[0]=sizes[0]-2*halo; subsizes[1]=sizes[1]-2*halo; subsizes[2]=sizes[2]-2*halo;
// int starts[3];
// starts[0]=halo; starts[1]=halo; starts[2]=halo;
// MPI_Datatype local_array;
// MPI_Type_create_subarray(3, sizes, subsizes, starts, MPI_ORDER_C, MPI_FLOAT, &local_array);
// MPI_Type_commit(&local_array);
//-----------------------------------------------------------------------------------------
// // data in the global array
// int gsizes[3];
// // gsizes[0]=nz; gsizes[1]=nx; gsizes[2]=ny;
// gsizes[0]=dimz; gsizes[1]=dimx; gsizes[2]=dimy;
// int gstarts[3];
// gstarts[0]=subsizes[0]*coords[0]; gstarts[1]=subsizes[1]*coords[1]; gstarts[2]=subsizes[2]*coords[2];
// MPI_Datatype global_array;
// MPI_Type_create_subarray(3, gsizes, subsizes, gstarts, MPI_ORDER_C, MPI_FLOAT, &global_array);
// MPI_Type_commit(&global_array);
//-----------------------------------------------------------------------------------------
// /* allocate of halo areas */
// int halosizex = npy*npz*halo;
// float *leftRecv = (float *)calloc(3*halosizex,sizeof(float));
// float *rightRecv = (float *)calloc(3*halosizex,sizeof(float));
// float *leftSend = (float *)calloc(3*halosizex,sizeof(float));
// float *rightSend = (float *)calloc(3*halosizex,sizeof(float));
// int halosizey = npx*npz*halo;
// float *frontRecv = (float *)calloc(3*halosizey,sizeof(float));
// float *backRecv = (float *)calloc(3*halosizey,sizeof(float));
// float *frontSend = (float *)calloc(3*halosizey,sizeof(float));
// float *backSend = (float *)calloc(3*halosizey,sizeof(float));
// int halosizez = npy*npx*halo;
// float *topRecv = (float *)calloc(3*halosizez,sizeof(float));
// float *bottomRecv = (float *)calloc(3*halosizez,sizeof(float));
// float *topSend = (float *)calloc(3*halosizez,sizeof(float));
// float *bottomSend = (float *)calloc(3*halosizez,sizeof(float));
//-----------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------
char *ch = strdup(srcFile.c_str());
cout << ch << endl;
MPI_Offset disp;
disp = sizeof(float)*rank*processDim.x*processDim.y*processDim.z;
MPI_Datatype etype;
etype = MPI_FLOAT;
index_3d = make_int3(
(rank%(2*2)%2)*processDim.x+0,
(rank%(2*2)/2)*processDim.y+0,
(rank/(2*2))*processDim.z+0);
// index_3d = make_int3(
// (coords[1])*processDim.x+0,
// (coords[2])*processDim.y+0,
// (coords[0])*processDim.z+0);
// index_2d = make_int2(
// (rank%2)*subDataDim.x+0,
// (rank/2)*subDataDim.y+0);
cout << "Start read from " << rank << endl;
int bigsizes[3] = {dimz, dimy, dimx}; ///!Order is very important
int subsizes[3] = {subDataDim.z, subDataDim.y, subDataDim.x}; ///!Order is very important
int starts[3] = {index_3d.z, index_3d.y, index_3d.x}; ///!Order is very important
MPI_Barrier(MPI_COMM_WORLD);
cout << "Start indices \t" << index_3d.x << " \t" << index_3d.y << " \t" << index_3d.z << " \t at " << rank << endl;
MPI_Barrier(MPI_COMM_WORLD);
MPI_Datatype subarray;
MPI_Type_create_subarray(3, bigsizes, subsizes, starts,
MPI_ORDER_C, MPI_FLOAT, &subarray);
MPI_Type_commit(&subarray);
errCode = MPI_File_open(MPI_COMM_WORLD, ch, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
// MPI_File_set_view(fh, disp, etype, subarray, "native", MPI_INFO_NULL);
MPI_File_set_view(fh, 0, etype, subarray, "native", MPI_INFO_NULL);
cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
MPI_File_read(fh, p_src, subDataDim.x*subDataDim.y*subDataDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
// MPI_File_read_all(fh, p_src, subDataDim.x*subDataDim.y, MPI_FLOAT, MPI_STATUS_IGNORE); // Process spawn and fail
// MPI_File_read_ordered(fh, p_src, subDataDim.x*subDataDim.y, MPI_FLOAT, MPI_STATUS_IGNORE);
// MPI_Type_free(&subarray);
cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
MPI_File_close(&fh);
// MPI_Barrier(MPI_COMM_WORLD); cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
if(p_src[0] !=0)
cout << "Caught " << endl;
cout << "Debug at " << __FILE__ << " " << __LINE__ << endl;
//------------------------------------------------------------------------------
// fstream fs;
// // if(rank==0)
// // {
// fs.open(srcFile.c_str(), ios::in|ios::binary);
// // cout << "Start read from " << rank << endl;
// if (!fs.is_open())
// {
// printf("Cannot open file '%s' in file '%s' at line %i\n",
// srcFile, __FILE__, __LINE__);
// return 1;
// }
// // cout << "File opened from " << rank << endl;
// // cout << "Sub problem size: " << subDataDim.x << " " << subDataDim.y << endl;
// // cout << "Dimension size: " << dimx << " " << dimy << endl;
// processIdx.x = rank%clusterDim.x;
// processIdx.y = rank/clusterDim.x;
// for(featureIdx.y=0; featureIdx.y<subDataDim.y; featureIdx.y++)
// {
// for(featureIdx.x=0; featureIdx.x<subDataDim.x; featureIdx.x++)
// {
// if(featureIdx.x == 0) // First position of row
// {
// //2D global index
// index_2d = make_int2(
// processIdx.x*subDataDim.x+featureIdx.x,
// processIdx.y*subDataDim.y+featureIdx.y);
// // cout << "Global Index 2d: " << index_2d.x << " " << index_2d.y << endl;
// if(index_2d.y<dimy) //For handling the boundary problem
// {
// fs.seekg((index_2d.y*dimx + index_2d.x)*sizeof(float), ios::beg);
// fs.read(reinterpret_cast<char*>(&p_src[featureIdx.y * subDataDim.x]), subDataDim.x*sizeof(float));
// // if(p_src[featureIdx.y * subDataDim.x] !=0)
// // cout << "Caught " << ++caught << endl;
// }
// }
// }
// }
// fs.close();
//------------------------------------------------------------------------------
MPI_Barrier(MPI_COMM_WORLD);
cout << "Finish read from " << rank << endl;
double elapsed = MPI_Wtime() - start;
if(rank==master) cout << "Time : " << elapsed << " s " << endl;
/// Debug, write partially
MPI_Barrier(MPI_COMM_WORLD);
char *filename = new char[100];
sprintf(filename, "result_%02d_%02d_%02d.raw", processIdx_3d.x, processIdx_3d.y, processIdx_3d.z);
printf("%s\n", filename);
// float *h_tmp;
// h_tmp = (float*)malloc(subDataDim.x*subDataDim.y*sizeof(float));
// cudaHostRegister(h_tmp, subDataDim.x*subDataDim.y *sizeof(float), cudaHostRegisterPortable);
// cudaMemcpy(h_tmp, p_src, subDataDim.x*subDataDim.y*sizeof(float), cudaMemcpyDeviceToHost); cudaCheckLastError();
// checkWriteFile(filename, h_tmp, subDataDim.x*subDataDim.y*sizeof(float));
// checkWriteFile(filename, p_src, processDim.x*processDim.y*sizeof(float));
checkWriteFile(filename, p_src, subDataDim.x*subDataDim.y*subDataDim.z*sizeof(float));
///!!! Write globally
// Delete the file before using that
MPI_Barrier(MPI_COMM_WORLD);
if(rank == master)
{
// errCode = MPI_File_delete("test.raw", MPI_INFO_NULL);
// if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_delete");
}
MPI_Barrier(MPI_COMM_WORLD);
errCode = MPI_File_open(MPI_COMM_WORLD, "test.raw", MPI_MODE_RDWR|MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
if (errCode != MPI_SUCCESS) handle_error(errCode, "MPI_File_open");
MPI_File_set_view(fh, 0, etype, subarray, "native", MPI_INFO_NULL);
MPI_Type_free(&subarray);
cout << "At rank " << rank << endl;
cout << "Sub problem size will be written: " << subDataDim.x << " "
<< subDataDim.y << " "
<< subDataDim.z << endl;
MPI_File_write_all(fh, p_src, subDataDim.x*subDataDim.y*subDataDim.z, MPI_FLOAT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
// check identical
// if(rank==0)
// {
// float *ref = new float[dimx*dimy];
// float *arr = new float[dimx*dimy];
// checkReadFile(srcFile, ref, dimx*dimy*sizeof(float));
// // char file[10];
// string file = "test.raw";
// checkReadFile(file, arr, dimx*dimy*sizeof(float));
// // for(int y=0; y<dimy; y++)
// // {
// // for(int x=0; x<dimx; x++)
// // {
// // if(
// // }
// // }
// for(int k=0; k<total; k++)
// {
// if(ref[k] != arr[k])
// {
// cout << "Do not match at " << k << endl;
// goto cleanup;
// }
// }
// cout << "Matched!!!" << endl;
// cleanup:
// free(ref);
// free(arr);
// }
//-----------------------------------------------------------------------------------------
MPI_Finalize();
return 0;
} |
943c70b45867b25bfec421e4f837cc1067499e95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//****************************************************************************
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
//****************************************************************************
#include "utils.h"
// GLOBAL VARS ON DEVICE
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// device functions:
// Device-side minimum of two ints.
__device__ int min_int(int a, int b) {
  return (a <= b) ? a : b;
}
// Device-side maximum of two ints.
__device__ int max_int(int a, int b) {
  return (a >= b) ? a : b;
}
// Applies a filterWidth x filterWidth convolution (e.g. a Gaussian blur) to a
// single 8-bit image channel. Taps that fall outside the image are clamped to
// the nearest edge pixel (replicate-edge boundary handling).
//
// Launch layout: one thread per output pixel on a 2D grid; threads outside the
// numRows x numCols image return immediately.
// Precondition: filter holds filterWidth*filterWidth coefficients; filterWidth
// is treated as odd (the window is centered via filterWidth/2).
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  const int2 thread_2D_pos = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
                                       blockIdx.y*blockDim.y + threadIdx.y);
  // Don't access memory outside of bounds.
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;
  const int thread_1D_pos = thread_2D_pos.y*numCols + thread_2D_pos.x;
  float result = 0.f;
  // For every value in the filter around the pixel (c, r).
  for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
    for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
      // Global image position of this filter tap, clamped to the image border.
      int image_r = min_int(max_int(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
      int image_c = min_int(max_int(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1));
      float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
      float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
      result += image_value * filter_value;
    }
  }
  // Implicit float -> unsigned char conversion; assumes the filter is
  // normalized so the weighted sum stays within [0, 255] — TODO confirm.
  outputChannel[thread_1D_pos] = result;
}
// Splits an interleaved uchar4 RGBA image into three planar 8-bit channels
// (the alpha component is discarded). One thread per pixel on a 2D grid;
// threads outside the image do nothing.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Skip threads that fall outside the image.
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  // Read the interleaved pixel once, then scatter its components.
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx]   = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx]  = pixel.z;
}
// Merges three planar 8-bit color channels back into one interleaved uchar4
// RGBA image. The alpha channel is set to 255 (no transparency). One thread
// per pixel on a 2D grid; threads outside the image do nothing.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Don't try to access memory outside the image.
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  // Gather one value from each plane and emit a single opaque pixel.
  outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                     greenChannel[idx],
                                     blueChannel[idx],
                                     255);
}
// Allocates the global device buffers (d_red/d_green/d_blue channel planes and
// the d_filter coefficient buffer) and uploads the host filter to the device.
// Must run before your_gaussian_blur(); cleanup() releases these allocations.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
  const size_t filterBytes  = sizeof(float) * filterWidth * filterWidth;
  // One plane per color channel.
  checkCudaErrors(hipMalloc(&d_red,   channelBytes));
  checkCudaErrors(hipMalloc(&d_green, channelBytes));
  checkCudaErrors(hipMalloc(&d_blue,  channelBytes));
  // Filter coefficients copied host -> device for the blur kernels.
  checkCudaErrors(hipMalloc((void**)&d_filter, filterBytes));
  checkCudaErrors(hipMemcpy(d_filter, h_filter, filterBytes, hipMemcpyHostToDevice));
}
// Host driver for the blur pipeline: split the RGBA input into R/G/B planes,
// blur each plane with the shared 2D filter, then recombine the blurred planes
// into the output image. d_red/d_green/d_blue and d_filter must already have
// been set up by allocateMemoryAndCopyToGPU().
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  // 20*20 = 400 threads per block.
  const dim3 blockSize(20,20,1);
  // Ceiling division: covers the whole image without launching a superfluous
  // extra row/column of blocks when the size divides evenly.
  const dim3 gridSize((numCols + blockSize.x - 1)/blockSize.x,
                      (numRows + blockSize.y - 1)/blockSize.y, 1);
  // Separate the RGBA image into per-channel planes.
  hipLaunchKernelGGL(( separateChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA,numRows,numCols,
                                              d_red,d_green,d_blue);
  // Synchronize after each launch so checkCudaErrors attributes failures to
  // the kernel that caused them (debug-friendly, at the cost of overlap).
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  // Blur each color channel independently.
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_red,d_redBlurred,numRows,numCols,
                                         d_filter,filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_green,d_greenBlurred,numRows,numCols,
                                         d_filter,filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_blue,d_blueBlurred,numRows,numCols,
                                         d_filter,filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  // Reassemble the blurred planes into an RGBA image (alpha forced to 255).
  hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
// Releases every device allocation made by allocateMemoryAndCopyToGPU().
void cleanup() {
  checkCudaErrors(hipFree(d_filter));
  checkCudaErrors(hipFree(d_blue));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_red));
}
| 943c70b45867b25bfec421e4f837cc1067499e95.cu | //****************************************************************************
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
//****************************************************************************
#include "utils.h"
// GLOBAL VARS ON DEVICE
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// device functions:
// Device-side minimum of two ints.
__device__ int min_int(int a, int b) {
  return (a <= b) ? a : b;
}
// Device-side maximum of two ints.
__device__ int max_int(int a, int b) {
  return (a >= b) ? a : b;
}
// Applies a filterWidth x filterWidth convolution (e.g. a Gaussian blur) to a
// single 8-bit image channel. Taps that fall outside the image are clamped to
// the nearest edge pixel (replicate-edge boundary handling).
//
// Launch layout: one thread per output pixel on a 2D grid; threads outside the
// numRows x numCols image return immediately.
// Precondition: filter holds filterWidth*filterWidth coefficients; filterWidth
// is treated as odd (the window is centered via filterWidth/2).
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  const int2 thread_2D_pos = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
                                       blockIdx.y*blockDim.y + threadIdx.y);
  // Don't access memory outside of bounds.
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;
  const int thread_1D_pos = thread_2D_pos.y*numCols + thread_2D_pos.x;
  float result = 0.f;
  // For every value in the filter around the pixel (c, r).
  for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
    for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
      // Global image position of this filter tap, clamped to the image border.
      int image_r = min_int(max_int(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
      int image_c = min_int(max_int(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1));
      float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
      float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
      result += image_value * filter_value;
    }
  }
  // Implicit float -> unsigned char conversion; assumes the filter is
  // normalized so the weighted sum stays within [0, 255] — TODO confirm.
  outputChannel[thread_1D_pos] = result;
}
// Splits an interleaved uchar4 RGBA image into three planar 8-bit channels
// (the alpha component is discarded). One thread per pixel on a 2D grid;
// threads outside the image do nothing.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Skip threads that fall outside the image.
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  // Read the interleaved pixel once, then scatter its components.
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx]   = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx]  = pixel.z;
}
// Merges three planar 8-bit color channels back into one interleaved uchar4
// RGBA image. The alpha channel is set to 255 (no transparency). One thread
// per pixel on a 2D grid; threads outside the image do nothing.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Don't try to access memory outside the image.
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  // Gather one value from each plane and emit a single opaque pixel.
  outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                     greenChannel[idx],
                                     blueChannel[idx],
                                     255);
}
// Allocates the global device buffers (d_red/d_green/d_blue channel planes and
// the d_filter coefficient buffer) and uploads the host filter to the device.
// Must run before your_gaussian_blur(); cleanup() releases these allocations.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
  const size_t filterBytes  = sizeof(float) * filterWidth * filterWidth;
  // One plane per color channel.
  checkCudaErrors(cudaMalloc(&d_red,   channelBytes));
  checkCudaErrors(cudaMalloc(&d_green, channelBytes));
  checkCudaErrors(cudaMalloc(&d_blue,  channelBytes));
  // Filter coefficients copied host -> device for the blur kernels.
  checkCudaErrors(cudaMalloc((void**)&d_filter, filterBytes));
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterBytes, cudaMemcpyHostToDevice));
}
// Host driver for the blur pipeline: split the RGBA input into R/G/B planes,
// blur each plane with the shared 2D filter, then recombine the blurred planes
// into the output image. d_red/d_green/d_blue and d_filter must already have
// been set up by allocateMemoryAndCopyToGPU().
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  // 20*20 = 400 threads per block.
  const dim3 blockSize(20,20,1);
  // Ceiling division: covers the whole image without launching a superfluous
  // extra row/column of blocks when the size divides evenly.
  const dim3 gridSize((numCols + blockSize.x - 1)/blockSize.x,
                      (numRows + blockSize.y - 1)/blockSize.y, 1);
  // Separate the RGBA image into per-channel planes.
  separateChannels<<<gridSize,blockSize>>>(d_inputImageRGBA,numRows,numCols,
                                           d_red,d_green,d_blue);
  // Synchronize after each launch so checkCudaErrors attributes failures to
  // the kernel that caused them (debug-friendly, at the cost of overlap).
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  // Blur each color channel independently.
  gaussian_blur<<<gridSize,blockSize>>>(d_red,d_redBlurred,numRows,numCols,
                                        d_filter,filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize,blockSize>>>(d_green,d_greenBlurred,numRows,numCols,
                                        d_filter,filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize,blockSize>>>(d_blue,d_blueBlurred,numRows,numCols,
                                        d_filter,filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  // Reassemble the blurred planes into an RGBA image (alpha forced to 255).
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Releases every device allocation made by allocateMemoryAndCopyToGPU().
void cleanup() {
  checkCudaErrors(cudaFree(d_filter));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_red));
}
|
76d4afc52f53949f613028395d210165a779e856.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/ConvUtils.h>
#include <algorithm>
#include <tuple>
#include <limits>
namespace at {
namespace native {
namespace {
// Depthwise 3D convolution, forward pass.
// Grid-stride loop over every output element: each thread computes the full
// kT x kH x kW weighted sum for one (batch, out_channel, t, h, w) position.
// Template parameters kKnown* pin kernel extents / dilations at compile time
// when > 0 (enables unrolling); a value <= 0 falls back to the runtime args.
// Tensors are NCTHW PackedTensorAccessor32 views; `bias` may be NULL.
// Precondition: output channels = input channels * channel_multiplier
// (depthwise layout, one filter group per input channel).
template <typename scalar_t, typename accscalar_t,
          int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
          int kKnownDilationT, int kKnownDilationH, int kKnownDilationW>
__global__ void conv_depthwise3d_cuda_kernel(
    const PackedTensorAccessor32<scalar_t, 5> input,
    PackedTensorAccessor32<scalar_t, 5> output,
    const PackedTensorAccessor32<scalar_t, 5> kernel,
    const scalar_t* bias,
    int strideT, int strideH, int strideW,
    int paddingT, int paddingH, int paddingW,
    int dilationT_, int dilationH_, int dilationW_)
{
  // Filter extents: prefer compile-time constants when provided.
  const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
  const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
  const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
  const int oC = output.size(1);
  const int oT = output.size(2);
  const int oH = output.size(3);
  const int oW = output.size(4);
  const int iC = input.size(1);
  const int iT = input.size(2);
  const int iH = input.size(3);
  const int iW = input.size(4);
  // Number of output channels produced per input channel (depthwise multiplier).
  const int channel_multiplier = oC / iC;
  const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
  const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
  const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
  // Total output elements = batch count * per-batch stride.
  const int num_output = output.size(0) * output.stride(0);
  CUDA_KERNEL_LOOP(index, num_output) {
    // Decompose the flat index into (batch, out_channel, frame, row, col).
    const int out_col = index % oW;
    const int out_row = (index / oW) % oH;
    const int out_frame = (index / oW / oH) % oT;
    const int out_channel = (index / oW / oH / oT) % oC;
    const int batch = index / oW / oH / oT / oC;
    const int in_channel = out_channel / channel_multiplier;
    // Front-top-left corner of the receptive field; may be negative because
    // of padding (handled by the bounds check below).
    const int in_col_start = out_col * strideW - paddingW;
    const int in_row_start = out_row * strideH - paddingH;
    const int in_frame_start = out_frame * strideT - paddingT;
    // Accumulate in accscalar_t (typically wider than scalar_t) for precision.
    accscalar_t sum = 0;
    const scalar_t *kernel_ptr = kernel[out_channel].data();
    const scalar_t *input_ptr =
        &input[batch][in_channel][in_frame_start][in_row_start][in_col_start];
    for (int k_frame = 0; k_frame < kT; ++k_frame) {
      const int in_frame = in_frame_start + k_frame * dilationT;
      for (int k_row = 0; k_row < kH; ++k_row) {
        const int in_row = in_row_start + k_row * dilationH;
        for (int k_col = 0; k_col < kW; ++k_col) {
          const accscalar_t op1 = *(kernel_ptr++);
          const int in_col = in_col_start + k_col * dilationW;
          // Only taps inside the input contribute; the pointer still advances
          // for out-of-bounds taps so the walk stays in sync with the indices.
          if (in_frame >= 0 && in_row >= 0 && in_col >= 0 &&
              in_frame < iT && in_row < iH && in_col < iW) {
            sum += op1 * *(input_ptr);
          }
          input_ptr += dilationW;
        }
        // Rewind past this row's kW taps, then step one dilated row down.
        input_ptr += iW * dilationH - kW * dilationW;
      }
      // Rewind past this frame's kH rows, then step one dilated frame forward.
      input_ptr += iW * (iH * dilationT - kH * dilationH);
    }
    if (bias != NULL) {
      sum += bias[out_channel];
    }
    output[batch][out_channel][out_frame][out_row][out_col] = sum;
  }
}
// Depthwise 3D convolution, backward pass w.r.t. the input.
// Grid-stride loop over every grad_input element: each thread gathers the
// contributions from all output positions (across this input channel's
// channel_multiplier output channels) whose receptive fields cover it.
// Template parameters kKnown* pin kernel extents / dilations / strides at
// compile time when > 0; a value <= 0 falls back to the runtime args.
// Tensors are NCTHW PackedTensorAccessor32 views.
template <typename scalar_t, typename accscalar_t,
          int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
          int kKnownDilationT, int kKnownDilationH, int kKnownDilationW,
          int kKnownStrideT, int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_input_kernel(
    const PackedTensorAccessor32<scalar_t, 5> grad_output,
    PackedTensorAccessor32<scalar_t, 5> grad_input,
    const PackedTensorAccessor32<scalar_t, 5> kernel,
    int strideT_, int strideH_, int strideW_,
    int paddingT, int paddingH, int paddingW,
    int dilationT_, int dilationH_, int dilationW_) {
  // Filter extents: prefer compile-time constants when provided.
  const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
  const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
  const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
  const int oC = grad_output.size(1);
  const int oT = grad_output.size(2);
  const int oH = grad_output.size(3);
  const int oW = grad_output.size(4);
  const int iC = grad_input.size(1);
  const int iT = grad_input.size(2);
  const int iH = grad_input.size(3);
  const int iW = grad_input.size(4);
  // Number of output channels fed by each input channel.
  const int channel_multiplier = oC / iC;
  const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
  const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
  const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
  const int strideT = kKnownStrideT > 0 ? kKnownStrideT : strideT_;
  const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
  const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
  // Total input elements = batch count * per-batch stride.
  const int num_input = grad_input.size(0) * grad_input.stride(0);
  CUDA_KERNEL_LOOP(index, num_input) {
    // Decompose the flat index into (batch, in_channel, frame, row, col).
    const int in_col = index % iW;
    const int in_row = (index / iW) % iH;
    const int in_frame = (index / iW / iH) % iT;
    const int in_channel = (index / iW / iH / iT) % iC;
    const int batch = index / iW / iH / iT / iC;
    // Position in padded-output coordinates; kernel offsets are subtracted
    // from these to recover candidate output positions.
    const int out_col_end = in_col + paddingW;
    const int out_row_end = in_row + paddingH;
    const int out_frame_end = in_frame + paddingT;
    // Filters for this input channel's output group are contiguous, so one
    // pointer walks across all channel_multiplier filters below.
    const scalar_t* kernel_ptr = kernel[in_channel * channel_multiplier].data();
    accscalar_t sum = 0;
    for (int k_chn = in_channel * channel_multiplier;
         k_chn < (in_channel + 1) * channel_multiplier;
         ++k_chn) {
      const scalar_t* gout_ptr = grad_output[batch][k_chn].data();
      for (int k_frame = 0; k_frame < kT; ++k_frame) {
        // Candidate output coordinate before stride division; division
        // truncates toward zero, so the exact-multiple check below rejects
        // positions the forward stride never produced.
        const int out_frame_raw = out_frame_end - k_frame * dilationT;
        const int out_frame = out_frame_raw / strideT;
        for (int k_row = 0; k_row < kH; ++k_row) {
          const int out_row_raw = out_row_end - k_row * dilationH;
          const int out_row = out_row_raw / strideH;
          for (int k_col = 0; k_col < kW; ++k_col) {
            const accscalar_t op1 = *(kernel_ptr++);
            const int out_col_raw = out_col_end - k_col * dilationW;
            const int out_col = out_col_raw / strideW;
            const int out_offs = (out_frame * oH + out_row) * oW + out_col;
            // Read the upstream gradient only when the candidate position is
            // inside the output tensor; otherwise contribute zero.
            accscalar_t op2 = (accscalar_t)0;
            if (out_col >= 0 && out_row >= 0 && out_frame >= 0 &&
                out_col < oW && out_row < oH && out_frame < oT) {
              op2 = *(gout_ptr + out_offs);
            }
            // Accumulate only if the position aligns exactly with the stride.
            if (out_frame * strideT == out_frame_raw &&
                out_row * strideH == out_row_raw &&
                out_col * strideW == out_col_raw) {
              sum += op1 * op2;
            }
          }
        }
      }
    }
    grad_input[batch][in_channel][in_frame][in_row][in_col] = sum;
  }
}
// Accumulates the weight gradient for depthwise 3D convolution.
//
// Launch contract (set up by the host dispatcher):
//   - grid.x == grad_kernel.numel() (= kC * 1 * kT * kH * kW): one block
//     computes exactly one weight element.
//   - blockDim.x must be a power of two (asserted below) and, per the host
//     launcher's block=256, a multiple of C10_WARP_SIZE.
//   - dynamic shared memory: blockDim.x * sizeof(scalar_t).
// kKnownStrideH/kKnownStrideW > 0 bake the stride in at compile time; -1
// falls back to the runtime strideH_/strideW_ arguments.
template <typename scalar_t, typename accscalar_t,
int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_weight_kernel(
const PackedTensorAccessor32<scalar_t, 5> grad_output,
const PackedTensorAccessor32<scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> grad_kernel,
int strideT, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT, int dilationH, int dilationW) {
const int kC = grad_kernel.size(0);
const int kT = grad_kernel.size(2);
const int kH = grad_kernel.size(3);
const int kW = grad_kernel.size(4);
const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
// Decode this block's weight coordinate (channel, frame, row, col)
// from the flat block index.
const int k_col = blockIdx.x % kW;
const int k_row = (blockIdx.x / kW) % kH;
const int k_frame = (blockIdx.x / kW / kH) % kT;
const int k_channel = blockIdx.x / kW / kH / kT;
scalar_t *result = &grad_kernel[k_channel][0][k_frame][k_row][k_col];
const int oT = grad_output.size(2);
const int oH = grad_output.size(3);
const int oW = grad_output.size(4);
const int iT = input.size(2);
const int iH = input.size(3);
const int iW = input.size(4);
const int channel_multiplier = grad_output.size(1) / input.size(1);
const int in_channel = k_channel / channel_multiplier;
// Raw shared buffer reinterpreted as scalar_t; used for the block-wide
// tree reduction at the end.
extern __shared__ int sdata_raw[];
scalar_t* sdata = reinterpret_cast<scalar_t*>(sdata_raw);
// Defensive guard; k_channel is uniform within a block, so either every
// thread returns here or none does — safe w.r.t. the __syncthreads below.
if (k_channel >= kC) {
return;
}
const int laneid = threadIdx.x % C10_WARP_SIZE;
const int warpid = threadIdx.x / C10_WARP_SIZE;
const int nwarps = blockDim.x / C10_WARP_SIZE;
accscalar_t grad = 0;
// Each warp owns one (batch, output-frame) pair and strides over all
// such pairs by nwarps; (batch, gout_frame) is kept in sync with
// outer_pos incrementally instead of re-dividing each iteration.
int batch = warpid / oT;
int gout_frame = warpid - batch * oT;
for (int outer_pos = warpid; outer_pos < input.size(0) * oT;
outer_pos += nwarps, gout_frame += nwarps) {
while (gout_frame >= oT) { gout_frame -= oT; batch ++; }
// Input frame this weight tap reads for the current output frame;
// frames that fall into temporal padding contribute nothing.
const int in_frame = (gout_frame * strideT) + (k_frame * dilationT) - paddingT;
if (in_frame < 0 || in_frame >= iT) {
continue;
}
const scalar_t* gout_ptr = grad_output[batch][k_channel][gout_frame].data() + laneid;
const scalar_t* input_ptr = input[batch][in_channel][in_frame].data();
// Lanes sweep the oH*oW output plane linearly in warp-sized strides;
// gout_row/gout_col track gout_ptr's position so the loop condition
// keeps every *(gout_ptr) read inside the frame.
int gout_row = laneid / oW;
int gout_col = laneid - gout_row * oW;
for (; gout_row < oH; ) {
const accscalar_t op1 = *(gout_ptr);
gout_ptr += C10_WARP_SIZE;
const int in_col = (gout_col * strideW) + (k_col * dilationW) - paddingW;
const int in_row = (gout_row * strideH) + (k_row * dilationH) - paddingH;
const int in_pos = in_row * iW + in_col;
// op2 stays 0 when the mapped input position is in spatial padding,
// so the multiply-accumulate below is branchlessly a no-op.
accscalar_t op2 = (accscalar_t)0;
if (in_col >= 0 && in_col < iW && in_row >= 0 && in_row < iH) {
op2 = *(input_ptr + in_pos);
}
gout_col += C10_WARP_SIZE;
while (gout_col >= oW) {
gout_col -= oW; gout_row ++;
}
grad += op1 * op2;
}
}
// Block-wide tree reduction over per-thread partials. NOTE(review): the
// shared buffer holds scalar_t, so for half inputs the reduction happens
// at reduced precision even though per-thread accumulation used accscalar_t.
sdata[threadIdx.x] = grad;
__syncthreads();
// The halving tree below is only correct for power-of-two block sizes.
CUDA_KERNEL_ASSERT(__popc(blockDim.x) == 1);
#pragma unroll
for (int i = blockDim.x / 2; i >= 1; i >>= 1) {
if (threadIdx.x < i) {
sdata[threadIdx.x] += sdata[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = sdata[0];
}
}
// Validates argument shapes for a depthwise convolution of spatial rank
// `dim` (dim == 3 here). `bias` and `grad_output` may be undefined
// tensors, in which case their checks are skipped. Throws via TORCH_CHECK
// on the first violated condition; check order is part of the observable
// error behavior, so it must not be rearranged.
template <int dim>
void conv_depthwise_shape_check(
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
// Hyper-parameter arrays must each carry one entry per spatial dim.
TORCH_CHECK(kernel_size.size() == dim,
"kernel size length should be ", dim, ", but got ", kernel_size.size());
TORCH_CHECK(stride.size() == dim,
"stride length should be ", dim, ", but got ", stride.size());
TORCH_CHECK(padding.size() == dim,
"padding length should be ", dim, ", but got ", padding.size());
TORCH_CHECK(dilation.size() == dim,
"dilation length should be ", dim, ", but got ", dilation.size());
TORCH_CHECK(weight.defined(),
"Weight must be defined.");
// Input may be batched (dim+2) or unbatched (dim+1).
TORCH_CHECK(input.dim() == dim + 1 || input.dim() == dim + 2,
"Input dimension should be ",
dim + 1, "D or ", dim + 2, "D, got ",
input.dim(), "D");
TORCH_CHECK(weight.dim() == dim + 2,
"Weight dimension should be ", dim + 2, "D, got ", weight.dim(), "D");
// Depthwise conv: weight shape is (out_channels, 1, k...) and
// out_channels must be a whole multiple of in_channels.
TORCH_CHECK(weight.size(1) == 1,
"Depthwise weight should have in_channels=1, got ", weight.size(1));
TORCH_CHECK(weight.size(0) % input.size(-dim - 1) == 0,
"Depthwise out channels should be a multiple of in channels, got ",
weight.size(0), " and ", input.size(-dim - 1));
for (int i = 0; i < dim; ++i) {
TORCH_CHECK(weight.size(i + 2) == kernel_size[i],
"kernel size and weight size mismatch, got ",
kernel_size, " and ", weight.sizes());
TORCH_CHECK(stride[i] >= 1,
"stride should be at least 1, got ", stride);
TORCH_CHECK(padding[i] >= 0,
"padding should be non-negative, got ", padding);
TORCH_CHECK(dilation[i] >= 1,
"dilation should be at least 1, got ", dilation);
}
if (bias.defined()) {
TORCH_CHECK(bias.dim() == 1,
"Bias should be 1D tensor, got ", bias.dim(), "D");
TORCH_CHECK(bias.size(0) == weight.size(0),
"Bias length should be equal to out_channels, got ",
bias.size(0), " and ", weight.size(0));
}
// For backward passes: grad_output must match the forward output shape
// implied by (input, weight, padding, stride, dilation).
if (grad_output.defined()) {
auto expected_output_size = conv_output_size(input.sizes(), weight.sizes(),
padding, stride, dilation);
TORCH_CHECK(grad_output.dim() == expected_output_size.size(),
"Expect grad_output to be ",
expected_output_size.size(), "D, got ",
grad_output.dim(), "D.");
for (int i = 0; i < grad_output.dim(); ++i) {
TORCH_CHECK(grad_output.size(i) == expected_output_size[i],
"Expect grad_output to be of same shape as output, got ",
grad_output.size(i), " and ", expected_output_size[i],
" at dimension ", i);
}
}
}
}
// NODEF_OR_EQUAL(x, y): true when y is the "don't care" sentinel (< 0)
// or x equals y. Used to match runtime conv parameters against the
// compile-time template specializations launched below.
#define NODEF_OR_EQUAL(x, y) ((y) < 0 || (x) == (y))
// Same test applied component-wise to a 3-element parameter array.
#define NODEF_OR_EQUAL_3(x, y1, y2, y3) \
(NODEF_OR_EQUAL(x[0], y1) && \
NODEF_OR_EQUAL(x[1], y2) && \
NODEF_OR_EQUAL(x[2], y3))
// Launches the forward kernel specialized for the given kernel-size and
// dilation constants when they match the runtime values; expands to an
// `if (...) { launch; } else` so specializations chain, terminated by
// DWCONV3D_FORWARD_DISPATCH_OTHERS. Relies on names from the enclosing
// dispatch lambda: kernel_size, dilation, stride, padding, grid, block,
// smem, input_, output_, weight_, bias_ptr, scalar_t.
#define DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(kt, kh, kw, dilt, dilh, dilw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw)>) \
, dim3(grid), dim3(block), (smem), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} else
// Fallback launch with all template constants set to -1, i.e. every
// parameter taken from the runtime arguments. Must terminate the
// specialization chain above.
#define DWCONV3D_FORWARD_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_kernel \
<scalar_t,accscalar_t, -1, -1, -1, -1, -1, -1>) \
, dim3(grid), dim3(block), (smem), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
}
// Forward depthwise 3D convolution on the GPU.
// input: (N, C, T, H, W) or unbatched (C, T, H, W); weight: (oC, 1, kT, kH, kW);
// bias: optional 1D (oC) or undefined. Returns the convolution output.
// NOTE(review): for a 4D (unbatched) input, the result computed from the
// unsqueezed input_ appears to be returned without squeezing the batch
// dim back out — confirm callers only pass batched input.
Tensor conv_depthwise3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
TORCH_CHECK(input.device() == weight.device(), "expects input and weight tensors to be on the same device.");
if (bias.defined()) {
TORCH_CHECK(input.device() == bias.device(), "expects input and bias tensors to be on the same device.");
}
conv_depthwise_shape_check<3>(input, weight, bias, Tensor() /* undefined */,
kernel_size, stride, padding, dilation);
// Kernels assume contiguous layout and a leading batch dim.
Tensor input_ = input.contiguous();
if (input.dim() == 4 /* no batch */) {
input_ = input.unsqueeze(0);
}
auto output_size = conv_output_size(input_.sizes(), weight.sizes(),
padding, stride, dilation);
for (size_t i = 0; i < output_size.size(); ++i) {
TORCH_CHECK(output_size[i] > 0,
"Output size should be positive, got ", output_size[i], " at dim ", i);
}
Tensor output = at::empty(output_size, input.options());
Tensor output_ = output;
Tensor weight_ = weight.contiguous();
Tensor bias_ = bias.defined() ? bias.contiguous() : bias;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"conv_depthwise3d",
[&]{
// The locals below (grid, block, smem, bias_ptr, input_, output_,
// weight_) are referenced by name inside the dispatch macros —
// do not rename them.
int64_t num_outputs = output_.numel();
int64_t block = 256;
int64_t grid = ::min((num_outputs - 1) / block + 1, (int64_t)65536);
int64_t smem = 0;
const scalar_t* bias_ptr =
bias_.defined() ? bias_.data_ptr<scalar_t>() : NULL;
// Range check to avoid overflow in CUDA kernels.
TORCH_CHECK(input_.numel() <= std::numeric_limits<int32_t>::max(),
"Input tensor is too large.");
TORCH_CHECK(output_.numel() <= std::numeric_limits<int32_t>::max(),
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
"Padded input tensor is too large.");
}
// Try specialized instantiations first (3x3x3 kernel and/or unit
// dilation), then fall back to the fully-runtime kernel.
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(3, 3, 3, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(-1, -1, -1, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_OTHERS
}
);
return output;
}
#undef DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION
#undef DWCONV3D_FORWARD_DISPATCH_OTHERS
// Launches the backward-input kernel specialized for the given kernel
// size, dilation, and stride constants when they match the runtime
// values; expands to `if (...) { launch; } else` so specializations
// chain, terminated by DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS. Relies
// on enclosing-scope names: kernel_size, dilation, stride, padding,
// grid, block, grad_output_, grad_input_, weight_, scalar_t.
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( \
kt, kh, kw, dilt, dilh, dilw, dt, dh, dw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw)) && \
NODEF_OR_EQUAL_3(stride, (dt), (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw), (dt), (dh), (dw)>) \
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} else
// Fully-runtime fallback for the backward-input kernel; must terminate
// the specialization chain above.
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, -1, -1, -1, -1, -1, -1, -1, -1, -1>) \
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
}
// Launches the backward-weight kernel specialized on the H/W strides
// only (the T stride is always a runtime argument there). `smem` sizes
// the dynamic shared memory for the block reduction.
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(dh, dw) \
if (NODEF_OR_EQUAL_3(stride, -1, (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, (dh), (dw)>) \
, dim3(grid), dim3(block), smem, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} else
// Fully-runtime fallback for the backward-weight kernel.
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, -1, -1>) \
, dim3(grid), dim3(block), smem, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
}
// Shared backward implementation: fills grad_input / grad_weight /
// grad_bias in place according to output_mask ({input, weight, bias}).
// Masked-off outputs are left untouched. grad_weight is assumed to be
// pre-zeroed by the caller when requested (the weight kernel writes
// every element it owns, but callers zero it defensively).
std::tuple<Tensor&, Tensor&, Tensor&> _depthwise_3d_backward_cuda_out(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
const std::array<bool, 3> output_mask)
{
TORCH_CHECK(grad_output.device() == input.device() &&
input.device() == weight.device(),
"expects input, weight and grad_output to be on the same device.");
conv_depthwise_shape_check<3>(
input, weight, Tensor() /* undefined */, grad_output,
kernel_size, stride, padding, dilation);
// Kernels require contiguous tensors.
const Tensor grad_output_ = grad_output.contiguous();
const Tensor input_ = input.contiguous();
const Tensor weight_ = weight.contiguous();
// Undefined when grad_input is not requested; referenced by the
// backward-input dispatch macros below.
Tensor grad_input_ =
(output_mask[0] ? grad_input
: Tensor());
if (output_mask[0]) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(),
"conv_depthwise3d",
[&] {
// grid/block/grad_input_/grad_output_/weight_ are referenced by
// name inside the dispatch macros — do not rename.
int64_t num_inputs = grad_input_.numel();
int64_t block = 256;
int64_t grid = ::min((num_inputs - 1) / block + 1, (int64_t)65536);
// Range check to avoid overflow in CUDA kernels.
TORCH_CHECK(grad_input_.numel() <= std::numeric_limits<int32_t>::max(),
"Input tensor is too large.");
TORCH_CHECK(grad_output_.numel() <= std::numeric_limits<int32_t>::max(),
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
"Padded input tensor is too large.");
}
// Specializations ordered most- to least-constrained; the chain
// ends at the fully-runtime fallback.
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, 1, 1, 1, 1, 1, 1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, 1, 1, 1, -1, -1, -1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, -1, -1, -1, 1, 1, 1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, -1, -1, -1, -1, -1, -1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
}
);
}
if (output_mask[1]) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(),
"conv_depthwise3d",
[&] {
// One block per weight element; smem sized for the block reduction.
int64_t grid = grad_weight.numel();
int64_t block = 256;
int64_t smem = sizeof(scalar_t) * block;
const int64_t int_max = std::numeric_limits<int32_t>::max();
// NOTE(review): this checks grad_input_, which is an undefined
// Tensor when output_mask[0] is false — presumably input_ (same
// numel) was intended; confirm behavior for {false, true, *} masks.
TORCH_CHECK(grad_input_.numel() <= int_max,
"Input tensor is too large.");
TORCH_CHECK(grad_output_.numel() <= int_max,
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= int_max,
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= int_max,
"Padded input tensor is too large.");
}
// Guard the pointer/index arithmetic in the weight kernel, whose
// plane sweep can step up to C10_WARP_SIZE past a row/frame bound.
TORCH_CHECK(grad_output_.size(0) * grad_output_.size(2) < int_max - block / C10_WARP_SIZE &&
grad_output_.size(3) <= int_max - C10_WARP_SIZE &&
grad_output_.size(4) <= int_max - C10_WARP_SIZE,
"Output size is too large.");
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(1, 1)
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(2, 2)
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS
}
);
}
if (output_mask[2]) {
// Bias gradient is a reduction of grad_output over all dims but channel.
grad_bias = grad_output.sum({0, 2, 3, 4});
}
return std::tie(grad_input, grad_weight, grad_bias);
}
// Out-variant of the depthwise 3D backward pass: computes all three
// gradients into the caller-provided tensors. grad_weight is resized to
// the weight shape and zeroed before the kernels run.
std::tuple<Tensor&, Tensor&, Tensor&> conv_depthwise3d_backward_cuda_out(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
// All three outputs requested unconditionally in the out variant.
return _depthwise_3d_backward_cuda_out(
grad_input,
grad_weight,
grad_bias,
grad_output,
input,
weight,
kernel_size,
stride,
padding,
dilation,
{true,true,true});
}
// Functional variant of the depthwise 3D backward pass. Allocates only
// the gradients requested by output_mask ({input, weight, bias}) and
// forwards to the shared implementation; unrequested gradients are
// returned as undefined tensors.
std::tuple<Tensor, Tensor, Tensor> conv_depthwise3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
const std::array<bool, 3> output_mask) {
const auto options = grad_output.options();
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias; // stays undefined here; filled by the callee if requested
if (output_mask[0]) {
grad_input = at::empty(input.sizes(), options);
}
if (output_mask[1]) {
grad_weight = at::empty(weight.sizes(), options);
}
return _depthwise_3d_backward_cuda_out(
grad_input,
grad_weight,
grad_bias,
grad_output,
input,
weight,
kernel_size,
stride,
padding,
dilation,
output_mask
);
}
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
#undef NODEF_OR_EQUAL_3
#undef NODEF_OR_EQUAL
}
}
| 76d4afc52f53949f613028395d210165a779e856.cu | #include <ATen/ATen.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/ConvUtils.h>
#include <algorithm>
#include <tuple>
#include <limits>
namespace at {
namespace native {
namespace {
// Forward depthwise 3D convolution kernel.
// One thread per output element, iterated with CUDA_KERNEL_LOOP
// (grid-stride), so any grid size is correct. Template constants
// kKnownKernel*/kKnownDilation* > 0 bake the kernel extents/dilations in
// at compile time (enables unrolling); -1 falls back to runtime values.
// Requires numel of all tensors to fit in int32 (checked by the host).
template <typename scalar_t, typename accscalar_t,
int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
int kKnownDilationT, int kKnownDilationH, int kKnownDilationW>
__global__ void conv_depthwise3d_cuda_kernel(
const PackedTensorAccessor32<scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> output,
const PackedTensorAccessor32<scalar_t, 5> kernel,
const scalar_t* bias,
int strideT, int strideH, int strideW,
int paddingT, int paddingH, int paddingW,
int dilationT_, int dilationH_, int dilationW_)
{
const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
const int oC = output.size(1);
const int oT = output.size(2);
const int oH = output.size(3);
const int oW = output.size(4);
const int iC = input.size(1);
const int iT = input.size(2);
const int iH = input.size(3);
const int iW = input.size(4);
const int channel_multiplier = oC / iC;
const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
const int num_output = output.size(0) * output.stride(0);
CUDA_KERNEL_LOOP(index, num_output) {
// Decode the flat index into (batch, channel, frame, row, col).
const int out_col = index % oW;
const int out_row = (index / oW) % oH;
const int out_frame = (index / oW / oH) % oT;
const int out_channel = (index / oW / oH / oT) % oC;
const int batch = index / oW / oH / oT / oC;
const int in_channel = out_channel / channel_multiplier;
// Top-left-front corner of the receptive field; may be negative
// (inside the padding region).
const int in_col_start = out_col * strideW - paddingW;
const int in_row_start = out_row * strideH - paddingH;
const int in_frame_start = out_frame * strideT - paddingT;
accscalar_t sum = 0;
const scalar_t *kernel_ptr = kernel[out_channel].data();
// input_ptr is stepped incrementally through the receptive field
// below; it may point out of bounds while the start indices are in
// the padding region, but it is only dereferenced after the bounds
// check inside the loop.
const scalar_t *input_ptr =
&input[batch][in_channel][in_frame_start][in_row_start][in_col_start];
for (int k_frame = 0; k_frame < kT; ++k_frame) {
const int in_frame = in_frame_start + k_frame * dilationT;
for (int k_row = 0; k_row < kH; ++k_row) {
const int in_row = in_row_start + k_row * dilationH;
for (int k_col = 0; k_col < kW; ++k_col) {
const accscalar_t op1 = *(kernel_ptr++);
const int in_col = in_col_start + k_col * dilationW;
// Accumulate only where the tap lands inside the input;
// padding contributes zero.
if (in_frame >= 0 && in_row >= 0 && in_col >= 0 &&
in_frame < iT && in_row < iH && in_col < iW) {
sum += op1 * *(input_ptr);
}
input_ptr += dilationW;
}
// Rewind the row sweep and advance one (dilated) row.
input_ptr += iW * dilationH - kW * dilationW;
}
// Rewind the plane sweep and advance one (dilated) frame.
input_ptr += iW * (iH * dilationT - kH * dilationH);
}
if (bias != NULL) {
sum += bias[out_channel];
}
output[batch][out_channel][out_frame][out_row][out_col] = sum;
}
}
// Backward-input kernel for depthwise 3D convolution.
// One thread per grad_input element (CUDA_KERNEL_LOOP / grid-stride).
// For each input position it enumerates every weight tap and maps back
// to the output position that would have read this input through that
// tap; a contribution counts only when that mapping is exact, i.e. the
// raw offset is divisible by the stride and lands inside the output.
// Template constants > 0 bake kernel extents / dilations / strides in at
// compile time; -1 falls back to the runtime arguments.
template <typename scalar_t, typename accscalar_t,
int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
int kKnownDilationT, int kKnownDilationH, int kKnownDilationW,
int kKnownStrideT, int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_input_kernel(
const PackedTensorAccessor32<scalar_t, 5> grad_output,
PackedTensorAccessor32<scalar_t, 5> grad_input,
const PackedTensorAccessor32<scalar_t, 5> kernel,
int strideT_, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT_, int dilationH_, int dilationW_) {
const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
const int oC = grad_output.size(1);
const int oT = grad_output.size(2);
const int oH = grad_output.size(3);
const int oW = grad_output.size(4);
const int iC = grad_input.size(1);
const int iT = grad_input.size(2);
const int iH = grad_input.size(3);
const int iW = grad_input.size(4);
const int channel_multiplier = oC / iC;
const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
const int strideT = kKnownStrideT > 0 ? kKnownStrideT : strideT_;
const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
const int num_input = grad_input.size(0) * grad_input.stride(0);
CUDA_KERNEL_LOOP(index, num_input) {
// Decode the flat index into (batch, channel, frame, row, col).
const int in_col = index % iW;
const int in_row = (index / iW) % iH;
const int in_frame = (index / iW / iH) % iT;
const int in_channel = (index / iW / iH / iT) % iC;
const int batch = index / iW / iH / iT / iC;
// Padded coordinates; subtracting k*dilation below gives the raw
// (stride-scaled) output coordinate for each weight tap.
const int out_col_end = in_col + paddingW;
const int out_row_end = in_row + paddingH;
const int out_frame_end = in_frame + paddingT;
const scalar_t* kernel_ptr = kernel[in_channel * channel_multiplier].data();
accscalar_t sum = 0;
// A depthwise input channel feeds channel_multiplier output channels;
// sum contributions from each of them.
for (int k_chn = in_channel * channel_multiplier;
k_chn < (in_channel + 1) * channel_multiplier;
++k_chn) {
const scalar_t* gout_ptr = grad_output[batch][k_chn].data();
for (int k_frame = 0; k_frame < kT; ++k_frame) {
const int out_frame_raw = out_frame_end - k_frame * dilationT;
const int out_frame = out_frame_raw / strideT;
for (int k_row = 0; k_row < kH; ++k_row) {
const int out_row_raw = out_row_end - k_row * dilationH;
const int out_row = out_row_raw / strideH;
for (int k_col = 0; k_col < kW; ++k_col) {
const accscalar_t op1 = *(kernel_ptr++);
const int out_col_raw = out_col_end - k_col * dilationW;
const int out_col = out_col_raw / strideW;
const int out_offs = (out_frame * oH + out_row) * oW + out_col;
// Load grad_output whenever the candidate position is in
// bounds (even if the stride check below fails) — keeps the
// loads branch-uniform; op2 stays zero otherwise.
accscalar_t op2 = (accscalar_t)0;
if (out_col >= 0 && out_row >= 0 && out_frame >= 0 &&
out_col < oW && out_row < oH && out_frame < oT) {
op2 = *(gout_ptr + out_offs);
}
// Count the contribution only when the raw coordinates are
// exactly divisible by the strides, i.e. this output really
// read this input through this tap in the forward pass.
if (out_frame * strideT == out_frame_raw &&
out_row * strideH == out_row_raw &&
out_col * strideW == out_col_raw) {
sum += op1 * op2;
}
}
}
}
}
grad_input[batch][in_channel][in_frame][in_row][in_col] = sum;
}
}
// Accumulates the weight gradient for depthwise 3D convolution.
//
// Launch contract (set up by the host dispatcher):
//   - grid.x == grad_kernel.numel() (= kC * 1 * kT * kH * kW): one block
//     computes exactly one weight element.
//   - blockDim.x must be a power of two (asserted below) and, per the host
//     launcher's block=256, a multiple of C10_WARP_SIZE.
//   - dynamic shared memory: blockDim.x * sizeof(scalar_t).
// kKnownStrideH/kKnownStrideW > 0 bake the stride in at compile time; -1
// falls back to the runtime strideH_/strideW_ arguments.
template <typename scalar_t, typename accscalar_t,
int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_weight_kernel(
const PackedTensorAccessor32<scalar_t, 5> grad_output,
const PackedTensorAccessor32<scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> grad_kernel,
int strideT, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT, int dilationH, int dilationW) {
const int kC = grad_kernel.size(0);
const int kT = grad_kernel.size(2);
const int kH = grad_kernel.size(3);
const int kW = grad_kernel.size(4);
const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
// Decode this block's weight coordinate (channel, frame, row, col)
// from the flat block index.
const int k_col = blockIdx.x % kW;
const int k_row = (blockIdx.x / kW) % kH;
const int k_frame = (blockIdx.x / kW / kH) % kT;
const int k_channel = blockIdx.x / kW / kH / kT;
scalar_t *result = &grad_kernel[k_channel][0][k_frame][k_row][k_col];
const int oT = grad_output.size(2);
const int oH = grad_output.size(3);
const int oW = grad_output.size(4);
const int iT = input.size(2);
const int iH = input.size(3);
const int iW = input.size(4);
const int channel_multiplier = grad_output.size(1) / input.size(1);
const int in_channel = k_channel / channel_multiplier;
// Raw shared buffer reinterpreted as scalar_t; used for the block-wide
// tree reduction at the end.
extern __shared__ int sdata_raw[];
scalar_t* sdata = reinterpret_cast<scalar_t*>(sdata_raw);
// Defensive guard; k_channel is uniform within a block, so either every
// thread returns here or none does — safe w.r.t. the __syncthreads below.
if (k_channel >= kC) {
return;
}
const int laneid = threadIdx.x % C10_WARP_SIZE;
const int warpid = threadIdx.x / C10_WARP_SIZE;
const int nwarps = blockDim.x / C10_WARP_SIZE;
accscalar_t grad = 0;
// Each warp owns one (batch, output-frame) pair and strides over all
// such pairs by nwarps; (batch, gout_frame) is kept in sync with
// outer_pos incrementally instead of re-dividing each iteration.
int batch = warpid / oT;
int gout_frame = warpid - batch * oT;
for (int outer_pos = warpid; outer_pos < input.size(0) * oT;
outer_pos += nwarps, gout_frame += nwarps) {
while (gout_frame >= oT) { gout_frame -= oT; batch ++; }
// Input frame this weight tap reads for the current output frame;
// frames that fall into temporal padding contribute nothing.
const int in_frame = (gout_frame * strideT) + (k_frame * dilationT) - paddingT;
if (in_frame < 0 || in_frame >= iT) {
continue;
}
const scalar_t* gout_ptr = grad_output[batch][k_channel][gout_frame].data() + laneid;
const scalar_t* input_ptr = input[batch][in_channel][in_frame].data();
// Lanes sweep the oH*oW output plane linearly in warp-sized strides;
// gout_row/gout_col track gout_ptr's position so the loop condition
// keeps every *(gout_ptr) read inside the frame.
int gout_row = laneid / oW;
int gout_col = laneid - gout_row * oW;
for (; gout_row < oH; ) {
const accscalar_t op1 = *(gout_ptr);
gout_ptr += C10_WARP_SIZE;
const int in_col = (gout_col * strideW) + (k_col * dilationW) - paddingW;
const int in_row = (gout_row * strideH) + (k_row * dilationH) - paddingH;
const int in_pos = in_row * iW + in_col;
// op2 stays 0 when the mapped input position is in spatial padding,
// so the multiply-accumulate below is branchlessly a no-op.
accscalar_t op2 = (accscalar_t)0;
if (in_col >= 0 && in_col < iW && in_row >= 0 && in_row < iH) {
op2 = *(input_ptr + in_pos);
}
gout_col += C10_WARP_SIZE;
while (gout_col >= oW) {
gout_col -= oW; gout_row ++;
}
grad += op1 * op2;
}
}
// Block-wide tree reduction over per-thread partials. NOTE(review): the
// shared buffer holds scalar_t, so for half inputs the reduction happens
// at reduced precision even though per-thread accumulation used accscalar_t.
sdata[threadIdx.x] = grad;
__syncthreads();
// The halving tree below is only correct for power-of-two block sizes.
CUDA_KERNEL_ASSERT(__popc(blockDim.x) == 1);
#pragma unroll
for (int i = blockDim.x / 2; i >= 1; i >>= 1) {
if (threadIdx.x < i) {
sdata[threadIdx.x] += sdata[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = sdata[0];
}
}
// Validates argument shapes for a depthwise convolution of spatial rank
// `dim` (dim == 3 here). `bias` and `grad_output` may be undefined
// tensors, in which case their checks are skipped. Throws via TORCH_CHECK
// on the first violated condition; check order is part of the observable
// error behavior, so it must not be rearranged.
template <int dim>
void conv_depthwise_shape_check(
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
// Hyper-parameter arrays must each carry one entry per spatial dim.
TORCH_CHECK(kernel_size.size() == dim,
"kernel size length should be ", dim, ", but got ", kernel_size.size());
TORCH_CHECK(stride.size() == dim,
"stride length should be ", dim, ", but got ", stride.size());
TORCH_CHECK(padding.size() == dim,
"padding length should be ", dim, ", but got ", padding.size());
TORCH_CHECK(dilation.size() == dim,
"dilation length should be ", dim, ", but got ", dilation.size());
TORCH_CHECK(weight.defined(),
"Weight must be defined.");
// Input may be batched (dim+2) or unbatched (dim+1).
TORCH_CHECK(input.dim() == dim + 1 || input.dim() == dim + 2,
"Input dimension should be ",
dim + 1, "D or ", dim + 2, "D, got ",
input.dim(), "D");
TORCH_CHECK(weight.dim() == dim + 2,
"Weight dimension should be ", dim + 2, "D, got ", weight.dim(), "D");
// Depthwise conv: weight shape is (out_channels, 1, k...) and
// out_channels must be a whole multiple of in_channels.
TORCH_CHECK(weight.size(1) == 1,
"Depthwise weight should have in_channels=1, got ", weight.size(1));
TORCH_CHECK(weight.size(0) % input.size(-dim - 1) == 0,
"Depthwise out channels should be a multiple of in channels, got ",
weight.size(0), " and ", input.size(-dim - 1));
for (int i = 0; i < dim; ++i) {
TORCH_CHECK(weight.size(i + 2) == kernel_size[i],
"kernel size and weight size mismatch, got ",
kernel_size, " and ", weight.sizes());
TORCH_CHECK(stride[i] >= 1,
"stride should be at least 1, got ", stride);
TORCH_CHECK(padding[i] >= 0,
"padding should be non-negative, got ", padding);
TORCH_CHECK(dilation[i] >= 1,
"dilation should be at least 1, got ", dilation);
}
if (bias.defined()) {
TORCH_CHECK(bias.dim() == 1,
"Bias should be 1D tensor, got ", bias.dim(), "D");
TORCH_CHECK(bias.size(0) == weight.size(0),
"Bias length should be equal to out_channels, got ",
bias.size(0), " and ", weight.size(0));
}
// For backward passes: grad_output must match the forward output shape
// implied by (input, weight, padding, stride, dilation).
if (grad_output.defined()) {
auto expected_output_size = conv_output_size(input.sizes(), weight.sizes(),
padding, stride, dilation);
TORCH_CHECK(grad_output.dim() == expected_output_size.size(),
"Expect grad_output to be ",
expected_output_size.size(), "D, got ",
grad_output.dim(), "D.");
for (int i = 0; i < grad_output.dim(); ++i) {
TORCH_CHECK(grad_output.size(i) == expected_output_size[i],
"Expect grad_output to be of same shape as output, got ",
grad_output.size(i), " and ", expected_output_size[i],
" at dimension ", i);
}
}
}
}
// NODEF_OR_EQUAL(x, y): true when y is the "don't care" sentinel (< 0)
// or x equals y. Used to match runtime conv parameters against the
// compile-time template specializations launched below.
#define NODEF_OR_EQUAL(x, y) ((y) < 0 || (x) == (y))
// Same test applied component-wise to a 3-element parameter array.
#define NODEF_OR_EQUAL_3(x, y1, y2, y3) \
(NODEF_OR_EQUAL(x[0], y1) && \
NODEF_OR_EQUAL(x[1], y2) && \
NODEF_OR_EQUAL(x[2], y3))
// Launches the forward kernel specialized for the given kernel-size and
// dilation constants when they match the runtime values; expands to an
// `if (...) { launch; } else` so specializations chain, terminated by
// DWCONV3D_FORWARD_DISPATCH_OTHERS. Relies on names from the enclosing
// dispatch lambda: kernel_size, dilation, stride, padding, grid, block,
// smem, input_, output_, weight_, bias_ptr, scalar_t.
#define DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(kt, kh, kw, dilt, dilh, dilw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw)> \
<<<grid, block, (smem), at::cuda::getCurrentCUDAStream()>>>( \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else
// Fallback launch with all template constants set to -1, i.e. every
// parameter taken from the runtime arguments. Must terminate the
// specialization chain above.
#define DWCONV3D_FORWARD_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_kernel \
<scalar_t,accscalar_t, -1, -1, -1, -1, -1, -1> \
<<<grid, block, (smem), at::cuda::getCurrentCUDAStream()>>>( \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
// Forward depthwise 3D convolution on the GPU.
// input: (N, C, T, H, W) or unbatched (C, T, H, W); weight: (oC, 1, kT, kH, kW);
// bias: optional 1D (oC) or undefined. Returns the convolution output.
// NOTE(review): for a 4D (unbatched) input, the result computed from the
// unsqueezed input_ appears to be returned without squeezing the batch
// dim back out — confirm callers only pass batched input.
Tensor conv_depthwise3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
TORCH_CHECK(input.device() == weight.device(), "expects input and weight tensors to be on the same device.");
if (bias.defined()) {
TORCH_CHECK(input.device() == bias.device(), "expects input and bias tensors to be on the same device.");
}
conv_depthwise_shape_check<3>(input, weight, bias, Tensor() /* undefined */,
kernel_size, stride, padding, dilation);
// Kernels assume contiguous layout and a leading batch dim.
Tensor input_ = input.contiguous();
if (input.dim() == 4 /* no batch */) {
input_ = input.unsqueeze(0);
}
auto output_size = conv_output_size(input_.sizes(), weight.sizes(),
padding, stride, dilation);
for (size_t i = 0; i < output_size.size(); ++i) {
TORCH_CHECK(output_size[i] > 0,
"Output size should be positive, got ", output_size[i], " at dim ", i);
}
Tensor output = at::empty(output_size, input.options());
Tensor output_ = output;
Tensor weight_ = weight.contiguous();
Tensor bias_ = bias.defined() ? bias.contiguous() : bias;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"conv_depthwise3d",
[&]{
// The locals below (grid, block, smem, bias_ptr, input_, output_,
// weight_) are referenced by name inside the dispatch macros —
// do not rename them.
int64_t num_outputs = output_.numel();
int64_t block = 256;
int64_t grid = std::min((num_outputs - 1) / block + 1, (int64_t)65536);
int64_t smem = 0;
const scalar_t* bias_ptr =
bias_.defined() ? bias_.data_ptr<scalar_t>() : NULL;
// Range check to avoid overflow in CUDA kernels.
TORCH_CHECK(input_.numel() <= std::numeric_limits<int32_t>::max(),
"Input tensor is too large.");
TORCH_CHECK(output_.numel() <= std::numeric_limits<int32_t>::max(),
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
"Padded input tensor is too large.");
}
// Try specialized instantiations first (3x3x3 kernel and/or unit
// dilation), then fall back to the fully-runtime kernel.
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(3, 3, 3, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(-1, -1, -1, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_OTHERS
}
);
return output;
}
#undef DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION
#undef DWCONV3D_FORWARD_DISPATCH_OTHERS
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( \
kt, kh, kw, dilt, dilh, dilw, dt, dh, dw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw)) && \
NODEF_OR_EQUAL_3(stride, (dt), (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw), (dt), (dh), (dw)> \
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else
// Fallback for the backward-input dispatch chain: launches the fully generic
// kernel (all compile-time sizes set to -1). Must directly follow the last
// `else` produced by DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION.
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, -1, -1, -1, -1, -1, -1, -1, -1, -1> \
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
// Dispatch helper for the backward-weight kernel, specialized only on the
// stride's h/w components (the depth component is left dynamic via -1).
// Expands to `if (...) { launch } else` for chaining; relies on grid, block,
// smem, grad_output_, input_, grad_weight, stride, padding, dilation and
// scalar_t being in scope. Note it writes into `grad_weight` directly (the
// caller's output tensor), not a contiguous copy.
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(dh, dw) \
if (NODEF_OR_EQUAL_3(stride, -1, (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, (dh), (dw)> \
<<<grid, block, smem, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else
// Fallback for the backward-weight dispatch chain: fully dynamic strides.
// Must directly follow the last `else` produced by
// DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION.
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, -1, -1> \
<<<grid, block, smem, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
// Shared backward implementation for depthwise conv3d.
// Computes grad_input / grad_weight / grad_bias into the tensors passed by
// reference, as selected by output_mask, and returns a tie of all three.
// - grad_input is written only when output_mask[0] is set;
// - grad_weight is written when output_mask[1] is set (it must then be
//   defined and zeroed by the caller -- see conv_depthwise3d_backward_cuda_out);
// - grad_bias is overwritten when output_mask[2] is set.
// All tensors are expected on the same CUDA device; shapes are validated by
// conv_depthwise_shape_check<3>.
std::tuple<Tensor&, Tensor&, Tensor&> _depthwise_3d_backward_cuda_out(
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias,
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation,
    const std::array<bool, 3> output_mask)
{
  TORCH_CHECK(grad_output.device() == input.device() &&
              input.device() == weight.device(),
              "expects input, weight and grad_output to be on the same device.");
  conv_depthwise_shape_check<3>(
      input, weight, Tensor() /* undefined */, grad_output,
      kernel_size, stride, padding, dilation);

  // Kernels use packed 5-D accessors and require contiguous storage.
  const Tensor grad_output_ = grad_output.contiguous();
  const Tensor input_ = input.contiguous();
  const Tensor weight_ = weight.contiguous();
  // Left undefined when the input gradient is not requested.
  Tensor grad_input_ =
      (output_mask[0] ? grad_input
                      : Tensor());

  if (output_mask[0]) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        grad_output.scalar_type(),
        "conv_depthwise3d",
        [&] {
          int64_t num_inputs = grad_input_.numel();
          int64_t block = 256;
          // One thread per grad_input element, grid capped at 65536 blocks.
          int64_t grid = std::min((num_inputs - 1) / block + 1, (int64_t)65536);

          // Range check to avoid overflow in CUDA kernels.
          TORCH_CHECK(grad_input_.numel() <= std::numeric_limits<int32_t>::max(),
                      "Input tensor is too large.");
          TORCH_CHECK(grad_output_.numel() <= std::numeric_limits<int32_t>::max(),
                      "Output tensor is too large.");
          TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
                      "Weight tensor is too large.");
          for (int i = 0; i < 3; ++i) {
            TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
                        "Padded input tensor is too large.");
          }

          // Try the specialized kernels first; the macros chain with `else`
          // and fall through to the fully generic kernel.
          DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
              3, 3, 3, 1, 1, 1, 1, 1, 1)
          DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
              3, 3, 3, 1, 1, 1, -1, -1, -1)
          DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
              3, 3, 3, -1, -1, -1, 1, 1, 1)
          DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
              3, 3, 3, -1, -1, -1, -1, -1, -1)
          DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
        }
    );
  }

  if (output_mask[1]) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        grad_output.scalar_type(),
        "conv_depthwise3d",
        [&] {
          // One block per grad_weight element; `smem` holds one scalar per
          // thread for the in-block reduction.
          int64_t grid = grad_weight.numel();
          int64_t block = 256;
          int64_t smem = sizeof(scalar_t) * block;

          const int64_t int_max = std::numeric_limits<int32_t>::max();
          // BUGFIX: range-check input_ (which is what the weight kernel
          // actually reads), not grad_input_ -- grad_input_ is an undefined
          // Tensor when output_mask[0] is false, and calling numel() on it
          // would throw.
          TORCH_CHECK(input_.numel() <= int_max,
                      "Input tensor is too large.");
          TORCH_CHECK(grad_output_.numel() <= int_max,
                      "Output tensor is too large.");
          TORCH_CHECK(weight_.numel() <= int_max,
                      "Weight tensor is too large.");
          for (int i = 0; i < 3; ++i) {
            TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= int_max,
                        "Padded input tensor is too large.");
          }
          TORCH_CHECK(grad_output_.size(0) * grad_output_.size(2) < int_max - block / C10_WARP_SIZE &&
                      grad_output_.size(3) <= int_max - C10_WARP_SIZE &&
                      grad_output_.size(4) <= int_max - C10_WARP_SIZE,
                      "Output size is too large.");

          DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(1, 1)
          DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(2, 2)
          DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS
        }
    );
  }

  if (output_mask[2]) {
    // Bias gradient: reduce grad_output over batch and spatial dims,
    // keeping the channel dim (dim 1).
    grad_bias = grad_output.sum({0, 2, 3, 4});
  }

  return std::tie(grad_input, grad_weight, grad_bias);
}
// Out-variant of the depthwise conv3d backward: all three gradients are
// always computed into the caller-provided tensors.
std::tuple<Tensor&, Tensor&, Tensor&> conv_depthwise3d_backward_cuda_out(
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias,
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation) {
  // Give the weight-gradient destination the right shape and clear it
  // up-front (it is zeroed here; presumably the weight kernel accumulates
  // into it rather than overwriting).
  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  return _depthwise_3d_backward_cuda_out(
      grad_input,
      grad_weight,
      grad_bias,
      grad_output,
      input,
      weight,
      kernel_size,
      stride,
      padding,
      dilation,
      {true,true,true});
}
// Allocating variant of the depthwise conv3d backward: creates fresh output
// tensors for whichever gradients output_mask requests and delegates to the
// shared implementation. grad_bias stays undefined here; the shared impl
// fills it in when output_mask[2] is set.
std::tuple<Tensor, Tensor, Tensor> conv_depthwise3d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation,
    const std::array<bool, 3> output_mask) {
  const auto options = grad_output.options();
  Tensor grad_input;
  Tensor grad_weight;
  Tensor grad_bias; /* undefined temporarily */
  if (output_mask[0]) {
    grad_input = at::empty(input.sizes(), options);
  }
  if (output_mask[1]) {
    grad_weight = at::empty(weight.sizes(), options);
  }
  return _depthwise_3d_backward_cuda_out(
      grad_input,
      grad_weight,
      grad_bias,
      grad_output,
      input,
      weight,
      kernel_size,
      stride,
      padding,
      dilation,
      output_mask
  );
}
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
#undef NODEF_OR_EQUAL_3
#undef NODEF_OR_EQUAL
}
}
|
dfabe58756b8b5d6b0d6e1206106428a8234806f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "cuda/stereocsbp.hpp"
namespace cv { namespace cuda { namespace device
{
namespace stereocsbp
{
///////////////////////////////////////////////////////////////
/////////////////////// init data cost ////////////////////////
///////////////////////////////////////////////////////////////
// Pixel dissimilarity between a left/right image pixel pair, truncated at
// max_data_term. The primary template is only declared; the 1/3/4 channel
// cases are specialized below.
template <int channels> static float __device__ pixeldiff(const uchar* left, const uchar* right, float max_data_term);
// Single channel: truncated absolute intensity difference.
template<> __device__ __forceinline__ float pixeldiff<1>(const uchar* left, const uchar* right, float max_data_term)
{
    const int diff = ::abs((int)left[0] - (int)right[0]);
    return fminf((float)diff, max_data_term);
}
// Three channels: weighted sum of per-channel absolute differences
// (0.299/0.587/0.114 weights applied to channels 2/1/0), truncated.
template<> __device__ __forceinline__ float pixeldiff<3>(const uchar* left, const uchar* right, float max_data_term)
{
    const float diff_c0 = (float)::abs((int)left[0] - right[0]);
    const float diff_c1 = (float)::abs((int)left[1] - right[1]);
    const float diff_c2 = (float)::abs((int)left[2] - right[2]);
    const float cost = 0.299f * diff_c2 + 0.587f * diff_c1 + 0.114f * diff_c0;
    return fminf(cost, max_data_term);
}
// Four channels: same weighted difference as the 3-channel case, but loads
// both pixels as a single uchar4 each; the fourth component is ignored.
template<> __device__ __forceinline__ float pixeldiff<4>(const uchar* left, const uchar* right, float max_data_term)
{
    const uchar4 lp = *reinterpret_cast<const uchar4*>(left);
    const uchar4 rp = *reinterpret_cast<const uchar4*>(right);
    const float cost = 0.299f * ::abs((int)lp.z - rp.z)
                     + 0.587f * ::abs((int)lp.y - rp.y)
                     + 0.114f * ::abs((int)lp.x - rp.x);
    return fminf(cost, max_data_term);
}
// Per-pixel selection of the nr_plane cheapest disparity planes by repeated
// global minimum search over all ndisp data costs stored in ctemp.
// Writes the chosen costs to data_cost_selected_ and the chosen disparity
// indices to selected_disp_pyr; per-pixel values are addressed by
// y * msg_step + x, with disparity planes strided by disp_step.
// One thread per pixel; expects a 2D launch covering w x h.
template <typename T>
__global__ void get_first_k_initial_global(uchar *ctemp, T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane, int ndisp,
size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
T* selected_disparity = selected_disp_pyr + y * msg_step + x;
T* data_cost_selected = data_cost_selected_ + y * msg_step + x;
T* data_cost = (T*)ctemp + y * msg_step + x;
for(int i = 0; i < nr_plane; i++)
{
// Linear scan for the cheapest remaining disparity candidate.
T minimum = device::numeric_limits<T>::max();
int id = 0;
for(int d = 0; d < ndisp; d++)
{
T cur = data_cost[d * disp_step];
if(cur < minimum)
{
minimum = cur;
id = d;
}
}
data_cost_selected[i * disp_step] = minimum;
selected_disparity[i * disp_step] = id;
// Invalidate the chosen entry so the next pass picks the runner-up.
data_cost [id * disp_step] = numeric_limits<T>::max();
}
}
}
// Per-pixel selection of up to nr_plane disparity planes, preferring local
// minima of the cost curve (cost[d] strictly below both neighbors); any
// remaining slots are filled by repeated global minimum search, as in
// get_first_k_initial_global. Same memory layout and launch shape as that
// kernel: one thread per pixel, planes strided by disp_step.
template <typename T>
__global__ void get_first_k_initial_local(uchar *ctemp, T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane, int ndisp,
size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
T* selected_disparity = selected_disp_pyr + y * msg_step + x;
T* data_cost_selected = data_cost_selected_ + y * msg_step + x;
T* data_cost = (T*)ctemp + y * msg_step + x;
int nr_local_minimum = 0;
// Sliding (prev, cur, next) window over the cost curve to spot local minima.
T prev = data_cost[0 * disp_step];
T cur = data_cost[1 * disp_step];
T next = data_cost[2 * disp_step];
for (int d = 1; d < ndisp - 1 && nr_local_minimum < nr_plane; d++)
{
if (cur < prev && cur < next)
{
data_cost_selected[nr_local_minimum * disp_step] = cur;
selected_disparity[nr_local_minimum * disp_step] = d;
// Mark as taken so the fill loop below cannot pick it again.
data_cost[d * disp_step] = numeric_limits<T>::max();
nr_local_minimum++;
}
prev = cur;
cur = next;
next = data_cost[(d + 1) * disp_step];
}
// Fill any remaining planes with the globally cheapest leftover disparities.
for (int i = nr_local_minimum; i < nr_plane; i++)
{
T minimum = numeric_limits<T>::max();
int id = 0;
for (int d = 0; d < ndisp; d++)
{
cur = data_cost[d * disp_step];
if (cur < minimum)
{
minimum = cur;
id = d;
}
}
data_cost_selected[i * disp_step] = minimum;
selected_disparity[i * disp_step] = id;
data_cost[id * disp_step] = numeric_limits<T>::max();
}
}
}
// Computes the initial data cost for every disparity d at pyramid `level`:
// each coarse pixel (x, y) aggregates pixeldiff over its
// (1 << level) x (1 << level) footprint in the full-resolution images.
// Matches that fall out of range (xr < 0) or below min_disp are charged the
// truncation cost data_weight * max_data_term. Results are written into
// ctemp, one plane per disparity, strided by disp_step.
// One thread per coarse pixel; expects a 2D launch covering w x h.
template <typename T, int channels>
__global__ void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step,
int h, int w, int level, int ndisp, float data_weight, float max_data_term,
int min_disp, size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
// Footprint of this coarse pixel in the full-resolution image.
int y0 = y << level;
int yt = (y + 1) << level;
int x0 = x << level;
int xt = (x + 1) << level;
T* data_cost = (T*)ctemp + y * msg_step + x;
for(int d = 0; d < ndisp; ++d)
{
float val = 0.0f;
for(int yi = y0; yi < yt; yi++)
{
for(int xi = x0; xi < xt; xi++)
{
int xr = xi - d;
if(d < min_disp || xr < 0)
val += data_weight * max_data_term;
else
{
const uchar* lle = cleft + yi * cimg_step + xi * channels;
const uchar* lri = cright + yi * cimg_step + xr * channels;
val += data_weight * pixeldiff<channels>(lle, lri, max_data_term);
}
}
}
data_cost[disp_step * d] = saturate_cast<T>(val);
}
}
}
// Windowed variant of init_data_cost for coarser pyramid levels: each block
// handles one output pixel and a chunk of disparities (blockIdx.y packs both
// the output row, blockIdx.y % h, and the disparity chunk). The winsz
// threads along x each accumulate one image column of the window over
// `len` rows; partial sums are then combined with a shared-memory reduction
// and only lane 0 writes the result.
// Requires dynamic shared memory of winsz * blockDim.z floats
// (one winsz-slice per threadIdx.z).
template <typename T, int winsz, int channels>
__global__ void init_data_cost_reduce(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step,
int level, int rows, int cols, int h, int ndisp, float data_weight, float max_data_term,
int min_disp, size_t msg_step, size_t disp_step)
{
int x_out = blockIdx.x;
int y_out = blockIdx.y % h;
int d = (blockIdx.y / h) * blockDim.z + threadIdx.z;
int tid = threadIdx.x;
if (d < ndisp)
{
int x0 = x_out << level;
int y0 = y_out << level;
// Window height, clipped at the image bottom.
int len = ::min(y0 + winsz, rows) - y0;
float val = 0.0f;
if (x0 + tid < cols)
{
if (x0 + tid - d < 0 || d < min_disp)
val = data_weight * max_data_term * len;
else
{
const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid );
const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - d);
// Walk down one column of the window.
for(int y = 0; y < len; ++y)
{
val += data_weight * pixeldiff<channels>(lle, lri, max_data_term);
lle += cimg_step;
lri += cimg_step;
}
}
}
extern __shared__ float smem[];
reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>());
T* data_cost = (T*)ctemp + y_out * msg_step + x_out;
if (tid == 0)
data_cost[disp_step * d] = saturate_cast<T>(val);
}
}
// Host launcher for the direct (non-reduction) init_data_cost kernel:
// 32x8 threads, grid covering w x h, dispatched on the image channel count.
// rows/cols are unused here (the direct kernel derives everything from
// h, w and level).
template <typename T>
void init_data_cost_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int /*rows*/, int /*cols*/, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(w, threads.x);
grid.y = divUp(h, threads.y);
switch (channels)
{
case 1:hipLaunchKernelGGL(( init_data_cost<T, 1>), dim3(grid), dim3(threads), 0, stream, cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break;
case 3:hipLaunchKernelGGL(( init_data_cost<T, 3>), dim3(grid), dim3(threads), 0, stream, cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break;
case 4:hipLaunchKernelGGL(( init_data_cost<T, 4>), dim3(grid), dim3(threads), 0, stream, cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break;
default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
}
// Host launcher for the windowed-reduction init_data_cost variant: one block
// per (pixel, disparity chunk) -- grid (w, h * chunks) -- with winsz threads
// per window column, 256 threads total, and one float of dynamic shared
// memory per thread.
template <typename T, int winsz>
void init_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, hipStream_t stream)
{
const int threadsNum = 256;
const size_t smem_size = threadsNum * sizeof(float);
dim3 threads(winsz, 1, threadsNum / winsz);
dim3 grid(w, h, 1);
grid.y *= divUp(ndisp, threads.z);
switch (channels)
{
case 1:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 1>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break;
case 3:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 3>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break;
case 4:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 4>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break;
default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
}
// Host entry point: computes the per-disparity data costs for pyramid
// `level` into ctemp, then selects nr_plane planes per pixel (local-minima
// based or global, depending on use_local_init_data_cost).
// The caller table is indexed by `level`: levels 0-1 use the direct kernel,
// higher levels the reduction kernel with winsz = 1 << level.
// Synchronizes after each launch when running on the default stream.
// NOTE(review): the typedef's parameter names (cols, rows, w, h) are swapped
// relative to the actual caller definitions (rows, cols, h, w); all are ints
// passed positionally, so behavior is unaffected, but the names mislead.
template<class T>
void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step,
int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, hipStream_t stream)
{
typedef void (*InitDataCostCaller)(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int cols, int rows, int w, int h, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, hipStream_t stream);
static const InitDataCostCaller init_data_cost_callers[] =
{
init_data_cost_caller_<T>, init_data_cost_caller_<T>, init_data_cost_reduce_caller_<T, 4>,
init_data_cost_reduce_caller_<T, 8>, init_data_cost_reduce_caller_<T, 16>, init_data_cost_reduce_caller_<T, 32>,
init_data_cost_reduce_caller_<T, 64>, init_data_cost_reduce_caller_<T, 128>, init_data_cost_reduce_caller_<T, 256>
};
size_t disp_step = msg_step * h;
init_data_cost_callers[level](cleft, cright, ctemp, cimg_step, rows, cols, h, w, level, ndisp, channels, data_weight, max_data_term, min_disp, msg_step, disp_step, stream);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
// Select the nr_plane best planes per pixel from the freshly computed costs.
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(w, threads.x);
grid.y = divUp(h, threads.y);
if (use_local_init_data_cost == true)
hipLaunchKernelGGL(( get_first_k_initial_local), dim3(grid), dim3(threads), 0, stream, ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step);
else
hipLaunchKernelGGL(( get_first_k_initial_global), dim3(grid), dim3(threads), 0, stream, ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Explicit instantiations for the two supported cost types.
template void init_data_cost<short>(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step,
int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, hipStream_t stream);
template void init_data_cost<float>(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step,
int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, hipStream_t stream);
///////////////////////////////////////////////////////////////
////////////////////// compute data cost //////////////////////
///////////////////////////////////////////////////////////////
// Recomputes data costs at the current level, but only for the nr_plane
// disparities previously selected at the coarser level (read from
// selected_disp_pyr at (y/2, x/2), planes strided by disp_step2).
// Otherwise mirrors init_data_cost: aggregates pixeldiff over the
// (1 << level)-sized footprint, charging the truncation cost for invalid
// matches. Output planes in data_cost_ are strided by disp_step1.
// One thread per pixel; expects a 2D launch covering w x h.
template <typename T, int channels>
__global__ void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
// Footprint of this pixel in the full-resolution image.
int y0 = y << level;
int yt = (y + 1) << level;
int x0 = x << level;
int xt = (x + 1) << level;
const T* selected_disparity = selected_disp_pyr + y/2 * msg_step + x/2;
T* data_cost = data_cost_ + y * msg_step + x;
for(int d = 0; d < nr_plane; d++)
{
float val = 0.0f;
for(int yi = y0; yi < yt; yi++)
{
for(int xi = x0; xi < xt; xi++)
{
int sel_disp = selected_disparity[d * disp_step2];
int xr = xi - sel_disp;
if (xr < 0 || sel_disp < min_disp)
val += data_weight * max_data_term;
else
{
const uchar* left_x = cleft + yi * cimg_step + xi * channels;
const uchar* right_x = cright + yi * cimg_step + xr * channels;
val += data_weight * pixeldiff<channels>(left_x, right_x, max_data_term);
}
}
}
data_cost[disp_step1 * d] = saturate_cast<T>(val);
}
}
}
// Windowed-reduction variant of compute_data_cost, analogous to
// init_data_cost_reduce: one block per (pixel, plane chunk), winsz threads
// each sum one window column for the plane's selected disparity, combined
// via shared-memory reduction; only lane 0 writes.
// Requires dynamic shared memory of winsz * blockDim.z floats.
template <typename T, int winsz, int channels>
__global__ void compute_data_cost_reduce(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2)
{
int x_out = blockIdx.x;
int y_out = blockIdx.y % h;
int d = (blockIdx.y / h) * blockDim.z + threadIdx.z;
int tid = threadIdx.x;
// Candidate disparities come from the coarser level at (y/2, x/2).
const T* selected_disparity = selected_disp_pyr + y_out/2 * msg_step + x_out/2;
T* data_cost = data_cost_ + y_out * msg_step + x_out;
if (d < nr_plane)
{
int sel_disp = selected_disparity[d * disp_step2];
int x0 = x_out << level;
int y0 = y_out << level;
// Window height, clipped at the image bottom.
int len = ::min(y0 + winsz, rows) - y0;
float val = 0.0f;
if (x0 + tid < cols)
{
if (x0 + tid - sel_disp < 0 || sel_disp < min_disp)
val = data_weight * max_data_term * len;
else
{
const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid );
const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - sel_disp);
for(int y = 0; y < len; ++y)
{
val += data_weight * pixeldiff<channels>(lle, lri, max_data_term);
lle += cimg_step;
lri += cimg_step;
}
}
}
extern __shared__ float smem[];
reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>());
if (tid == 0)
data_cost[disp_step1 * d] = saturate_cast<T>(val);
}
}
// Host launcher for the direct compute_data_cost kernel: 32x8 threads,
// grid covering w x h, dispatched on the image channel count.
template <typename T>
void compute_data_cost_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/,
int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(w, threads.x);
grid.y = divUp(h, threads.y);
switch(channels)
{
case 1:hipLaunchKernelGGL(( compute_data_cost<T, 1>), dim3(grid), dim3(threads), 0, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break;
case 3:hipLaunchKernelGGL(( compute_data_cost<T, 3>), dim3(grid), dim3(threads), 0, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break;
case 4:hipLaunchKernelGGL(( compute_data_cost<T, 4>), dim3(grid), dim3(threads), 0, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break;
default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
}
// Host launcher for the windowed-reduction compute_data_cost variant:
// one block per (pixel, plane chunk), 256 threads total with winsz along x,
// one float of dynamic shared memory per thread.
template <typename T, int winsz>
void compute_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols,
int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, hipStream_t stream)
{
const int threadsNum = 256;
const size_t smem_size = threadsNum * sizeof(float);
dim3 threads(winsz, 1, threadsNum / winsz);
dim3 grid(w, h, 1);
grid.y *= divUp(nr_plane, threads.z);
switch (channels)
{
case 1:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 1>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break;
case 3:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 3>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break;
case 4:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 4>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break;
default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
}
// Host entry point: recomputes the data costs for the planes selected at the
// coarser level. disp_step1 / disp_step2 are the plane strides at the
// current (h) and coarser (h2) level respectively. The caller table is
// indexed by `level` like in init_data_cost. Synchronizes on the default
// stream.
template<class T>
void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, size_t msg_step,
int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term,
int min_disp, hipStream_t stream)
{
typedef void (*ComputeDataCostCaller)(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols,
int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, hipStream_t stream);
static const ComputeDataCostCaller callers[] =
{
compute_data_cost_caller_<T>, compute_data_cost_caller_<T>, compute_data_cost_reduce_caller_<T, 4>,
compute_data_cost_reduce_caller_<T, 8>, compute_data_cost_reduce_caller_<T, 16>, compute_data_cost_reduce_caller_<T, 32>,
compute_data_cost_reduce_caller_<T, 64>, compute_data_cost_reduce_caller_<T, 128>, compute_data_cost_reduce_caller_<T, 256>
};
size_t disp_step1 = msg_step * h;
size_t disp_step2 = msg_step * h2;
callers[level](cleft, cright, cimg_step, disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2, stream);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Explicit instantiations for the two supported cost types.
template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const short* disp_selected_pyr, short* data_cost, size_t msg_step,
int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, hipStream_t stream);
template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const float* disp_selected_pyr, float* data_cost, size_t msg_step,
int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, hipStream_t stream);
///////////////////////////////////////////////////////////////
//////////////////////// init message /////////////////////////
///////////////////////////////////////////////////////////////
// From the nr_plane2 candidate planes of the coarser level, picks the
// nr_plane cheapest (ranked by the aggregated cost in data_cost_new) for the
// finer level. For each pick it copies the matching data cost, disparity and
// the four incoming messages from the coarse level (strided by disp_step2)
// into the fine-level arrays (strided by disp_step1). Picked entries in
// data_cost_new are invalidated so the next iteration selects the runner-up.
template <typename T>
__device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new,
const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,
T* data_cost_selected, T* disparity_selected_new, T* data_cost_new,
const T* data_cost_cur, const T* disparity_selected_cur,
int nr_plane, int nr_plane2, size_t disp_step1, size_t disp_step2)
{
for(int i = 0; i < nr_plane; i++)
{
// Linear scan for the cheapest remaining candidate plane.
T minimum = numeric_limits<T>::max();
int id = 0;
for(int j = 0; j < nr_plane2; j++)
{
T cur = data_cost_new[j * disp_step1];
if(cur < minimum)
{
minimum = cur;
id = j;
}
}
data_cost_selected[i * disp_step1] = data_cost_cur[id * disp_step1];
disparity_selected_new[i * disp_step1] = disparity_selected_cur[id * disp_step2];
u_new[i * disp_step1] = u_cur[id * disp_step2];
d_new[i * disp_step1] = d_cur[id * disp_step2];
l_new[i * disp_step1] = l_cur[id * disp_step2];
r_new[i * disp_step1] = r_cur[id * disp_step2];
// Invalidate so this plane cannot be picked again.
data_cost_new[id * disp_step1] = numeric_limits<T>::max();
}
}
// Initializes fine-level messages from the coarser level. For every fine
// pixel, each of the nr_plane2 coarse candidate planes gets an aggregated
// score data_cost + u + d + l + r, where the messages are read from the
// appropriate coarse-level neighbors (border-clamped); the scores are
// staged in ctemp and the best nr_plane planes are then selected -- with
// their messages copied -- via get_first_k_element_increase.
// One thread per fine pixel; expects a 2D launch covering w x h.
template <typename T>
__global__ void init_message(uchar *ctemp, T* u_new_, T* d_new_, T* l_new_, T* r_new_,
const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_,
T* selected_disp_pyr_new, const T* selected_disp_pyr_cur,
T* data_cost_selected_, const T* data_cost_,
int h, int w, int nr_plane, int h2, int w2, int nr_plane2,
size_t msg_step, size_t disp_step1, size_t disp_step2)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
// Messages arriving at this pixel come from coarse-level neighbors,
// clamped at the coarse grid borders.
const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * msg_step + x/2;
const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * msg_step + x/2;
const T* l_cur = l_cur_ + (y/2) * msg_step + ::min(w2-1, x/2 + 1);
const T* r_cur = r_cur_ + (y/2) * msg_step + ::max(0, x/2 - 1);
T* data_cost_new = (T*)ctemp + y * msg_step + x;
const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * msg_step + x/2;
const T* data_cost = data_cost_ + y * msg_step + x;
// Aggregated score for every coarse candidate plane.
for(int d = 0; d < nr_plane2; d++)
{
int idx2 = d * disp_step2;
T val = data_cost[d * disp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2];
data_cost_new[d * disp_step1] = val;
}
T* data_cost_selected = data_cost_selected_ + y * msg_step + x;
T* disparity_selected_new = selected_disp_pyr_new + y * msg_step + x;
T* u_new = u_new_ + y * msg_step + x;
T* d_new = d_new_ + y * msg_step + x;
T* l_new = l_new_ + y * msg_step + x;
T* r_new = r_new_ + y * msg_step + x;
// Re-point to the coarse pixel itself for copying the selected messages.
u_cur = u_cur_ + y/2 * msg_step + x/2;
d_cur = d_cur_ + y/2 * msg_step + x/2;
l_cur = l_cur_ + y/2 * msg_step + x/2;
r_cur = r_cur_ + y/2 * msg_step + x/2;
get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur,
data_cost_selected, disparity_selected_new, data_cost_new,
data_cost, disparity_selected_cur, nr_plane, nr_plane2,
disp_step1, disp_step2);
}
}
// Host launcher for the init_message kernel: 32x8 threads over a w x h grid.
// disp_step1 / disp_step2 are the plane strides at the fine (h) and coarse
// (h2) level. Synchronizes on the default stream.
template<class T>
void init_message(uchar *ctemp, T* u_new, T* d_new, T* l_new, T* r_new,
const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,
T* selected_disp_pyr_new, const T* selected_disp_pyr_cur,
T* data_cost_selected, const T* data_cost, size_t msg_step,
int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream)
{
size_t disp_step1 = msg_step * h;
size_t disp_step2 = msg_step * h2;
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(w, threads.x);
grid.y = divUp(h, threads.y);
hipLaunchKernelGGL(( init_message), dim3(grid), dim3(threads), 0, stream, ctemp, u_new, d_new, l_new, r_new,
u_cur, d_cur, l_cur, r_cur,
selected_disp_pyr_new, selected_disp_pyr_cur,
data_cost_selected, data_cost,
h, w, nr_plane, h2, w2, nr_plane2,
msg_step, disp_step1, disp_step2);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Explicit instantiations for the two supported cost types.
template void init_message(uchar *ctemp, short* u_new, short* d_new, short* l_new, short* r_new,
const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur,
short* selected_disp_pyr_new, const short* selected_disp_pyr_cur,
short* data_cost_selected, const short* data_cost, size_t msg_step,
int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream);
template void init_message(uchar *ctemp, float* u_new, float* d_new, float* l_new, float* r_new,
const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur,
float* selected_disp_pyr_new, const float* selected_disp_pyr_cur,
float* data_cost_selected, const float* data_cost, size_t msg_step,
int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream);
///////////////////////////////////////////////////////////////
//////////////////// calc all iterations /////////////////////
///////////////////////////////////////////////////////////////
// Computes one outgoing message for a pixel over its nr_plane planes:
//  1) msg_dst[d] = data[d] + msg1[d] + msg2[d] + msg3[d], tracking the min;
//  2) each entry is then capped by the truncated-linear discontinuity
//     penalty: min(minimum + max_disc_term,
//                  min_d2(msg_dst[d2] + disc_single_jump * |dst_disp[d2] - src_disp[d]|));
//  3) the mean over planes is subtracted to normalize the message.
// `temp` is per-pixel scratch space (inside ctemp) used to stage the capped
// values before normalization.
// NOTE(review): max_disc_term is an int parameter mixed into float math, and
// `fmin` (double overload) is used here rather than fminf -- presumably
// intentional/inherited; verify precision assumptions before changing.
template <typename T>
__device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3,
const T* dst_disp, const T* src_disp, int nr_plane, int max_disc_term, float disc_single_jump, volatile T* temp,
size_t disp_step)
{
T minimum = numeric_limits<T>::max();
for(int d = 0; d < nr_plane; d++)
{
int idx = d * disp_step;
T val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx];
if(val < minimum)
minimum = val;
msg_dst[idx] = val;
}
float sum = 0;
for(int d = 0; d < nr_plane; d++)
{
// Truncation cap: never exceed the cheapest plane plus max_disc_term.
float cost_min = minimum + max_disc_term;
T src_disp_reg = src_disp[d * disp_step];
for(int d2 = 0; d2 < nr_plane; d2++)
cost_min = fmin(cost_min, msg_dst[d2 * disp_step] + disc_single_jump * ::abs(dst_disp[d2 * disp_step] - src_disp_reg));
temp[d * disp_step] = saturate_cast<T>(cost_min);
sum += cost_min;
}
// Normalize by subtracting the mean so message magnitudes stay bounded.
sum /= nr_plane;
for(int d = 0; d < nr_plane; d++)
msg_dst[d * disp_step] = saturate_cast<T>(temp[d * disp_step] - sum);
}
// One red-black (checkerboard) sweep: each launch updates only interior pixels
// whose column parity equals ((y + i) & 1), so the neighbour values read here
// were written on the previous (opposite-parity) sweep. The 1-pixel border is
// never updated.
template <typename T>
__global__ void compute_message(uchar *ctemp, T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i, int max_disc_term, float disc_single_jump, size_t msg_step, size_t disp_step)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Each thread covers two columns; the parity bit selects which one this sweep.
int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1);
if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
{
const T* data = data_cost_selected + y * msg_step + x;
// Outgoing message buffers for the four directions at this pixel.
T* u = u_ + y * msg_step + x;
T* d = d_ + y * msg_step + x;
T* l = l_ + y * msg_step + x;
T* r = r_ + y * msg_step + x;
const T* disp = selected_disp_pyr_cur + y * msg_step + x;
T* temp = (T*)ctemp + y * msg_step + x; // scratch, reused across kernels
// Each call combines the messages arriving from the other three directions
// (offsets of +/- 1 and +/- msg_step address the four neighbours).
message_per_pixel(data, u, r - 1, u + msg_step, l + 1, disp, disp - msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
message_per_pixel(data, d, d - msg_step, r - 1, l + 1, disp, disp + msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
message_per_pixel(data, l, u + msg_step, d - msg_step, l + 1, disp, disp - 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
message_per_pixel(data, r, u + msg_step, d - msg_step, r - 1, disp, disp + 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
}
}
// Runs `iters` red-black message-passing sweeps over the current pyramid
// level. Each compute_message launch updates one checkerboard parity (t & 1);
// the grid is half-width in x because each thread handles every other column.
// Fix: removed the stray ';' after the function body (empty declaration).
template<class T>
void calc_all_iterations(uchar *ctemp, T* u, T* d, T* l, T* r, const T* data_cost_selected,
    const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, hipStream_t stream)
{
    size_t disp_step = msg_step * h; // element stride between disparity planes
    dim3 threads(32, 8, 1);
    dim3 grid(1, 1, 1);
    grid.x = divUp(w, threads.x << 1); // << 1: two columns per thread (checkerboard)
    grid.y = divUp(h, threads.y);
    for(int t = 0; t < iters; ++t)
    {
        hipLaunchKernelGGL(( compute_message), dim3(grid), dim3(threads), 0, stream, ctemp, u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1, max_disc_term, disc_single_jump, msg_step, disp_step);
        cudaSafeCall( hipGetLastError() );
    }
    // Legacy semantics: launches on the default stream stay synchronous.
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Explicit instantiations for the two supported element types (short and float).
template void calc_all_iterations(uchar *ctemp, short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step,
int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, hipStream_t stream);
template void calc_all_iterations(uchar *ctemp, float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step,
int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, hipStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////////////// output ////////////////////////////
///////////////////////////////////////////////////////////////
// Winner-take-all readout: for each interior pixel pick, among its nr_plane
// candidates, the one minimising data cost plus the four incoming messages,
// and write that candidate's disparity to the output map.
template <typename T>
__global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_,
const T* data_cost_selected, const T* disp_selected_pyr,
PtrStepSz<short> disp, int nr_plane, size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Interior pixels only — the 1-pixel border has no complete message set.
if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1)
{
const T* data = data_cost_selected + y * msg_step + x;
const T* disp_selected = disp_selected_pyr + y * msg_step + x;
// Incoming messages are read from the neighbours that point at this pixel:
// 'up' from the row below, 'down' from the row above, and so on.
const T* u = u_ + (y+1) * msg_step + (x+0);
const T* d = d_ + (y-1) * msg_step + (x+0);
const T* l = l_ + (y+0) * msg_step + (x+1);
const T* r = r_ + (y+0) * msg_step + (x-1);
int best = 0;
T best_val = numeric_limits<T>::max();
for (int i = 0; i < nr_plane; ++i)
{
int idx = i * disp_step;
T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx];
if (val < best_val)
{
best_val = val;
best = saturate_cast<short>(disp_selected[idx]);
}
}
disp(y, x) = best;
}
}
// Host-side launcher for the final winner-take-all disparity readout.
template<class T>
void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step,
                  const PtrStepSz<short>& disp, int nr_plane, hipStream_t stream)
{
    // Element stride between disparity planes in the message buffers.
    const size_t disp_step = disp.rows * msg_step;

    // One thread per output pixel.
    const dim3 threads(32, 8, 1);
    const dim3 grid(divUp(disp.cols, threads.x), divUp(disp.rows, threads.y), 1);

    hipLaunchKernelGGL(( compute_disp), dim3(grid), dim3(threads), 0, stream, u, d, l, r, data_cost_selected, disp_selected, disp, nr_plane, msg_step, disp_step);
    cudaSafeCall( hipGetLastError() );

    // Legacy semantics: launches on the default stream stay synchronous.
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Explicit instantiations for the two supported element types (short and float).
template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step,
const PtrStepSz<short>& disp, int nr_plane, hipStream_t stream);
template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step,
const PtrStepSz<short>& disp, int nr_plane, hipStream_t stream);
} // namespace stereocsbp
}}} // namespace cv { namespace cuda { namespace cudev {
#endif /* CUDA_DISABLER */
| dfabe58756b8b5d6b0d6e1206106428a8234806f.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "cuda/stereocsbp.hpp"
namespace cv { namespace cuda { namespace device
{
namespace stereocsbp
{
///////////////////////////////////////////////////////////////
/////////////////////// init data cost ////////////////////////
///////////////////////////////////////////////////////////////
// Truncated per-pixel matching cost between a left and a right sample,
// specialised on the number of interleaved channels; the cost is clamped
// to max_data_term.
template <int channels> static float __device__ pixeldiff(const uchar* left, const uchar* right, float max_data_term);
// 1 channel: plain absolute difference.
template<> __device__ __forceinline__ float pixeldiff<1>(const uchar* left, const uchar* right, float max_data_term)
{
return fminf( ::abs((int)*left - *right), max_data_term);
}
// 3 channels: luma-weighted absolute differences (weights 0.114/0.587/0.299
// for channels 0/1/2 — consistent with BGR channel order).
template<> __device__ __forceinline__ float pixeldiff<3>(const uchar* left, const uchar* right, float max_data_term)
{
float tb = 0.114f * ::abs((int)left[0] - right[0]);
float tg = 0.587f * ::abs((int)left[1] - right[1]);
float tr = 0.299f * ::abs((int)left[2] - right[2]);
return fminf(tr + tg + tb, max_data_term);
}
// 4 channels: same weighting, fetching each pixel as a single uchar4 load;
// the fourth channel (w) is ignored.
template<> __device__ __forceinline__ float pixeldiff<4>(const uchar* left, const uchar* right, float max_data_term)
{
uchar4 l = *((const uchar4*)left);
uchar4 r = *((const uchar4*)right);
float tb = 0.114f * ::abs((int)l.x - r.x);
float tg = 0.587f * ::abs((int)l.y - r.y);
float tr = 0.299f * ::abs((int)l.z - r.z);
return fminf(tr + tg + tb, max_data_term);
}
// Seed the per-pixel candidate set: repeatedly take the globally cheapest of
// the ndisp data costs held in ctemp, record (cost, disparity), then replace
// the winner with the type maximum so the next pass finds the runner-up.
// O(nr_plane * ndisp) per pixel.
template <typename T>
__global__ void get_first_k_initial_global(uchar *ctemp, T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane, int ndisp,
size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
T* selected_disparity = selected_disp_pyr + y * msg_step + x;
T* data_cost_selected = data_cost_selected_ + y * msg_step + x;
T* data_cost = (T*)ctemp + y * msg_step + x;
for(int i = 0; i < nr_plane; i++)
{
T minimum = device::numeric_limits<T>::max();
int id = 0;
for(int d = 0; d < ndisp; d++)
{
T cur = data_cost[d * disp_step];
if(cur < minimum)
{
minimum = cur;
id = d;
}
}
data_cost_selected[i * disp_step] = minimum;
selected_disparity[i * disp_step] = id;
// Knock the winner out so it cannot be selected twice.
data_cost [id * disp_step] = numeric_limits<T>::max();
}
}
}
// Seed the candidate set preferring local minima of the cost curve: scan
// disparities 1..ndisp-2 and take up to nr_plane interior local minima
// (strictly cheaper than both neighbours); if fewer are found, fill the
// remaining slots with the globally cheapest leftovers.
// NOTE(review): planes 0..2 are read up front — assumes ndisp >= 3; confirm
// at the call site.
template <typename T>
__global__ void get_first_k_initial_local(uchar *ctemp, T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane, int ndisp,
size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
T* selected_disparity = selected_disp_pyr + y * msg_step + x;
T* data_cost_selected = data_cost_selected_ + y * msg_step + x;
T* data_cost = (T*)ctemp + y * msg_step + x;
int nr_local_minimum = 0;
// Sliding window of three consecutive plane costs.
T prev = data_cost[0 * disp_step];
T cur = data_cost[1 * disp_step];
T next = data_cost[2 * disp_step];
for (int d = 1; d < ndisp - 1 && nr_local_minimum < nr_plane; d++)
{
if (cur < prev && cur < next)
{
data_cost_selected[nr_local_minimum * disp_step] = cur;
selected_disparity[nr_local_minimum * disp_step] = d;
// Remove the selected plane from further consideration.
data_cost[d * disp_step] = numeric_limits<T>::max();
nr_local_minimum++;
}
prev = cur;
cur = next;
next = data_cost[(d + 1) * disp_step];
}
// Top up with global minima when not enough local minima were found.
for (int i = nr_local_minimum; i < nr_plane; i++)
{
T minimum = numeric_limits<T>::max();
int id = 0;
for (int d = 0; d < ndisp; d++)
{
cur = data_cost[d * disp_step];
if (cur < minimum)
{
minimum = cur;
id = d;
}
}
data_cost_selected[i * disp_step] = minimum;
selected_disparity[i * disp_step] = id;
data_cost[id * disp_step] = numeric_limits<T>::max();
}
}
}
// Data cost at pyramid `level`: each output cell (x, y) aggregates the
// truncated pixel difference over the (1 << level) x (1 << level) block of
// full-resolution pixels it covers, for every candidate disparity d.
// Invalid matches (xr < 0 or d < min_disp) pay the maximum penalty.
// Results go into the shared scratch buffer ctemp with plane stride disp_step.
template <typename T, int channels>
__global__ void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step,
int h, int w, int level, int ndisp, float data_weight, float max_data_term,
int min_disp, size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
// Full-resolution footprint of this coarse cell.
int y0 = y << level;
int yt = (y + 1) << level;
int x0 = x << level;
int xt = (x + 1) << level;
T* data_cost = (T*)ctemp + y * msg_step + x;
for(int d = 0; d < ndisp; ++d)
{
float val = 0.0f;
for(int yi = y0; yi < yt; yi++)
{
for(int xi = x0; xi < xt; xi++)
{
int xr = xi - d; // matching column in the right image
if(d < min_disp || xr < 0)
val += data_weight * max_data_term;
else
{
const uchar* lle = cleft + yi * cimg_step + xi * channels;
const uchar* lri = cright + yi * cimg_step + xr * channels;
val += data_weight * pixeldiff<channels>(lle, lri, max_data_term);
}
}
}
data_cost[disp_step * d] = saturate_cast<T>(val);
}
}
}
// Same data cost as init_data_cost, but for larger windows (winsz = 1 << level):
// blockDim.x (= winsz) threads each accumulate one column of the coarse cell,
// then a winsz-wide shared-memory reduction combines the partials and lane 0
// writes the sum. blockIdx.y folds (output row, disparity chunk); blockDim.z
// disparities are handled per block.
template <typename T, int winsz, int channels>
__global__ void init_data_cost_reduce(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step,
int level, int rows, int cols, int h, int ndisp, float data_weight, float max_data_term,
int min_disp, size_t msg_step, size_t disp_step)
{
int x_out = blockIdx.x;
int y_out = blockIdx.y % h;                              // output row
int d = (blockIdx.y / h) * blockDim.z + threadIdx.z;     // disparity plane
int tid = threadIdx.x;
if (d < ndisp)
{
int x0 = x_out << level;
int y0 = y_out << level;
int len = ::min(y0 + winsz, rows) - y0; // clip window at the image bottom
float val = 0.0f;
if (x0 + tid < cols)
{
if (x0 + tid - d < 0 || d < min_disp)
val = data_weight * max_data_term * len; // whole column is invalid
else
{
const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid );
const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - d);
// Walk down one column of the window.
for(int y = 0; y < len; ++y)
{
val += data_weight * pixeldiff<channels>(lle, lri, max_data_term);
lle += cimg_step;
lri += cimg_step;
}
}
}
// Per-z-slice shared-memory reduction of the winsz column partials.
extern __shared__ float smem[];
reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>());
T* data_cost = (T*)ctemp + y_out * msg_step + x_out;
if (tid == 0)
data_cost[disp_step * d] = saturate_cast<T>(val);
}
}
// Launches the naive per-thread data-cost kernel, dispatching at run time on
// the channel count to the matching template instantiation.
template <typename T>
void init_data_cost_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int /*rows*/, int /*cols*/, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream)
{
    const dim3 threads(32, 8, 1);
    const dim3 grid(divUp(w, threads.x), divUp(h, threads.y), 1);

    if (channels == 1)
        init_data_cost<T, 1><<<grid, threads, 0, stream>>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step);
    else if (channels == 3)
        init_data_cost<T, 3><<<grid, threads, 0, stream>>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step);
    else if (channels == 4)
        init_data_cost<T, 4><<<grid, threads, 0, stream>>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step);
    else
        CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
// Launches the reduction variant of the data-cost kernel: winsz threads per
// window column, threadsNum / winsz disparities per block, with one float of
// shared memory per thread for the reduction.
template <typename T, int winsz>
void init_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream)
{
    const int threadsNum = 256;
    const size_t smem_size = threadsNum * sizeof(float);
    const dim3 threads(winsz, 1, threadsNum / winsz);

    // blockIdx.y folds (row, disparity chunk).
    dim3 grid(w, h, 1);
    grid.y *= divUp(ndisp, threads.z);

    if (channels == 1)
        init_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step);
    else if (channels == 3)
        init_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step);
    else if (channels == 4)
        init_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step);
    else
        CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
// Host entry for the coarsest level: computes the full data-cost volume into
// ctemp, then selects the initial nr_plane candidates per pixel (local-minima
// or global-minima strategy, per use_local_init_data_cost).
// Caller table is indexed by `level`: levels 0-1 use the naive kernel, level
// k >= 2 uses the reduction kernel with winsz = 1 << k.
template<class T>
void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step,
int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream)
{
// NOTE(review): the typedef's parameter names (cols, rows, w, h) are swapped
// relative to the actual callers (rows, cols, h, w); names in a function
// pointer type are non-binding, but they are misleading — verify intent.
typedef void (*InitDataCostCaller)(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int cols, int rows, int w, int h, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream);
static const InitDataCostCaller init_data_cost_callers[] =
{
init_data_cost_caller_<T>, init_data_cost_caller_<T>, init_data_cost_reduce_caller_<T, 4>,
init_data_cost_reduce_caller_<T, 8>, init_data_cost_reduce_caller_<T, 16>, init_data_cost_reduce_caller_<T, 32>,
init_data_cost_reduce_caller_<T, 64>, init_data_cost_reduce_caller_<T, 128>, init_data_cost_reduce_caller_<T, 256>
};
size_t disp_step = msg_step * h; // element stride between disparity planes
init_data_cost_callers[level](cleft, cright, ctemp, cimg_step, rows, cols, h, w, level, ndisp, channels, data_weight, max_data_term, min_disp, msg_step, disp_step, stream);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
// Candidate selection pass: one thread per pixel.
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(w, threads.x);
grid.y = divUp(h, threads.y);
if (use_local_init_data_cost == true)
get_first_k_initial_local<<<grid, threads, 0, stream>>> (ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step);
else
get_first_k_initial_global<<<grid, threads, 0, stream>>>(ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Explicit instantiations for the two supported element types (short and float).
template void init_data_cost<short>(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step,
int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream);
template void init_data_cost<float>(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step,
int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream);
///////////////////////////////////////////////////////////////
////////////////////// compute data cost //////////////////////
///////////////////////////////////////////////////////////////
// Data cost at an intermediate pyramid level: like init_data_cost, but only
// the nr_plane candidate disparities selected at the coarser level (read from
// the parent cell (x/2, y/2) with plane stride disp_step2) are evaluated;
// results are written with the fine-level plane stride disp_step1.
// Fix: the candidate disparity load selected_disparity[d * disp_step2] is
// invariant in (xi, yi) — hoisted out of the inner pixel loops.
template <typename T, int channels>
__global__ void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y < h && x < w)
    {
        // Full-resolution footprint of this coarse cell.
        int y0 = y << level;
        int yt = (y + 1) << level;
        int x0 = x << level;
        int xt = (x + 1) << level;
        const T* selected_disparity = selected_disp_pyr + y/2 * msg_step + x/2;
        T* data_cost = data_cost_ + y * msg_step + x;
        for(int d = 0; d < nr_plane; d++)
        {
            // Hoisted: depends only on d, not on the window position.
            int sel_disp = selected_disparity[d * disp_step2];
            float val = 0.0f;
            for(int yi = y0; yi < yt; yi++)
            {
                for(int xi = x0; xi < xt; xi++)
                {
                    int xr = xi - sel_disp; // matching column in the right image
                    if (xr < 0 || sel_disp < min_disp)
                        val += data_weight * max_data_term;
                    else
                    {
                        const uchar* left_x = cleft + yi * cimg_step + xi * channels;
                        const uchar* right_x = cright + yi * cimg_step + xr * channels;
                        val += data_weight * pixeldiff<channels>(left_x, right_x, max_data_term);
                    }
                }
            }
            data_cost[disp_step1 * d] = saturate_cast<T>(val);
        }
    }
}
// Reduction variant of compute_data_cost for larger windows: winsz threads
// each sum one column of the coarse cell for one selected candidate, then a
// winsz-wide shared-memory reduction combines them and lane 0 writes the sum.
// blockIdx.y folds (output row, candidate chunk); candidates come from the
// coarser-level parent cell (x_out/2, y_out/2) with plane stride disp_step2.
template <typename T, int winsz, int channels>
__global__ void compute_data_cost_reduce(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2)
{
int x_out = blockIdx.x;
int y_out = blockIdx.y % h;                              // output row
int d = (blockIdx.y / h) * blockDim.z + threadIdx.z;     // candidate index
int tid = threadIdx.x;
const T* selected_disparity = selected_disp_pyr + y_out/2 * msg_step + x_out/2;
T* data_cost = data_cost_ + y_out * msg_step + x_out;
if (d < nr_plane)
{
int sel_disp = selected_disparity[d * disp_step2];
int x0 = x_out << level;
int y0 = y_out << level;
int len = ::min(y0 + winsz, rows) - y0; // clip window at the image bottom
float val = 0.0f;
if (x0 + tid < cols)
{
if (x0 + tid - sel_disp < 0 || sel_disp < min_disp)
val = data_weight * max_data_term * len; // whole column is invalid
else
{
const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid );
const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - sel_disp);
// Walk down one column of the window.
for(int y = 0; y < len; ++y)
{
val += data_weight * pixeldiff<channels>(lle, lri, max_data_term);
lle += cimg_step;
lri += cimg_step;
}
}
}
// Per-z-slice shared-memory reduction of the winsz column partials.
extern __shared__ float smem[];
reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>());
if (tid == 0)
data_cost[disp_step1 * d] = saturate_cast<T>(val);
}
}
// Launches the naive per-thread compute_data_cost kernel, dispatching at run
// time on the channel count to the matching template instantiation.
template <typename T>
void compute_data_cost_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/,
    int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream)
{
    const dim3 threads(32, 8, 1);
    const dim3 grid(divUp(w, threads.x), divUp(h, threads.y), 1);

    if (channels == 1)
        compute_data_cost<T, 1><<<grid, threads, 0, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2);
    else if (channels == 3)
        compute_data_cost<T, 3><<<grid, threads, 0, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2);
    else if (channels == 4)
        compute_data_cost<T, 4><<<grid, threads, 0, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2);
    else
        CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
// Launches the reduction variant of compute_data_cost: winsz threads per
// window column, threadsNum / winsz candidates per block, one float of shared
// memory per thread for the reduction.
template <typename T, int winsz>
void compute_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols,
    int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream)
{
    const int threadsNum = 256;
    const size_t smem_size = threadsNum * sizeof(float);
    const dim3 threads(winsz, 1, threadsNum / winsz);

    // blockIdx.y folds (row, candidate chunk).
    dim3 grid(w, h, 1);
    grid.y *= divUp(nr_plane, threads.z);

    if (channels == 1)
        compute_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2);
    else if (channels == 3)
        compute_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2);
    else if (channels == 4)
        compute_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2);
    else
        CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
// Host entry: computes the per-candidate data cost for one pyramid level.
// Caller table is indexed by `level`: levels 0-1 use the naive kernel, level
// k >= 2 uses the reduction kernel with winsz = 1 << k.
template<class T>
void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, size_t msg_step,
int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term,
int min_disp, cudaStream_t stream)
{
typedef void (*ComputeDataCostCaller)(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols,
int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream);
static const ComputeDataCostCaller callers[] =
{
compute_data_cost_caller_<T>, compute_data_cost_caller_<T>, compute_data_cost_reduce_caller_<T, 4>,
compute_data_cost_reduce_caller_<T, 8>, compute_data_cost_reduce_caller_<T, 16>, compute_data_cost_reduce_caller_<T, 32>,
compute_data_cost_reduce_caller_<T, 64>, compute_data_cost_reduce_caller_<T, 128>, compute_data_cost_reduce_caller_<T, 256>
};
// Plane strides at the fine (h) and coarse (h2) levels.
size_t disp_step1 = msg_step * h;
size_t disp_step2 = msg_step * h2;
callers[level](cleft, cright, cimg_step, disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2, stream);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Explicit instantiations for the two supported element types (short and float).
template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const short* disp_selected_pyr, short* data_cost, size_t msg_step,
int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, cudaStream_t stream);
template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const float* disp_selected_pyr, float* data_cost, size_t msg_step,
int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, cudaStream_t stream);
///////////////////////////////////////////////////////////////
//////////////////////// init message /////////////////////////
///////////////////////////////////////////////////////////////
// For each of the nr_plane fine-level slots, pick the lowest-scoring of the
// nr_plane2 coarse candidates (scores in data_cost_new), copy its data cost,
// disparity and the four coarse messages down to the fine level (strides:
// disp_step1 fine, disp_step2 coarse), then overwrite the winner's score with
// the type maximum so it is not selected again.
template <typename T>
__device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new,
const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,
T* data_cost_selected, T* disparity_selected_new, T* data_cost_new,
const T* data_cost_cur, const T* disparity_selected_cur,
int nr_plane, int nr_plane2, size_t disp_step1, size_t disp_step2)
{
for(int i = 0; i < nr_plane; i++)
{
// Linear scan for the current best coarse candidate.
T minimum = numeric_limits<T>::max();
int id = 0;
for(int j = 0; j < nr_plane2; j++)
{
T cur = data_cost_new[j * disp_step1];
if(cur < minimum)
{
minimum = cur;
id = j;
}
}
// Copy the winner's cost, disparity and messages down to slot i.
data_cost_selected[i * disp_step1] = data_cost_cur[id * disp_step1];
disparity_selected_new[i * disp_step1] = disparity_selected_cur[id * disp_step2];
u_new[i * disp_step1] = u_cur[id * disp_step2];
d_new[i * disp_step1] = d_cur[id * disp_step2];
l_new[i * disp_step1] = l_cur[id * disp_step2];
r_new[i * disp_step1] = r_cur[id * disp_step2];
data_cost_new[id * disp_step1] = numeric_limits<T>::max();
}
}
// Coarse-to-fine initialisation of messages and candidate planes. For each
// fine pixel, every coarse candidate is scored as its data cost plus the
// coarse-level messages arriving from the (border-clamped) neighbours of the
// parent cell; the best nr_plane candidates are then carried down by
// get_first_k_element_increase.
template <typename T>
__global__ void init_message(uchar *ctemp, T* u_new_, T* d_new_, T* l_new_, T* r_new_,
const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_,
T* selected_disp_pyr_new, const T* selected_disp_pyr_cur,
T* data_cost_selected_, const T* data_cost_,
int h, int w, int nr_plane, int h2, int w2, int nr_plane2,
size_t msg_step, size_t disp_step1, size_t disp_step2)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < h && x < w)
{
// Coarse neighbours of the parent cell (y/2, x/2), clamped to the coarse
// image bounds so border pixels reuse their own row/column.
const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * msg_step + x/2;
const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * msg_step + x/2;
const T* l_cur = l_cur_ + (y/2) * msg_step + ::min(w2-1, x/2 + 1);
const T* r_cur = r_cur_ + (y/2) * msg_step + ::max(0, x/2 - 1);
T* data_cost_new = (T*)ctemp + y * msg_step + x; // scratch for candidate scores
const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * msg_step + x/2;
const T* data_cost = data_cost_ + y * msg_step + x;
// Score every coarse candidate into the scratch buffer.
for(int d = 0; d < nr_plane2; d++)
{
int idx2 = d * disp_step2;
T val = data_cost[d * disp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2];
data_cost_new[d * disp_step1] = val;
}
T* data_cost_selected = data_cost_selected_ + y * msg_step + x;
T* disparity_selected_new = selected_disp_pyr_new + y * msg_step + x;
T* u_new = u_new_ + y * msg_step + x;
T* d_new = d_new_ + y * msg_step + x;
T* l_new = l_new_ + y * msg_step + x;
T* r_new = r_new_ + y * msg_step + x;
// Rebase the message pointers on the parent cell itself for the copy-down.
u_cur = u_cur_ + y/2 * msg_step + x/2;
d_cur = d_cur_ + y/2 * msg_step + x/2;
l_cur = l_cur_ + y/2 * msg_step + x/2;
r_cur = r_cur_ + y/2 * msg_step + x/2;
get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur,
data_cost_selected, disparity_selected_new, data_cost_new,
data_cost, disparity_selected_cur, nr_plane, nr_plane2,
disp_step1, disp_step2);
}
}
// Host-side launcher for the coarse-to-fine message-initialisation kernel.
// disp_step1 / disp_step2 are the per-plane element strides at the fine (h)
// and coarse (h2) pyramid levels respectively.
template<class T>
void init_message(uchar *ctemp, T* u_new, T* d_new, T* l_new, T* r_new,
                  const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,
                  T* selected_disp_pyr_new, const T* selected_disp_pyr_cur,
                  T* data_cost_selected, const T* data_cost, size_t msg_step,
                  int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream)
{
    const size_t disp_step1 = msg_step * h;
    const size_t disp_step2 = msg_step * h2;

    // One thread per fine-level pixel.
    const dim3 threads(32, 8, 1);
    const dim3 grid(divUp(w, threads.x), divUp(h, threads.y), 1);

    init_message<<<grid, threads, 0, stream>>>(ctemp, u_new, d_new, l_new, r_new,
                                               u_cur, d_cur, l_cur, r_cur,
                                               selected_disp_pyr_new, selected_disp_pyr_cur,
                                               data_cost_selected, data_cost,
                                               h, w, nr_plane, h2, w2, nr_plane2,
                                               msg_step, disp_step1, disp_step2);
    cudaSafeCall( cudaGetLastError() );

    // Legacy semantics: launches on the default stream stay synchronous.
    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
// Explicit instantiations for the two supported element types (short and float).
template void init_message(uchar *ctemp, short* u_new, short* d_new, short* l_new, short* r_new,
const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur,
short* selected_disp_pyr_new, const short* selected_disp_pyr_cur,
short* data_cost_selected, const short* data_cost, size_t msg_step,
int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);
template void init_message(uchar *ctemp, float* u_new, float* d_new, float* l_new, float* r_new,
const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur,
float* selected_disp_pyr_new, const float* selected_disp_pyr_cur,
float* data_cost_selected, const float* data_cost, size_t msg_step,
int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);
///////////////////////////////////////////////////////////////
//////////////////// calc all iterations /////////////////////
///////////////////////////////////////////////////////////////
// One belief-propagation message update for a single pixel and direction.
// data: per-plane data cost; msg1..msg3: incoming messages from the three
// other directions; dst_disp / src_disp: disparity candidates selected at the
// destination / source pixel; temp: per-pixel scratch (aliased into the shared
// ctemp buffer by the caller, hence volatile). disp_step is the element stride
// between successive disparity planes.
template <typename T>
__device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3,
const T* dst_disp, const T* src_disp, int nr_plane, int max_disc_term, float disc_single_jump, volatile T* temp,
size_t disp_step)
{
// Pass 1: raw message = data cost + the three incoming messages; track minimum.
T minimum = numeric_limits<T>::max();
for(int d = 0; d < nr_plane; d++)
{
int idx = d * disp_step;
T val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx];
if(val < minimum)
minimum = val;
msg_dst[idx] = val;
}
// Pass 2: truncated-linear smoothness model — each plane's cost is capped at
// (minimum + max_disc_term) and at the cheapest jump from any other plane,
// penalised linearly by disc_single_jump per unit of disparity difference.
float sum = 0;
for(int d = 0; d < nr_plane; d++)
{
float cost_min = minimum + max_disc_term;
T src_disp_reg = src_disp[d * disp_step];
for(int d2 = 0; d2 < nr_plane; d2++)
cost_min = fmin(cost_min, msg_dst[d2 * disp_step] + disc_single_jump * ::abs(dst_disp[d2 * disp_step] - src_disp_reg));
temp[d * disp_step] = saturate_cast<T>(cost_min);
sum += cost_min;
}
// Pass 3: normalise by subtracting the mean so message magnitudes stay bounded.
sum /= nr_plane;
for(int d = 0; d < nr_plane; d++)
msg_dst[d * disp_step] = saturate_cast<T>(temp[d * disp_step] - sum);
}
// One red-black (checkerboard) sweep: each launch updates only interior pixels
// whose column parity equals ((y + i) & 1), so the neighbour values read here
// were written on the previous (opposite-parity) sweep. The 1-pixel border is
// never updated.
template <typename T>
__global__ void compute_message(uchar *ctemp, T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i, int max_disc_term, float disc_single_jump, size_t msg_step, size_t disp_step)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Each thread covers two columns; the parity bit selects which one this sweep.
int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1);
if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
{
const T* data = data_cost_selected + y * msg_step + x;
// Outgoing message buffers for the four directions at this pixel.
T* u = u_ + y * msg_step + x;
T* d = d_ + y * msg_step + x;
T* l = l_ + y * msg_step + x;
T* r = r_ + y * msg_step + x;
const T* disp = selected_disp_pyr_cur + y * msg_step + x;
T* temp = (T*)ctemp + y * msg_step + x; // scratch, reused across kernels
// Each call combines the messages arriving from the other three directions
// (offsets of +/- 1 and +/- msg_step address the four neighbours).
message_per_pixel(data, u, r - 1, u + msg_step, l + 1, disp, disp - msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
message_per_pixel(data, d, d - msg_step, r - 1, l + 1, disp, disp + msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
message_per_pixel(data, l, u + msg_step, d - msg_step, l + 1, disp, disp - 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
message_per_pixel(data, r, u + msg_step, d - msg_step, r - 1, disp, disp + 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step);
}
}
// Runs `iters` checkerboard message-passing sweeps over the h x w message
// grids. Each sweep launches compute_message with the iteration parity
// (t & 1), so alternating pixel colours are updated on alternating sweeps.
// grid.x is computed over half the width because each thread serves one
// pixel of the active colour (two columns per thread, see compute_message).
// Work is enqueued on `stream`; on the default stream the call blocks until
// all sweeps have finished.
template<class T>
void calc_all_iterations(uchar *ctemp, T* u, T* d, T* l, T* r, const T* data_cost_selected,
const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream)
{
    const size_t disp_step = msg_step * h;  // stride between disparity planes
    dim3 threads(32, 8, 1);
    dim3 grid(divUp(w, threads.x << 1), divUp(h, threads.y), 1);
    for(int t = 0; t < iters; ++t)
    {
        compute_message<<<grid, threads, 0, stream>>>(ctemp, u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1, max_disc_term, disc_single_jump, msg_step, disp_step);
        cudaSafeCall( cudaGetLastError() );
    }
    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
template void calc_all_iterations(uchar *ctemp, short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step,
int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream);
template void calc_all_iterations(uchar *ctemp, float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step,
int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////////////// output ////////////////////////////
///////////////////////////////////////////////////////////////
// Final winner-take-all read-out: for every interior pixel, pick the
// disparity plane with the lowest total belief (data cost plus the four
// incoming messages) and write its disparity label to the output map.
// Border pixels are skipped (left at whatever `disp` already holds).
template <typename T>
__global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_,
const T* data_cost_selected, const T* disp_selected_pyr,
PtrStepSz<short> disp, int nr_plane, size_t msg_step, size_t disp_step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1)
{
const T* data = data_cost_selected + y * msg_step + x;
const T* disp_selected = disp_selected_pyr + y * msg_step + x;
// Incoming messages are read from the neighbouring pixels' outgoing
// message buffers: u from the row below, d from the row above, etc.
const T* u = u_ + (y+1) * msg_step + (x+0);
const T* d = d_ + (y-1) * msg_step + (x+0);
const T* l = l_ + (y+0) * msg_step + (x+1);
const T* r = r_ + (y+0) * msg_step + (x-1);
// Arg-min over the selected disparity planes.
int best = 0;
T best_val = numeric_limits<T>::max();
for (int i = 0; i < nr_plane; ++i)
{
int idx = i * disp_step;
T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx];
if (val < best_val)
{
best_val = val;
best = saturate_cast<short>(disp_selected[idx]);
}
}
disp(y, x) = best;
}
}
// Host-side launcher for the disparity read-out kernel above. Covers the
// whole disparity map with 32x8 blocks, checks the launch, and blocks on
// the default stream so results are ready when the call returns.
template<class T>
void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step,
const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream)
{
    // Disparity planes are laid out disp.rows * msg_step elements apart.
    const size_t disp_step = static_cast<size_t>(disp.rows) * msg_step;
    const dim3 threads(32, 8, 1);
    const dim3 grid(divUp(disp.cols, threads.x), divUp(disp.rows, threads.y), 1);
    compute_disp<<<grid, threads, 0, stream>>>(u, d, l, r, data_cost_selected, disp_selected, disp, nr_plane, msg_step, disp_step);
    cudaSafeCall( cudaGetLastError() );
    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step,
const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream);
template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step,
const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream);
} // namespace stereocsbp
}}} // namespace cv { namespace cuda { namespace cudev {
#endif /* CUDA_DISABLER */
|
41eaca838bd48b21905c8be67cc11c45efbe6913.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS _THREADS_
// Scatters n random points into axis-aligned boxes.
//   rnd    2*n uniform samples in [0, 1)
//   xy     output, 2*n floats: point i at (xy[2i], xy[2i+1])
//   s      box half-extent (s[0], s[1])
//   mid    box midpoints, two floats per box
//   grains consecutive points sharing one box midpoint
// Each thread writes one point: rnd is mapped to [-1, 1), scaled by the
// half-extent and offset by the owning box's midpoint.
__global__ void box(const int n,
                    float *rnd,
                    float *xy,
                    const float *s,
                    const float *mid,
                    const int grains) {
  const int i = blockIdx.x*THREADS + threadIdx.x;
  if (i >= n) {
    return;
  }
  const int ii = 2*i;
  // Integer division replaces floor((float)i/(float)grains): it is exact
  // (float division misbins once i exceeds ~2^24) and avoids the fp round
  // trip entirely.
  const int k = 2*(i/grains);
  // Float literals (1.0f/2.0f) keep the arithmetic in fp32; the original
  // double literals promoted every operation to double.
  xy[ii] = (1.0f - 2.0f*rnd[ii]) * s[0] + mid[k];
  xy[ii+1] = (1.0f - 2.0f*rnd[ii+1]) * s[1] + mid[k+1];
}
| 41eaca838bd48b21905c8be67cc11c45efbe6913.cu | #define THREADS _THREADS_
// Scatters n random points into axis-aligned boxes.
//   rnd    2*n uniform samples in [0, 1)
//   xy     output, 2*n floats: point i at (xy[2i], xy[2i+1])
//   s      box half-extent (s[0], s[1])
//   mid    box midpoints, two floats per box
//   grains consecutive points sharing one box midpoint
// Each thread writes one point: rnd is mapped to [-1, 1), scaled by the
// half-extent and offset by the owning box's midpoint.
__global__ void box(const int n,
                    float *rnd,
                    float *xy,
                    const float *s,
                    const float *mid,
                    const int grains) {
  const int i = blockIdx.x*THREADS + threadIdx.x;
  if (i >= n) {
    return;
  }
  const int ii = 2*i;
  // Integer division replaces floor((float)i/(float)grains): it is exact
  // (float division misbins once i exceeds ~2^24) and avoids the fp round
  // trip entirely.
  const int k = 2*(i/grains);
  // Float literals (1.0f/2.0f) keep the arithmetic in fp32; the original
  // double literals promoted every operation to double.
  xy[ii] = (1.0f - 2.0f*rnd[ii]) * s[0] + mid[k];
  xy[ii+1] = (1.0f - 2.0f*rnd[ii+1]) * s[1] + mid[k+1];
}
|
7d5fd72534796d7e70ba9b195dc1307935941788.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "multiplyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks multiplyKernel over the first argv[1] matrix sizes crossed
// with all 20 launch configurations, printing
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per combination.
int main(int argc, char **argv) {
  hipSetDevice(0);
  // BUG FIX: guard argv[1] before dereferencing it.
  if (argc < 2) {
    cout << "usage: " << argv[0] << " <matrix_len>" << endl;
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      int size = XSIZE*YSIZE;
      // BUG FIX: hipMalloc takes a byte count. The original passed
      // XSIZE*YSIZE (element count) for XSIZE*YSIZE floats, allocating
      // buffers 4x too small -> out-of-bounds device writes.
      float *Z = NULL;
      hipMalloc(&Z, size * sizeof(float));
      float *A = NULL;
      hipMalloc(&A, size * sizeof(float));
      float *B = NULL;
      hipMalloc(&B, size * sizeof(float));
      // Round the launch extents up to multiples of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);  // force lazy context creation before timing
      // One cold launch plus ten warm-up launches before the timed run.
      hipLaunchKernelGGL(multiplyKernel, gridBlock, threadBlock, 0, 0, Z, A, B, size);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(multiplyKernel, gridBlock, threadBlock, 0, 0, Z, A, B, size);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(multiplyKernel, gridBlock, threadBlock, 0, 0, Z, A, B, size);
      }
      // BUG FIX: kernel launches are asynchronous; without this sync the
      // loop timed only the enqueue cost, not the kernels themselves.
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: release the buffers -- the original leaked three device
      // allocations per inner iteration.
      hipFree(Z);
      hipFree(A);
      hipFree(B);
    }
  }
  return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "multiplyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks multiplyKernel over the first argv[1] matrix sizes crossed
// with all 20 launch configurations, printing
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per combination.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  // BUG FIX: guard argv[1] before dereferencing it.
  if (argc < 2) {
    cout << "usage: " << argv[0] << " <matrix_len>" << endl;
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      int size = XSIZE*YSIZE;
      // BUG FIX: cudaMalloc takes a byte count. The original passed
      // XSIZE*YSIZE (element count) for XSIZE*YSIZE floats, allocating
      // buffers 4x too small -> out-of-bounds device writes.
      float *Z = NULL;
      cudaMalloc(&Z, size * sizeof(float));
      float *A = NULL;
      cudaMalloc(&A, size * sizeof(float));
      float *B = NULL;
      cudaMalloc(&B, size * sizeof(float));
      // Round the launch extents up to multiples of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force lazy context creation before timing
      // One cold launch plus ten warm-up launches before the timed run.
      multiplyKernel<<<gridBlock,threadBlock>>>(Z, A, B, size);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        multiplyKernel<<<gridBlock,threadBlock>>>(Z, A, B, size);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        multiplyKernel<<<gridBlock,threadBlock>>>(Z, A, B, size);
      }
      // BUG FIX: kernel launches are asynchronous; without this sync the
      // loop timed only the enqueue cost, not the kernels themselves.
      cudaDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: release the buffers -- the original leaked three device
      // allocations per inner iteration.
      cudaFree(Z);
      cudaFree(A);
      cudaFree(B);
    }
  }
  return 0;
}
17ce34755a93790ce80327ecaccdb7d82c77b5df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inference/unique_op/unique_op.hpp>
// Overloads of the 64-bit CUDA atomics for the remaining 64-bit integer
// types (long, long long, unsigned long). CUDA natively provides only the
// unsigned long long flavour, so each overload reinterprets the address and
// operands as unsigned long long and casts the result back.
// NOTE(review): this assumes `long` is 64 bits (LP64, e.g. Linux x86-64);
// on LLP64 platforms (Windows) the pointer cast would be wrong -- confirm
// supported targets.
__forceinline__
__device__ long atomicAdd(long* address, long val)
{
return (long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ long long atomicAdd(long long* address, long long val)
{
return (long long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ unsigned long atomicAdd(unsigned long* address, unsigned long val)
{
return (unsigned long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ long atomicCAS(long* address, long compare, long val)
{
return (long)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);
}
__forceinline__
__device__ long long atomicCAS(long long* address, long long compare, long long val)
{
return (long long)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);
}
__forceinline__
__device__ unsigned long atomicCAS(unsigned long* address, unsigned long compare, unsigned long val)
{
return (unsigned long)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);
}
namespace HugeCTR {
namespace unique_op {
// Resets the hash table: every one of the `capacity` slots is set to the
// sentinel pair <empty_key, empty_val>, and the single global counter is
// re-seeded with init_counter_val by thread 0.
// Launch with at least `capacity` threads in total.
template<typename KeyType, typename CounterType>
__global__ void init_kernel(KeyType* keys,
CounterType* vals,
CounterType* counter,
const size_t capacity,
const KeyType empty_key,
const CounterType empty_val,
const CounterType init_counter_val){
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx < capacity )
{
// Simply store every element a unused <K, V> pair
keys[idx] = empty_key;
vals[idx] = empty_val;
}
// Only the first thread of the grid touches the (single-element) counter.
if( idx == 0 ){
counter[idx] = init_counter_val;
}
}
// Compacts the occupied hash-table slots into d_key. Each non-empty slot in
// [offset, offset + search_length) stores, as its value, the unique index
// assigned to the key at insertion time; that value is used directly as the
// destination position, so d_key ends up ordered by insertion index.
// Each block counts its valid slots via a shared-memory accumulator and adds
// the block total to *d_dump_counter (which must be pre-zeroed by the caller).
template<typename KeyType, typename CounterType>
__global__ void dump_kernel(KeyType* d_key,
const KeyType* keys,
const CounterType* vals,
const size_t offset,
const size_t search_length,
size_t* d_dump_counter,
const KeyType empty_key){
/* Per block accumulator */
__shared__ size_t block_acc;
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
/* Initialize */
if(threadIdx.x == 0){
block_acc = 0;
}
__syncthreads();
KeyType read_key;
CounterType read_val;
bool valid_slot = false;
// Each thread gather the key and value from slot assigned to them.
if(idx < search_length){
read_key = keys[offset + idx];
if(read_key != empty_key){
valid_slot = true;
// Shared-memory atomic: tally this block's occupied slots.
atomicAdd(&block_acc, 1);
read_val = vals[offset + idx];
}
}
__syncthreads();
// Each block accumulate the dump count to global counter
if(threadIdx.x == 0){
atomicAdd(d_dump_counter, block_acc);
}
// Each thread store one slot's data back to global memory, d_dump_counter is how many slots in total dumped.
if(valid_slot){
d_key[read_val] = read_key;
}
}
// Insert-or-lookup for each of the `len` input keys via linear probing.
// The first thread to claim a slot (atomicCAS on the key) draws a fresh
// index from *d_global_counter and publishes it through the slot's value;
// any other thread that finds the same key spins on a volatile read until
// that value is published, then reuses it. On exit, d_val[idx] holds the
// unique index assigned to d_key[idx]. Device-side assert fires if all
// `capacity` slots are probed without finding a free/matching one.
template<typename KeyType, typename CounterType, typename hasher>
__global__ void get_insert_kernel(const KeyType* d_key,
CounterType* d_val,
const size_t len,
KeyType* keys,
CounterType* vals,
const size_t capacity,
CounterType* d_global_counter,
const KeyType empty_key,
const CounterType empty_val){
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < len){
KeyType target_key = d_key[idx];
size_t hash_index = hasher::hash(target_key) % capacity;
size_t counter = 0;
while(true){
// Have searched all the slot in the hashtable, but all slots in the hashtable are occupied by other keys
if(counter >= capacity){
assert(false && "error: unique op fails: hashtable is full");
}
// Try to set the key for the current slot to target key
const KeyType old_key = atomicCAS(keys + hash_index, empty_key, target_key);
// volatile: other threads poll this slot until the index is published.
volatile CounterType& target_val_pos = vals[hash_index];
if(empty_key == old_key){
// We won the slot: allocate the next unique index and publish it.
CounterType result_val;
result_val = atomicAdd(d_global_counter, 1);
d_val[idx] = result_val;
target_val_pos = result_val;
break;
}
else if(target_key == old_key){
// Key already present: spin until its index has been published.
while(target_val_pos == empty_val)
;
d_val[idx] = target_val_pos;
break;
}
// Slot taken by a different key: linear probe to the next slot.
counter++;
hash_index = (hash_index + 1) % capacity;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Builds a GPU hash table with `capacity` slots on the current device:
// allocates the key/value arrays and the global counter, then launches
// init_kernel on the default stream and blocks until initialization is
// done. Throws on capacity == 0 or on any CUDA failure.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::unique_op(const size_t capacity,
const CounterType init_counter_val)
:capacity_(capacity),
init_counter_val_(init_counter_val){
// Check parameter
if(capacity_ == 0){
CK_THROW_(Error_t::WrongInput, "Invalid value for unique_op capacity");
return;
}
// Remember the current CUDA device so later calls can switch back to it.
CK_CUDA_THROW_(hipGetDevice( &dev_ ));
// Allocate keys and vals buffer
CK_CUDA_THROW_(hipMalloc((void**)&keys_, sizeof(KeyType) * capacity_));
CK_CUDA_THROW_(hipMalloc((void**)&vals_, sizeof(CounterType) * capacity_));
// Allocate device-side counter
CK_CUDA_THROW_(hipMalloc((void **)&counter_, sizeof(CounterType)));
// Initialization kernel, set all entry to unused <K,V>, set counter to init value
hipLaunchKernelGGL(( init_kernel<KeyType, CounterType>)
, dim3(((capacity_-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, 0,
keys_, vals_, counter_, capacity_, empty_key, empty_val, init_counter_val_);
// Wait for initialization to finish
CK_CUDA_THROW_(hipStreamSynchronize(0));
CK_CUDA_THROW_(hipGetLastError());
}
// Releases the device buffers on the op's own device. Declared
// noexcept(false) so CUDA failures during cleanup still surface through
// CK_CUDA_THROW_ instead of terminating silently.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::~unique_op() noexcept(false) {
// Device Restorer
CudaDeviceContext dev_restorer;
// Set device
CK_CUDA_THROW_(hipSetDevice(dev_));
// Free keys and vals
CK_CUDA_THROW_(hipFree(keys_));
CK_CUDA_THROW_(hipFree(vals_));
// Free device-side counter
CK_CUDA_THROW_(hipFree(counter_));
}
// Returns the number of slots in the underlying hash table (fixed at
// construction time).
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
size_t unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::get_capacity() const{
return capacity_;
}
// Deduplicates d_key[0..len): writes the unique keys (ordered by first
// occurrence) to d_unique_key, each input key's unique index to
// d_output_index, and the unique count to *d_output_counter. All work is
// enqueued on `stream`; results are valid only after the stream has been
// synchronized. The table keeps its contents afterwards -- call clear()
// before reusing the op on unrelated data.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
void unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::unique(const KeyType* d_key,
const size_t len,
CounterType* d_output_index,
KeyType* d_unique_key,
size_t* d_output_counter,
hipStream_t stream){
// Device Restorer
CudaDeviceContext dev_restorer;
// Set to the device of this op
CK_CUDA_THROW_(hipSetDevice(dev_));
// Set the d_output_counter to 0
CK_CUDA_THROW_(hipMemsetAsync(d_output_counter, 0, sizeof(size_t), stream));
// Nothing to do for an empty batch (counter is already zeroed above).
if (len == 0) {
return;
}
// Launch get_insert kernel to do unique
hipLaunchKernelGGL(( get_insert_kernel<KeyType, CounterType, hasher>)
, dim3((len-1)/BLOCK_SIZE_+1), dim3(BLOCK_SIZE_), 0, stream,
d_key, d_output_index, len, keys_, vals_, capacity_, counter_, empty_key, empty_val);
// Launch dump kernel
hipLaunchKernelGGL(( dump_kernel<KeyType, CounterType>)
, dim3((capacity_-1)/BLOCK_SIZE_+1), dim3(BLOCK_SIZE_), 0, stream,
d_unique_key, keys_, vals_, 0, capacity_, d_output_counter, empty_key);
CK_CUDA_THROW_(hipGetLastError());
}
// Resets the table to all-empty slots and the counter to its initial value
// so the op can be reused for a fresh unique() pass. Asynchronous on
// `stream`.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
void unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::clear(hipStream_t stream){
// Device Restorer
CudaDeviceContext dev_restorer;
// Set to the device of this op
CK_CUDA_THROW_(hipSetDevice(dev_));
// Initialization kernel, set all entry to unused <K,V>, set counter to init value
hipLaunchKernelGGL(( init_kernel<KeyType, CounterType>)
, dim3(((capacity_-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, stream,
keys_, vals_, counter_, capacity_, empty_key, empty_val, init_counter_val_);
CK_CUDA_THROW_(hipGetLastError());
}
template class unique_op<unsigned int, uint64_t, std::numeric_limits<unsigned int>::max(), std::numeric_limits<uint64_t>::max()>;
template class unique_op<long long, uint64_t, std::numeric_limits<long long>::max(), std::numeric_limits<uint64_t>::max()>;
} // namespace unique_op
} // namespace HugeCTR
| 17ce34755a93790ce80327ecaccdb7d82c77b5df.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inference/unique_op/unique_op.hpp>
// Overloads of the 64-bit CUDA atomics for the remaining 64-bit integer
// types (long, long long, unsigned long). CUDA natively provides only the
// unsigned long long flavour, so each overload reinterprets the address and
// operands as unsigned long long and casts the result back.
// NOTE(review): this assumes `long` is 64 bits (LP64, e.g. Linux x86-64);
// on LLP64 platforms (Windows) the pointer cast would be wrong -- confirm
// supported targets.
__forceinline__
__device__ long atomicAdd(long* address, long val)
{
return (long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ long long atomicAdd(long long* address, long long val)
{
return (long long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ unsigned long atomicAdd(unsigned long* address, unsigned long val)
{
return (unsigned long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ long atomicCAS(long* address, long compare, long val)
{
return (long)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);
}
__forceinline__
__device__ long long atomicCAS(long long* address, long long compare, long long val)
{
return (long long)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);
}
__forceinline__
__device__ unsigned long atomicCAS(unsigned long* address, unsigned long compare, unsigned long val)
{
return (unsigned long)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);
}
namespace HugeCTR {
namespace unique_op {
// Resets the hash table: every one of the `capacity` slots is set to the
// sentinel pair <empty_key, empty_val>, and the single global counter is
// re-seeded with init_counter_val by thread 0.
// Launch with at least `capacity` threads in total.
template<typename KeyType, typename CounterType>
__global__ void init_kernel(KeyType* keys,
CounterType* vals,
CounterType* counter,
const size_t capacity,
const KeyType empty_key,
const CounterType empty_val,
const CounterType init_counter_val){
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx < capacity )
{
// Simply store every element a unused <K, V> pair
keys[idx] = empty_key;
vals[idx] = empty_val;
}
// Only the first thread of the grid touches the (single-element) counter.
if( idx == 0 ){
counter[idx] = init_counter_val;
}
}
// Compacts the occupied hash-table slots into d_key. Each non-empty slot in
// [offset, offset + search_length) stores, as its value, the unique index
// assigned to the key at insertion time; that value is used directly as the
// destination position, so d_key ends up ordered by insertion index.
// Each block counts its valid slots via a shared-memory accumulator and adds
// the block total to *d_dump_counter (which must be pre-zeroed by the caller).
template<typename KeyType, typename CounterType>
__global__ void dump_kernel(KeyType* d_key,
const KeyType* keys,
const CounterType* vals,
const size_t offset,
const size_t search_length,
size_t* d_dump_counter,
const KeyType empty_key){
/* Per block accumulator */
__shared__ size_t block_acc;
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
/* Initialize */
if(threadIdx.x == 0){
block_acc = 0;
}
__syncthreads();
KeyType read_key;
CounterType read_val;
bool valid_slot = false;
// Each thread gather the key and value from slot assigned to them.
if(idx < search_length){
read_key = keys[offset + idx];
if(read_key != empty_key){
valid_slot = true;
// Shared-memory atomic: tally this block's occupied slots.
atomicAdd(&block_acc, 1);
read_val = vals[offset + idx];
}
}
__syncthreads();
// Each block accumulate the dump count to global counter
if(threadIdx.x == 0){
atomicAdd(d_dump_counter, block_acc);
}
// Each thread store one slot's data back to global memory, d_dump_counter is how many slots in total dumped.
if(valid_slot){
d_key[read_val] = read_key;
}
}
// Insert-or-lookup for each of the `len` input keys via linear probing.
// The first thread to claim a slot (atomicCAS on the key) draws a fresh
// index from *d_global_counter and publishes it through the slot's value;
// any other thread that finds the same key spins on a volatile read until
// that value is published, then reuses it. On exit, d_val[idx] holds the
// unique index assigned to d_key[idx]. Device-side assert fires if all
// `capacity` slots are probed without finding a free/matching one.
template<typename KeyType, typename CounterType, typename hasher>
__global__ void get_insert_kernel(const KeyType* d_key,
CounterType* d_val,
const size_t len,
KeyType* keys,
CounterType* vals,
const size_t capacity,
CounterType* d_global_counter,
const KeyType empty_key,
const CounterType empty_val){
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < len){
KeyType target_key = d_key[idx];
size_t hash_index = hasher::hash(target_key) % capacity;
size_t counter = 0;
while(true){
// Have searched all the slot in the hashtable, but all slots in the hashtable are occupied by other keys
if(counter >= capacity){
assert(false && "error: unique op fails: hashtable is full");
}
// Try to set the key for the current slot to target key
const KeyType old_key = atomicCAS(keys + hash_index, empty_key, target_key);
// volatile: other threads poll this slot until the index is published.
volatile CounterType& target_val_pos = vals[hash_index];
if(empty_key == old_key){
// We won the slot: allocate the next unique index and publish it.
CounterType result_val;
result_val = atomicAdd(d_global_counter, 1);
d_val[idx] = result_val;
target_val_pos = result_val;
break;
}
else if(target_key == old_key){
// Key already present: spin until its index has been published.
while(target_val_pos == empty_val)
;
d_val[idx] = target_val_pos;
break;
}
// Slot taken by a different key: linear probe to the next slot.
counter++;
hash_index = (hash_index + 1) % capacity;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Builds a GPU hash table with `capacity` slots on the current device:
// allocates the key/value arrays and the global counter, then launches
// init_kernel on the default stream and blocks until initialization is
// done. Throws on capacity == 0 or on any CUDA failure.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::unique_op(const size_t capacity,
const CounterType init_counter_val)
:capacity_(capacity),
init_counter_val_(init_counter_val){
// Check parameter
if(capacity_ == 0){
CK_THROW_(Error_t::WrongInput, "Invalid value for unique_op capacity");
return;
}
// Remember the current CUDA device so later calls can switch back to it.
CK_CUDA_THROW_(cudaGetDevice( &dev_ ));
// Allocate keys and vals buffer
CK_CUDA_THROW_(cudaMalloc((void**)&keys_, sizeof(KeyType) * capacity_));
CK_CUDA_THROW_(cudaMalloc((void**)&vals_, sizeof(CounterType) * capacity_));
// Allocate device-side counter
CK_CUDA_THROW_(cudaMalloc((void **)&counter_, sizeof(CounterType)));
// Initialization kernel, set all entry to unused <K,V>, set counter to init value
init_kernel<KeyType, CounterType>
<<<((capacity_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_>>>
(keys_, vals_, counter_, capacity_, empty_key, empty_val, init_counter_val_);
// Wait for initialization to finish
CK_CUDA_THROW_(cudaStreamSynchronize(0));
CK_CUDA_THROW_(cudaGetLastError());
}
// Releases the device buffers on the op's own device. Declared
// noexcept(false) so CUDA failures during cleanup still surface through
// CK_CUDA_THROW_ instead of terminating silently.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::~unique_op() noexcept(false) {
// Device Restorer
CudaDeviceContext dev_restorer;
// Set device
CK_CUDA_THROW_(cudaSetDevice(dev_));
// Free keys and vals
CK_CUDA_THROW_(cudaFree(keys_));
CK_CUDA_THROW_(cudaFree(vals_));
// Free device-side counter
CK_CUDA_THROW_(cudaFree(counter_));
}
// Returns the number of slots in the underlying hash table (fixed at
// construction time).
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
size_t unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::get_capacity() const{
return capacity_;
}
// Deduplicates d_key[0..len): writes the unique keys (ordered by first
// occurrence) to d_unique_key, each input key's unique index to
// d_output_index, and the unique count to *d_output_counter. All work is
// enqueued on `stream`; results are valid only after the stream has been
// synchronized. The table keeps its contents afterwards -- call clear()
// before reusing the op on unrelated data.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
void unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::unique(const KeyType* d_key,
const size_t len,
CounterType* d_output_index,
KeyType* d_unique_key,
size_t* d_output_counter,
cudaStream_t stream){
// Device Restorer
CudaDeviceContext dev_restorer;
// Set to the device of this op
CK_CUDA_THROW_(cudaSetDevice(dev_));
// Set the d_output_counter to 0
CK_CUDA_THROW_(cudaMemsetAsync(d_output_counter, 0, sizeof(size_t), stream));
// Nothing to do for an empty batch (counter is already zeroed above).
if (len == 0) {
return;
}
// Launch get_insert kernel to do unique
get_insert_kernel<KeyType, CounterType, hasher>
<<<(len-1)/BLOCK_SIZE_+1, BLOCK_SIZE_, 0, stream>>>
(d_key, d_output_index, len, keys_, vals_, capacity_, counter_, empty_key, empty_val);
// Launch dump kernel
dump_kernel<KeyType, CounterType>
<<<(capacity_-1)/BLOCK_SIZE_+1, BLOCK_SIZE_, 0, stream>>>
(d_unique_key, keys_, vals_, 0, capacity_, d_output_counter, empty_key);
CK_CUDA_THROW_(cudaGetLastError());
}
// Resets the table to all-empty slots and the counter to its initial value
// so the op can be reused for a fresh unique() pass. Asynchronous on
// `stream`.
template<typename KeyType,
typename CounterType,
KeyType empty_key,
CounterType empty_val,
typename hasher>
void unique_op<KeyType, CounterType, empty_key, empty_val, hasher>::clear(cudaStream_t stream){
// Device Restorer
CudaDeviceContext dev_restorer;
// Set to the device of this op
CK_CUDA_THROW_(cudaSetDevice(dev_));
// Initialization kernel, set all entry to unused <K,V>, set counter to init value
init_kernel<KeyType, CounterType>
<<<((capacity_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_, 0, stream>>>
(keys_, vals_, counter_, capacity_, empty_key, empty_val, init_counter_val_);
CK_CUDA_THROW_(cudaGetLastError());
}
}
template class unique_op<unsigned int, uint64_t, std::numeric_limits<unsigned int>::max(), std::numeric_limits<uint64_t>::max()>;
template class unique_op<long long, uint64_t, std::numeric_limits<long long>::max(), std::numeric_limits<uint64_t>::max()>;
} // namespace unique_op
} // namespace HugeCTR
|
71527d0b7dc720bbed3b19db0e63fb8d9bf5ea8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduction/SumReduction.cuh"
/*
Number of blocks = number of chain rows
Number of threads = (number of chain columns + number of iterations - 1) / number of iterations
Sum-reduce each row, use the first thread in a block to set the result entry
1 1
2 2
3 3
4 4
5 5
6 6
number of blocks = 6
*/
/*
 * Sums each row of `chain` (column-major, numberChainRows x
 * numberChainColumns, as shown by the chain[col * rows + row] indexing),
 * treating NaN entries as zero, and writes one total per row to `result`.
 *
 * Launch: one block per chain row; each thread accumulates
 * `numberIterations` consecutive columns, then the block combines the
 * partial sums via reduceWarpsToSum and thread 0 stores the row total.
 * Dynamic shared memory must be sized as reduceWarpsToSum requires
 * (per-warp partials).
 */
__global__ void backwardBiasKernel (
    float* chain,
    int numberChainRows,
    int numberChainColumns,
    int numberIterations,
    float* result) {
    extern __shared__ float sharedData[];
    int chainRowIndex = blockIdx.x;
    int firstChainColumnIndex = threadIdx.x * numberIterations;
    int lastChainColumnIndex = min(firstChainColumnIndex + numberIterations, numberChainColumns);
    // fp32 literal: the original `0.0` was a double and promoted the
    // accumulation (the sum is stored back into a float anyway).
    float thisValue = 0.0f;
    for (int chainColumnIndex = firstChainColumnIndex; chainColumnIndex < lastChainColumnIndex; chainColumnIndex++) {
        float chainEntry = chain[chainColumnIndex * numberChainRows + chainRowIndex];
        // NaN entries are skipped instead of poisoning the sum.
        thisValue += isnan(chainEntry) ? 0.0f : chainEntry;
    }
    __syncthreads();
    int warpId = threadIdx.x / warpSize;
    int laneId = threadIdx.x % warpSize;
    float sum = reduceWarpsToSum(thisValue, warpId, laneId, sharedData);
    if (threadIdx.x == 0) {
        result[blockIdx.x] = sum;
    }
    __syncthreads();
}
/*
Number of blocks = number of chain rows
Number of threads = (number of chain columns + number of iterations - 1) / number of iterations
Sum-reduce each row, use the first thread in a block to set the result entry
1 1
2 2
3 3
4 4
5 5
6 6
number of blocks = 6
*/
/*
 * Sums each row of `chain` (column-major, numberChainRows x
 * numberChainColumns, as shown by the chain[col * rows + row] indexing),
 * treating NaN entries as zero, and writes one total per row to `result`.
 *
 * Launch: one block per chain row; each thread accumulates
 * `numberIterations` consecutive columns, then the block combines the
 * partial sums via reduceWarpsToSum and thread 0 stores the row total.
 * Dynamic shared memory must be sized as reduceWarpsToSum requires
 * (per-warp partials).
 */
__global__ void backwardBiasKernel (
    float* chain,
    int numberChainRows,
    int numberChainColumns,
    int numberIterations,
    float* result) {
    extern __shared__ float sharedData[];
    int chainRowIndex = blockIdx.x;
    int firstChainColumnIndex = threadIdx.x * numberIterations;
    int lastChainColumnIndex = min(firstChainColumnIndex + numberIterations, numberChainColumns);
    // fp32 literal: the original `0.0` was a double and promoted the
    // accumulation (the sum is stored back into a float anyway).
    float thisValue = 0.0f;
    for (int chainColumnIndex = firstChainColumnIndex; chainColumnIndex < lastChainColumnIndex; chainColumnIndex++) {
        float chainEntry = chain[chainColumnIndex * numberChainRows + chainRowIndex];
        // NaN entries are skipped instead of poisoning the sum.
        thisValue += isnan(chainEntry) ? 0.0f : chainEntry;
    }
    __syncthreads();
    int warpId = threadIdx.x / warpSize;
    int laneId = threadIdx.x % warpSize;
    float sum = reduceWarpsToSum(thisValue, warpId, laneId, sharedData);
    if (threadIdx.x == 0) {
        result[blockIdx.x] = sum;
    }
    __syncthreads();
}
e916aded58419cecda5bf6559e2ddf98c0d2ff47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ONE_HOT_LAYER_INSTANTIATE
#include "lbann/layers/misc/one_hot.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/** See El::AbstractDistMatrix::RowOwner.
 *  Rank (within the column communicator) owning a global row index:
 *  owner = (global_index + align) mod stride. */
__device__ __forceinline__
El::Int distmat_index_owner(El::Int global_index, El::Int align, El::Int stride) {
return (global_index + align) % stride;
}
/** See El::AbstractDistMatrix::LocalRow.
 *  Converts a global row index to this rank's local row index, given the
 *  matrix alignment and process-grid stride. */
__device__ __forceinline__
El::Int distmat_local_index(El::Int global_index, El::Int rank, El::Int align, El::Int stride) {
  // BUG FIX: add `stride` before taking the remainder. C++ `%` yields a
  // negative result when rank < align, and a negative shift makes the
  // branch below return a local index one too large. Elemental's Shift is
  // defined to be non-negative.
  auto shift = (rank - align + stride) % stride;
  if (global_index > shift) {
    return (global_index - shift - 1) / stride + 1;
  }
  else {
    return 0;
  }
}
/**
* On input, output is assumed to be filled with zeros.
*
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (local_mini_batch_size / bdim) x 1 x 1
*/
// One-hot forward pass. Each thread handles (via a grid-stride loop) one
// local mini-batch column: it reads the column's scalar input, floors it to
// an integer hot index, and -- if that index is in range and this rank owns
// the corresponding output row -- writes 1 at the matching local position.
// Out-of-range inputs leave the column all zeros (output is pre-zeroed by
// the caller, per the comment above).
template <typename TensorDataType>
__global__ void fp_kernel(
El::Int local_mini_batch_size,
El::Int output_size,
El::Int col_rank,
const TensorDataType* __restrict__ local_input,
El::Int input_ldim,
TensorDataType* __restrict__ local_output,
El::Int output_ldim,
El::Int output_col_align,
El::Int output_col_stride) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int nthreads = blockDim.x * gridDim.x;
for (El::Int j=gid; j<local_mini_batch_size; j+=nthreads) {
// Input for column j is its first (and only) entry.
const auto& x = local_input[j*input_ldim];
const auto i_global = static_cast<El::Int>(gpu_lib::floor(x));
const auto owner_rank = distmat_index_owner(
i_global,
output_col_align,
output_col_stride);
// Write only if the index is valid and this rank owns that output row.
if (0 <= i_global
&& i_global < output_size
&& owner_rank == col_rank) {
const auto i = distmat_local_index(
i_global,
col_rank,
output_col_align,
output_col_stride);
local_output[i+j*output_ldim] = TensorDataType(1.f);
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void one_hot_layer<TensorDataType, Layout, Device>::fp_compute() {
// Local matrices
using AbsLocalMat = El::AbstractMatrix<TensorDataType>;
using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>;
const auto& input = this->get_prev_activations();
auto& output = this->get_activations();
auto& local_output = dynamic_cast<LocalMat&>(output.Matrix());
const El::Int local_mini_batch_size = output.LocalWidth();
const El::Int output_size = output.Height();
// Make sure all procs in column communicator have access to input
LocalMat local_input;
const auto& col_comm = input.ColComm();
const auto col_rank = El::mpi::Rank(col_comm);
const auto owner_rank = input.RowOwner(0);
if (col_rank == owner_rank) {
El::LockedView(local_input, input.LockedMatrix());
}
else {
local_input.Resize(1, input.LocalWidth());
}
/** @todo (tym1 3/12/21): We are working around a bug in Hydrogen.
* Broadcast with Matrix<T,D> is not instatiated. */
El::Broadcast(
static_cast<El::AbstractMatrix<TensorDataType>&>(local_input),
col_comm,
owner_rank);
// Populate one-hot vectors
El::Zero(output);
if (!local_output.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
gpu::get_sync_info(local_output));
constexpr size_t block_size = 64;
const size_t grid_size = (local_mini_batch_size + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
fp_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_mini_batch_size,
output_size,
col_rank,
local_input.LockedBuffer(),
local_input.LDim(),
output.Buffer(),
output.LDim(),
output.ColAlign(),
output.ColStride());
}
}
#define PROTO(T) \
template class one_hot_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class one_hot_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| e916aded58419cecda5bf6559e2ddf98c0d2ff47.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ONE_HOT_LAYER_INSTANTIATE
#include "lbann/layers/misc/one_hot.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/** See El::AbstractDistMatrix::RowOwner. */
__device__ __forceinline__
El::Int distmat_index_owner(El::Int global_index, El::Int align, El::Int stride) {
return (global_index + align) % stride;
}
/** See El::AbstractDistMatrix::LocalRow. */
__device__ __forceinline__
El::Int distmat_local_index(El::Int global_index, El::Int rank, El::Int align, El::Int stride) {
auto shift = (rank - align) % stride;
if (global_index > shift) {
return (global_index - shift - 1) / stride + 1;
}
else {
return 0;
}
}
/**
* On input, output is assumed to be filled with zeros.
*
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (local_mini_batch_size / bdim) x 1 x 1
*/
template <typename TensorDataType>
__global__ void fp_kernel(
El::Int local_mini_batch_size,
El::Int output_size,
El::Int col_rank,
const TensorDataType* __restrict__ local_input,
El::Int input_ldim,
TensorDataType* __restrict__ local_output,
El::Int output_ldim,
El::Int output_col_align,
El::Int output_col_stride) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int nthreads = blockDim.x * gridDim.x;
for (El::Int j=gid; j<local_mini_batch_size; j+=nthreads) {
const auto& x = local_input[j*input_ldim];
const auto i_global = static_cast<El::Int>(gpu_lib::floor(x));
const auto owner_rank = distmat_index_owner(
i_global,
output_col_align,
output_col_stride);
if (0 <= i_global
&& i_global < output_size
&& owner_rank == col_rank) {
const auto i = distmat_local_index(
i_global,
col_rank,
output_col_align,
output_col_stride);
local_output[i+j*output_ldim] = TensorDataType(1.f);
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void one_hot_layer<TensorDataType, Layout, Device>::fp_compute() {
// Local matrices
using AbsLocalMat = El::AbstractMatrix<TensorDataType>;
using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>;
const auto& input = this->get_prev_activations();
auto& output = this->get_activations();
auto& local_output = dynamic_cast<LocalMat&>(output.Matrix());
const El::Int local_mini_batch_size = output.LocalWidth();
const El::Int output_size = output.Height();
// Make sure all procs in column communicator have access to input
LocalMat local_input;
const auto& col_comm = input.ColComm();
const auto col_rank = El::mpi::Rank(col_comm);
const auto owner_rank = input.RowOwner(0);
if (col_rank == owner_rank) {
El::LockedView(local_input, input.LockedMatrix());
}
else {
local_input.Resize(1, input.LocalWidth());
}
/** @todo (tym1 3/12/21): We are working around a bug in Hydrogen.
* Broadcast with Matrix<T,D> is not instatiated. */
El::Broadcast(
static_cast<El::AbstractMatrix<TensorDataType>&>(local_input),
col_comm,
owner_rank);
// Populate one-hot vectors
El::Zero(output);
if (!local_output.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
gpu::get_sync_info(local_output));
constexpr size_t block_size = 64;
const size_t grid_size = (local_mini_batch_size + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
fp_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_mini_batch_size,
output_size,
col_rank,
local_input.LockedBuffer(),
local_input.LDim(),
output.Buffer(),
output.LDim(),
output.ColAlign(),
output.ColStride());
}
}
#define PROTO(T) \
template class one_hot_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class one_hot_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
0d6d934158711dc312ab1b989a14bf2186919211.hip | // !!! This is a file automatically generated by hipify!!!
/*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile iki
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by mkorpar@gmail.com.
*/
#include <stdlib.h>
#include <string.h>
#include "error.h"
#include "utils.h"
#include "cuda_utils.h"
extern void cudaGetCards(int** cards, int* cardsLen) {
#ifdef __HIPCC__
hipGetDeviceCount(cardsLen);
*cards = (int*) malloc(*cardsLen * sizeof(int));
for (int i = 0; i < *cardsLen; ++i) {
(*cards)[i] = i;
}
#else
*cards = NULL;
*cardsLen = 0;
#endif
}
extern int cudaCheckCards(int* cards, int cardsLen) {
#ifdef __HIPCC__
int maxDeviceId;
hipGetDeviceCount(&maxDeviceId);
for (int i = 0; i < cardsLen; ++i) {
if (cards[i] >= maxDeviceId) {
return 0;
}
}
return 1;
#else
return cardsLen == 0;
#endif
}
extern size_t cudaMinimalGlobalMemory(int* cards, int cardsLen) {
#ifdef __HIPCC__
if (cards == NULL || cardsLen == 0) {
return 0;
}
size_t minMem = (size_t) -1;
for (int i = 0; i < cardsLen; ++i) {
hipDeviceProp_t cdp;
hipGetDeviceProperties(&cdp, i);
minMem = MIN(minMem, cdp.totalGlobalMem);
}
return minMem;
#else
return 0;
#endif
}
| 0d6d934158711dc312ab1b989a14bf2186919211.cu | /*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by mkorpar@gmail.com.
*/
#include <stdlib.h>
#include <string.h>
#include "error.h"
#include "utils.h"
#include "cuda_utils.h"
extern void cudaGetCards(int** cards, int* cardsLen) {
#ifdef __CUDACC__
cudaGetDeviceCount(cardsLen);
*cards = (int*) malloc(*cardsLen * sizeof(int));
for (int i = 0; i < *cardsLen; ++i) {
(*cards)[i] = i;
}
#else
*cards = NULL;
*cardsLen = 0;
#endif
}
extern int cudaCheckCards(int* cards, int cardsLen) {
#ifdef __CUDACC__
int maxDeviceId;
cudaGetDeviceCount(&maxDeviceId);
for (int i = 0; i < cardsLen; ++i) {
if (cards[i] >= maxDeviceId) {
return 0;
}
}
return 1;
#else
return cardsLen == 0;
#endif
}
extern size_t cudaMinimalGlobalMemory(int* cards, int cardsLen) {
#ifdef __CUDACC__
if (cards == NULL || cardsLen == 0) {
return 0;
}
size_t minMem = (size_t) -1;
for (int i = 0; i < cardsLen; ++i) {
cudaDeviceProp cdp;
cudaGetDeviceProperties(&cdp, i);
minMem = MIN(minMem, cdp.totalGlobalMem);
}
return minMem;
#else
return 0;
#endif
}
|
5a7053bc2b7536ace7b98017c4698c918eea8344.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_round.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_round), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_round), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_round), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5a7053bc2b7536ace7b98017c4698c918eea8344.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_round.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_round<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_round<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_round<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d0a62fdb8b4b5466c07df0e03c427478d4d1d8c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert correct index so that each element is calculated by a different thread */
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
/* experiment with different values of N */
/* how large can you make it? */
#define N 32
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
hipMemset( d_c, 0, size );
/* launch the kernel on the GPU */
/* insert the correct launch parameters to use 1 block and N threads */
hipLaunchKernelGGL(( add), dim3(1) , dim3(N), 0, 0, d_a, d_b, d_c );
/* copy result back to host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
for( int i = 0; i < N; i++ )
{
printf("c[%d] = %d\n",i,c[i]);
} /* end for */
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| d0a62fdb8b4b5466c07df0e03c427478d4d1d8c2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert correct index so that each element is calculated by a different thread */
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
/* experiment with different values of N */
/* how large can you make it? */
#define N 32
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
cudaMemset( d_c, 0, size );
/* launch the kernel on the GPU */
/* insert the correct launch parameters to use 1 block and N threads */
add<<< 1 , N>>>( d_a, d_b, d_c );
/* copy result back to host */
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
for( int i = 0; i < N; i++ )
{
printf("c[%d] = %d\n",i,c[i]);
} /* end for */
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
|
1f7a0567abff657a6ff1dab0c635463303c8d673.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
#include <cutf/math.hpp>
#include <cutf/type.hpp>
#include <cutf/memory.hpp>
#include <cutf/cublas.hpp>
#include <cutf/device.hpp>
#include <cutf/error.hpp>
template <class T>
__device__ T sign(const T v){
if( v < cutf::cuda::type::cast<T>(0.0f) ){
return -v;
}else{
return v;
}
}
template <class T>
__global__ void kernel_if(const T* const a, T* const b){
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
b[tid] = sign(a[tid]);
}
template <class T>
__global__ void kernel_cutf(const T* const a, T* const b){
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
b[tid] = cutf::cuda::math::sign(a[tid]);
}
template <std::size_t N, std::size_t C, class T, class Func>
void test(Func func){
std::cout<<__func__<<std::endl;
auto dF = cutf::cuda::memory::get_device_unique_ptr<T>(N);
auto hF = cutf::cuda::memory::get_host_unique_ptr<T>(N);
auto dI = cutf::cuda::memory::get_device_unique_ptr<T>(N);
auto hI = cutf::cuda::memory::get_host_unique_ptr<T>(N);
for(auto i = decltype(N)(0); i < N; i++){
hF.get()[i] = cutf::cuda::type::cast<T>((static_cast<float>(N/2) - i) * 10.0f);
}
cutf::cuda::memory::copy(dF.get(), hF.get(), N);
for(std::size_t c = 0; c < C; c++)
func(dF.get(), dI.get());
cutf::cuda::memory::copy(hI.get(), dI.get(), N);
}
template <std::size_t N, class T>
void test_if(const T* const a, T* const b){
hipLaunchKernelGGL(( kernel_if<T>), dim3(N), dim3(1), 0, 0, a, b);
}
template <std::size_t N, class T>
void test_cutf(const T* const a, T* const b){
hipLaunchKernelGGL(( kernel_cutf<T>), dim3(N), dim3(1), 0, 0, a, b);
}
int main(){
constexpr std::size_t N = 1 << 10;
constexpr std::size_t C = 1 << 13;
test<N, C, half>(test_if<N, half>);
test<N, C, float>(test_if<N, float>);
test<N, C, double>(test_if<N, double>);
test<N, C, half>(test_cutf<N, half>);
test<N, C, float>(test_cutf<N, float>);
test<N, C, double>(test_cutf<N, double>);
}
| 1f7a0567abff657a6ff1dab0c635463303c8d673.cu | #include <iostream>
#include <random>
#include <cutf/math.hpp>
#include <cutf/type.hpp>
#include <cutf/memory.hpp>
#include <cutf/cublas.hpp>
#include <cutf/device.hpp>
#include <cutf/error.hpp>
template <class T>
__device__ T sign(const T v){
if( v < cutf::cuda::type::cast<T>(0.0f) ){
return -v;
}else{
return v;
}
}
template <class T>
__global__ void kernel_if(const T* const a, T* const b){
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
b[tid] = sign(a[tid]);
}
template <class T>
__global__ void kernel_cutf(const T* const a, T* const b){
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
b[tid] = cutf::cuda::math::sign(a[tid]);
}
template <std::size_t N, std::size_t C, class T, class Func>
void test(Func func){
std::cout<<__func__<<std::endl;
auto dF = cutf::cuda::memory::get_device_unique_ptr<T>(N);
auto hF = cutf::cuda::memory::get_host_unique_ptr<T>(N);
auto dI = cutf::cuda::memory::get_device_unique_ptr<T>(N);
auto hI = cutf::cuda::memory::get_host_unique_ptr<T>(N);
for(auto i = decltype(N)(0); i < N; i++){
hF.get()[i] = cutf::cuda::type::cast<T>((static_cast<float>(N/2) - i) * 10.0f);
}
cutf::cuda::memory::copy(dF.get(), hF.get(), N);
for(std::size_t c = 0; c < C; c++)
func(dF.get(), dI.get());
cutf::cuda::memory::copy(hI.get(), dI.get(), N);
}
template <std::size_t N, class T>
void test_if(const T* const a, T* const b){
kernel_if<T><<<N, 1>>>(a, b);
}
template <std::size_t N, class T>
void test_cutf(const T* const a, T* const b){
kernel_cutf<T><<<N, 1>>>(a, b);
}
int main(){
constexpr std::size_t N = 1 << 10;
constexpr std::size_t C = 1 << 13;
test<N, C, half>(test_if<N, half>);
test<N, C, float>(test_if<N, float>);
test<N, C, double>(test_if<N, double>);
test<N, C, half>(test_cutf<N, half>);
test<N, C, float>(test_cutf<N, float>);
test<N, C, double>(test_cutf<N, double>);
}
|
f2583254a55d256c082b2fb902e0e70ef47c5f16.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_erf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_erf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_erf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_erf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f2583254a55d256c082b2fb902e0e70ef47c5f16.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_erf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_erf<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_erf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_erf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a8bd45ba63a449fa2e661914d1f45fa6f298f4a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "layer.h"
#include <random>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <cassert>
#include <math.h>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <iostream>
using namespace cudl;
/****************************************************************
* Layer definition *
****************************************************************/
Layer::Layer()
{
/* do nothing */
}
Layer::~Layer()
{
#if (DEBUG_FORWARD > 0 || DEBUG_BACKWARD > 0)
std::cout << "Destroy Layer: " << name_ << std::endl;
#endif
if (output_ != nullptr) delete output_;
if (grad_input_ != nullptr) delete grad_input_;
if (weights_ != nullptr) delete weights_;
if (biases_ != nullptr) delete biases_;
if (grad_weights_ != nullptr) delete grad_weights_;
if (grad_biases_ != nullptr) delete grad_biases_;
}
void Layer::init_weight_bias(unsigned int seed)
{
checkCudaErrors(hipDeviceSynchronize());
if (weights_ == nullptr || biases_ == nullptr)
return;
// Create random network
std::random_device rd;
std::mt19937 gen(seed == 0 ? rd() : static_cast<unsigned int>(seed));
// He uniform distribution
float range = sqrt(6.f / input_->size()); // He's initialization
std::uniform_real_distribution<> dis(-range, range);
for (int i = 0; i < weights_->len(); i++)
weights_->ptr()[i] = static_cast<float>(dis(gen));
for (int i = 0; i < biases_->len(); i++)
biases_->ptr()[i] = 0.f;
// copy initialized value to the device
weights_->to(DeviceType::cuda);
biases_->to(DeviceType::cuda);
std::cout << ".. initialized " << name_ << " layer .." << std::endl;
}
void Layer::update_weights_biases(float learning_rate)
{
float eps = -1.f * learning_rate;
if (weights_ != nullptr && grad_weights_ != nullptr)
{
#if (DEBUG_UPDATE)
weights_->print(name_ + "::weights (before update)", true);
grad_weights_->print(name_ + "::gweights", true);
#endif // DEBUG_UPDATE
// w = w + eps * dw
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(),
weights_->len(),
&eps,
grad_weights_->cuda(), 1,
weights_->cuda(), 1));
#if (DEBUG_UPDATE)
weights_->print(name_ + "weights (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
if (biases_ != nullptr && grad_biases_ != nullptr)
{
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (before update)", true);
grad_biases_->print(name_ + "gbiases", true);
#endif // DEBUG_UPDATE
// b = b + eps * db
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(),
biases_->len(),
&eps,
grad_biases_->cuda(), 1,
biases_->cuda(), 1));
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
}
float Layer::get_loss(Blob<float> *target)
{
assert("No Loss layer has no loss." && false);
return EXIT_FAILURE;
}
int Layer::get_accuracy(Blob<float> *target)
{
assert("No Loss layer cannot estimate accuracy." && false);
return EXIT_FAILURE;
}
int Layer::load_parameter()
{
std::stringstream filename_weights, filename_biases;
// load weights and biases pretrained parameters
filename_weights << name_ << ".bin";
if (weights_->file_read(filename_weights.str()))
return -1;
filename_biases << name_ << ".bias.bin";
if (biases_->file_read(filename_biases.str()))
return -2;
std::cout << ".. loaded " << name_ << " pretrain parameter.." << std::endl;
return 0;
}
int Layer::save_parameter()
{
std::stringstream filename_weights, filename_biases;
std::cout << ".. saving " << name_ << " parameter ..";
// Write weights file
if (weights_)
{
filename_weights << name_ << ".bin";
if (weights_->file_write(filename_weights.str()))
return -1;
}
// Write bias file
if (biases_)
{
filename_biases << name_ << ".bias.bin";
if (biases_->file_write(filename_biases.str()))
return -2;
}
std::cout << " done .." << std::endl;
return 0;
}
/****************************************************************
* Dense Layer *
****************************************************************/
Dense::Dense(std::string name, int output_size)
{
name_ = name;
output_size_ = output_size;
}
Dense::~Dense()
{
if (d_one_vec != nullptr)
hipFree(d_one_vec);
}
__global__ void init_one_vec(float* d_one_vec, size_t length)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= length) return;
d_one_vec[i] = 1.f;
}
// Fully-connected forward pass: output = W^T * x + b, broadcast over the batch.
// Lazily allocates the parameters on the first call and re-creates the
// per-batch buffers whenever the incoming batch size changes.
Blob<float> *Dense::forward(Blob<float> *input)
{
// lazily initialize weights and biases on the first forward call
if (weights_ == nullptr)
{
// flatten the input feature map (C*H*W) into a single dimension
input_size_ = input->c() * input->h() * input->w();
// initialize weight, bias, and output
weights_ = new Blob<float>(1, 1, input_size_, output_size_);
biases_ = new Blob<float>(1, 1, output_size_);
}
// (re)initialize input/output buffers on first use or when batch size changes
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(batch_size_, output_size_);
else
output_->reset(batch_size_, output_size_);
output_->tensor();
// d_one_vec is a length-batch_size vector of ones used below to broadcast
// the bias across the batch with a rank-1 GEMM
if (d_one_vec != nullptr)
hipFree(d_one_vec);
checkCudaErrors(hipMalloc((void**)&d_one_vec, sizeof(float) * batch_size_));
hipLaunchKernelGGL(( init_one_vec), dim3((batch_size_+BLOCK_DIM_1D-1)/BLOCK_DIM_1D), dim3(BLOCK_DIM_1D) , 0, 0, d_one_vec, batch_size_);
// initialize weights and biases: load pretrained parameters, or random init
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* frozen layer without pretrained weights: keep current values */
}
}
// output = weights^T * input (without biases); BLAS is column-major, so the
// (input_size x output_size) weight matrix is passed transposed
checkCublasErrors(
hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_T, HIPBLAS_OP_N,
output_size_, batch_size_, input_size_,
&cuda_->one,
weights_->cuda(), input_size_,
input_->cuda(), input_size_,
&cuda_->zero,
output_->cuda(), output_size_));
// output += biases * d_one_vec^T (rank-1 update: adds the bias to every sample)
checkCublasErrors(hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_N, HIPBLAS_OP_N,
output_size_, batch_size_, 1,
&cuda_->one,
biases_->cuda(), output_size_,
d_one_vec, 1,
&cuda_->one,
output_->cuda(), output_size_));
#if (DEBUG_DENSE & 0x01)
input_->print( name_ + "::input", true);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif // DEBUG_DENSE
return output_;
}
// Fully-connected backward pass. From the incoming gradient dy and the cached
// forward input x, computes db (bias grad), dW (weight grad) and, unless
// gradient_stop_ is set, dx (input grad). Returns grad_input_.
Blob<float> *Dense::backward(Blob<float> *grad_output)
{
    // lazily allocate parameter-gradient buffers (shapes never change)
    if (grad_weights_ == nullptr)
    {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(biases_->shape());
    }
    // (re)allocate the input-gradient buffer on first use or batch-size change
    if (grad_input_ == nullptr || batch_size_ != grad_output->n())
    {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // db = dy * one_vec: sums dy over the batch dimension.
    // BLAS statuses are now checked, consistent with forward().
    checkCublasErrors(hipblasSgemv(cuda_->cublas(),
                HIPBLAS_OP_N,
                output_size_, batch_size_,
                &cuda_->one,
                grad_output_->cuda(), output_size_,
                d_one_vec, 1,
                &cuda_->zero,
                grad_biases_->cuda(), 1));
    // dw = x * (dy)^T
    checkCublasErrors(hipblasSgemm(cuda_->cublas(),
                HIPBLAS_OP_N, HIPBLAS_OP_T,
                input_size_, output_size_, batch_size_,
                &cuda_->one,
                input_->cuda(), input_size_,
                grad_output_->cuda(), output_size_,
                &cuda_->zero,
                grad_weights_->cuda(), input_size_));
    // dx = W * dy (skipped when gradient_stop_ is set)
    if (!gradient_stop_)
        checkCublasErrors(hipblasSgemm(cuda_->cublas(),
                    HIPBLAS_OP_N, HIPBLAS_OP_N,
                    input_size_, batch_size_, output_size_,
                    &cuda_->one,
                    weights_->cuda(), input_size_,
                    grad_output_->cuda(), output_size_,
                    &cuda_->zero,
                    grad_input_->cuda(), input_size_));
#if (DEBUG_DENSE & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print( name_ + "::gradients", true, grad_output->n());
    grad_weights_->print(name_ + "::gfilter", true);
    grad_biases_->print( name_ + "::gbias", true);
    if (!gradient_stop_)
        grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_DENSE
    return grad_input_;
}
/****************************************************************
* Activation Layer *
****************************************************************/
// Elementwise activation layer backed by cuDNN.
// mode selects the activation (e.g. relu/sigmoid/tanh); coef is the cuDNN
// activation coefficient (meaning depends on mode — see cuDNN docs).
Activation::Activation(std::string name, cudnnActivationMode_t mode, float coef)
{
    name_ = name;
    mode_ = mode;
    coef_ = coef;
    // status checks added for consistency with the other cuDNN calls in this file
    checkCudnnErrors(cudnnCreateActivationDescriptor(&act_desc_));
    checkCudnnErrors(cudnnSetActivationDescriptor(act_desc_, mode, CUDNN_PROPAGATE_NAN, coef));
}
// Release the cuDNN activation descriptor created in the constructor.
Activation::~Activation()
{
cudnnDestroyActivationDescriptor(act_desc_);
}
// Applies the configured activation elementwise: output = act(input).
// Rebinds descriptors/buffers on first use or when the batch size changes.
Blob<float> *Activation::forward(Blob<float> *input)
{
    if (input_ == nullptr || batch_size_ != input->n())
    {
        input_ = input;
        input_desc_ = input->tensor();
        batch_size_ = input->n();
        if (output_ == nullptr)
            output_ = new Blob<float>(input->shape());
        else
            output_->reset(input->shape());
        output_desc_ = output_->tensor();
    }
    // status check added for consistency with the other cuDNN calls in this file
    checkCudnnErrors(cudnnActivationForward(cuda_->cudnn(),
                act_desc_,
                &cuda_->one,
                input_desc_,
                input->cuda(),
                &cuda_->zero,
                output_desc_,
                output_->cuda()));
    return output_;
}
// Backward pass: grad_input = act'(x) applied to grad_output, using the
// cached forward input/output as required by cudnnActivationBackward.
Blob<float> *Activation::backward(Blob<float> *grad_output)
{
    if (grad_input_ == nullptr || batch_size_ != grad_output->n())
    {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // status check added for consistency with the other cuDNN calls in this file
    checkCudnnErrors(cudnnActivationBackward(cuda_->cudnn(),
                act_desc_,
                &cuda_->one,
                output_desc_, output_->cuda(),
                output_desc_, grad_output->cuda(),
                input_desc_, input_->cuda(),
                &cuda_->zero,
                input_desc_, grad_input_->cuda()));
    return grad_input_;
}
/****************************************************************
* Softmax definition *
****************************************************************/
// Softmax output layer; owns no cuDNN resources of its own, so the
// constructor only records the layer name and the destructor is empty.
Softmax::Softmax(std::string name)
{
name_ = name;
}
Softmax::~Softmax()
{
}
// Softmax forward pass over the channel dimension (per-sample class scores).
// Rebinds descriptors/buffers on first use or when the batch size changes.
Blob<float> *Softmax::forward(Blob<float> *input)
{
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(input->shape());
else
output_->reset(input->shape());
output_desc_ = output_->tensor();
}
#if (DEBUG_SOFTMAX & 0x01)
std::cout << name_ << "[FORWARD]" << std::endl;
input_->print(name_ + "::input", true, input->n());
#endif
// ACCURATE mode subtracts the per-sample max before exponentiation for
// numerical stability; MODE_CHANNEL normalizes across C per (n,h,w)
checkCudnnErrors(
cudnnSoftmaxForward(cuda_->cudnn(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&cuda_->one, input_desc_, input->cuda(),
&cuda_->zero, output_desc_, output_->cuda()));
#if (DEBUG_SOFTMAX & 0x01)
output_->print(name_ + "::output", true, input->n());
#endif
return output_;
}
// Softmax + cross-entropy combined gradient: dx = (predict - target) / N.
// `target` is expected to be one-hot (or probability) labels with the same
// layout as the softmax output.
Blob<float> *Softmax::backward(Blob<float> *target)
{
// make sure the forward pass has finished before reading output_
checkCudaErrors(hipDeviceSynchronize());
if (grad_input_ == nullptr || batch_size_ != target->n())
{
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
// set grad_input_ as predict
checkCudaErrors(hipMemcpyAsync(grad_input_->cuda(),
output_->cuda(), output_->buf_size(),
hipMemcpyDeviceToDevice));
// set grad_input_ = predict - target
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(), target->len(),
&cuda_->minus_one, target->cuda(), 1,
grad_input_->cuda(), 1));
// normalize the grad_output by the batch size
int grad_output_size = target->n() * target->c() * target->h() * target->w();
float scale = 1.f / static_cast<float>(target->n());
checkCublasErrors(hipblasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
input_->print( name_ + "::input", true);
output_->print(name_ + "::predict", true);
target->print( name_ + "::y", true, target->n());
grad_input_->print(name_ + "::dx", true, target->n());
#endif
return grad_input_;
}
// Delegate loss computation to the internal loss object, comparing the
// cached softmax output against the given target labels.
float Softmax::get_loss(Blob<float> *target)
{
return loss_.loss(output_, target);
}
// Count how many samples in the batch have argmax(prediction) == argmax(target).
// Copies both blobs to the host; returns the number of hits in [0, batch_size].
int Softmax::get_accuracy(Blob<float> *target)
{
    int batch_size = output_->n();
    int output_size = output_->size();
    assert(batch_size == target->n());
    assert(output_size == target->size());
    float *h_output, *h_target;
    int idx_output, idx_target;
    int hit_count = 0;
    // get predicts and targets
    h_output = output_->to(host);
    h_target = target->to(host);
    for (int b = 0; b < batch_size; b++)
    {
        // argmax over the class dimension for both prediction and label
        idx_output = 0;
        idx_target = 0;
        // was hard-coded to 10 classes; generalized to the actual output size
        for (int i = 1; i < output_size; i++)
        {
            if (h_output[b * output_size + i] > h_output[b * output_size + idx_output])
                idx_output = i;
            if (h_target[b * output_size + i] > h_target[b * output_size + idx_target])
                idx_target = i;
        }
        if (idx_output == idx_target)
            hit_count++;
    }
    return hit_count;
}
/****************************************************************
* Layer definition *
****************************************************************/
/**
* Convolutional layer with bias
*/
// 2-D convolution layer (cross-correlation, float32).
// out_channels: number of filters; kernel_size: square kernel edge length;
// stride/padding/dilation are applied symmetrically in H and W.
Conv2D::Conv2D(std::string name,
int out_channels,
int kernel_size,
int stride,
int padding,
int dilation):
out_channels_(out_channels),
kernel_size_(kernel_size),
stride_(stride),
padding_(padding),
dilation_(dilation)
{
name_ = name;
// create cudnn container handles; the filter descriptor is filled in
// forward() once the input channel count is known
cudnnCreateFilterDescriptor(&filter_desc_);
cudnnCreateConvolutionDescriptor(&conv_desc_);
checkCudnnErrors(cudnnSetConvolution2dDescriptor(conv_desc_,
padding_, padding_, stride_, stride_, dilation_,dilation_,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
}
// Release cuDNN descriptors and the shared convolution workspace.
Conv2D::~Conv2D()
{
// destroy cudnn container resources
cudnnDestroyFilterDescriptor(filter_desc_);
cudnnDestroyConvolutionDescriptor(conv_desc_);
// free the algorithm workspace allocated in set_workspace()
if (d_workspace != nullptr) hipFree(d_workspace);
}
// Pick fastest cuDNN algorithms for forward / backward-filter / backward-data
// and allocate one workspace big enough for all three (sized to the max).
// Uses the legacy "Get...Algorithm" API (cuDNN v7 style). Must be called
// after input/output/filter descriptors are valid; re-called on batch change.
void Conv2D::set_workspace()
{
size_t temp_size = 0;
// forward
checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_fwd_algo_));
checkCudnnErrors(cudnnGetConvolutionForwardWorkspaceSize(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
conv_fwd_algo_, &temp_size));
workspace_size = ::max(workspace_size, temp_size);
// todo trainable check
// bwd - filter
checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv_bwd_filter_algo_));
checkCudnnErrors(cudnnGetConvolutionBackwardFilterWorkspaceSize(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
conv_bwd_filter_algo_, &temp_size));
workspace_size = ::max(workspace_size, temp_size);
// bwd - data
checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &conv_bwd_data_algo_));
checkCudnnErrors(cudnnGetConvolutionBackwardDataWorkspaceSize(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
conv_bwd_data_algo_, &temp_size));
workspace_size = ::max(workspace_size, temp_size);
// (re)allocate the shared workspace when any algorithm needs scratch space
if (workspace_size > 0)
{
if (d_workspace != nullptr)
checkCudaErrors(hipFree(d_workspace));
checkCudaErrors(hipMalloc((void**)&d_workspace, workspace_size));
}
}
// Convolution forward pass: output = conv(input, W) + b.
// Lazily creates parameters on the first call (the input channel count is
// only known here) and rebuilds output buffers/workspace on batch change.
Blob<float> *Conv2D::forward(Blob<float> *input)
{
// initialize weights and bias on the first forward call
if (weights_ == nullptr)
{
// initialize containers handles
checkCudnnErrors(cudnnSetFilter4dDescriptor(filter_desc_,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
out_channels_, input->c(), kernel_size_, kernel_size_));
weights_ = new Blob<float>(out_channels_, input->c(), kernel_size_, kernel_size_);
biases_ = new Blob<float>(1, out_channels_); // bias size
bias_desc_ = biases_->tensor();
}
// (re)initialize input and output on first use or batch-size change
if (input_ == nullptr || batch_size_ != input->n())
{
// initialize input
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
// ask cuDNN for the output dimensions implied by the conv descriptor
checkCudnnErrors(cudnnGetConvolution2dForwardOutputDim(
conv_desc_, input_desc_, filter_desc_,
&output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));
if (output_ == nullptr)
output_ = new Blob<float>(output_size_);
else
output_->reset(output_size_);
output_desc_ = output_->tensor();
// choose algorithms and allocate the workspace for cudnn
set_workspace();
// initialize weights: load pretrained parameters, or random init
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* frozen layer without pretrained weights: keep current values */
}
}
checkCudnnErrors(cudnnConvolutionForward(cuda_->cudnn(),
&cuda_->one, input_desc_, input_->cuda(),
filter_desc_, weights_->cuda(), conv_desc_, conv_fwd_algo_, d_workspace, workspace_size,
&cuda_->zero, output_desc_, output_->cuda()));
// add the per-channel bias to every output position
checkCudnnErrors(cudnnAddTensor(cuda_->cudnn(),
&cuda_->one, bias_desc_, biases_->cuda(),
&cuda_->one, output_desc_, output_->cuda()));
#if (DEBUG_CONV & 0x01)
input_->print( name_ + "::input", true, input_->n(), 28);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif
return output_;
}
// Convolution backward pass: computes bias, filter and (optionally) data
// gradients using the algorithms/workspace selected in set_workspace().
Blob<float> *Conv2D::backward(Blob<float> *grad_output)
{
    // Allocate parameter-gradient blobs only once (their shapes do not depend
    // on the batch size). The original re-`new`ed them on every batch-size
    // change, leaking the previously allocated blobs.
    if (grad_weights_ == nullptr)
    {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(1, biases_->c());
    }
    // (re)allocate the data-gradient blob on first use or batch-size change
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // gradients of biases
    checkCudnnErrors(
        cudnnConvolutionBackwardBias(cuda_->cudnn(),
            &cuda_->one,
            output_desc_, grad_output->cuda(),
            &cuda_->zero,
            bias_desc_, grad_biases_->cuda()));
    // gradients of weights
    checkCudnnErrors(
        cudnnConvolutionBackwardFilter(cuda_->cudnn(),
            &cuda_->one,
            input_desc_, input_->cuda(),
            output_desc_, grad_output_->cuda(),
            conv_desc_, conv_bwd_filter_algo_, d_workspace, workspace_size,
            &cuda_->zero,
            filter_desc_, grad_weights_->cuda()));
    // gradients of input data (skipped when gradient_stop_ is set)
    if (!gradient_stop_)
        checkCudnnErrors(
            cudnnConvolutionBackwardData(cuda_->cudnn(),
                &cuda_->one,
                filter_desc_, weights_->cuda(),
                output_desc_, grad_output->cuda(),
                conv_desc_, conv_bwd_data_algo_, d_workspace, workspace_size,
                &cuda_->zero,
                input_desc_, grad_input_->cuda()));
#if (DEBUG_CONV & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print(name_ + "gbias", true);
    grad_weights_->print(name_+ "gfilter", true);
    if (!gradient_stop_)
        grad_input_->print(name_+"gdata", true);
#endif
#if (DEBUG_CONV & 0x04)
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print( name_ + "::gbias", true);
#endif
    return grad_input_;
}
/****************************************************************
* Layer definition *
****************************************************************/
// 2-D pooling layer (max or average, per `mode`), square window with
// symmetric padding and stride.
Pooling::Pooling(std::string name,
int kernel_size,
int padding,
int stride,
cudnnPoolingMode_t mode):
kernel_size_(kernel_size),
padding_(padding),
stride_(stride),
mode_(mode)
{
name_ = name;
// create and configure the cuDNN pooling descriptor once
cudnnCreatePoolingDescriptor(&pool_desc_);
cudnnSetPooling2dDescriptor(pool_desc_, mode_, CUDNN_PROPAGATE_NAN,
kernel_size_, kernel_size_, padding_, padding_, stride_, stride_);
}
// Release the cuDNN pooling descriptor created in the constructor.
Pooling::~Pooling()
{
cudnnDestroyPoolingDescriptor(pool_desc_);
}
// Pooling forward pass. Derives the output shape from the pooling descriptor
// on first use / batch-size change, then runs cudnnPoolingForward.
Blob<float> *Pooling::forward(Blob<float> *input)
{
    if (input_ == nullptr || batch_size_ != input->n())
    {
        input_ = input;
        // resource initialize
        input_desc_ = input_->tensor();
        batch_size_ = input->n();
        // ask cuDNN for the pooled output dimensions; status checks added
        // for consistency with backward()
        checkCudnnErrors(cudnnGetPooling2dForwardOutputDim(pool_desc_, input_desc_,
                &output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));
        if (output_ == nullptr)
            output_ = new Blob<float>(output_size_);
        else
            output_->reset(output_size_);
        output_desc_ = output_->tensor();
    }
    checkCudnnErrors(cudnnPoolingForward(cuda_->cudnn(), pool_desc_,
            &cuda_->one, input_desc_, input_->cuda(),
            &cuda_->zero, output_desc_, output_->cuda()));
    return output_;
}
// Pooling backward pass: routes grad_output back through the pooling window
// using the cached forward input/output, producing grad_input_.
Blob<float> *Pooling::backward(Blob<float> *grad_output)
{
if (grad_input_ == nullptr || batch_size_ != grad_output->n())
{
grad_output_ = grad_output;
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
checkCudnnErrors(
cudnnPoolingBackward(cuda_->cudnn(), pool_desc_,
&cuda_->one,
output_desc_, output_->cuda(),
output_desc_, grad_output->cuda(),
input_desc_, input_->cuda(),
&cuda_->zero,
input_desc_, grad_input_->cuda()));
return grad_input_;
}
| a8bd45ba63a449fa2e661914d1f45fa6f298f4a2.cu | #include "layer.h"
#include <random>
#include <cuda_runtime.h>
#include <curand.h>
#include <cassert>
#include <math.h>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <iostream>
using namespace cudl;
/****************************************************************
* Layer definition *
****************************************************************/
// Base layer: default-constructed; derived layers allocate blobs lazily.
Layer::Layer()
{
/* do nothing */
}
// Free every blob this layer may have allocated (outputs, gradients,
// parameters). Null checks make this safe for layers that never ran.
Layer::~Layer()
{
#if (DEBUG_FORWARD > 0 || DEBUG_BACKWARD > 0)
std::cout << "Destroy Layer: " << name_ << std::endl;
#endif
if (output_ != nullptr) delete output_;
if (grad_input_ != nullptr) delete grad_input_;
if (weights_ != nullptr) delete weights_;
if (biases_ != nullptr) delete biases_;
if (grad_weights_ != nullptr) delete grad_weights_;
if (grad_biases_ != nullptr) delete grad_biases_;
}
// Randomly initialize weights (He-style uniform) and zero the biases on the
// host, then upload both to the device. seed == 0 selects a nondeterministic
// seed from std::random_device; any other value gives reproducible init.
void Layer::init_weight_bias(unsigned int seed)
{
checkCudaErrors(cudaDeviceSynchronize());
// nothing to do for layers without parameters
if (weights_ == nullptr || biases_ == nullptr)
return;
// Create random network
std::random_device rd;
std::mt19937 gen(seed == 0 ? rd() : static_cast<unsigned int>(seed));
// He uniform distribution: range scales with the fan-in (input size)
float range = sqrt(6.f / input_->size()); // He's initialization
std::uniform_real_distribution<> dis(-range, range);
for (int i = 0; i < weights_->len(); i++)
weights_->ptr()[i] = static_cast<float>(dis(gen));
for (int i = 0; i < biases_->len(); i++)
biases_->ptr()[i] = 0.f;
// copy initialized value to the device
weights_->to(DeviceType::cuda);
biases_->to(DeviceType::cuda);
std::cout << ".. initialized " << name_ << " layer .." << std::endl;
}
// Vanilla SGD update: param += (-learning_rate) * grad, applied to weights
// and biases via cublasSaxpy. Layers without gradients are skipped.
void Layer::update_weights_biases(float learning_rate)
{
// negate so that axpy performs a gradient-descent step
float eps = -1.f * learning_rate;
if (weights_ != nullptr && grad_weights_ != nullptr)
{
#if (DEBUG_UPDATE)
weights_->print(name_ + "::weights (before update)", true);
grad_weights_->print(name_ + "::gweights", true);
#endif // DEBUG_UPDATE
// w = w + eps * dw
checkCublasErrors(
cublasSaxpy(cuda_->cublas(),
weights_->len(),
&eps,
grad_weights_->cuda(), 1,
weights_->cuda(), 1));
#if (DEBUG_UPDATE)
weights_->print(name_ + "weights (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
if (biases_ != nullptr && grad_biases_ != nullptr)
{
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (before update)", true);
grad_biases_->print(name_ + "gbiases", true);
#endif // DEBUG_UPDATE
// b = b + eps * db
checkCublasErrors(
cublasSaxpy(cuda_->cublas(),
biases_->len(),
&eps,
grad_biases_->cuda(), 1,
biases_->cuda(), 1));
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
}
// Base-class stubs: only loss layers (e.g. Softmax) override these.
// Calling them on a non-loss layer asserts in debug builds and returns
// EXIT_FAILURE otherwise.
float Layer::get_loss(Blob<float> *target)
{
assert("No Loss layer has no loss." && false);
return EXIT_FAILURE;
}
int Layer::get_accuracy(Blob<float> *target)
{
assert("No Loss layer cannot estimate accuracy." && false);
return EXIT_FAILURE;
}
// Load pretrained weights from "<name>.bin" and biases from "<name>.bias.bin".
// Returns 0 on success, -1 if the weight file fails, -2 if the bias file fails.
int Layer::load_parameter()
{
std::stringstream filename_weights, filename_biases;
// load weights and biases pretrained parameters
filename_weights << name_ << ".bin";
if (weights_->file_read(filename_weights.str()))
return -1;
filename_biases << name_ << ".bias.bin";
if (biases_->file_read(filename_biases.str()))
return -2;
std::cout << ".. loaded " << name_ << " pretrain parameter.." << std::endl;
return 0;
}
// Save current weights to "<name>.bin" and biases to "<name>.bias.bin".
// Parameter-less layers write nothing. Returns 0 on success, -1/-2 on
// weight/bias write failure respectively.
int Layer::save_parameter()
{
std::stringstream filename_weights, filename_biases;
std::cout << ".. saving " << name_ << " parameter ..";
// Write weights file
if (weights_)
{
filename_weights << name_ << ".bin";
if (weights_->file_write(filename_weights.str()))
return -1;
}
// Write bias file
if (biases_)
{
filename_biases << name_ << ".bias.bin";
if (biases_->file_write(filename_biases.str()))
return -2;
}
std::cout << " done .." << std::endl;
return 0;
}
/****************************************************************
* Dense Layer *
****************************************************************/
// Fully-connected (dense) layer producing `output_size` features per sample.
// Weights/biases are allocated lazily in forward() once the input size is known.
Dense::Dense(std::string name, int output_size)
{
name_ = name;
output_size_ = output_size;
}
// Free the device-side all-ones vector used for bias broadcasting.
Dense::~Dense()
{
if (d_one_vec != nullptr)
cudaFree(d_one_vec);
}
// Fill d_one_vec[0..length) with 1.0f (used as an all-ones vector for bias
// broadcast in the Dense layer). Launch with >= ceil(length/blockDim.x) blocks;
// surplus threads fall through the bounds guard.
__global__ void init_one_vec(float* d_one_vec, size_t length)
{
    // size_t index avoids the signed/unsigned comparison against `length`
    // and overflow of blockIdx.x * blockDim.x for very large grids.
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= length) return;
    d_one_vec[i] = 1.f;
}
Blob<float> *Dense::forward(Blob<float> *input)
{
// initialize weights and biases
if (weights_ == nullptr)
{
// setup parameter size information
input_size_ = input->c() * input->h() * input->w();
// initialize weight, bias, and output
weights_ = new Blob<float>(1, 1, input_size_, output_size_);
biases_ = new Blob<float>(1, 1, output_size_);
}
// initilaize input and output
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(batch_size_, output_size_);
else
output_->reset(batch_size_, output_size_);
output_->tensor();
if (d_one_vec != nullptr)
cudaFree(d_one_vec);
checkCudaErrors(cudaMalloc((void**)&d_one_vec, sizeof(float) * batch_size_));
init_one_vec<<< (batch_size_+BLOCK_DIM_1D-1)/BLOCK_DIM_1D, BLOCK_DIM_1D >>>(d_one_vec, batch_size_);
// initialize weights and biases
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* do nothing */
}
}
// output = weights^T * input (without biases)
checkCublasErrors(
cublasSgemm(cuda_->cublas(),
CUBLAS_OP_T, CUBLAS_OP_N,
output_size_, batch_size_, input_size_,
&cuda_->one,
weights_->cuda(), input_size_,
input_->cuda(), input_size_,
&cuda_->zero,
output_->cuda(), output_size_));
// output += biases * d_one_vec^T
checkCublasErrors(cublasSgemm(cuda_->cublas(),
CUBLAS_OP_N, CUBLAS_OP_N,
output_size_, batch_size_, 1,
&cuda_->one,
biases_->cuda(), output_size_,
d_one_vec, 1,
&cuda_->one,
output_->cuda(), output_size_));
#if (DEBUG_DENSE & 0x01)
input_->print( name_ + "::input", true);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif // DEBUG_DENSE
return output_;
}
// Fully-connected backward pass. From the incoming gradient dy and the cached
// forward input x, computes db (bias grad), dW (weight grad) and, unless
// gradient_stop_ is set, dx (input grad). Returns grad_input_.
Blob<float> *Dense::backward(Blob<float> *grad_output)
{
    // lazily allocate parameter-gradient buffers (shapes never change)
    if (grad_weights_ == nullptr)
    {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(biases_->shape());
    }
    // (re)allocate the input-gradient buffer on first use or batch-size change
    if (grad_input_ == nullptr || batch_size_ != grad_output->n())
    {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // db = dy * one_vec: sums dy over the batch dimension.
    // BLAS statuses are now checked, consistent with forward().
    checkCublasErrors(cublasSgemv(cuda_->cublas(),
                CUBLAS_OP_N,
                output_size_, batch_size_,
                &cuda_->one,
                grad_output_->cuda(), output_size_,
                d_one_vec, 1,
                &cuda_->zero,
                grad_biases_->cuda(), 1));
    // dw = x * (dy)^T
    checkCublasErrors(cublasSgemm(cuda_->cublas(),
                CUBLAS_OP_N, CUBLAS_OP_T,
                input_size_, output_size_, batch_size_,
                &cuda_->one,
                input_->cuda(), input_size_,
                grad_output_->cuda(), output_size_,
                &cuda_->zero,
                grad_weights_->cuda(), input_size_));
    // dx = W * dy (skipped when gradient_stop_ is set)
    if (!gradient_stop_)
        checkCublasErrors(cublasSgemm(cuda_->cublas(),
                    CUBLAS_OP_N, CUBLAS_OP_N,
                    input_size_, batch_size_, output_size_,
                    &cuda_->one,
                    weights_->cuda(), input_size_,
                    grad_output_->cuda(), output_size_,
                    &cuda_->zero,
                    grad_input_->cuda(), input_size_));
#if (DEBUG_DENSE & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print( name_ + "::gradients", true, grad_output->n());
    grad_weights_->print(name_ + "::gfilter", true);
    grad_biases_->print( name_ + "::gbias", true);
    if (!gradient_stop_)
        grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_DENSE
    return grad_input_;
}
/****************************************************************
* Activation Layer *
****************************************************************/
Activation::Activation(std::string name, cudnnActivationMode_t mode, float coef)
{
name_ = name;
mode_ = mode;
coef_ = coef;
cudnnCreateActivationDescriptor(&act_desc_);
cudnnSetActivationDescriptor(act_desc_, mode, CUDNN_PROPAGATE_NAN, coef);
}
Activation::~Activation()
{
cudnnDestroyActivationDescriptor(act_desc_);
}
Blob<float> *Activation::forward(Blob<float> *input)
{
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(input->shape());
else
output_->reset(input->shape());
output_desc_ = output_->tensor();
}
cudnnActivationForward(cuda_->cudnn(),
act_desc_,
&cuda_->one,
input_desc_,
input->cuda(),
&cuda_->zero,
output_desc_,
output_->cuda());
return output_;
}
Blob<float> *Activation::backward(Blob<float> *grad_output)
{
if (grad_input_ == nullptr || batch_size_ != grad_output->n())
{
grad_output_ = grad_output;
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
cudnnActivationBackward(cuda_->cudnn(),
act_desc_,
&cuda_->one,
output_desc_, output_->cuda(),
output_desc_, grad_output->cuda(),
input_desc_, input_->cuda(),
&cuda_->zero,
input_desc_, grad_input_->cuda());
return grad_input_;
}
/****************************************************************
* Softmax definition *
****************************************************************/
Softmax::Softmax(std::string name)
{
name_ = name;
}
Softmax::~Softmax()
{
}
Blob<float> *Softmax::forward(Blob<float> *input)
{
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(input->shape());
else
output_->reset(input->shape());
output_desc_ = output_->tensor();
}
#if (DEBUG_SOFTMAX & 0x01)
std::cout << name_ << "[FORWARD]" << std::endl;
input_->print(name_ + "::input", true, input->n());
#endif
checkCudnnErrors(
cudnnSoftmaxForward(cuda_->cudnn(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&cuda_->one, input_desc_, input->cuda(),
&cuda_->zero, output_desc_, output_->cuda()));
#if (DEBUG_SOFTMAX & 0x01)
output_->print(name_ + "::output", true, input->n());
#endif
return output_;
}
Blob<float> *Softmax::backward(Blob<float> *target)
{
checkCudaErrors(cudaDeviceSynchronize());
if (grad_input_ == nullptr || batch_size_ != target->n())
{
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
// set grad_input_ as predict
checkCudaErrors(cudaMemcpyAsync(grad_input_->cuda(),
output_->cuda(), output_->buf_size(),
cudaMemcpyDeviceToDevice));
// set grad_input_ = predict - target
checkCublasErrors(
cublasSaxpy(cuda_->cublas(), target->len(),
&cuda_->minus_one, target->cuda(), 1,
grad_input_->cuda(), 1));
// normalize the grad_output by the batch size
int grad_output_size = target->n() * target->c() * target->h() * target->w();
float scale = 1.f / static_cast<float>(target->n());
checkCublasErrors(cublasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
input_->print( name_ + "::input", true);
output_->print(name_ + "::predict", true);
target->print( name_ + "::y", true, target->n());
grad_input_->print(name_ + "::dx", true, target->n());
#endif
return grad_input_;
}
float Softmax::get_loss(Blob<float> *target)
{
return loss_.loss(output_, target);
}
// Count how many samples in the batch have argmax(prediction) == argmax(target).
// Copies both blobs to the host; returns the number of hits in [0, batch_size].
int Softmax::get_accuracy(Blob<float> *target)
{
    int batch_size = output_->n();
    int output_size = output_->size();
    assert(batch_size == target->n());
    assert(output_size == target->size());
    float *h_output, *h_target;
    int idx_output, idx_target;
    int hit_count = 0;
    // get predicts and targets
    h_output = output_->to(host);
    h_target = target->to(host);
    for (int b = 0; b < batch_size; b++)
    {
        // argmax over the class dimension for both prediction and label
        idx_output = 0;
        idx_target = 0;
        // was hard-coded to 10 classes; generalized to the actual output size
        for (int i = 1; i < output_size; i++)
        {
            if (h_output[b * output_size + i] > h_output[b * output_size + idx_output])
                idx_output = i;
            if (h_target[b * output_size + i] > h_target[b * output_size + idx_target])
                idx_target = i;
        }
        if (idx_output == idx_target)
            hit_count++;
    }
    return hit_count;
}
/****************************************************************
* Layer definition *
****************************************************************/
/**
* Convolutional layer with bias
*/
Conv2D::Conv2D(std::string name,
int out_channels,
int kernel_size,
int stride,
int padding,
int dilation):
out_channels_(out_channels),
kernel_size_(kernel_size),
stride_(stride),
padding_(padding),
dilation_(dilation)
{
name_ = name;
// create cudnn container handles
cudnnCreateFilterDescriptor(&filter_desc_);
cudnnCreateConvolutionDescriptor(&conv_desc_);
checkCudnnErrors(cudnnSetConvolution2dDescriptor(conv_desc_,
padding_, padding_, stride_, stride_, dilation_,dilation_,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
}
Conv2D::~Conv2D()
{
// distroy cudnn container resources
cudnnDestroyFilterDescriptor(filter_desc_);
cudnnDestroyConvolutionDescriptor(conv_desc_);
// terminate internal created blobs
if (d_workspace != nullptr) cudaFree(d_workspace);
}
void Conv2D::set_workspace()
{
size_t temp_size = 0;
// forward
checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_fwd_algo_));
checkCudnnErrors(cudnnGetConvolutionForwardWorkspaceSize(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
conv_fwd_algo_, &temp_size));
workspace_size = std::max(workspace_size, temp_size);
// todo trainable check
// bwd - filter
checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv_bwd_filter_algo_));
checkCudnnErrors(cudnnGetConvolutionBackwardFilterWorkspaceSize(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
conv_bwd_filter_algo_, &temp_size));
workspace_size = std::max(workspace_size, temp_size);
// bwd - data
checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &conv_bwd_data_algo_));
checkCudnnErrors(cudnnGetConvolutionBackwardDataWorkspaceSize(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
conv_bwd_data_algo_, &temp_size));
workspace_size = std::max(workspace_size, temp_size);
if (workspace_size > 0)
{
if (d_workspace != nullptr)
checkCudaErrors(cudaFree(d_workspace));
checkCudaErrors(cudaMalloc((void**)&d_workspace, workspace_size));
}
}
Blob<float> *Conv2D::forward(Blob<float> *input)
{
// initialize weights and bias
if (weights_ == nullptr)
{
// initialize containers handles
checkCudnnErrors(cudnnSetFilter4dDescriptor(filter_desc_,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
out_channels_, input->c(), kernel_size_, kernel_size_));
weights_ = new Blob<float>(out_channels_, input->c(), kernel_size_, kernel_size_);
biases_ = new Blob<float>(1, out_channels_); // bias size
bias_desc_ = biases_->tensor();
}
// initilaize input and output
if (input_ == nullptr || batch_size_ != input->n())
{
// initialize input
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
// initilaize output
checkCudnnErrors(cudnnGetConvolution2dForwardOutputDim(
conv_desc_, input_desc_, filter_desc_,
&output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));
if (output_ == nullptr)
output_ = new Blob<float>(output_size_);
else
output_->reset(output_size_);
output_desc_ = output_->tensor();
// initialize workspace for cudnn
set_workspace();
// initialize weights
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* do nothing */
}
}
checkCudnnErrors(cudnnConvolutionForward(cuda_->cudnn(),
&cuda_->one, input_desc_, input_->cuda(),
filter_desc_, weights_->cuda(), conv_desc_, conv_fwd_algo_, d_workspace, workspace_size,
&cuda_->zero, output_desc_, output_->cuda()));
checkCudnnErrors(cudnnAddTensor(cuda_->cudnn(),
&cuda_->one, bias_desc_, biases_->cuda(),
&cuda_->one, output_desc_, output_->cuda()));
#if (DEBUG_CONV & 0x01)
input_->print( name_ + "::input", true, input_->n(), 28);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif
return output_;
}
// Convolution backward pass: computes bias, filter and (optionally) data
// gradients using the algorithms/workspace selected in set_workspace().
Blob<float> *Conv2D::backward(Blob<float> *grad_output)
{
    // Allocate parameter-gradient blobs only once (their shapes do not depend
    // on the batch size). The original re-`new`ed them on every batch-size
    // change, leaking the previously allocated blobs.
    if (grad_weights_ == nullptr)
    {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(1, biases_->c());
    }
    // (re)allocate the data-gradient blob on first use or batch-size change
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // gradients of biases
    checkCudnnErrors(
        cudnnConvolutionBackwardBias(cuda_->cudnn(),
            &cuda_->one,
            output_desc_, grad_output->cuda(),
            &cuda_->zero,
            bias_desc_, grad_biases_->cuda()));
    // gradients of weights
    checkCudnnErrors(
        cudnnConvolutionBackwardFilter(cuda_->cudnn(),
            &cuda_->one,
            input_desc_, input_->cuda(),
            output_desc_, grad_output_->cuda(),
            conv_desc_, conv_bwd_filter_algo_, d_workspace, workspace_size,
            &cuda_->zero,
            filter_desc_, grad_weights_->cuda()));
    // gradients of input data (skipped when gradient_stop_ is set)
    if (!gradient_stop_)
        checkCudnnErrors(
            cudnnConvolutionBackwardData(cuda_->cudnn(),
                &cuda_->one,
                filter_desc_, weights_->cuda(),
                output_desc_, grad_output->cuda(),
                conv_desc_, conv_bwd_data_algo_, d_workspace, workspace_size,
                &cuda_->zero,
                input_desc_, grad_input_->cuda()));
#if (DEBUG_CONV & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print(name_ + "gbias", true);
    grad_weights_->print(name_+ "gfilter", true);
    if (!gradient_stop_)
        grad_input_->print(name_+"gdata", true);
#endif
#if (DEBUG_CONV & 0x04)
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print( name_ + "::gbias", true);
#endif
    return grad_input_;
}
/****************************************************************
* Layer definition *
****************************************************************/
Pooling::Pooling(std::string name,
int kernel_size,
int padding,
int stride,
cudnnPoolingMode_t mode):
kernel_size_(kernel_size),
padding_(padding),
stride_(stride),
mode_(mode)
{
name_ = name;
cudnnCreatePoolingDescriptor(&pool_desc_);
cudnnSetPooling2dDescriptor(pool_desc_, mode_, CUDNN_PROPAGATE_NAN,
kernel_size_, kernel_size_, padding_, padding_, stride_, stride_);
}
Pooling::~Pooling()
{
cudnnDestroyPoolingDescriptor(pool_desc_);
}
// Forward pass: applies cuDNN 2-D pooling to the input blob and returns
// the layer-owned output blob.
Blob<float> *Pooling::forward(Blob<float> *input)
{
    // (Re)bind resources on the first call or when the batch size changes.
    // NOTE(review): if a *different* input blob with the same batch size is
    // passed later, input_/input_desc_ keep pointing at the old blob —
    // confirm callers always reuse the same blob instance per layer.
    if (input_ == nullptr || batch_size_ != input->n())
    {
        input_ = input;
        // resource initialize
        input_desc_ = input_->tensor();
        batch_size_ = input->n();

        // Ask cuDNN for the pooled output dimensions (n, c, h, w).
        // Error-checked for consistency with backward() — the original
        // ignored the return status here and on the forward call below.
        checkCudnnErrors(
            cudnnGetPooling2dForwardOutputDim(pool_desc_, input_desc_,
                &output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));

        if (output_ == nullptr)
            output_ = new Blob<float>(output_size_);
        else
            output_->reset(output_size_);
        output_desc_ = output_->tensor();
    }

    checkCudnnErrors(
        cudnnPoolingForward(cuda_->cudnn(), pool_desc_,
            &cuda_->one, input_desc_, input_->cuda(),
            &cuda_->zero, output_desc_, output_->cuda()));

    return output_;
}
// Backward pass: computes the gradient w.r.t. the pooling input from the
// incoming output gradient. Returns the layer-owned grad_input_ blob.
Blob<float> *Pooling::backward(Blob<float> *grad_output)
{
// (Re)allocate the input-gradient blob on the first call or when the
// batch size changed since the previous pass. grad_input_ mirrors the
// shape of the cached forward input.
if (grad_input_ == nullptr || batch_size_ != grad_output->n())
{
grad_output_ = grad_output;
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
// cudnnPoolingBackward needs the forward pass's output and input tensors
// (to locate max/avg contributions) in addition to the incoming gradient.
// Relies on input_/output_ still holding the forward-pass data.
checkCudnnErrors(
cudnnPoolingBackward(cuda_->cudnn(), pool_desc_,
&cuda_->one,
output_desc_, output_->cuda(),
output_desc_, grad_output->cuda(),
input_desc_, input_->cuda(),
&cuda_->zero,
input_desc_, grad_input_->cuda()));
return grad_input_;
}
|
c718c8c428de606787fc4d8cd94e5fae78094da6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define BLOCK_DIM 16
// Row Size & Column Size
const int N = 2;
const int SIZE = N * N;
// C = A x B for square n-by-n matrices stored row-major.
// One thread computes one output element; threads that fall outside the
// matrix bounds exit without touching memory.
__global__ void matrixMult(int *c, int *a, int *b, int n)
{
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (row >= n || col >= n)
		return;
	// Dot product of row 'row' of a with column 'col' of b.
	int acc = 0;
	for (int k = 0; k < n; ++k)
		acc += a[row * n + k] * b[k * n + col];
	c[row * n + col] = acc;
}
// Multiplies two fixed 2x2 integer matrices on the GPU and prints the
// flattened result. Returns 0 on success, 1 on host allocation failure.
int main()
{
	int a[N][N] = { { 1, 2 },
	                { 2, 1 } };
	int b[N][N] = { { 1, 2 },
	                { 2, 1 } };
	int *c;
	int *dev_a, *dev_b, *dev_c;

	// (The original seeded rand() here but never drew from it; removed.)
	c = (int *)malloc(SIZE * sizeof(int));
	if (c == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}

	hipMalloc((void **)&dev_a, SIZE * sizeof(int));
	hipMalloc((void **)&dev_b, SIZE * sizeof(int));
	hipMalloc((void **)&dev_c, SIZE * sizeof(int));

	hipMemcpy(dev_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(dev_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice);

	dim3 dimBlock(N, N);
	// Integer ceil-divide. The previous (int)ceil(N / dimBlock.x) rounded
	// nothing: N / dimBlock.x is integer division, so any remainder was
	// silently dropped and tail elements would have gone uncomputed.
	dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
	             (N + dimBlock.y - 1) / dimBlock.y);

	hipLaunchKernelGGL(matrixMult, dimGrid, dimBlock, 0, 0, dev_c, dev_a, dev_b, N);
	// Surface launch-configuration errors instead of printing garbage.
	if (hipGetLastError() != hipSuccess)
		fprintf(stderr, "kernel launch failed\n");

	hipMemcpy(c, dev_c, SIZE * sizeof(int), hipMemcpyDeviceToHost);

	for (int i = 0; i < SIZE; ++i)
		printf("c[%d] = %d\n", i, c[i]);

	// Free the Host array memory
	free(c);
	// Free the Device array memory
	hipFree(dev_a);
	hipFree(dev_b);
	hipFree(dev_c);
	return 0;
}
| c718c8c428de606787fc4d8cd94e5fae78094da6.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define BLOCK_DIM 16
// Row Size & Column Size
const int N = 2;
const int SIZE = N * N;
// C = A x B for square n-by-n matrices stored row-major.
// One thread per output element; out-of-range threads exit early.
__global__ void matrixMult(int *c, int *a, int *b, int n)
{
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (row >= n || col >= n)
		return;
	// Dot product of row 'row' of a with column 'col' of b.
	int acc = 0;
	for (int k = 0; k < n; ++k)
		acc += a[row * n + k] * b[k * n + col];
	c[row * n + col] = acc;
}
// Multiplies two fixed 2x2 integer matrices on the GPU and prints the
// flattened result. Returns 0 on success, 1 on host allocation failure.
int main()
{
	int a[N][N] = { { 1, 2 },
	                { 2, 1 } };
	int b[N][N] = { { 1, 2 },
	                { 2, 1 } };
	int *c;
	int *dev_a, *dev_b, *dev_c;

	// (The original seeded rand() here but never drew from it; removed.)
	c = (int *)malloc(SIZE * sizeof(int));
	if (c == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}

	cudaMalloc((void **)&dev_a, SIZE * sizeof(int));
	cudaMalloc((void **)&dev_b, SIZE * sizeof(int));
	cudaMalloc((void **)&dev_c, SIZE * sizeof(int));

	cudaMemcpy(dev_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);

	dim3 dimBlock(N, N);
	// Integer ceil-divide. The previous (int)ceil(N / dimBlock.x) rounded
	// nothing: N / dimBlock.x is integer division, so any remainder was
	// silently dropped and tail elements would have gone uncomputed.
	dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
	             (N + dimBlock.y - 1) / dimBlock.y);

	matrixMult<<<dimGrid, dimBlock>>>(dev_c, dev_a, dev_b, N);
	// Surface launch-configuration errors instead of printing garbage.
	if (cudaGetLastError() != cudaSuccess)
		fprintf(stderr, "kernel launch failed\n");

	cudaMemcpy(c, dev_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);

	for (int i = 0; i < SIZE; ++i)
		printf("c[%d] = %d\n", i, c[i]);

	// Free the Host array memory
	free(c);
	// Free the Device array memory
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	return 0;
}
|
c9b6a78ced1f3431d24008082e8cc25d33ef2d66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Converts one pixel of planar YUV into planar RGB. The source layout is
// a full-resolution Y plane followed by quarter-size U and V planes
// (U = src + w*h, V = U + w*h/4, i.e. 4:2:0 subsampling as the /4 offsets
// and half-resolution chroma index show).
// NOTE(review): assumes width/height describe an even-sized frame — confirm
// with callers.
template<typename srcT, typename dstT>
__global__
void yuv2rgb_kernel(srcT *src, dstT *dst, int width, int height)
{
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;

    // Full-resolution luma index; chroma is sampled at half resolution.
    int lumaIdx   = py * width + px;
    int chromaIdx = (py / 2) * (width / 2) + px / 2;

    srcT *yPlane = src;
    srcT *uPlane = yPlane + width * height;
    srcT *vPlane = uPlane + width * height / 4;

    dstT *rPlane = dst;
    dstT *gPlane = rPlane + width * height;
    dstT *bPlane = gPlane + width * height;

    // Standard YUV->RGB conversion with chroma centered at 128.
    rPlane[lumaIdx] = yPlane[lumaIdx] + 1.370705 * (vPlane[chromaIdx] - 128.0);
    gPlane[lumaIdx] = yPlane[lumaIdx] - 0.698001 * (vPlane[chromaIdx] - 128.0) - 0.337633 * (uPlane[chromaIdx] - 128.0);
    bPlane[lumaIdx] = yPlane[lumaIdx] + 1.732446 * (uPlane[chromaIdx] - 128.0);
}
// Launches the planar YUV->RGB conversion over a width x height frame and
// waits for completion. Returns 0 on success, -1 if the kernel launch or
// execution failed (the original unconditionally returned 0, hiding every
// error).
template<typename srcT, typename dstT>
int yuv2rgb(srcT *src, dstT *dst, int width, int height)
{
    dim3 blockSize(32, 12);
    // Ceil-divide so partial edge tiles are covered.
    dim3 nBlocks((width+blockSize.x-1)/blockSize.x,
                 (height+blockSize.y-1)/blockSize.y);
    hipLaunchKernelGGL(( yuv2rgb_kernel), dim3(nBlocks), dim3(blockSize), 0, 0, src, dst, width, height);
    // Launch-configuration errors surface immediately ...
    if (hipGetLastError() != hipSuccess)
        return -1;
    // ... execution errors surface at the synchronize.
    if (hipDeviceSynchronize() != hipSuccess)
        return -1;
    return 0;
}
// Explicit instantiation: 8-bit YUV input to float RGB output.
template
int yuv2rgb(unsigned char *src, float *dst, int width, int height);
| c9b6a78ced1f3431d24008082e8cc25d33ef2d66.cu | #include <stdio.h>
// Converts one pixel of planar YUV into planar RGB. The source layout is
// a full-resolution Y plane followed by quarter-size U and V planes
// (U = src + w*h, V = U + w*h/4, i.e. 4:2:0 subsampling as the /4 offsets
// and half-resolution chroma index show).
// NOTE(review): assumes width/height describe an even-sized frame — confirm
// with callers.
template<typename srcT, typename dstT>
__global__
void yuv2rgb_kernel(srcT *src, dstT *dst, int width, int height)
{
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;

    // Full-resolution luma index; chroma is sampled at half resolution.
    int lumaIdx   = py * width + px;
    int chromaIdx = (py / 2) * (width / 2) + px / 2;

    srcT *yPlane = src;
    srcT *uPlane = yPlane + width * height;
    srcT *vPlane = uPlane + width * height / 4;

    dstT *rPlane = dst;
    dstT *gPlane = rPlane + width * height;
    dstT *bPlane = gPlane + width * height;

    // Standard YUV->RGB conversion with chroma centered at 128.
    rPlane[lumaIdx] = yPlane[lumaIdx] + 1.370705 * (vPlane[chromaIdx] - 128.0);
    gPlane[lumaIdx] = yPlane[lumaIdx] - 0.698001 * (vPlane[chromaIdx] - 128.0) - 0.337633 * (uPlane[chromaIdx] - 128.0);
    bPlane[lumaIdx] = yPlane[lumaIdx] + 1.732446 * (uPlane[chromaIdx] - 128.0);
}
// Launches the planar YUV->RGB conversion over a width x height frame and
// waits for completion. Returns 0 on success, -1 if the kernel launch or
// execution failed (the original unconditionally returned 0, hiding every
// error).
template<typename srcT, typename dstT>
int yuv2rgb(srcT *src, dstT *dst, int width, int height)
{
    dim3 blockSize(32, 12);
    // Ceil-divide so partial edge tiles are covered.
    dim3 nBlocks((width+blockSize.x-1)/blockSize.x,
                 (height+blockSize.y-1)/blockSize.y);
    yuv2rgb_kernel<<<nBlocks, blockSize>>>(src, dst, width, height);
    // Launch-configuration errors surface immediately ...
    if (cudaGetLastError() != cudaSuccess)
        return -1;
    // ... execution errors surface at the synchronize.
    if (cudaDeviceSynchronize() != cudaSuccess)
        return -1;
    return 0;
}
// Explicit instantiation: 8-bit YUV input to float RGB output.
template
int yuv2rgb(unsigned char *src, float *dst, int width, int height);
|
ac20fe9a9f1cec06b0bb36af60ff71a510333a73.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <thrust/scan.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include <cutil_math.h>
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
//#define DEPTHOFFIELD
//#define SUPERSAMPLING
float Lensdistance=5;
int NumberOfSampling=5;
// Aborts the process if the most recent runtime call left a sticky error,
// printing the caller-supplied context string plus the runtime's message.
void checkCUDAError(const char *msg) {
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
  }
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
// Returns a pseudo-random vec3 with components in [-1, 1], seeded from the
// pixel's flat index and the iteration time so each pixel/iteration pair
// gets its own deterministic stream.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
  int pixelIndex = x + (y * resolution.x);
  thrust::default_random_engine rng(hash(pixelIndex*time));
  thrust::uniform_real_distribution<float> dist(-1,1);
  float rx = (float) dist(rng);
  float ry = (float) dist(rng);
  float rz = (float) dist(rng);
  return glm::vec3(rx, ry, rz);
}
// Generates the primary camera ray for each pixel with 3x3 stratified
// jitter (one of nine sub-pixel cells chosen at random, then jittered
// inside it) and writes it into rayArray at the pixel's flat index.
__global__ void calculateRaycastFromCameraKernel(cameraData cam, float time,ray* rayArray){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * cam.resolution.x);

  thrust::default_random_engine rng(hash((int(index)*time)));
  thrust::uniform_real_distribution<float> u01(0,3);
  thrust::uniform_real_distribution<float> u02(-1,1);

  float dt  = 1.0f/6.0f;   // half-width of one stratum cell
  float dt2 = 1.0f/3.0f;   // stratum pitch (cell center offsets are -dt2/0/+dt2)
  // BUG FIX: the original chain was missing an 'else' before the
  // rr < 2.33f test, so every sample with rr < 2.33f was overwritten with
  // the bottom-left cell, collapsing 7 of the 9 strata. The chain is now a
  // proper else-if ladder; noisex/noisey are also default-initialized.
  float noisex = 0.0f, noisey = 0.0f;
  float russianRoulette = (float)u01(rng);
  if(russianRoulette<0.33f){
    noisex=-dt2+((float)u02(rng))*dt;
    noisey=dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<0.67f){
    noisex=((float)u02(rng))*dt;
    noisey=dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<1.0f){
    noisex=dt2+((float)u02(rng))*dt;
    noisey=dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<1.33f){
    noisex=-dt2+((float)u02(rng))*dt;
    noisey=((float)u02(rng))*dt;
  }else if(russianRoulette<1.67f){
    noisex=((float)u02(rng))*dt;
    noisey=((float)u02(rng))*dt;
  }else if(russianRoulette<2.0f){
    noisex=dt2+((float)u02(rng))*dt;
    noisey=((float)u02(rng))*dt;
  }else if(russianRoulette<2.33f){
    noisex=-dt2+((float)u02(rng))*dt;
    noisey=-dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<2.67f){
    noisex=((float)u02(rng))*dt;
    noisey=-dt2+((float)u02(rng))*dt;
  }else{
    noisex=dt2+((float)u02(rng))*dt;
    noisey=-dt2+((float)u02(rng))*dt;
  }

  // Strict '<': the previous '<=' allowed x == resolution.x / y ==
  // resolution.y, writing one element past the end of a rayArray row.
  if (x < cam.resolution.x && y < cam.resolution.y) {
    float y1=cam.resolution.y-y;
    float x1=cam.resolution.x-x;
    // Build the image-plane basis from the camera frame.
    glm::vec3 A = glm::cross(cam.view,cam.up);                                    // camera-right axis
    float ALength=glm::length(A);
    glm::vec3 B = glm::cross(A,cam.view);                                         // orthogonalized up axis
    float BLength=glm::length(B);
    glm::vec3 M = cam.position + cam.view;                                        // image-plane center
    float viewLength=glm::length(cam.view);
    glm::vec3 H = A*viewLength * (float)tan(cam.fov.x*(PI/180.0f))/ ALength;      // half-extent along x
    glm::vec3 V = B*viewLength *(float)tan(cam.fov.y*(PI/180.0f)) / BLength;      // half-extent along y
    // Map the jittered pixel coordinate to a point on the image plane.
    glm::vec3 P=M+(2*(float)(x1+noisex)/(float)(cam.resolution.x-1)-1)*H+(2*(float)(y1+noisey)/(float)(cam.resolution.y-1)-1)*V;
    glm::vec3 D=P-cam.position;
    ray r;
    r.origin = cam.position;
    r.direction=glm::normalize(D);
    rayArray[index]=r;
  }
  return;
}
// Generates one jittered primary camera ray per pixel for supersampling
// round 'sampleround'. The jitter picks one of four 2x2 sub-pixel strata
// at random and jitters within it.
__global__ void calculateRaycastFromCameraKernelSuperSampling(cameraData cam, float time,int sampleround,ray* rayArray){
  ray r;
  r.origin = cam.position;
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * cam.resolution.x);

  // Seed varies with pixel, iteration time and supersampling round so each
  // round produces a different jitter pattern.
  thrust::default_random_engine rng(hash(sampleround*index*time));
  thrust::uniform_real_distribution<float> u01(0,1);
  thrust::uniform_real_distribution<float> u02(-1,1);

  // 2x2 stratified jitter: quadrant centers at (+-0.25, +-0.25), jitter
  // radius 0.125 inside the chosen quadrant.
  float noisex, noisey;
  float russianRoulette = (float)u01(rng);
  if(russianRoulette<0.25){
    noisex=0.25+((float)u02(rng))*0.125f;
    noisey=0.25+((float)u02(rng))*0.125f;
  }else if(russianRoulette<0.5){
    noisex=-0.25+((float)u02(rng))*0.125f;
    noisey=0.25+((float)u02(rng))*0.125f;
  }else if(russianRoulette<0.75){
    noisex=0.25+((float)u02(rng))*0.125f;
    noisey=-0.25+((float)u02(rng))*0.125f;
  }else{
    noisex=-0.25+((float)u02(rng))*0.125f;
    noisey=-0.25+((float)u02(rng))*0.125f;
  }

  // Strict '<': the previous '<=' allowed x == resolution.x / y ==
  // resolution.y, writing one element past the end of a rayArray row.
  if (x < cam.resolution.x && y < cam.resolution.y) {
    float y1=cam.resolution.y-y;
    float x1=cam.resolution.x-x;
    // Build the image-plane basis from the camera frame.
    glm::vec3 A = glm::cross(cam.view,cam.up);                                    // camera-right axis
    float ALength=glm::length(A);
    glm::vec3 B = glm::cross(A,cam.view);                                         // orthogonalized up axis
    float BLength=glm::length(B);
    glm::vec3 M = cam.position + cam.view;                                        // image-plane center
    float viewLength=glm::length(cam.view);
    glm::vec3 H = A*viewLength * (float)tan(cam.fov.x*(PI/180.0f))/ ALength;      // half-extent along x
    glm::vec3 V = B*viewLength *(float)tan(cam.fov.y*(PI/180.0f)) / BLength;      // half-extent along y
    // Map the jittered pixel coordinate to a point on the image plane.
    glm::vec3 P=M+(2*(float)(x1+noisex)/(float)(cam.resolution.x-1)-1)*H+(2*(float)(y1+noisey)/(float)(cam.resolution.y-1)-1)*V;
    glm::vec3 D=P-cam.position;
    r.direction=glm::normalize(D);
    rayArray[index]=r;
  }
  return;
}
// Kernel that blacks out a given image buffer (one thread per pixel).
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<': the previous '<=' let x == resolution.x (or y ==
  // resolution.y) write one element past the end of a row.
  if (x < resolution.x && y < resolution.y) {
    image[index] = glm::vec3(0,0,0);
  }
}
// Kernel that writes the image to the OpenGL PBO directly: scales each
// float channel by 255 and clamps overbright values before packing.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<': the previous '<=' let edge threads write one element past
  // the end of a PBO row.
  if (x < resolution.x && y < resolution.y) {
    glm::vec3 color;
    color.x = image[index].x*255.0;
    color.y = image[index].y*255.0;
    color.z = image[index].z*255.0;
    // Clamp overbright channels to the 8-bit range.
    if(color.x>255){
      color.x = 255;
    }
    if(color.y>255){
      color.y = 255;
    }
    if(color.z>255){
      color.z = 255;
    }
    // Each thread writes one pixel location in the texture (textel)
    PBOpos[index].w = 0;
    PBOpos[index].x = color.x;
    PBOpos[index].y = color.y;
    PBOpos[index].z = color.z;
  }
}
// Traces the primary (camera) ray for each pixel: finds the nearest scene
// intersection, shades it (or records emitted light), writes the resulting
// color into 'colors', and records the bounce ray for the next pass in
// 'rayList' (dirty == 1 means the path continues).
__global__ void raytracefromCameraKernel(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms,material* materials,ray* cudaFirstRays,rayData* rayList){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if((x>=resolution.x )||( y>=resolution.y))return;
if(index>=(resolution.x*resolution.y))return;
// Primary ray was precomputed by calculateRaycastFromCameraKernel.
ray r=cudaFirstRays[index];
glm::vec3 finalColor=glm::vec3(0.0f,0.0f,0.0f);
//find first intersection
// distance == -1 is the "no hit yet" sentinel; the abs(distance+1.0f)
// test below detects it.
float distance=-1.0f;
glm::vec3 interestPoint=glm::vec3(0,0,0);
glm::vec3 normal=glm::vec3(0,0,0);
int geoID=-1;
for(int i=0; i<numberOfGeoms; i++){
float tempdistance=-1.0;
glm::vec3 tempInterestPoint=glm::vec3(0,0,0);
glm::vec3 tempNormal=glm::vec3(0,0,0);
if(geoms[i].type==SPHERE){
tempdistance=sphereIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
}else if(geoms[i].type==CUBE){
tempdistance=boxIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
}
// Accept the first valid hit (> 0.001 epsilon to avoid self-hits), then
// keep whichever subsequent hit is nearer.
if((abs(distance+1.0f)<1e-3)&&(tempdistance>0.001f)){
distance=tempdistance;
normal=tempNormal;
interestPoint=tempInterestPoint;
geoID=i;
}else if((tempdistance>0.001f)&&(tempdistance<distance)){
distance=tempdistance;
normal=tempNormal;
interestPoint=tempInterestPoint;
geoID=i;
}
}
rayData nextray;
//can not find intersection ,ray ends
if(geoID==-1){
finalColor=glm::vec3(0,0,0);
nextray.dirty=0;
}else {
material m=materials[geoms[geoID].materialid];
if(m.emittance>0){ ///light source
finalColor=m.emittance*m.color;
nextray.dirty=0;
}else{
// Non-emissive surface: evaluate the BSDF, which fills finalColor and
// produces the scattered ray in 'nextray'; path continues (dirty = 1).
glm::vec3 emittedColor=glm::vec3(0.0f,0.0f,0.0f);
glm::vec3 unabsorbedColor=glm::vec3(0.0f,0.0f,0.0f);
AbsorptionAndScatteringProperties currentAbsorptionAndScattering;
calculateBSDF(cam.position,r,interestPoint,normal,emittedColor, currentAbsorptionAndScattering,
finalColor, unabsorbedColor, m, (float)index*time, nextray);
nextray.dirty=1;
}
}
// Record this bounce's color and remember the pixel the ray belongs to,
// since compaction will move the ray around in the list.
colors[index] =finalColor;
nextray.x=x;
nextray.y=y;
rayList[index]=nextray;
}
// Traces one bounce for each surviving ray in the compacted rayList (of
// length maxnum, laid out as a WIDTH x WIDTH grid of threads). The bounce
// color is multiplied into the pixel's accumulated color; terminated rays
// are marked dirty == 0 for the next compaction pass.
__global__ void iterationRaytrace(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms,material* materials,rayData* rayList,int maxnum,int WIDTH,int currentDepth){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int threadIndex=x+WIDTH*y;
if((x>=WIDTH )||( y>=WIDTH))return;
if(threadIndex>=maxnum) return;
// Each entry carries the pixel coordinates it originated from, so the
// color write goes to the right place even after compaction shuffles.
rayData rd=rayList[threadIndex];
int colorIndex = rd.x + (rd.y * resolution.x);
ray r=rd.newray;
glm::vec3 finalColor=glm::vec3(0.0f,0.0f,0.0f);
//find first intersection
// distance == -1 is the "no hit yet" sentinel; the abs(distance+1.0f)
// test below detects it.
float distance=-1.0f;
glm::vec3 interestPoint=glm::vec3(0,0,0);
glm::vec3 normal=glm::vec3(0,0,0);
int geoID=-1;
for(int i=0; i<numberOfGeoms; i++){
float tempdistance=-1.0;
glm::vec3 tempInterestPoint=glm::vec3(0,0,0);
glm::vec3 tempNormal=glm::vec3(0,0,0);
if(geoms[i].type==SPHERE){
tempdistance=sphereIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
}else if(geoms[i].type==CUBE){
tempdistance=boxIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
}
// Accept the first valid hit (> 0.001 epsilon to avoid self-hits), then
// keep whichever subsequent hit is nearer.
if((abs(distance+1.0f)<1e-3)&&(tempdistance>0.001f)){
distance=tempdistance;
normal=tempNormal;
interestPoint=tempInterestPoint;
geoID=i;
}else if((tempdistance>0.001f)&&(tempdistance<distance)){
distance=tempdistance;
normal=tempNormal;
interestPoint=tempInterestPoint;
geoID=i;
}
}
rayData nextray;
//can not find intersection ,ray ends
if(geoID==-1){
finalColor=glm::vec3(0,0,0);
nextray.dirty=0;
}else {
material m=materials[geoms[geoID].materialid];
if(m.emittance>0){ ///light source
finalColor=m.emittance*m.color;
nextray.dirty=0;
}else{
// Non-emissive surface: evaluate the BSDF, which fills finalColor and
// produces the scattered ray in 'nextray'; path continues (dirty = 1).
glm::vec3 emittedColor=glm::vec3(0.0f,0.0f,0.0f);
glm::vec3 unabsorbedColor=glm::vec3(0.0f,0.0f,0.0f);
AbsorptionAndScatteringProperties currentAbsorptionAndScattering;
calculateBSDF(cam.position,r,interestPoint,normal,emittedColor, currentAbsorptionAndScattering,
finalColor, unabsorbedColor, m, (float)threadIndex*time*currentDepth, nextray);
nextray.dirty=1;
}
}
// Multiply this bounce's throughput into the pixel's accumulated color.
glm::vec3 precolor=colors[colorIndex];
colors[colorIndex] = glm::vec3(precolor.x*finalColor.x,precolor.y*finalColor.y,precolor.z*finalColor.z);
nextray.x=rd.x;
nextray.y=rd.y;
rayList[threadIndex]=nextray;
}
// Blends the current iteration's image into the running average:
// current' = current/time + previous*(time-1)/time.
__global__ void mergeImage(glm::vec2 resolution,glm::vec3* previousColors,glm::vec3* currentColors,float time){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<': the previous '<=' let edge threads read/write one element
  // past the end of a row.
  if (x < resolution.x && y < resolution.y) {
    glm::vec3 currentColor=currentColors[index];
    glm::vec3 previousColor=previousColors[index];
    currentColors[index]=currentColor/time+previousColor*(time-1.0f)/time;
  }
}
// One Hillis-Steele inclusive-scan step over 'boundary' elements:
// element i accumulates the element 'flag' positions to its left; elements
// with index < flag are copied through unchanged. Reads previewArray,
// writes nextarray (double-buffered by the host-side scan()).
__global__ void scanRound( glm::vec2 dim,int* previewArray,int* nextarray, int boundary,int flag){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * dim.x);
  if (x >= dim.x || y >= dim.y)
    return;
  if (index >= boundary)
    return;
  nextarray[index] = (index >= flag)
    ? previewArray[index] + previewArray[index - flag]
    : previewArray[index];
}
// Copies the scan double-buffer back: previewArray[i] = nextarray[i] for
// the first 'boundary' elements.
__global__ void copyBack( glm::vec2 dim,int* previewArray,int* nextarray, int boundary){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * dim.x);
  if (x >= dim.x || y >= dim.y || index >= boundary)
    return;
  previewArray[index] = nextarray[index];
}
// Extracts each ray's survival flag (dirty: 0 or 1) into indexlist, the
// input of the scan used for stream compaction.
__global__ void getValue(glm::vec2 dim, rayData* raydatalist,int* indexlist,int boundary){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * dim.x);
  if (x >= dim.x || y >= dim.y || index >= boundary)
    return;
  indexlist[index] = raydatalist[index].dirty;
}
// Converts the inclusive scan result to an exclusive one by subtracting
// each element's own contribution (its dirty flag).
__global__ void inclusiveTOexclusive(glm::vec2 dim, rayData* raydatalist,int* indexlist,int boundary){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * dim.x);
  if (x >= dim.x || y >= dim.y || index >= boundary)
    return;
  indexlist[index] -= raydatalist[index].dirty;
}
// Host-side driver for an exclusive scan over the rays' dirty flags:
// extracts the flags, runs log2(boundary) Hillis-Steele steps (double
// buffered via tempArray), then converts the inclusive result to an
// exclusive one. The result in indexArray gives each surviving ray its
// destination slot for stream compaction.
void scan(dim3 DimBlock, dim3 DimThread, glm::vec2 dim,rayData* rayList,int* indexArray,int* tempArray, int boundary){
hipLaunchKernelGGL(( getValue), dim3(DimBlock),dim3(DimThread), 0, 0, dim,rayList,indexArray,boundary);
checkCUDAError("Kernel failed! scan-1");
// Each step d adds the element 2^(d-1) positions to the left.
for(int d=1;d<=ceil((float)log2((float)boundary));d++){
int flag=(int)pow(2.0f,d-1);
//std::cout<<"d="<<d<<std::endl;
//std::cout<<"flag="<<flag<<std::endl;
hipLaunchKernelGGL(( scanRound), dim3(DimBlock),dim3(DimThread), 0, 0, dim,indexArray,tempArray,boundary,flag);
checkCUDAError("Kernel failed! scan-2-1");
hipLaunchKernelGGL(( copyBack), dim3(DimBlock),dim3(DimThread), 0, 0, dim,indexArray,tempArray,boundary);
// hipMemcpy( indexArray, tempArray, (boundary)*sizeof(int), hipMemcpyDeviceToDevice);
checkCUDAError("Kernel failed! scan-2-2");
}
hipLaunchKernelGGL(( inclusiveTOexclusive), dim3(DimBlock),dim3(DimThread), 0, 0, dim,rayList,indexArray,boundary);
checkCUDAError("Kernel failed! scan-3");
}
// Scatter phase of stream compaction: each surviving ray (dirty != 0) is
// copied to its exclusive-scan slot in the packed output array. Dead rays
// are simply dropped.
__global__ void stringcompaction(glm::vec2 dim,rayData* rayList,rayData *newdataArray,int* indexArray,int maxBoundary){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * dim.x);
  if (x >= dim.x || y >= dim.y)
    return;
  if (index >= maxBoundary)
    return;
  if (rayList[index].dirty)
    newdataArray[indexArray[index]] = rayList[index];
}
// Wrapper for the __global__ calls: runs one full path-tracing iteration.
// Uploads scene/materials, generates primary rays, traces bounce-by-bounce
// with stream compaction until all rays die or traceDepth is reached, then
// merges the result into the running average and writes it to the GL PBO.
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, ray* firstRays){

  int traceDepth =100; //determines how many bounces the raytracer traces

  // set up crucial magic
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));

  //send image to GPU
  glm::vec3* cudaimage = NULL;
  hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);

  // Package geometry for the requested animation frame and send to GPU.
  staticGeom* geomList = new staticGeom[numberOfGeoms];
  for(int i=0; i<numberOfGeoms; i++){
    staticGeom newStaticGeom;
    newStaticGeom.type = geoms[i].type;
    newStaticGeom.materialid = geoms[i].materialid;
    newStaticGeom.translation = geoms[i].translations[frame];
    newStaticGeom.rotation = geoms[i].rotations[frame];
    newStaticGeom.scale = geoms[i].scales[frame];
    newStaticGeom.transform = geoms[i].transforms[frame];
    newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
    geomList[i] = newStaticGeom;
  }
  staticGeom* cudageoms = NULL;
  hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
  hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);

  //package materials
  material* materialList=new material[numberOfMaterials];
  for(int i=0;i<numberOfMaterials;i++){
    material newmaterial;
    newmaterial.color=materials[i].color;
    newmaterial.specularExponent=materials[i].specularExponent;
    newmaterial.specularColor=materials[i].specularColor;
    newmaterial.hasReflective=materials[i].hasReflective;
    newmaterial.hasRefractive=materials[i].hasRefractive;
    newmaterial.indexOfRefraction=materials[i].indexOfRefraction;
    newmaterial.hasScatter=materials[i].hasScatter;
    newmaterial.absorptionCoefficient=materials[i].absorptionCoefficient;
    newmaterial.reducedScatterCoefficient=materials[i].reducedScatterCoefficient;
    newmaterial.emittance=materials[i].emittance;
    materialList[i]=newmaterial;
  }
  material* cudamatrials=NULL;
  hipMalloc((void**)&cudamatrials, numberOfMaterials*sizeof(material));
  hipMemcpy( cudamatrials, materialList, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);

  //package camera
  cameraData cam;
  cam.resolution = renderCam->resolution;
  cam.position = renderCam->positions[frame];
  cam.view = renderCam->views[frame];
  cam.up = renderCam->ups[frame];
  cam.fov = renderCam->fov;

  // Scratch buffers: primary rays, the active ray list, scan output, the
  // scan double-buffer, and the compaction destination list.
  ray* cudaFirstRays = NULL;
  hipMalloc((void**)&cudaFirstRays, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(ray));
  rayData* cudaRayList = NULL;
  hipMalloc((void**)&cudaRayList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData));
  int* scanResultRayList = NULL;
  hipMalloc((void**)&scanResultRayList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(int));
  int *tempscanResultRayList=NULL;
  hipMalloc((void**)&tempscanResultRayList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(int));
  rayData* newRayDataList=NULL;
  hipMalloc((void**)&newRayDataList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData));

#ifdef SUPERSAMPLING
#ifdef DEPTHOFFIELD
  // Depth of field: jitter the camera position on the lens and re-aim at
  // the focal point.
  thrust::default_random_engine rng(hash((float)iterations));
  thrust::uniform_real_distribution<float> u01(-1,1);
  float xDist=(float)u01(rng);
  float yDist=(float)u01(rng);
  float length=abs(glm::dot(glm::vec3(glm::vec3(0,0,0)-cam.position),cam.view));
  glm::vec3 focalPos=cam.position+cam.view*length;
  cam.position+=100.0f*glm::vec3(xDist*Lensdistance*1/cam.resolution.x,yDist*Lensdistance*1/cam.resolution.y,0.0f);
  cam.view=glm::normalize(focalPos-cam.position);
#endif
  glm::vec3* cudaimageSuperSamping = NULL;
  hipMalloc((void**)&cudaimageSuperSamping, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  int SampleCount=1;
  while(SampleCount<=NumberOfSampling){
#endif

  // Generate the primary camera rays for this iteration (or this
  // supersampling round).
#ifndef SUPERSAMPLING
  hipLaunchKernelGGL(( calculateRaycastFromCameraKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cam,(float)iterations,cudaFirstRays);
#else
  hipLaunchKernelGGL(( calculateRaycastFromCameraKernelSuperSampling), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cam,(float)iterations,SampleCount,cudaFirstRays);
#endif

  // First bounce: one thread per pixel.
  hipLaunchKernelGGL(( raytracefromCameraKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudamatrials,cudaFirstRays,cudaRayList);
  checkCUDAError("Kernel failed! 2");

  // Exclusive scan over the dirty flags, then compact the survivors. The
  // last element's dirty flag is fetched so the exclusive-scan total can
  // be corrected into the survivor count.
  int maxBoundary=(int)renderCam->resolution.x*(int)renderCam->resolution.y;
  scan(fullBlocksPerGrid, threadsPerBlock,renderCam->resolution,cudaRayList,scanResultRayList,tempscanResultRayList,maxBoundary);
  checkCUDAError("Kernel failed! 3");
  int flag=-1;
  hipMemcpy(&flag,&(cudaRayList[maxBoundary-1].dirty),sizeof(int),hipMemcpyDeviceToHost);
  hipLaunchKernelGGL(( stringcompaction), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution,cudaRayList,newRayDataList,scanResultRayList,maxBoundary);
  hipMemcpy(cudaRayList,newRayDataList,(int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData), hipMemcpyDeviceToDevice);
  int numberOfRays=0;
  hipMemcpy(&numberOfRays,&(scanResultRayList[maxBoundary-1]),sizeof(int),hipMemcpyDeviceToHost);
  if(flag==1)
    numberOfRays++;
  checkCUDAError("Kernel failed! list-newlist");

  // Iterative bounces: relaunch over a square grid just large enough for
  // the surviving rays, compacting after each depth.
  int currDepth=1;
  while((numberOfRays>0)&&(currDepth<=traceDepth)){
    int length=ceil(sqrt((float)(numberOfRays)));
    dim3 newthreadsPerBlock(tileSize, tileSize);
    dim3 newfullBlocksPerGrid((int)ceil(float(length)/float(tileSize)), (int)ceil(float(length)/float(tileSize)));
    glm::vec2 dim=glm::vec2(length,length);
    hipLaunchKernelGGL(( iterationRaytrace), dim3(newfullBlocksPerGrid),dim3(newthreadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudamatrials,cudaRayList,numberOfRays,length,currDepth);
    checkCUDAError("Kernel failed! 4-0");
    hipMemcpy(&flag,&(cudaRayList[numberOfRays-1].dirty),sizeof(int),hipMemcpyDeviceToHost);
    checkCUDAError("Kernel failed! 4-1");
    scan(newfullBlocksPerGrid,newthreadsPerBlock,dim,cudaRayList,scanResultRayList,tempscanResultRayList,numberOfRays);
    checkCUDAError("Kernel failed! 4-2");
    hipLaunchKernelGGL(( stringcompaction), dim3(newfullBlocksPerGrid),dim3(newthreadsPerBlock), 0, 0, dim,cudaRayList,newRayDataList,scanResultRayList,numberOfRays);
    hipMemcpy(cudaRayList,newRayDataList,(int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData), hipMemcpyDeviceToDevice);
    hipMemcpy(&numberOfRays,&scanResultRayList[numberOfRays-1],sizeof(int),hipMemcpyDeviceToHost);
    if(flag==1)
      numberOfRays++;
    checkCUDAError("Kernel failed! 4-3");
    currDepth++;
  }

#ifdef SUPERSAMPLING
  // Fold this supersampling round into the per-iteration average.
  if(SampleCount!=1)
    hipLaunchKernelGGL(( mergeImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution,cudaimageSuperSamping,cudaimage,(float)SampleCount);
  checkCUDAError("Kernel failed!supersamping-1");
  hipMemcpy( cudaimageSuperSamping, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToDevice);
  SampleCount++;
  }
#endif

  // Blend this iteration into the running average of previous iterations.
  glm::vec3* previousImage = NULL;
  hipMalloc((void**)&previousImage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  hipMemcpy( previousImage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
  checkCUDAError("Kernel failed! 6");
  // BUG FIX: the original statement ended with a stray comma (comma
  // operator chaining into the next call) instead of a semicolon.
  hipLaunchKernelGGL(( mergeImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution,previousImage,cudaimage,(float)iterations);
  checkCUDAError("Kernel failed! 7");

  //retrieve image from GPU
  hipMemcpy(renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);
  hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage);

  //free up stuff, or else we'll leak memory like a madman
  hipFree( cudaimage );
  hipFree(cudaFirstRays);
  hipFree(cudaRayList);
  hipFree(scanResultRayList);
  hipFree( cudageoms );
  hipFree(cudamatrials);
  hipFree(previousImage);
  hipFree(tempscanResultRayList);
  hipFree(newRayDataList);
#ifdef SUPERSAMPLING
  hipFree(cudaimageSuperSamping);
#endif

  // BUG FIX: geomList/materialList were allocated with new[]; plain
  // 'delete' (as before) is undefined behavior — use delete[].
  delete[] geomList;
  delete[] materialList;

  // make certain the kernel has completed
  hipDeviceSynchronize();
  checkCUDAError("Kernel failed!");
}
| ac20fe9a9f1cec06b0bb36af60ff71a510333a73.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <thrust/scan.h>
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include <cutil_math.h>
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
//#define DEPTHOFFIELD
//#define SUPERSAMPLING
float Lensdistance=5;
int NumberOfSampling=5;
// Aborts the process if the most recent runtime call left a sticky error,
// printing the caller-supplied context string plus the runtime's message.
void checkCUDAError(const char *msg) {
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
// Returns a pseudo-random vec3 with components in [-1, 1], seeded from the
// pixel's flat index and the iteration time so each pixel/iteration pair
// gets its own deterministic stream.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
  int pixelIndex = x + (y * resolution.x);
  thrust::default_random_engine rng(hash(pixelIndex*time));
  thrust::uniform_real_distribution<float> dist(-1,1);
  float rx = (float) dist(rng);
  float ry = (float) dist(rng);
  float rz = (float) dist(rng);
  return glm::vec3(rx, ry, rz);
}
// Generates the primary camera ray for every pixel and stores it in
// rayArray.  Anti-aliasing jitter: the pixel is divided into a 3x3 grid of
// cells; one cell is chosen at random per frame and the sample is jittered
// uniformly inside it (stratified sampling over time).
__global__ void calculateRaycastFromCameraKernel(cameraData cam, float time,ray* rayArray){
  ray r;
  r.origin = cam.position;
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * cam.resolution.x);
  thrust::default_random_engine rng(hash((int(index)*time)));
  thrust::uniform_real_distribution<float> u01(0,3);
  thrust::uniform_real_distribution<float> u02(-1,1);
  float noisex, noisey;
  float dt = 1.0f/6.0f;   // half-width of one jitter cell
  float dt2 = 1.0f/3.0f;  // offset between cell centers
  float russianRoulette = (float)u01(rng);  // selects one of the 9 cells
  // BUGFIX: the 7th case below was a plain `if` (missing `else`), so every
  // draw below 2.33 fell through and had its jitter overwritten by the
  // bottom-row cases, collapsing the stratification.  All nine cases are
  // now mutually exclusive.  (An unused `random2` draw was also removed.)
  if(russianRoulette<0.33f){
    noisex=-dt2+((float)u02(rng))*dt;
    noisey=dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<0.67f){
    noisex=((float)u02(rng))*dt;
    noisey=dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<1.0f){
    noisex=dt2+((float)u02(rng))*dt;
    noisey=dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<1.33f){
    noisex=-dt2+((float)u02(rng))*dt;
    noisey=((float)u02(rng))*dt;
  }else if(russianRoulette<1.67f){
    noisex=((float)u02(rng))*dt;
    noisey=((float)u02(rng))*dt;
  }else if(russianRoulette<2.0f){
    noisex=dt2+((float)u02(rng))*dt;
    noisey=((float)u02(rng))*dt;
  }else if(russianRoulette<2.33f){
    noisex=-dt2+((float)u02(rng))*dt;
    noisey=-dt2+((float)u02(rng))*dt;
  }else if(russianRoulette<2.67f){
    noisex=((float)u02(rng))*dt;
    noisey=-dt2+((float)u02(rng))*dt;
  }else{
    noisex=dt2+((float)u02(rng))*dt;
    noisey=-dt2+((float)u02(rng))*dt;
  }
  // BUGFIX: the bound was `<=`, letting threads at x==resolution.x /
  // y==resolution.y write one element past the end of rayArray.
  if((x<cam.resolution.x )&&( y<cam.resolution.y)){
    float y1=cam.resolution.y-y;
    float x1=cam.resolution.x-x;
    glm::vec3 A = glm::cross(cam.view,cam.up);   //A= view^up
    float ALength=glm::length(A);
    glm::vec3 B = glm::cross(A,cam.view);        //B <- A * C
    float BLength=glm::length(B);
    glm::vec3 M = cam.position + cam.view;       //M=E+C
    float viewLength=glm::length(cam.view);
    glm::vec3 H = A*viewLength * (float)tan(cam.fov.x*(PI/180.0f))/ ALength; //H <- (A|C|tan)/|A|
    glm::vec3 V = B*viewLength *(float)tan(cam.fov.y*(PI/180.0f)) / BLength; // V <- (B|C|tan)/|B|
    // Image-plane point for this (jittered) pixel, then the ray direction.
    glm::vec3 P=M+(2*(float)(x1+noisex)/(float)(cam.resolution.x-1)-1)*H+(2*(float)(y1+noisey)/(float)(cam.resolution.y-1)-1)*V;
    glm::vec3 D=P-cam.position;
    r.direction=glm::normalize(D);
    rayArray[index]=r;
  }
  return;
}
// Primary-ray generator for supersampling mode: per sample round, each
// pixel's sample is jittered inside one of the four pixel quadrants
// (chosen at random), so repeated rounds cover the pixel area.
__global__ void calculateRaycastFromCameraKernelSuperSampling(cameraData cam, float time,int sampleround,ray* rayArray){
  ray r;
  r.origin = cam.position;
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * cam.resolution.x);
  thrust::default_random_engine rng(hash(sampleround*index*time));
  thrust::uniform_real_distribution<float> u01(0,1);
  thrust::uniform_real_distribution<float> u02(-1,1);
  float noisex, noisey;
  // Pick one of the four pixel quadrants, then jitter uniformly inside it.
  float russianRoulette = (float)u01(rng);
  if(russianRoulette<0.25){
    noisex=0.25+((float)u02(rng))*0.125f;
    noisey=0.25+((float)u02(rng))*0.125f;
  }else if(russianRoulette<0.5){
    noisex=-0.25+((float)u02(rng))*0.125f;
    noisey=0.25+((float)u02(rng))*0.125f;
  }else if(russianRoulette<0.75){
    noisex=0.25+((float)u02(rng))*0.125f;
    noisey=-0.25+((float)u02(rng))*0.125f;
  }else{
    noisex=-0.25+((float)u02(rng))*0.125f;
    noisey=-0.25+((float)u02(rng))*0.125f;
  }
  // BUGFIX: the bound was `<=`, letting threads at x==resolution.x /
  // y==resolution.y write one element past the end of rayArray.
  if((x<cam.resolution.x )&&( y<cam.resolution.y)){
    float y1=cam.resolution.y-y;
    float x1=cam.resolution.x-x;
    glm::vec3 A = glm::cross(cam.view,cam.up);   //A= view^up
    float ALength=glm::length(A);
    glm::vec3 B = glm::cross(A,cam.view);        //B <- A * C
    float BLength=glm::length(B);
    glm::vec3 M = cam.position + cam.view;       //M=E+C
    float viewLength=glm::length(cam.view);
    glm::vec3 H = A*viewLength * (float)tan(cam.fov.x*(PI/180.0f))/ ALength; //H <- (A|C|tan)/|A|
    glm::vec3 V = B*viewLength *(float)tan(cam.fov.y*(PI/180.0f)) / BLength; // V <- (B|C|tan)/|B|
    // Image-plane point for this (jittered) pixel, then the ray direction.
    glm::vec3 P=M+(2*(float)(x1+noisex)/(float)(cam.resolution.x-1)-1)*H+(2*(float)(y1+noisey)/(float)(cam.resolution.y-1)-1)*V;
    glm::vec3 D=P-cam.position;
    r.direction=glm::normalize(D);
    rayArray[index]=r;
  }
  return;
}
//Kernel that blacks out a given image buffer
// Blacks out every pixel of the accumulation buffer.  One thread per pixel.
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUGFIX: the bound was `<=`, which let the threads at x==resolution.x /
  // y==resolution.y write one element past the end of `image`.
  if(x<resolution.x && y<resolution.y){
    image[index] = glm::vec3(0,0,0);
  }
}
//Kernel that writes the image to the OpenGL PBO directly.
// Converts the float accumulation buffer to clamped 8-bit color and writes
// it into the OpenGL pixel buffer object.  One thread per pixel.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUGFIX: the bound was `<=`, which let edge threads index one element
  // past the end of `image`/`PBOpos`.
  if(x<resolution.x && y<resolution.y){
    glm::vec3 color;
    color.x = image[index].x*255.0;
    color.y = image[index].y*255.0;
    color.z = image[index].z*255.0;
    // clamp to the displayable range
    if(color.x>255){
      color.x = 255;
    }
    if(color.y>255){
      color.y = 255;
    }
    if(color.z>255){
      color.z = 255;
    }
    // Each thread writes one pixel location in the texture (textel)
    PBOpos[index].w = 0;
    PBOpos[index].x = color.x;
    PBOpos[index].y = color.y;
    PBOpos[index].z = color.z;
  }
}
// Shades the first bounce for every pixel: intersects the stored camera
// ray with all scene geometry, writes the resulting color into `colors`,
// and records the continuation ray (tagged with its pixel coordinates and
// an alive/dirty flag) in `rayList` for the iterative bounce kernel.
// One thread per pixel; launched over the full image resolution.
__global__ void raytracefromCameraKernel(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms,material* materials,ray* cudaFirstRays,rayData* rayList){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  if((x>=resolution.x )||( y>=resolution.y))return;
  if(index>=(resolution.x*resolution.y))return;
  ray r=cudaFirstRays[index];
  glm::vec3 finalColor=glm::vec3(0.0f,0.0f,0.0f);
  //find first intersection: brute-force closest-hit search over all geoms
  float distance=-1.0f;  // -1 means "no hit found yet"
  glm::vec3 interestPoint=glm::vec3(0,0,0);
  glm::vec3 normal=glm::vec3(0,0,0);
  int geoID=-1;
  for(int i=0; i<numberOfGeoms; i++){
    float tempdistance=-1.0;
    glm::vec3 tempInterestPoint=glm::vec3(0,0,0);
    glm::vec3 tempNormal=glm::vec3(0,0,0);
    if(geoms[i].type==SPHERE){
      tempdistance=sphereIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
    }else if(geoms[i].type==CUBE){
      tempdistance=boxIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
    }
    // Accept the hit if it is the first valid one, or closer than the
    // best so far; the 0.001 epsilon rejects self-intersections.
    if((abs(distance+1.0f)<1e-3)&&(tempdistance>0.001f)){
      distance=tempdistance;
      normal=tempNormal;
      interestPoint=tempInterestPoint;
      geoID=i;
    }else if((tempdistance>0.001f)&&(tempdistance<distance)){
      distance=tempdistance;
      normal=tempNormal;
      interestPoint=tempInterestPoint;
      geoID=i;
    }
  }
  rayData nextray;
  //can not find intersection ,ray ends (dirty=0 marks a terminated ray)
  if(geoID==-1){
    finalColor=glm::vec3(0,0,0);
    nextray.dirty=0;
  }else {
    material m=materials[geoms[geoID].materialid];
    if(m.emittance>0){ ///light source: terminate with emitted color
      finalColor=m.emittance*m.color;
      nextray.dirty=0;
    }else{
      // Non-emissive surface: let the BSDF pick the bounce color and the
      // continuation ray; dirty=1 keeps the ray alive for the next depth.
      glm::vec3 emittedColor=glm::vec3(0.0f,0.0f,0.0f);
      glm::vec3 unabsorbedColor=glm::vec3(0.0f,0.0f,0.0f);
      AbsorptionAndScatteringProperties currentAbsorptionAndScattering;
      calculateBSDF(cam.position,r,interestPoint,normal,emittedColor, currentAbsorptionAndScattering,
      finalColor, unabsorbedColor, m, (float)index*time, nextray);
      nextray.dirty=1;
    }
  }
  colors[index] =finalColor;
  // Remember which pixel this ray belongs to, so later (compacted)
  // bounces can still accumulate into the right place.
  nextray.x=x;
  nextray.y=y;
  rayList[index]=nextray;
}
// Traces one additional bounce for a compacted list of surviving rays.
// The launch is a WIDTH x WIDTH square grid just large enough to cover
// `maxnum` rays; each thread multiplies its bounce color into the pixel
// recorded in its rayData and writes back the next continuation ray.
__global__ void iterationRaytrace(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms,material* materials,rayData* rayList,int maxnum,int WIDTH,int currentDepth){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int threadIndex=x+WIDTH*y;
  if((x>=WIDTH )||( y>=WIDTH))return;
  if(threadIndex>=maxnum) return;
  rayData rd=rayList[threadIndex];
  // The ray carries its own pixel coordinates (set at the first bounce).
  int colorIndex = rd.x + (rd.y * resolution.x);
  ray r=rd.newray;
  glm::vec3 finalColor=glm::vec3(0.0f,0.0f,0.0f);
  //find first intersection: brute-force closest-hit search over all geoms
  float distance=-1.0f;  // -1 means "no hit found yet"
  glm::vec3 interestPoint=glm::vec3(0,0,0);
  glm::vec3 normal=glm::vec3(0,0,0);
  int geoID=-1;
  for(int i=0; i<numberOfGeoms; i++){
    float tempdistance=-1.0;
    glm::vec3 tempInterestPoint=glm::vec3(0,0,0);
    glm::vec3 tempNormal=glm::vec3(0,0,0);
    if(geoms[i].type==SPHERE){
      tempdistance=sphereIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
    }else if(geoms[i].type==CUBE){
      tempdistance=boxIntersectionTest(geoms[i],r,tempInterestPoint,tempNormal);
    }
    // Accept the hit if it is the first valid one, or closer than the
    // best so far; the 0.001 epsilon rejects self-intersections.
    if((abs(distance+1.0f)<1e-3)&&(tempdistance>0.001f)){
      distance=tempdistance;
      normal=tempNormal;
      interestPoint=tempInterestPoint;
      geoID=i;
    }else if((tempdistance>0.001f)&&(tempdistance<distance)){
      distance=tempdistance;
      normal=tempNormal;
      interestPoint=tempInterestPoint;
      geoID=i;
    }
  }
  rayData nextray;
  //can not find intersection ,ray ends (dirty=0 marks a terminated ray)
  if(geoID==-1){
    finalColor=glm::vec3(0,0,0);
    nextray.dirty=0;
  }else {
    material m=materials[geoms[geoID].materialid];
    if(m.emittance>0){ ///light source: terminate with emitted color
      finalColor=m.emittance*m.color;
      nextray.dirty=0;
    }else{
      // Non-emissive surface: let the BSDF pick the bounce color and the
      // continuation ray; dirty=1 keeps the ray alive for the next depth.
      glm::vec3 emittedColor=glm::vec3(0.0f,0.0f,0.0f);
      glm::vec3 unabsorbedColor=glm::vec3(0.0f,0.0f,0.0f);
      AbsorptionAndScatteringProperties currentAbsorptionAndScattering;
      calculateBSDF(cam.position,r,interestPoint,normal,emittedColor, currentAbsorptionAndScattering,
      finalColor, unabsorbedColor, m, (float)threadIndex*time*currentDepth, nextray);
      nextray.dirty=1;
    }
  }
  // Path throughput: multiply this bounce's color into the pixel.
  glm::vec3 precolor=colors[colorIndex];
  colors[colorIndex] = glm::vec3(precolor.x*finalColor.x,precolor.y*finalColor.y,precolor.z*finalColor.z);
  nextray.x=rd.x;
  nextray.y=rd.y;
  rayList[threadIndex]=nextray;
}
// Running-average accumulation:
//   currentColors <- current/time + previous*(time-1)/time
// where `time` is the 1-based iteration (or sample-round) count.
__global__ void mergeImage(glm::vec2 resolution,glm::vec3* previousColors,glm::vec3* currentColors,float time){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUGFIX: the bound was `<=`, allowing an out-of-range read/write at
  // the image edge.
  if((x<resolution.x) && (y<resolution.y)){
    glm::vec3 currentColor=currentColors[index];
    glm::vec3 previousColor=previousColors[index];
    currentColors[index]=currentColor/time+previousColor*(time-1.0f)/time;
  }
}
// One Hillis-Steele scan pass: nextarray[i] = previewArray[i], plus
// previewArray[i-flag] when i >= flag.  `flag` is the current stride
// (2^(d-1)); `boundary` is the number of valid elements.
__global__ void scanRound( glm::vec2 dim,int* previewArray,int* nextarray, int boundary,int flag){
  const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
  const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
  if (col >= dim.x || row >= dim.y) {
    return;
  }
  const int idx = col + (row * dim.x);
  if (idx >= boundary) {
    return;
  }
  int value = previewArray[idx];
  if (idx >= flag) {
    value += previewArray[idx - flag];
  }
  nextarray[idx] = value;
}
// Copies the scan scratch buffer (`nextarray`) back into the primary scan
// buffer (`previewArray`) so the next pass reads consistent data.
__global__ void copyBack( glm::vec2 dim,int* previewArray,int* nextarray, int boundary){
  const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
  const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
  if (col >= dim.x || row >= dim.y) {
    return;
  }
  const int idx = col + (row * dim.x);
  if (idx < boundary) {
    previewArray[idx] = nextarray[idx];
  }
}
// Seeds the scan input: copies each ray's dirty flag (1 = alive,
// 0 = terminated) into indexlist.
__global__ void getValue(glm::vec2 dim, rayData* raydatalist,int* indexlist,int boundary){
  const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
  const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
  if (col >= dim.x || row >= dim.y) {
    return;
  }
  const int idx = col + (row * dim.x);
  if (idx < boundary) {
    indexlist[idx] = raydatalist[idx].dirty;
  }
}
// Converts the inclusive scan result into an exclusive one by subtracting
// each element's own dirty flag.
__global__ void inclusiveTOexclusive(glm::vec2 dim, rayData* raydatalist,int* indexlist,int boundary){
  const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
  const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
  if (col >= dim.x || row >= dim.y) {
    return;
  }
  const int idx = col + (row * dim.x);
  if (idx < boundary) {
    indexlist[idx] -= raydatalist[idx].dirty;
  }
}
// Host-driven exclusive prefix scan (Hillis-Steele, one kernel launch per
// doubling pass) over the `dirty` flags of rayList.  On return,
// indexArray[i] holds the number of surviving rays before index i, i.e.
// each ray's compaction target slot.  `tempArray` is same-size scratch;
// `boundary` is the element count.  Cross-pass synchronization comes for
// free from the kernel-launch boundaries.
void scan(dim3 DimBlock, dim3 DimThread, glm::vec2 dim,rayData* rayList,int* indexArray,int* tempArray, int boundary){
  // Seed the scan input with each ray's alive flag.
  getValue<<<DimBlock,DimThread>>>(dim,rayList,indexArray,boundary);
  checkCUDAError("Kernel failed! scan-1");
  // ceil(log2(boundary)) passes; `flag` is the stride 2^(d-1).
  for(int d=1;d<=ceil((float)log2((float)boundary));d++){
    int flag=(int)pow(2.0f,d-1);
    //std::cout<<"d="<<d<<std::endl;
    //std::cout<<"flag="<<flag<<std::endl;
    scanRound<<<DimBlock,DimThread>>>(dim,indexArray,tempArray,boundary,flag);
    checkCUDAError("Kernel failed! scan-2-1");
    // Copy the scratch buffer back so the next pass reads the new values.
    copyBack<<<DimBlock,DimThread>>>(dim,indexArray,tempArray,boundary);
    // cudaMemcpy( indexArray, tempArray, (boundary)*sizeof(int), cudaMemcpyDeviceToDevice);
    checkCUDAError("Kernel failed! scan-2-2");
  }
  // Inclusive -> exclusive: subtract each element's own flag.
  inclusiveTOexclusive<<<DimBlock,DimThread>>>(dim,rayList,indexArray,boundary);
  checkCUDAError("Kernel failed! scan-3");
}
// Stream compaction scatter: each surviving (dirty) ray is written to its
// exclusive-scan slot in newdataArray, packing live rays to the front.
__global__ void stringcompaction(glm::vec2 dim,rayData* rayList,rayData *newdataArray,int* indexArray,int maxBoundary){
  const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
  const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
  if (col >= dim.x || row >= dim.y) {
    return;
  }
  const int idx = col + (row * dim.x);
  if (idx >= maxBoundary) {
    return;
  }
  if (rayList[idx].dirty) {
    newdataArray[indexArray[idx]] = rayList[idx];
  }
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Host-side driver for one path-tracing iteration.  Uploads the current
// frame's geometry/material/camera data, shoots primary rays, repeatedly
// bounces the surviving rays (stream-compacting terminated ones) up to
// traceDepth, averages the result into renderCam->image across
// iterations, and writes the displayable image into the OpenGL PBO.
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, ray* firstRays){
  int traceDepth =100; //determines how many bounces the raytracer traces
  // set up crucial magic
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
  //send image to GPU
  glm::vec3* cudaimage = NULL;
  cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
  //package the current animation frame's geometry and send it to the GPU
  staticGeom* geomList = new staticGeom[numberOfGeoms];
  for(int i=0; i<numberOfGeoms; i++){
    staticGeom newStaticGeom;
    newStaticGeom.type = geoms[i].type;
    newStaticGeom.materialid = geoms[i].materialid;
    newStaticGeom.translation = geoms[i].translations[frame];
    newStaticGeom.rotation = geoms[i].rotations[frame];
    newStaticGeom.scale = geoms[i].scales[frame];
    newStaticGeom.transform = geoms[i].transforms[frame];
    newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
    geomList[i] = newStaticGeom;
  }
  staticGeom* cudageoms = NULL;
  cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
  cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
  //--------------------------------
  //package materials
  material* materialList=new material[numberOfMaterials];
  for(int i=0;i<numberOfMaterials;i++){
    material newmaterial;
    newmaterial.color=materials[i].color;
    newmaterial.specularExponent=materials[i].specularExponent;
    newmaterial.specularColor=materials[i].specularColor;
    newmaterial.hasReflective=materials[i].hasReflective;
    newmaterial.hasRefractive=materials[i].hasRefractive;
    newmaterial.indexOfRefraction=materials[i].indexOfRefraction;
    newmaterial.hasScatter=materials[i].hasScatter;
    newmaterial.absorptionCoefficient=materials[i].absorptionCoefficient;
    newmaterial.reducedScatterCoefficient=materials[i].reducedScatterCoefficient;
    newmaterial.emittance=materials[i].emittance;
    materialList[i]=newmaterial;
  }
  material* cudamatrials=NULL;
  cudaMalloc((void**)&cudamatrials, numberOfMaterials*sizeof(material));
  cudaMemcpy( cudamatrials, materialList, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);
  //package camera
  cameraData cam;
  cam.resolution = renderCam->resolution;
  cam.position = renderCam->positions[frame];
  cam.view = renderCam->views[frame];
  cam.up = renderCam->ups[frame];
  cam.fov = renderCam->fov;
  //first Rays cudaMemory Pointer
  ray* cudaFirstRays = NULL;
  cudaMalloc((void**)&cudaFirstRays, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(ray));
  //per-ray bounce state (continuation ray + alive flag + pixel coords)
  rayData* cudaRayList = NULL;
  cudaMalloc((void**)&cudaRayList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData));
  //scan result cudaMemory Pointer
  int* scanResultRayList = NULL;
  cudaMalloc((void**)&scanResultRayList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(int));
  //scratch buffers for the scan and the stream-compaction scatter
  int *tempscanResultRayList=NULL;
  cudaMalloc((void**)&tempscanResultRayList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(int));
  rayData* newRayDataList=NULL;
  cudaMalloc((void**)&newRayDataList, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData));
#ifdef SUPERSAMPLING
#ifdef DEPTHOFFIELD
  // Depth of field: jitter the camera position on the lens and re-aim it
  // at the focal point so in-focus geometry stays sharp.
  thrust::default_random_engine rng(hash((float)iterations));
  thrust::uniform_real_distribution<float> u01(-1,1);
  float xDist=(float)u01(rng);
  float yDist=(float)u01(rng);
  float length=abs(glm::dot(glm::vec3(glm::vec3(0,0,0)-cam.position),cam.view));
  glm::vec3 focalPos=cam.position+cam.view*length;
  cam.position+=100.0f*glm::vec3(xDist*Lensdistance*1/cam.resolution.x,yDist*Lensdistance*1/cam.resolution.y,0.0f);
  cam.view=glm::normalize(focalPos-cam.position);
#endif
  glm::vec3* cudaimageSuperSamping = NULL;
  cudaMalloc((void**)&cudaimageSuperSamping, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  int SampleCount=1;
  while(SampleCount<=NumberOfSampling){
#endif
  // generate and save the primary ray for every pixel
#ifndef SUPERSAMPLING
  calculateRaycastFromCameraKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(cam,(float)iterations,cudaFirstRays);
  //cudaMemcpy(firstRays, cudaFirstRays, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(ray), cudaMemcpyDeviceToHost);
#else
  calculateRaycastFromCameraKernelSuperSampling<<<fullBlocksPerGrid, threadsPerBlock>>>(cam,(float)iterations,SampleCount,cudaFirstRays);
#endif
  //shade the first bounce and record the continuation rays
  raytracefromCameraKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudamatrials,cudaFirstRays,cudaRayList);
  checkCUDAError("Kernel failed! 2");
  //exclusive scan over the alive flags
  int maxBoundary=(int)renderCam->resolution.x*(int)renderCam->resolution.y;
  scan(fullBlocksPerGrid, threadsPerBlock,renderCam->resolution,cudaRayList,scanResultRayList,tempscanResultRayList,maxBoundary);
  checkCUDAError("Kernel failed! 3");
  // `flag` is the last ray's alive bit; the exclusive-scan total plus this
  // bit gives the surviving-ray count.
  int flag=-1;
  cudaMemcpy(&flag,&(cudaRayList[maxBoundary-1].dirty),sizeof(int),cudaMemcpyDeviceToHost);
  //compact the surviving rays to the front of the list
  stringcompaction<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution,cudaRayList,newRayDataList,scanResultRayList,maxBoundary);
  cudaMemcpy(cudaRayList,newRayDataList,(int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData), cudaMemcpyDeviceToDevice);
  //get number of rays in raylist
  int numberOfRays=0;
  cudaMemcpy(&numberOfRays,&(scanResultRayList[maxBoundary-1]),sizeof(int),cudaMemcpyDeviceToHost);
  if(flag==1)
    numberOfRays++;
  checkCUDAError("Kernel failed! list-newlist");
  //iterative bounce kernel launches over the compacted ray list
  int currDepth=1;
  while((numberOfRays>0)&&(currDepth<=traceDepth)){
    //std::cout<<"depth="<<currDepth<<std::endl;
    // Launch over a square grid just big enough for the surviving rays.
    int length=ceil(sqrt((float)(numberOfRays)));
    dim3 newthreadsPerBlock(tileSize, tileSize);
    dim3 newfullBlocksPerGrid((int)ceil(float(length)/float(tileSize)), (int)ceil(float(length)/float(tileSize)));
    glm::vec2 dim=glm::vec2(length,length);
    iterationRaytrace<<<newfullBlocksPerGrid,newthreadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudamatrials,cudaRayList,numberOfRays,length,currDepth);
    checkCUDAError("Kernel failed! 4-0");
    //get flag
    cudaMemcpy(&flag,&(cudaRayList[numberOfRays-1].dirty),sizeof(int),cudaMemcpyDeviceToHost);
    checkCUDAError("Kernel failed! 4-1");
    scan(newfullBlocksPerGrid,newthreadsPerBlock,dim,cudaRayList,scanResultRayList,tempscanResultRayList,numberOfRays);
    checkCUDAError("Kernel failed! 4-2");
    //stream compaction
    stringcompaction<<<newfullBlocksPerGrid,newthreadsPerBlock>>>(dim,cudaRayList,newRayDataList,scanResultRayList,numberOfRays);
    cudaMemcpy(cudaRayList,newRayDataList,(int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(rayData), cudaMemcpyDeviceToDevice);
    //get number of rays
    cudaMemcpy(&numberOfRays,&scanResultRayList[numberOfRays-1],sizeof(int),cudaMemcpyDeviceToHost);
    if(flag==1)
      numberOfRays++;
    checkCUDAError("Kernel failed! 4-3");
    //std::cout<<"number of rays"<<numberOfRays<<std::endl;
    currDepth++;
  }
#ifdef SUPERSAMPLING
  // fold this sample round into the running supersampling average
  if(SampleCount!=1)
    mergeImage<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution,cudaimageSuperSamping,cudaimage,(float)SampleCount);
  checkCUDAError("Kernel failed!supersamping-1");
  cudaMemcpy( cudaimageSuperSamping, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
  SampleCount++;
  }
#endif
  //average this iteration into the accumulated image
  glm::vec3* previousImage = NULL;
  cudaMalloc((void**)&previousImage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  cudaMemcpy( previousImage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
  checkCUDAError("Kernel failed! 6");
  // BUGFIX: this statement previously ended with a comma operator instead
  // of a semicolon, silently gluing it to the next checkCUDAError call.
  mergeImage<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution,previousImage,cudaimage,(float)iterations);
  checkCUDAError("Kernel failed! 7");
  //retrieve image from GPU
  cudaMemcpy(renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
  sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage);
  //free up stuff, or else we'll leak memory like a madman
  cudaFree( cudaimage );
  cudaFree(cudaFirstRays);
  cudaFree(cudaRayList);
  cudaFree(scanResultRayList);
  cudaFree( cudageoms );
  cudaFree(cudamatrials);
  cudaFree(previousImage);
  cudaFree(tempscanResultRayList);
  cudaFree(newRayDataList);
#ifdef SUPERSAMPLING
  cudaFree(cudaimageSuperSamping);
#endif
  // BUGFIX: these were allocated with new[], so they must be released with
  // delete[] (scalar delete on an array-new pointer is undefined behavior).
  delete[] geomList;
  delete[] materialList;
  // make certain the kernel has completed
  // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
  // supported equivalent)
  cudaDeviceSynchronize();
  checkCUDAError("Kernel failed!");
}
|
dde953e39548a3c408da1b4fa74e774733340eaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <highgui.h>
#include <cv.h>
#include <stdio.h>
#include <stdlib.h>
#define TILE_SIZE 32
#define MASK_WIDTH 9
#define Mask_size 3
__constant__ char M[MASK_WIDTH];
using namespace std;
using namespace cv;
// Clamps an integer accumulator to the valid 8-bit pixel range [0, 255].
__device__ unsigned char conv(int v){
  const int low = (v < 0) ? 0 : v;
  const int clamped = (low > 255) ? 255 : low;
  return (unsigned char)clamped;
}
// Tiled convolution of a grayscale image with the constant-memory mask M.
// Each thread block stages a (TILE_SIZE+Mask_size-1)^2 halo tile of the
// input into shared memory in two batched loads, then every thread
// convolves its own output pixel.  Assumes blockDim == TILE_SIZE x
// TILE_SIZE; out-of-image halo pixels are zero-padded.
// NOTE(review): `maskWidth` must equal Mask_size (3) for the shared tile
// to cover the mask footprint -- confirm at the call site.
__global__ void KernelConvolutionBasic(unsigned char *In, unsigned char *Out,int maskWidth, int width, int height){
  __shared__ float N_ds[TILE_SIZE + Mask_size - 1][TILE_SIZE+ Mask_size - 1];
  int n = Mask_size/2;  // halo radius
  // First batch: the TILE_SIZE*TILE_SIZE threads fill the first
  // TILE_SIZE*TILE_SIZE slots of the (wider) halo tile.
  int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / (TILE_SIZE+Mask_size-1), destX = dest % (TILE_SIZE+Mask_size-1),
  srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
  src = (srcY * width + srcX);
  if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
  N_ds[destY][destX] = In[src];
  else
  N_ds[destY][destX] = 0;
  // Second batch loading: covers the remaining halo slots.
  dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
  destY = dest /(TILE_SIZE + Mask_size - 1), destX = dest % (TILE_SIZE + Mask_size - 1);
  srcY = blockIdx.y * TILE_SIZE + destY - n;
  srcX = blockIdx.x * TILE_SIZE + destX - n;
  src = (srcY * width + srcX);
  if (destY < TILE_SIZE + Mask_size - 1) {
    if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
      N_ds[destY][destX] = In[src];
    else
      N_ds[destY][destX] = 0;
  }
  __syncthreads();  // tile fully staged before any thread reads it
  // Convolve this thread's pixel against the constant-memory mask.
  int accum = 0;
  int y, x;
  for (y = 0; y < maskWidth; y++)
    for (x = 0; x < maskWidth; x++)
      accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * maskWidth + x];
  y = blockIdx.y * TILE_SIZE + threadIdx.y;
  x = blockIdx.x * TILE_SIZE + threadIdx.x;
  if (y < height && x < width)
    Out[(y * width + x)] = conv(accum);  // clamp to [0,255]
  __syncthreads();
}
// Loads a grayscale image, runs OpenCV's Sobel X on the CPU and a 3x3
// convolution with the same mask on the GPU (timing both), and writes the
// GPU result to disk.
int main(){
  int scale = 1;
  int delta = 0;
  int ddepth = CV_8UC1;
  clock_t start,end;
  double elapsed_seconds;
  Mat image;
  // read the input image in grayscale
  image = imread("inputs/img1.jpg",0);
  Size s = image.size();
  int row=s.width;
  int col=s.height;
  char Mask[9] = {-1,0,1,-2,0,2,-1,0,1};  // Sobel X kernel
  //imwrite("./outputs/1089746672.png",image);
  int sizeM= sizeof(char)*9;
  int size = sizeof(unsigned char)*row*col;
  // BUGFIX: the input buffer used to be malloc'd and then immediately
  // overwritten with image.data, leaking the allocation; point directly
  // at the Mat's pixels instead.
  unsigned char *img = image.data;
  unsigned char *img_out = (unsigned char*)malloc(size);
  /////////////////////////SEQUENTIAL (CPU) REFERENCE///////////////////////////
  Mat grad_x;
  start=clock();
  Sobel( image, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
  end= clock();
  elapsed_seconds=end-start;
  printf("Tiempo transcurrido Secuencial: %lf\n", (elapsed_seconds / CLOCKS_PER_SEC));
  //////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////PARALLEL (GPU)///////////////////////////////
  // grid and block size
  float blocksize=32;
  dim3 dimBlock((int)blocksize,(int)blocksize,1);
  dim3 dimGrid(ceil(row/blocksize),ceil(col/blocksize),1);
  // device buffers
  unsigned char *d_img;
  unsigned char *d_img_out;
  hipMalloc((void**)&d_img,size);
  hipMalloc((void**)&d_img_out,size);
  start=clock();
  hipMemcpyToSymbol(M,Mask,sizeM);  // mask lives in constant memory
  hipMemcpy(d_img,img,size, hipMemcpyHostToDevice);
  // kernel launch
  hipLaunchKernelGGL(( KernelConvolutionBasic), dim3(dimGrid),dim3(dimBlock), 0, 0, d_img,d_img_out,3,row,col);
  hipDeviceSynchronize();
  hipMemcpy(img_out,d_img_out,size,hipMemcpyDeviceToHost);
  end=clock();
  elapsed_seconds=end-start;
  printf("Tiempo transcurrido Parelo: %lf\n", (elapsed_seconds / CLOCKS_PER_SEC));
  // Wrap the GPU result without copying (this Mat does not own img_out).
  // BUGFIX: previously create() allocated a buffer that was leaked when
  // .data was reassigned.
  Mat gray_image(col,row,CV_8UC1,img_out);
  imwrite("./outputs/1089746672.png",gray_image);
  /////////////////////////////////////////////////////////////////////////////
  hipFree(d_img);
  hipFree(d_img_out);
  free(img_out);  // BUGFIX: host output buffer was never freed
  return 0;
}
| dde953e39548a3c408da1b4fa74e774733340eaa.cu | #include <iostream>
#include <highgui.h>
#include <cv.h>
#include <stdio.h>
#include <stdlib.h>
#define TILE_SIZE 32
#define MASK_WIDTH 9
#define Mask_size 3
__constant__ char M[MASK_WIDTH];
using namespace std;
using namespace cv;
// Clamps an integer accumulator to the valid 8-bit pixel range [0, 255].
__device__ unsigned char conv(int v){
  const int low = (v < 0) ? 0 : v;
  const int clamped = (low > 255) ? 255 : low;
  return (unsigned char)clamped;
}
// Tiled convolution of a grayscale image with the constant-memory mask M.
// Each thread block stages a (TILE_SIZE+Mask_size-1)^2 halo tile of the
// input into shared memory in two batched loads, then every thread
// convolves its own output pixel.  Assumes blockDim == TILE_SIZE x
// TILE_SIZE; out-of-image halo pixels are zero-padded.
// NOTE(review): `maskWidth` must equal Mask_size (3) for the shared tile
// to cover the mask footprint -- confirm at the call site.
__global__ void KernelConvolutionBasic(unsigned char *In, unsigned char *Out,int maskWidth, int width, int height){
  __shared__ float N_ds[TILE_SIZE + Mask_size - 1][TILE_SIZE+ Mask_size - 1];
  int n = Mask_size/2;  // halo radius
  // First batch: the TILE_SIZE*TILE_SIZE threads fill the first
  // TILE_SIZE*TILE_SIZE slots of the (wider) halo tile.
  int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / (TILE_SIZE+Mask_size-1), destX = dest % (TILE_SIZE+Mask_size-1),
  srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
  src = (srcY * width + srcX);
  if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
  N_ds[destY][destX] = In[src];
  else
  N_ds[destY][destX] = 0;
  // Second batch loading: covers the remaining halo slots.
  dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
  destY = dest /(TILE_SIZE + Mask_size - 1), destX = dest % (TILE_SIZE + Mask_size - 1);
  srcY = blockIdx.y * TILE_SIZE + destY - n;
  srcX = blockIdx.x * TILE_SIZE + destX - n;
  src = (srcY * width + srcX);
  if (destY < TILE_SIZE + Mask_size - 1) {
    if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
      N_ds[destY][destX] = In[src];
    else
      N_ds[destY][destX] = 0;
  }
  __syncthreads();  // tile fully staged before any thread reads it
  // Convolve this thread's pixel against the constant-memory mask.
  int accum = 0;
  int y, x;
  for (y = 0; y < maskWidth; y++)
    for (x = 0; x < maskWidth; x++)
      accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * maskWidth + x];
  y = blockIdx.y * TILE_SIZE + threadIdx.y;
  x = blockIdx.x * TILE_SIZE + threadIdx.x;
  if (y < height && x < width)
    Out[(y * width + x)] = conv(accum);  // clamp to [0,255]
  __syncthreads();
}
// Loads a grayscale image, runs OpenCV's Sobel X on the CPU and a 3x3
// convolution with the same mask on the GPU (timing both), and writes the
// GPU result to disk.
int main(){
  int scale = 1;
  int delta = 0;
  int ddepth = CV_8UC1;
  clock_t start,end;
  double elapsed_seconds;
  Mat image;
  // read the input image in grayscale
  image = imread("inputs/img1.jpg",0);
  Size s = image.size();
  int row=s.width;
  int col=s.height;
  char Mask[9] = {-1,0,1,-2,0,2,-1,0,1};  // Sobel X kernel
  //imwrite("./outputs/1089746672.png",image);
  int sizeM= sizeof(char)*9;
  int size = sizeof(unsigned char)*row*col;
  // BUGFIX: the input buffer used to be malloc'd and then immediately
  // overwritten with image.data, leaking the allocation; point directly
  // at the Mat's pixels instead.
  unsigned char *img = image.data;
  unsigned char *img_out = (unsigned char*)malloc(size);
  /////////////////////////SEQUENTIAL (CPU) REFERENCE///////////////////////////
  Mat grad_x;
  start=clock();
  Sobel( image, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
  end= clock();
  elapsed_seconds=end-start;
  printf("Tiempo transcurrido Secuencial: %lf\n", (elapsed_seconds / CLOCKS_PER_SEC));
  //////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////PARALLEL (GPU)///////////////////////////////
  // grid and block size
  float blocksize=32;
  dim3 dimBlock((int)blocksize,(int)blocksize,1);
  dim3 dimGrid(ceil(row/blocksize),ceil(col/blocksize),1);
  // device buffers
  unsigned char *d_img;
  unsigned char *d_img_out;
  cudaMalloc((void**)&d_img,size);
  cudaMalloc((void**)&d_img_out,size);
  start=clock();
  cudaMemcpyToSymbol(M,Mask,sizeM);  // mask lives in constant memory
  cudaMemcpy(d_img,img,size, cudaMemcpyHostToDevice);
  // kernel launch
  KernelConvolutionBasic<<<dimGrid,dimBlock>>>(d_img,d_img_out,3,row,col);
  cudaDeviceSynchronize();
  cudaMemcpy(img_out,d_img_out,size,cudaMemcpyDeviceToHost);
  end=clock();
  elapsed_seconds=end-start;
  printf("Tiempo transcurrido Parelo: %lf\n", (elapsed_seconds / CLOCKS_PER_SEC));
  // Wrap the GPU result without copying (this Mat does not own img_out).
  // BUGFIX: previously create() allocated a buffer that was leaked when
  // .data was reassigned.
  Mat gray_image(col,row,CV_8UC1,img_out);
  imwrite("./outputs/1089746672.png",gray_image);
  /////////////////////////////////////////////////////////////////////////////
  cudaFree(d_img);
  cudaFree(d_img_out);
  free(img_out);  // BUGFIX: host output buffer was never freed
  return 0;
}
|
3cc5c8ae68f4f7a1d698b7f22a6dc5f6df3b0967.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
// Prints the currently selected device, then asks the runtime for the
// device closest to compute capability 1.3 and makes it the current one.
int main(int argc, char **argv) {
  int device = -1;
  CHECK_CUDA_ERROR(hipGetDevice(&device));
  printf("ID of current CUDA device: %d\n", device);
  hipDeviceProp_t wanted;
  memset(&wanted, 0, sizeof(hipDeviceProp_t));
  wanted.major = 1;  // request compute capability 1.3
  wanted.minor = 3;
  CHECK_CUDA_ERROR(hipChooseDevice(&device, &wanted));
  printf("ID of CUDA device closest to revision 1.3: %d\n", device);
  CHECK_CUDA_ERROR(hipSetDevice(device));
  return 0;
}
| 3cc5c8ae68f4f7a1d698b7f22a6dc5f6df3b0967.cu | /*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
// Prints the currently selected device, then asks the runtime for the
// device closest to compute capability 1.3 and makes it the current one.
int main(int argc, char **argv) {
  int device = -1;
  CHECK_CUDA_ERROR(cudaGetDevice(&device));
  printf("ID of current CUDA device: %d\n", device);
  cudaDeviceProp wanted;
  memset(&wanted, 0, sizeof(cudaDeviceProp));
  wanted.major = 1;  // request compute capability 1.3
  wanted.minor = 3;
  CHECK_CUDA_ERROR(cudaChooseDevice(&device, &wanted));
  printf("ID of CUDA device closest to revision 1.3: %d\n", device);
  CHECK_CUDA_ERROR(cudaSetDevice(device));
  return 0;
}
|
cd37a53ac63a5537aee6335d73b9f4bb50968439.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "mdCuda.h"
// Folds the coordinate (Xi,Yi,Zi) back into the periodic simulation box,
// axis by axis, for every axis whose period PPk is positive.  PAk and PLk
// position and scale the box on axis k (presumably origin and half-length
// -- confirm against the caller).  NA is unused here; it is kept only for
// signature compatibility.
__device__ void cudaReposition(float PP0, float PP1, float PP2, float PA0, float PA1, float PA2, float PL0, float PL1, float PL2, float &Xi, float &Yi, float &Zi, float NA)
{
  if (PP0 > 0) {
    const float anchor = PA0 + PL0;
    const float offset = (Xi - anchor) / PL0;
    const float wrapped = offset - 2.0*int(offset);
    Xi = wrapped*PL0 + anchor;
  }
  if (PP1 > 0) {
    const float anchor = PA1 + PL1;
    const float offset = (Yi - anchor) / PL1;
    const float wrapped = offset - 2.0*int(offset);
    Yi = wrapped*PL1 + anchor;
  }
  if (PP2 > 0) {
    const float anchor = PA2 + PL2;
    const float offset = (Zi - anchor) / PL2;
    const float wrapped = offset - 2.0*int(offset);
    Zi = wrapped*PL2 + anchor;
  }
}
// One thread per atom: accumulates the two-term pair-potential energy
// EE[i] and the force on atom i (FFX/FFY/FFZ[i]) over all other atoms,
// applying minimum-image periodic wrapping on axes whose period PPk > 0
// when IPBC is nonzero.
__global__ void mdKernel(int NA, float* FFX, float* FFY, float* FFZ, float* EE, float* X, float* Y, float* Z, int IPBC,
float PP0, float PP1, float PP2, float AL1, float AL2, float A1, float A2, float RL1, float RL2, float D21, float D22,
float PA0, float PA1, float PA2, float PB0, float PB1, float PB2, float PL0, float PL1, float PL2)
{
  float XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2;
  float ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ;
  float FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // BUGFIX: guard against grid overshoot -- without it, threads with
  // i >= NA read and write past the ends of the atom arrays.
  if (i >= NA) return;
  float Xi = X[i];
  float Yi = Y[i];
  float Zi = Z[i];
  // NOTE(review): the wrapped Xi/Yi/Zi are neither written back nor read
  // by the force loop below (it uses X[i]/Y[i]/Z[i] directly) -- confirm
  // whether the repositioned coordinates were meant to feed the loop.
  cudaReposition(PP0, PP1, PP2, PA0, PA1, PA2, PL0, PL1, PL2, Xi, Yi, Zi, NA);
  ///////////////////////////////////////////////////
  // FORCE
  EPP = 0;
  // Forces that affect atom i along all three axes
  FX2 = 0;
  FY2 = 0;
  FZ2 = 0;
  for(int j=0; j<NA; j++)
  {
    if(i == j)
      continue;
    // Apply periodic boundaries (minimum image per axis) and find the
    // separation between atoms i and j.  RIJ2 is the square of RIJ.
    XIJ = X[i] - X[j];
    YIJ = Y[i] - Y[j];
    ZIJ = Z[i] - Z[j];
    double DD, ID;
    if(IPBC != 0){
      if(PP0 > 0){
        DD = XIJ / PP0;
        ID = int(DD);
        XIJ = XIJ - PP0*(ID+int(2.0*(DD-ID)));
      }
      if(PP1 > 0){
        DD = YIJ / PP1;
        ID = int(DD);
        YIJ = YIJ - PP1*(ID+int(2.0*(DD-ID)));
      }
      if(PP2 > 0){
        DD = ZIJ / PP2;
        ID = int(DD);
        ZIJ = ZIJ - PP2*(ID+int(2.0*(DD-ID)));
      }
    }
    RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ;
    RIJ = sqrt(RIJ2);
    // Pair potential U(r) = D21*A1*exp(-AL1*r^2)/r^RL1
    //                     + D22*A2*exp(-AL2*r^2)/r^RL2
    ARG1 = AL1*RIJ2;
    ARG2 = AL2*RIJ2;
    EXP1 = exp(-ARG1);
    EXP2 = exp(-ARG2);
    UIJ1 = A1*EXP1/(pow(RIJ,RL1));
    UIJ2 = A2*EXP2/(pow(RIJ,RL2));
    UIJ = D21*UIJ1 + D22*UIJ2;
    EPP = EPP+UIJ;
    // Force magnitude from dU/dr, projected onto the unit vector i->j
    FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ);
    FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ);
    FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2;
    XRIJ = XIJ/RIJ;
    YRIJ = YIJ/RIJ;
    ZRIJ = ZIJ/RIJ;
    FX2 += FAC12*XRIJ;
    FY2 += FAC12*YRIJ;
    FZ2 += FAC12*ZRIJ;
  }
  FFX[i] = -FX2;
  FFY[i] = -FY2;
  FFZ[i] = -FZ2;
  EE[i] = EPP;
}
 | cd37a53ac63a5537aee6335d73b9f4bb50968439.cu | //#include "mdCuda.h"
// Fold each periodic axis of (Xi, Yi, Zi): measure the coordinate's offset
// from the anchor (PA + PL) in units of PL, run it through the sawtooth
// t - 2*trunc(t), and scale back.  Axes whose period PPk is not positive are
// left unchanged.  NA is accepted but unused.
__device__ void cudaReposition(float PP0, float PP1, float PP2, float PA0, float PA1, float PA2, float PL0, float PL1, float PL2, float &Xi, float &Yi, float &Zi, float NA)
{
	if (PP0 > 0) {
		float anchorX = PA0 + PL0;
		float tX = (Xi - anchorX) / PL0;
		float foldX = tX - 2.0 * int(tX);
		Xi = foldX * PL0 + anchorX;
	}
	if (PP1 > 0) {
		float anchorY = PA1 + PL1;
		float tY = (Yi - anchorY) / PL1;
		float foldY = tY - 2.0 * int(tY);
		Yi = foldY * PL1 + anchorY;
	}
	if (PP2 > 0) {
		float anchorZ = PA2 + PL2;
		float tZ = (Zi - anchorZ) / PL2;
		float foldZ = tZ - 2.0 * int(tZ);
		Zi = foldZ * PL2 + anchorZ;
	}
}
// Pair-potential MD kernel: one thread per atom i; accumulates into
// FFX/FFY/FFZ[i] the force on atom i and into EE[i] its pair potential
// energy, using a two-term damped power-law potential:
//   U(r) = D21*A1*exp(-AL1*r^2)/r^RL1 + D22*A2*exp(-AL2*r^2)/r^RL2
// IPBC != 0 wraps the i-j separation into the periodic box (periods PP0..2;
// a period of 0 disables that axis).
// NOTE(review): there is no `i < NA` bounds guard, so the launch must cover
// exactly NA threads or the tail reads/writes out of bounds.
__global__ void mdKernel(int NA, float* FFX, float* FFY, float* FFZ, float* EE, float* X, float* Y, float* Z, int IPBC,
	float PP0, float PP1, float PP2, float AL1, float AL2, float A1, float A2, float RL1, float RL2, float D21, float D22,
	float PA0, float PA1, float PA2, float PB0, float PB1, float PB2, float PL0, float PL1, float PL2)
{
	float XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2;
	float ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ;
	float FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ;
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	float Xi = X[i];
	float Yi = Y[i];
	float Zi = Z[i];
	// NOTE(review): the repositioned Xi/Yi/Zi are never used below (the loop
	// reads X[i]/Y[i]/Z[i] directly) -- confirm whether this is intentional.
	cudaReposition(PP0, PP1, PP2, PA0, PA1, PA2, PL0, PL1, PL2, Xi, Yi, Zi, NA);
	///////////////////////////////////////////////////
	// FORCE
	EPP = 0;
	// Forces that effect atoms indexed with i in all three axes
	FX2 = 0;
	FY2 = 0;
	FZ2 = 0;
	for(int j=0; j<NA; j++)
	{
		if(i == j)
			continue;
		// Apply periodic boundaries and find distances between atom I and j. RIJ2 is square of RIJ
		XIJ = X[i] - X[j];
		YIJ = Y[i] - Y[j];
		ZIJ = Z[i] - Z[j];
		double DD, ID;
		if(IPBC != 0){
			if(PP0 > 0){
				DD = XIJ / PP0;
				ID = int(DD);
				XIJ = XIJ - PP0*(ID+int(2.0*(DD-ID)));
			}
			if(PP1 > 0){
				DD = YIJ / PP1;
				ID = int(DD);
				YIJ = YIJ - PP1*(ID+int(2.0*(DD-ID)));
			}
			if(PP2 > 0){
				DD = ZIJ / PP2;
				ID = int(DD);
				ZIJ = ZIJ - PP2*(ID+int(2.0*(DD-ID)));
			}
		}
		RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ;
		RIJ = sqrt(RIJ2);
		// Calculate potential energy U(r)
		ARG1 = AL1*RIJ2;
		ARG2 = AL2*RIJ2;
		EXP1 = exp(-ARG1);
		EXP2 = exp(-ARG2);
		UIJ1 = A1*EXP1/(pow(RIJ,RL1));
		UIJ2 = A2*EXP2/(pow(RIJ,RL2));
		UIJ = D21*UIJ1 + D22*UIJ2;
		EPP = EPP+UIJ;
		// Calculate forces
		FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ);
		FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ);
		FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2;
		XRIJ = XIJ/RIJ;
		YRIJ = YIJ/RIJ;
		ZRIJ = ZIJ/RIJ;
		FX2 += FAC12*XRIJ;
		FY2 += FAC12*YRIJ;
		FZ2 += FAC12*ZRIJ;
	}
	// Force is the negated accumulated gradient of U.
	FFX[i] = -FX2;
	FFY[i] = -FY2;
	FFZ[i] = -FZ2;
	EE[i] = EPP;
} |
41c9d0d8f7e8a0d35ddf2636e0d947241aa4c632.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS 256
// Brute-force Hamming matcher (unrolled-reduction variant): one block scans
// the (hard-coded) ntrain training features against nquery query features,
// tracking a per-thread best distance/index in shared memory and emitting one
// candidate per (query, block) pair.
// NOTE(review): this looks like a stripped-down benchmark/test stub -- no
// Hamming distance is ever computed, and several inconsistencies are flagged
// inline; compare against the full matcher before trusting any output.
__global__ void hamming_matcher_unroll(
    unsigned* out_idx,
    unsigned* out_dist,
    const unsigned max_dist)
{
    unsigned f = blockDim.x * blockIdx.x + threadIdx.x;  // global feature id
    unsigned tid = threadIdx.x;
    unsigned feat_len = 6, nquery = 6;  // hard-coded problem dimensions
    __shared__ unsigned s_dist[THREADS];  // per-thread best distance so far
    __shared__ unsigned s_idx[THREADS];   // per-thread best match index
    s_dist[tid] = max_dist;
    s_idx[tid] = 0xffffffff;  // sentinel: no match yet
    unsigned ntrain = 64;     // hard-coded training-set size
    // NOTE(review): computed but never used.
    bool valid_feat = (f < ntrain);
    for (unsigned j = 0; j < nquery; j++) {
        s_dist[tid] = max_dist;
        // Load one query feature that will be tested against all training
        // features in current block
        // NOTE(review): writes the *global* out_idx buffer rather than a
        // shared query staging buffer, and no distance computation follows.
        if (tid < feat_len && f < ntrain) {
            out_idx[tid] = tid * nquery + j;
        }
        __syncthreads();
        // Find best match in training features from block to the current
        // query feature
        // NOTE(review): the tree offsets (tid<32/+128, tid<16/+64, tid<8/+32)
        // do not follow the standard halving pattern and leave most of
        // s_dist/s_idx unreduced into element 0.
        if (tid < 32) {
            if (s_dist[tid + 128] < s_dist[tid]) {
                s_dist[tid] = s_dist[tid + 128];
                s_idx[tid] = s_idx[tid + 128];
            }
        }
        __syncthreads();
        if (tid < 16) {
            if (s_dist[tid + 64] < s_dist[tid]) {
                s_dist[tid] = s_dist[tid + 64];
                s_idx[tid] = s_idx[tid + 64];
            }
        }
        __syncthreads();
        if (tid < 8) {
            if (s_dist[tid + 32] < s_dist[tid]) {
                s_dist[tid] = s_dist[tid + 32];
                s_idx[tid] = s_idx[tid + 32];
            }
        }
        __syncthreads();
        // Store best match in training features from block to the current
        // query feature
        // NOTE(review): every in-range thread writes the same two slots (a
        // data race, benign only because all write identical values), and
        // s_dist[0] goes to out_idx while s_idx[0] goes to out_dist -- the
        // destination buffers appear swapped.
        if (f < ntrain) {
            out_idx[j * gridDim.x + blockIdx.x] = s_dist[0];
            out_dist[j * gridDim.x + blockIdx.x] = s_idx[0];
        }
    }
}
| 41c9d0d8f7e8a0d35ddf2636e0d947241aa4c632.cu | #define THREADS 256
// Brute-force Hamming matcher (unrolled-reduction variant): one block scans
// the (hard-coded) ntrain training features against nquery query features,
// tracking a per-thread best distance/index in shared memory and emitting one
// candidate per (query, block) pair.
// NOTE(review): this looks like a stripped-down benchmark/test stub -- no
// Hamming distance is ever computed, and several inconsistencies are flagged
// inline; compare against the full matcher before trusting any output.
__global__ void hamming_matcher_unroll(
    unsigned* out_idx,
    unsigned* out_dist,
    const unsigned max_dist)
{
    unsigned f = blockDim.x * blockIdx.x + threadIdx.x;  // global feature id
    unsigned tid = threadIdx.x;
    unsigned feat_len = 6, nquery = 6;  // hard-coded problem dimensions
    __shared__ unsigned s_dist[THREADS];  // per-thread best distance so far
    __shared__ unsigned s_idx[THREADS];   // per-thread best match index
    s_dist[tid] = max_dist;
    s_idx[tid] = 0xffffffff;  // sentinel: no match yet
    unsigned ntrain = 64;     // hard-coded training-set size
    // NOTE(review): computed but never used.
    bool valid_feat = (f < ntrain);
    for (unsigned j = 0; j < nquery; j++) {
        s_dist[tid] = max_dist;
        // Load one query feature that will be tested against all training
        // features in current block
        // NOTE(review): writes the *global* out_idx buffer rather than a
        // shared query staging buffer, and no distance computation follows.
        if (tid < feat_len && f < ntrain) {
            out_idx[tid] = tid * nquery + j;
        }
        __syncthreads();
        // Find best match in training features from block to the current
        // query feature
        // NOTE(review): the tree offsets (tid<32/+128, tid<16/+64, tid<8/+32)
        // do not follow the standard halving pattern and leave most of
        // s_dist/s_idx unreduced into element 0.
        if (tid < 32) {
            if (s_dist[tid + 128] < s_dist[tid]) {
                s_dist[tid] = s_dist[tid + 128];
                s_idx[tid] = s_idx[tid + 128];
            }
        }
        __syncthreads();
        if (tid < 16) {
            if (s_dist[tid + 64] < s_dist[tid]) {
                s_dist[tid] = s_dist[tid + 64];
                s_idx[tid] = s_idx[tid + 64];
            }
        }
        __syncthreads();
        if (tid < 8) {
            if (s_dist[tid + 32] < s_dist[tid]) {
                s_dist[tid] = s_dist[tid + 32];
                s_idx[tid] = s_idx[tid + 32];
            }
        }
        __syncthreads();
        // Store best match in training features from block to the current
        // query feature
        // NOTE(review): every in-range thread writes the same two slots (a
        // data race, benign only because all write identical values), and
        // s_dist[0] goes to out_idx while s_idx[0] goes to out_dist -- the
        // destination buffers appear swapped.
        if (f < ntrain) {
            out_idx[j * gridDim.x + blockIdx.x] = s_dist[0];
            out_dist[j * gridDim.x + blockIdx.x] = s_idx[0];
        }
    }
}
|
df1ac28c295e0391680ad69fcc7d4b27a48b97bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
#define MAX_THREADS 512
// Define a typedef to dispatch to nearest_neighbor_compute_source_index or
// nearest_neighbor_exact_compute_source_index
typedef int (*nn_compute_source_index_fn_t)(const float, int, int);
// Define a typedef to dispatch to nearest_neighbor_bw_compute_source_index or
// nearest_neighbor_exact_bw_compute_source_index
typedef int (*nn_bw_compute_source_index_fn_t)(const float, int, int);
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
// Forward 1-D nearest-neighbor upsample.  Each thread owns one (c, dst_x)
// output element of a single batch slice, maps it to its nearest source
// column via nn_compute_source_index_fn, and then walks the batch dimension
// by adding the per-batch strides (the source column index is the same for
// every batch).  Grid covers dim_c * dst_dim_w threads; excess threads exit.
template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest1d_out_frame(
    const scalar_t* input,
    size_t dim_b,
    size_t dim_c,
    size_t src_dim_w,
    size_t dst_dim_w,
    scalar_t* output,
    float scale_factor) {
  // Flat index over (channel, output column) of batch 0.
  int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (dst_idx >= dim_c * dst_dim_w)
    return;
  int c = (dst_idx / dst_dim_w) % dim_c;
  int dst_x = dst_idx % dst_dim_w;
  int src_x = nn_compute_source_index_fn(scale_factor, dst_x, src_dim_w);
  int src_idx = c * src_dim_w + src_x;
  int src_stride = dim_c * src_dim_w;  // elements per batch in the source
  int dst_stride = dim_c * dst_dim_w;  // elements per batch in the output
  for (int b = 0; b < dim_b; b++) {
    output[dst_idx] = input[src_idx];
    src_idx += src_stride;
    dst_idx += dst_stride;
  }
}
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
// Backward operation
// Backward 1-D nearest-neighbor upsample.  Each thread owns one (c, dst_x)
// element of grad_input and sums the grad_output elements in the half-open
// source range [src_x, src_x_up) that mapped onto it, batch by batch.
// Per-batch accumulation happens in accscalar_t before the store.
template <typename scalar_t, typename accscalar_t, nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest1d_backward_out_frame(
    const scalar_t* grad_o,
    size_t dim_b,
    size_t dim_c,
    size_t src_dim_w,
    size_t dst_dim_w,
    scalar_t* grad_i,
    float scale_factor) {
  // Flat index over (channel, grad_input column) of batch 0.
  int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (dst_idx >= dim_c * dst_dim_w)
    return;
  int c = (dst_idx / (dst_dim_w)) % dim_c;
  int dst_x = dst_idx % dst_dim_w;
  // note that we do not want to clamp src_x to src_dim_w, since we might
  // intentionally want to skip in case of scale_factor < 1.0
  int src_x = nn_bw_compute_source_index_fn(scale_factor, dst_x, src_dim_w);
  int src_x_up = nn_bw_compute_source_index_fn(scale_factor, dst_x+1, src_dim_w);
  for (int b = 0; b < dim_b; b++) {
    accscalar_t grad = 0;
    int src_idx = b * dim_c * src_dim_w + c * src_dim_w + src_x;
    for (int x = src_x; x < src_x_up; x++) {
      grad += grad_o[src_idx++];
    }
    grad_i[dst_idx] = grad;
    dst_idx += dim_c * dst_dim_w;  // same column, next batch
  }
}
// Host driver for the forward kernel: checks that input and output live on
// the same GPU, shapes a 1-D launch over channels*output_width of one batch
// slice (the kernel strides across batches itself), and dispatches on dtype.
// Early-outs on empty input.
template<nn_compute_source_index_fn_t nn_compute_source_index_fn>
static void upsample_nearest1d_out_cuda_template(
    const Tensor& output,
    const Tensor& input_,
    IntArrayRef output_size,
    c10::optional<double> scales) {
  TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
  checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg});
  int output_width = output_size[0];
  int nbatch = input_.size(0);
  int channels = input_.size(1);
  int input_width = input_.size(2);
  Tensor input = input_.contiguous();
  if (input.numel() == 0) {
    return;
  }
  // upsample_nearest1d meta call makes sure `nbatch != 0`
  unsigned int n = output.numel() / nbatch;
  dim3 bdim{std::min<unsigned int>(
      at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
  dim3 gdim{ceil_div(n, bdim.x)};
  // safe check for int32 indexing; implicitly restrict launch config for kernel
  TORCH_CHECK(output.numel() <= std::numeric_limits<int32_t>::max());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest1d_out_frame", [&] {
    // NOTE(review): accscalar_t is declared but unused in the forward path.
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto idata = input.data_ptr<scalar_t>();
    auto odata = output.data_ptr<scalar_t>();
    const float scale_factor = compute_scales_value<float>(scales, input_width, output_width);
    hipLaunchKernelGGL(( upsample_nearest1d_out_frame<scalar_t, nn_compute_source_index_fn>), dim3(gdim), dim3(bdim), 0, stream,
        idata, nbatch, channels, input_width, output_width, odata, scale_factor);
    C10_HIP_KERNEL_LAUNCH_CHECK();
  });
}
// Host driver for the backward kernel: launch is shaped over grad_input
// (channels*input_width per batch slice).  Note the naming inversion: idata
// points at grad_input (the destination) and odata at grad_output (the
// source), mirroring the kernel's (grad_o, ..., grad_i) parameter order.
template<nn_compute_source_index_fn_t nn_bw_compute_source_index_fn>
static void upsample_nearest1d_backward_out_cuda_template(
    const Tensor& grad_input,
    const Tensor& grad_output_,
    IntArrayRef output_size,
    IntArrayRef input_size,
    c10::optional<double> scales) {
  TensorArg grad_input_arg{grad_input, "grad_input", 1},
      grad_output_arg{grad_output_, "grad_output_", 2};
  checkAllSameGPU(
      "upsample_nearest1d_backward_out_cuda_template",
      {grad_output_arg, grad_input_arg});
  int output_width = output_size[0];
  int nbatch = input_size[0];
  int channels = input_size[1];
  int input_width = input_size[2];
  Tensor grad_output = grad_output_.contiguous();
  if (grad_input.numel() == 0) {
    return;
  }
  // upsample_nearest1d meta call makes sure `nbatch != 0`
  unsigned int n = grad_input.numel() / nbatch;
  dim3 bdim{std::min<unsigned int>(
      at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
  dim3 gdim{ceil_div(n, bdim.x)};
  // safe check for int32 indexing; implicitly restrict launch config for kernel
  TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest1d_backward_out_frame", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto idata = grad_input.data_ptr<scalar_t>();
    auto odata = grad_output.data_ptr<scalar_t>();
    const float scale_factor = compute_scales_value_backwards<float>(scales, output_width, input_width);
    hipLaunchKernelGGL(( upsample_nearest1d_backward_out_frame<scalar_t, accscalar_t, nn_bw_compute_source_index_fn>)
        , dim3(gdim), dim3(bdim), 0, stream,
        odata, nbatch, channels, output_width, input_width, idata, scale_factor);
    C10_HIP_KERNEL_LAUNCH_CHECK();
  });
}
} // namespace
// Structured-op entry points: thin wrappers that select the source-index
// helper (floor-based "nearest" vs. rounding "_exact") and forward to the
// templates above.
TORCH_IMPL_FUNC(upsample_nearest1d_out_cuda) (
    const Tensor& input,
    IntArrayRef output_size,
    c10::optional<double> scales,
    const Tensor& output
) {
  upsample_nearest1d_out_cuda_template<nearest_neighbor_compute_source_index>(
      output, input, output_size, scales);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact1d_out_cuda) (
    const Tensor& input,
    IntArrayRef output_size,
    c10::optional<double> scales,
    const Tensor& output
) {
  upsample_nearest1d_out_cuda_template<nearest_neighbor_exact_compute_source_index>(output, input, output_size, scales);
}
TORCH_IMPL_FUNC(upsample_nearest1d_backward_out_cuda) (
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    c10::optional<double> scales,
    const Tensor& grad_input
) {
  upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_bw_compute_source_index>(
      grad_input, grad_output, output_size, input_size, scales);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact1d_backward_out_cuda) (
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    c10::optional<double> scales,
    const Tensor& grad_input
) {
  upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_exact_bw_compute_source_index>(
      grad_input, grad_output, output_size, input_size, scales);
}
} // namespace native
} // namespace at
| df1ac28c295e0391680ad69fcc7d4b27a48b97bc.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
#define MAX_THREADS 512
// Define a typedef to dispatch to nearest_neighbor_compute_source_index or
// nearest_neighbor_exact_compute_source_index
typedef int (*nn_compute_source_index_fn_t)(const float, int, int);
// Define a typedef to dispatch to nearest_neighbor_bw_compute_source_index or
// nearest_neighbor_exact_bw_compute_source_index
typedef int (*nn_bw_compute_source_index_fn_t)(const float, int, int);
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
// Forward 1-D nearest-neighbor upsample.  Each thread owns one (c, dst_x)
// output element of a single batch slice, maps it to its nearest source
// column via nn_compute_source_index_fn, and then walks the batch dimension
// by adding the per-batch strides (the source column index is the same for
// every batch).  Grid covers dim_c * dst_dim_w threads; excess threads exit.
template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest1d_out_frame(
    const scalar_t* input,
    size_t dim_b,
    size_t dim_c,
    size_t src_dim_w,
    size_t dst_dim_w,
    scalar_t* output,
    float scale_factor) {
  // Flat index over (channel, output column) of batch 0.
  int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (dst_idx >= dim_c * dst_dim_w)
    return;
  int c = (dst_idx / dst_dim_w) % dim_c;
  int dst_x = dst_idx % dst_dim_w;
  int src_x = nn_compute_source_index_fn(scale_factor, dst_x, src_dim_w);
  int src_idx = c * src_dim_w + src_x;
  int src_stride = dim_c * src_dim_w;  // elements per batch in the source
  int dst_stride = dim_c * dst_dim_w;  // elements per batch in the output
  for (int b = 0; b < dim_b; b++) {
    output[dst_idx] = input[src_idx];
    src_idx += src_stride;
    dst_idx += dst_stride;
  }
}
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
// Backward operation
// Backward 1-D nearest-neighbor upsample.  Each thread owns one (c, dst_x)
// element of grad_input and sums the grad_output elements in the half-open
// source range [src_x, src_x_up) that mapped onto it, batch by batch.
// Per-batch accumulation happens in accscalar_t before the store.
template <typename scalar_t, typename accscalar_t, nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest1d_backward_out_frame(
    const scalar_t* grad_o,
    size_t dim_b,
    size_t dim_c,
    size_t src_dim_w,
    size_t dst_dim_w,
    scalar_t* grad_i,
    float scale_factor) {
  // Flat index over (channel, grad_input column) of batch 0.
  int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (dst_idx >= dim_c * dst_dim_w)
    return;
  int c = (dst_idx / (dst_dim_w)) % dim_c;
  int dst_x = dst_idx % dst_dim_w;
  // note that we do not want to clamp src_x to src_dim_w, since we might
  // intentionally want to skip in case of scale_factor < 1.0
  int src_x = nn_bw_compute_source_index_fn(scale_factor, dst_x, src_dim_w);
  int src_x_up = nn_bw_compute_source_index_fn(scale_factor, dst_x+1, src_dim_w);
  for (int b = 0; b < dim_b; b++) {
    accscalar_t grad = 0;
    int src_idx = b * dim_c * src_dim_w + c * src_dim_w + src_x;
    for (int x = src_x; x < src_x_up; x++) {
      grad += grad_o[src_idx++];
    }
    grad_i[dst_idx] = grad;
    dst_idx += dim_c * dst_dim_w;  // same column, next batch
  }
}
// Host driver for the forward kernel: checks that input and output live on
// the same GPU, shapes a 1-D launch over channels*output_width of one batch
// slice (the kernel strides across batches itself), and dispatches on dtype.
// Early-outs on empty input.
template<nn_compute_source_index_fn_t nn_compute_source_index_fn>
static void upsample_nearest1d_out_cuda_template(
    const Tensor& output,
    const Tensor& input_,
    IntArrayRef output_size,
    c10::optional<double> scales) {
  TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
  checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg});
  int output_width = output_size[0];
  int nbatch = input_.size(0);
  int channels = input_.size(1);
  int input_width = input_.size(2);
  Tensor input = input_.contiguous();
  if (input.numel() == 0) {
    return;
  }
  // upsample_nearest1d meta call makes sure `nbatch != 0`
  unsigned int n = output.numel() / nbatch;
  dim3 bdim{std::min<unsigned int>(
      at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
  dim3 gdim{ceil_div(n, bdim.x)};
  // safe check for int32 indexing; implicitly restrict launch config for kernel
  TORCH_CHECK(output.numel() <= std::numeric_limits<int32_t>::max());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest1d_out_frame", [&] {
    // NOTE(review): accscalar_t is declared but unused in the forward path.
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto idata = input.data_ptr<scalar_t>();
    auto odata = output.data_ptr<scalar_t>();
    const float scale_factor = compute_scales_value<float>(scales, input_width, output_width);
    upsample_nearest1d_out_frame<scalar_t, nn_compute_source_index_fn><<<gdim, bdim, 0, stream>>>(
        idata, nbatch, channels, input_width, output_width, odata, scale_factor);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  });
}
// Host driver for the backward kernel: launch is shaped over grad_input
// (channels*input_width per batch slice).  Note the naming inversion: idata
// points at grad_input (the destination) and odata at grad_output (the
// source), mirroring the kernel's (grad_o, ..., grad_i) parameter order.
template<nn_compute_source_index_fn_t nn_bw_compute_source_index_fn>
static void upsample_nearest1d_backward_out_cuda_template(
    const Tensor& grad_input,
    const Tensor& grad_output_,
    IntArrayRef output_size,
    IntArrayRef input_size,
    c10::optional<double> scales) {
  TensorArg grad_input_arg{grad_input, "grad_input", 1},
      grad_output_arg{grad_output_, "grad_output_", 2};
  checkAllSameGPU(
      "upsample_nearest1d_backward_out_cuda_template",
      {grad_output_arg, grad_input_arg});
  int output_width = output_size[0];
  int nbatch = input_size[0];
  int channels = input_size[1];
  int input_width = input_size[2];
  Tensor grad_output = grad_output_.contiguous();
  if (grad_input.numel() == 0) {
    return;
  }
  // upsample_nearest1d meta call makes sure `nbatch != 0`
  unsigned int n = grad_input.numel() / nbatch;
  dim3 bdim{std::min<unsigned int>(
      at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
  dim3 gdim{ceil_div(n, bdim.x)};
  // safe check for int32 indexing; implicitly restrict launch config for kernel
  TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest1d_backward_out_frame", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto idata = grad_input.data_ptr<scalar_t>();
    auto odata = grad_output.data_ptr<scalar_t>();
    const float scale_factor = compute_scales_value_backwards<float>(scales, output_width, input_width);
    upsample_nearest1d_backward_out_frame<scalar_t, accscalar_t, nn_bw_compute_source_index_fn>
        <<<gdim, bdim, 0, stream>>>(
        odata, nbatch, channels, output_width, input_width, idata, scale_factor);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  });
}
} // namespace
// Structured-op entry points: thin wrappers that select the source-index
// helper (floor-based "nearest" vs. rounding "_exact") and forward to the
// templates above.
TORCH_IMPL_FUNC(upsample_nearest1d_out_cuda) (
    const Tensor& input,
    IntArrayRef output_size,
    c10::optional<double> scales,
    const Tensor& output
) {
  upsample_nearest1d_out_cuda_template<nearest_neighbor_compute_source_index>(
      output, input, output_size, scales);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact1d_out_cuda) (
    const Tensor& input,
    IntArrayRef output_size,
    c10::optional<double> scales,
    const Tensor& output
) {
  upsample_nearest1d_out_cuda_template<nearest_neighbor_exact_compute_source_index>(output, input, output_size, scales);
}
TORCH_IMPL_FUNC(upsample_nearest1d_backward_out_cuda) (
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    c10::optional<double> scales,
    const Tensor& grad_input
) {
  upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_bw_compute_source_index>(
      grad_input, grad_output, output_size, input_size, scales);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact1d_backward_out_cuda) (
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    c10::optional<double> scales,
    const Tensor& grad_input
) {
  upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_exact_bw_compute_source_index>(
      grad_input, grad_output, output_size, input_size, scales);
}
} // namespace native
} // namespace at
|
d77054b1714dabfc87809ddc0b44edb47084ffc7.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for TensorReduce family of device-wide operators
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This reduces the C dimension, transforming an NHWC tensor into NHWC with C=1.
/// Exhaustively checks device-wide reduction of the C dimension of an NHWC
/// tensor (NHWC -> NHW x 1) against a sequential host reference, over a sweep
/// of N/H/W/C extents.  `reduction_identity` seeds the accumulator (e.g. 0
/// for sum, lowest float for max).  Returns false (after printing
/// diagnostics) on the first mismatch; true if every problem size passes.
template <typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute>
bool TestAllReduction_NHWC_reduce_c(ElementCompute reduction_identity = ElementCompute()) {
  using Layout = typename TensorReduction::Layout;
  using ElementOutput = typename TensorReduction::ElementOutput;
  using ElementSource = typename TensorReduction::ElementSource;
  int const kV = TensorReduction::kVectorLength;
  // Extent sweep.  C candidates are multiplied by the vector length kV so
  // every problem stays vector-aligned; 2049/2047 exercise awkward tails.
  int const N_indices[] = {3, 13};
  int const H_indices[] = {5, 17};
  int const W_indices[] = {7, 19};
  int const C_indices[] = {2049, 2048, 2047, 384, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1};
  for (int N : N_indices) {
    for (int H : H_indices) {
      for (int W : W_indices) {
        for (int Cx : C_indices) {
          int C = Cx * kV;
          cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C});
          cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, W, 1});
          // Deterministic pseudo-random source data (fixed seed 17).
          cutlass::reference::host::TensorFillRandomUniform(
            src_tensor.host_view(), 17, 10, -10, 0);
          dst_tensor.sync_device();
          src_tensor.sync_device();
          // Execute a tensor reduction over rank 3 (the 'C' dimension is reduced; NHWC => NHW)
          TensorReduction reduction(src_tensor.extent(), 3);
          cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size());
          cutlass::Status status = reduction.reduce(
            dst_tensor.device_ref(),
            src_tensor.device_ref(),
            device_workspace.get(),
            reduction_identity
          );
          EXPECT_EQ(status, cutlass::Status::kSuccess);
          EXPECT_EQ(hipDeviceSynchronize(), hipSuccess);
          dst_tensor.sync_host();
          typename TensorReduction::ReductionOp reduction_op;
          //
          // Reference check: fold each C-column sequentially with the same
          // functor and identity, then compare element-wise.
          //
          for (int n = 0; n < src_tensor.extent().n(); ++n) {
            for (int h = 0; h < src_tensor.extent().h(); ++h) {
              for (int w = 0; w < src_tensor.extent().w(); ++w) {
                ElementCompute c_accum = reduction_identity;
                for (int c = 0; c < src_tensor.extent().c(); ++c) {
                  c_accum = reduction_op(c_accum, ElementCompute(src_tensor.at({n, h, w, c})));
                }
                ElementCompute got = ElementCompute(dst_tensor.at({n, h, w, 0}));
                bool equal = (c_accum == got);
                EXPECT_TRUE(equal);
                if (!equal) {
                  std::cerr
                    << "Error at location (" << n << ", " << h << ", " << w << ", 0)" << std::endl;
                  std::cerr
                    << " expected: " << c_accum << std::endl
                    << " got: " << got << std::endl;
                  std::cerr
                    << "Problem: " << src_tensor.extent() << " -> "
                    << dst_tensor.extent() << std::endl;
                  std::cerr
                    << " Grid: " << reduction.reduction_strided.grid_shape
                    << "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl
                    << " FInal: " << reduction.reduction_strided.grid_final
                    << "\n Block: " << reduction.reduction_strided.threadblock_final << "\n";
                  return false;
                }
              } //w
            } // h
          } // n
          //
          // Next problem
          //
        } // C
      } // W
    } // H
  } // N
  return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// NHWC -> NHW (reduce C) functional coverage.  Each case instantiates the
// device-wide TensorReduction with a different element-type / functor /
// vector-length combination and runs the full extent sweep in
// TestAllReduction_NHWC_reduce_c.  Non-sum functors pass an explicit
// reduction identity.
//
/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1) {
  // f32 sum over f32 source, scalar (1-wide) loads.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, float, cutlass::layout::TensorNHWC, cutlass::plus<float>, 1, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}

TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1_f16x1) {
  // f32 sum over f16 source, scalar loads.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, cutlass::half_t, cutlass::layout::TensorNHWC, cutlass::plus<float>, 1, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}

TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2) {
  // f32 sum over f32 source, 2-wide vector loads.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, float, cutlass::layout::TensorNHWC, cutlass::plus<float>, 2, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}

TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2_f16x2) {
  // f32 sum over f16 source, 2-wide vector loads.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, cutlass::half_t, cutlass::layout::TensorNHWC, cutlass::plus<float>, 2, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}

TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4) {
  // f32 sum over f32 source, 4-wide vector loads.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, float, cutlass::layout::TensorNHWC, cutlass::plus<float>, 4, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}

TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4_f16x4) {
  // f32 sum over f16 source, 4-wide vector loads.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, cutlass::half_t, cutlass::layout::TensorNHWC, cutlass::plus<float>, 4, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}

TEST(Reduction_TensorReduce, nhwc_maximum_c_f32x4) {
  // Running maximum; identity is the lowest finite float.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, float, cutlass::layout::TensorNHWC, cutlass::maximum<float>, 4, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( -std::numeric_limits<float>::max() ));
}

TEST(Reduction_TensorReduce, nhwc_minimum_c_f32x4) {
  // Running minimum; identity is the largest finite float.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, float, cutlass::layout::TensorNHWC, cutlass::minimum<float>, 4, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( std::numeric_limits<float>::max() ));
}

TEST(Reduction_TensorReduce, nhwc_ANY_c_s32) {
  // Logical OR over int ("any"); identity 0.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      int, int, cutlass::layout::TensorNHWC, cutlass::logical_or<int>, 1, int>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( 0 ));
}

TEST(Reduction_TensorReduce, nhwc_ALL_c_s32) {
  // Logical AND over int ("all"); identity 1.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      int, int, cutlass::layout::TensorNHWC, cutlass::logical_and<int>, 1, int>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( 1 ));
}

TEST(Reduction_TensorReduce, nhwc_ANY_c_f32) {
  // Logical OR over float; identity 0.
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, float, cutlass::layout::TensorNHWC, cutlass::logical_or<float>, 1, float>;
  EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( 0.0f ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| d77054b1714dabfc87809ddc0b44edb47084ffc7.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for TensorReduce family of device-wide operators
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This reduces the C dimension, transforming an NHWC tensor into NHWC with C=1.
template <typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute>
bool TestAllReduction_NHWC_reduce_c(ElementCompute reduction_identity = ElementCompute()) {
using Layout = typename TensorReduction::Layout;
using ElementOutput = typename TensorReduction::ElementOutput;
using ElementSource = typename TensorReduction::ElementSource;
int const kV = TensorReduction::kVectorLength;
int const N_indices[] = {3, 13};
int const H_indices[] = {5, 17};
int const W_indices[] = {7, 19};
int const C_indices[] = {2049, 2048, 2047, 384, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1};
for (int N : N_indices) {
for (int H : H_indices) {
for (int W : W_indices) {
for (int Cx : C_indices) {
int C = Cx * kV;
cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C});
cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, W, 1});
cutlass::reference::host::TensorFillRandomUniform(
src_tensor.host_view(), 17, 10, -10, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
// Execute a tensor reduction over rank 3 (the 'C' dimension is reduced; NHWC => NHW)
TensorReduction reduction(src_tensor.extent(), 3);
cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size());
cutlass::Status status = reduction.reduce(
dst_tensor.device_ref(),
src_tensor.device_ref(),
device_workspace.get(),
reduction_identity
);
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
dst_tensor.sync_host();
typename TensorReduction::ReductionOp reduction_op;
//
// Reference check
//
for (int n = 0; n < src_tensor.extent().n(); ++n) {
for (int h = 0; h < src_tensor.extent().h(); ++h) {
for (int w = 0; w < src_tensor.extent().w(); ++w) {
ElementCompute c_accum = reduction_identity;
for (int c = 0; c < src_tensor.extent().c(); ++c) {
c_accum = reduction_op(c_accum, ElementCompute(src_tensor.at({n, h, w, c})));
}
ElementCompute got = ElementCompute(dst_tensor.at({n, h, w, 0}));
bool equal = (c_accum == got);
EXPECT_TRUE(equal);
if (!equal) {
std::cerr
<< "Error at location (" << n << ", " << h << ", " << w << ", 0)" << std::endl;
std::cerr
<< " expected: " << c_accum << std::endl
<< " got: " << got << std::endl;
std::cerr
<< "Problem: " << src_tensor.extent() << " -> "
<< dst_tensor.extent() << std::endl;
std::cerr
<< " Grid: " << reduction.reduction_strided.grid_shape
<< "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl
<< " FInal: " << reduction.reduction_strided.grid_final
<< "\n Block: " << reduction.reduction_strided.threadblock_final << "\n";
return false;
}
} //w
} // h
} // n
//
// Next problem
//
} // C
} // W
} // H
} // N
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1_f16x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2_f16x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4_f16x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_maximum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::maximum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( -std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_minimum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::minimum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
41ef70a37ebf5d502d7a1a7349868f556af1e1f9.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
#include <hip/device_functions.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "preprocess_hip.cuh"
#include "error_util.h"
using namespace std;
__global__ void resize(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h)
{
int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y;
int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x;
if ((int)xpixelPerBlock[blockIdx.x] == (int)xpixelPerBlock[blockIdx.x + 1] || (int)ypixelPerBlock[blockIdx.y] == (int)ypixelPerBlock[blockIdx.y + 1])
{}
else {
//idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]);
//idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]);
idxCol = min(idxCol, (int)((blockIdx.y + 1)*interpolation_ygap));
idxRow = min(idxRow, (int)((blockIdx.x + 1)*interpolation_xgap));
float X2 = interpolation_xgap * (blockIdx.x + 1);
float X1 = interpolation_xgap * blockIdx.x;
float Y2 = interpolation_ygap * (blockIdx.y + 1);
float Y1 = interpolation_ygap * blockIdx.y;
float px = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x + 1];
float qx = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x + 1];
dstData[idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1;
//dstData[idxCol * resized_w + idxRow] = (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx);
dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1;
dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1;
}
}
__global__ void copyChannel(float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap,int resized_w, int resized_h)
{
int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y;
int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x;
idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]);
idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]);
float X2 = interpolation_xgap * (blockIdx.x + 1);
float X1 = interpolation_xgap * blockIdx.x;
float Y2 = interpolation_ygap * (blockIdx.y + 1);
float Y1 = interpolation_ygap * blockIdx.y;
dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow];
dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow];
}
void resizeCuda(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h, dim3 threadsPerBlock, dim3 numOfBlocks)
{
resize << < numOfBlocks, threadsPerBlock >> > ( srcData, dstData, xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, original_image_w, original_image_h, resized_w, resized_h);
//hipDeviceSynchronize();
/*copyChannel <<<numOfBlocks, threadsPerBlock>>>(dstData, xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, resized_w, resized_h);
hipDeviceSynchronize();*/
return;
} | 41ef70a37ebf5d502d7a1a7349868f556af1e1f9.cu | #ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
#include <device_functions.h>
#include <math.h>
#include <cuda.h>
#include "preprocess.cuh"
#include "error_util.h"
using namespace std;
__global__ void resize(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h)
{
int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y;
int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x;
if ((int)xpixelPerBlock[blockIdx.x] == (int)xpixelPerBlock[blockIdx.x + 1] || (int)ypixelPerBlock[blockIdx.y] == (int)ypixelPerBlock[blockIdx.y + 1])
{}
else {
//idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]);
//idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]);
idxCol = min(idxCol, (int)((blockIdx.y + 1)*interpolation_ygap));
idxRow = min(idxRow, (int)((blockIdx.x + 1)*interpolation_xgap));
float X2 = interpolation_xgap * (blockIdx.x + 1);
float X1 = interpolation_xgap * blockIdx.x;
float Y2 = interpolation_ygap * (blockIdx.y + 1);
float Y1 = interpolation_ygap * blockIdx.y;
float px = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x + 1];
float qx = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x + 1];
dstData[idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1;
//dstData[idxCol * resized_w + idxRow] = (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx);
dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1;
dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1;
}
}
__global__ void copyChannel(float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap,int resized_w, int resized_h)
{
int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y;
int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x;
idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]);
idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]);
float X2 = interpolation_xgap * (blockIdx.x + 1);
float X1 = interpolation_xgap * blockIdx.x;
float Y2 = interpolation_ygap * (blockIdx.y + 1);
float Y1 = interpolation_ygap * blockIdx.y;
dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow];
dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow];
}
void resizeCuda(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h, dim3 threadsPerBlock, dim3 numOfBlocks)
{
resize << < numOfBlocks, threadsPerBlock >> > ( srcData, dstData, xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, original_image_w, original_image_h, resized_w, resized_h);
//cudaDeviceSynchronize();
/*copyChannel <<<numOfBlocks, threadsPerBlock>>>(dstData, xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, resized_w, resized_h);
cudaDeviceSynchronize();*/
return;
} |
convolve.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define TILE_SIZE 4
#define INPUT_SIZE 12
#define MASK_WIDTH 5
__constant__ float M[MASK_WIDTH];
__global__ void convolution_shared_memory(float *N, float *P){
int i = blockIdx.x*blockDim.x+threadIdx.x;
__shared__ float N_s[TILE_SIZE];
N_s[threadIdx.x]=N[i];
__syncthreads();
int this_title_start_point = blockIdx.x*blockDim.x;
int next_tile_start_point = (blockIdx.x+1)*blockDim.x;
int n_start_point = i-(MASK_WIDTH/2);
float Pvalue = 0;
for(int j =0; j < MASK_WIDTH; j++){
int N_index = n_start_point+j;
if(N_index >= 0 && N_index < INPUT_SIZE){
if((N_index >= this_title_start_point) && (N_index < next_tile_start_point)){
Pvalue+=N_s[threadIdx.x+j-(MASK_WIDTH/2)]*M[j];
}
else{
Pvalue+=N[N_index]*M[j];
}
}
}
P[i]=Pvalue;
}
__global__ void convolution_constant_memory(float *N, float *P, int Width){
int i = blockIdx.x*blockDim.x+threadIdx.x;
float Pvalue = 0;
int n_start_point = i-(MASK_WIDTH/2);
for(int j =0; j<MASK_WIDTH;j++){
if(n_start_point+j >=0 && n_start_point+j < Width){
Pvalue+= N[n_start_point+j]*M[j];
}
}
P[i]=Pvalue;
}
__global__ void convolution_global_memory(float *N, float *M, float *P, int Width){
int i = blockIdx.x*blockDim.x+threadIdx.x;
float Pvalue = 0;
int n_start_point = i-(MASK_WIDTH/2);
for(int j =0; j<MASK_WIDTH;j++){
if(n_start_point+j >=0 && n_start_point+j < Width){
Pvalue+= N[n_start_point+j]*M[j];
}
}
P[i]=Pvalue;
}
int main(){
//device input and output
float *d_N = 0;
float *d_P = 0;
hipMalloc(&d_N,INPUT_SIZE*sizeof(float));
hipMalloc(&d_P,INPUT_SIZE*sizeof(float));
//host input and output
float *h_N = (float*)malloc(INPUT_SIZE*sizeof(float));
float *h_P = (float*)malloc(INPUT_SIZE*sizeof(float));
float *h_M = (float*)malloc(MASK_WIDTH*sizeof(float));
//initialize input on host
for(int i=0;i<INPUT_SIZE;++i){
h_N[i]=(float)i;
}
//transfer input to device
hipMemcpy(d_N,h_N,INPUT_SIZE*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_P,h_P,INPUT_SIZE*sizeof(float),hipMemcpyHostToDevice);
//initialize mask on host
for(int j=0;j<MASK_WIDTH;++j){
h_M[j]=(float)j;
}
//transfer mask to constant memory
hipMemcpyToSymbol(M,h_M,MASK_WIDTH*sizeof(float));
//call convolution kernel
hipLaunchKernelGGL(( convolution_shared_memory), dim3((INPUT_SIZE+TILE_SIZE-1)/TILE_SIZE),dim3(TILE_SIZE) , 0, 0, d_N,d_P);
//retrieve result from device
hipMemcpy(h_P,d_P,INPUT_SIZE*sizeof(float),hipMemcpyDeviceToHost);
for(int i=0; i<INPUT_SIZE;++i){
printf("%f\n", h_P[i]);
}
hipFree(d_N);
hipFree(d_P);
hipFree(M);
free(h_N);
free(h_P);
free(h_M);
}
| convolve.cu | #include <cuda_runtime.h>
#include <stdio.h>
#define TILE_SIZE 4
#define INPUT_SIZE 12
#define MASK_WIDTH 5
__constant__ float M[MASK_WIDTH];
__global__ void convolution_shared_memory(float *N, float *P){
int i = blockIdx.x*blockDim.x+threadIdx.x;
__shared__ float N_s[TILE_SIZE];
N_s[threadIdx.x]=N[i];
__syncthreads();
int this_title_start_point = blockIdx.x*blockDim.x;
int next_tile_start_point = (blockIdx.x+1)*blockDim.x;
int n_start_point = i-(MASK_WIDTH/2);
float Pvalue = 0;
for(int j =0; j < MASK_WIDTH; j++){
int N_index = n_start_point+j;
if(N_index >= 0 && N_index < INPUT_SIZE){
if((N_index >= this_title_start_point) && (N_index < next_tile_start_point)){
Pvalue+=N_s[threadIdx.x+j-(MASK_WIDTH/2)]*M[j];
}
else{
Pvalue+=N[N_index]*M[j];
}
}
}
P[i]=Pvalue;
}
__global__ void convolution_constant_memory(float *N, float *P, int Width){
int i = blockIdx.x*blockDim.x+threadIdx.x;
float Pvalue = 0;
int n_start_point = i-(MASK_WIDTH/2);
for(int j =0; j<MASK_WIDTH;j++){
if(n_start_point+j >=0 && n_start_point+j < Width){
Pvalue+= N[n_start_point+j]*M[j];
}
}
P[i]=Pvalue;
}
__global__ void convolution_global_memory(float *N, float *M, float *P, int Width){
int i = blockIdx.x*blockDim.x+threadIdx.x;
float Pvalue = 0;
int n_start_point = i-(MASK_WIDTH/2);
for(int j =0; j<MASK_WIDTH;j++){
if(n_start_point+j >=0 && n_start_point+j < Width){
Pvalue+= N[n_start_point+j]*M[j];
}
}
P[i]=Pvalue;
}
int main(){
//device input and output
float *d_N = 0;
float *d_P = 0;
cudaMalloc(&d_N,INPUT_SIZE*sizeof(float));
cudaMalloc(&d_P,INPUT_SIZE*sizeof(float));
//host input and output
float *h_N = (float*)malloc(INPUT_SIZE*sizeof(float));
float *h_P = (float*)malloc(INPUT_SIZE*sizeof(float));
float *h_M = (float*)malloc(MASK_WIDTH*sizeof(float));
//initialize input on host
for(int i=0;i<INPUT_SIZE;++i){
h_N[i]=(float)i;
}
//transfer input to device
cudaMemcpy(d_N,h_N,INPUT_SIZE*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_P,h_P,INPUT_SIZE*sizeof(float),cudaMemcpyHostToDevice);
//initialize mask on host
for(int j=0;j<MASK_WIDTH;++j){
h_M[j]=(float)j;
}
//transfer mask to constant memory
cudaMemcpyToSymbol(M,h_M,MASK_WIDTH*sizeof(float));
//call convolution kernel
convolution_shared_memory<<<(INPUT_SIZE+TILE_SIZE-1)/TILE_SIZE,TILE_SIZE >>>(d_N,d_P);
//retrieve result from device
cudaMemcpy(h_P,d_P,INPUT_SIZE*sizeof(float),cudaMemcpyDeviceToHost);
for(int i=0; i<INPUT_SIZE;++i){
printf("%f\n", h_P[i]);
}
cudaFree(d_N);
cudaFree(d_P);
cudaFree(M);
free(h_N);
free(h_P);
free(h_M);
}
|
9c2dfa28c4e31fcb799f15a69533d11852a56d4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "EntityScene.h"
__global__ void moveKernel(float *data, float i, hipTextureObject_t texObj)
{
unsigned int id = threadIdx.x;
float id2 = (id*3.6)+i;
id2 = (id2 >= 360) ? id2-360 : id2;
data[(id * 4) + 0] = 100 * (float)sinf(id2*0.0174533);
data[(id * 4) + 2] = 100 * (float)cosf(id2*0.0174533);
}
void EntityScene::hipInit()
{
float *tempData = (float*)malloc(sizeof(float) * 4 * 100);
for (int i = 0; i < 100; i++)
{
tempData[(i * 4) + 0] = 100 * sin(i*3.6);
tempData[(i * 4) + 1] = -50.0f;
tempData[(i * 4) + 2] = 100 * cos(i*3.6);
}
hipMemcpy(this->cuTexBuf->d_mappedPointer, tempData, sizeof(float) * 4 * 100, hipMemcpyHostToDevice);
hipError_t status = hipGetLastError();
if (status != hipSuccess || (status = hipGetLastError()) != hipSuccess)
{
if (status == hipErrorUnknown)
{
printf("An Unknown CUDA Error Occurred :(\n");
printf("Perhaps performing the same operation under the CUDA debugger with Memory Checker enabled could help!\n");
printf("If this error only occurs outside of NSight debugging sessions, or causes the system to lock up. It may be caused by not passing the required amount of shared memory to a kernal launch that uses runtime sized shared memory.\n");
printf("Also possible you have forgotten to allocate texture memory you are trying to read\n");
printf("Passing a buffer to 'hipGraphicsSubResourceGetMappedArray' or a texture to 'hipGraphicsResourceGetMappedPointer'.\n");
getchar();
exit(1);
}
printf(" CUDA Error Occurred;\n%s\n", hipGetErrorString(status));
getchar();
exit(1);
}
free(tempData);
}
void EntityScene::cuUpdate()
{
static float i = 0;
i-=0.05f;
i = (i < 0) ? 359 : i;
moveKernel << <1, 100 >> >(this->cuTexBuf->d_mappedPointer, i, this->cuTexBuf->cuTextureObj);
hipDeviceSynchronize();
hipError_t status = hipGetLastError();
if (status != hipSuccess || (status = hipGetLastError()) != hipSuccess)
{
if (status == hipErrorUnknown)
{
printf("An Unknown CUDA Error Occurred :(\n");
printf("Perhaps performing the same operation under the CUDA debugger with Memory Checker enabled could help!\n");
printf("If this error only occurs outside of NSight debugging sessions, or causes the system to lock up. It may be caused by not passing the required amount of shared memory to a kernal launch that uses runtime sized shared memory.\n");
printf("Also possible you have forgotten to allocate texture memory you are trying to read\n");
printf("Passing a buffer to 'hipGraphicsSubResourceGetMappedArray' or a texture to 'hipGraphicsResourceGetMappedPointer'.\n");
getchar();
exit(1);
}
printf(" CUDA Error Occurred;\n%s\n", hipGetErrorString(status));
getchar();
exit(1);
}
}
| 9c2dfa28c4e31fcb799f15a69533d11852a56d4e.cu | #include "EntityScene.h"
__global__ void moveKernel(float *data, float i, cudaTextureObject_t texObj)
{
unsigned int id = threadIdx.x;
float id2 = (id*3.6)+i;
id2 = (id2 >= 360) ? id2-360 : id2;
data[(id * 4) + 0] = 100 * (float)sinf(id2*0.0174533);
data[(id * 4) + 2] = 100 * (float)cosf(id2*0.0174533);
}
void EntityScene::cuInit()
{
float *tempData = (float*)malloc(sizeof(float) * 4 * 100);
for (int i = 0; i < 100; i++)
{
tempData[(i * 4) + 0] = 100 * sin(i*3.6);
tempData[(i * 4) + 1] = -50.0f;
tempData[(i * 4) + 2] = 100 * cos(i*3.6);
}
cudaMemcpy(this->cuTexBuf->d_mappedPointer, tempData, sizeof(float) * 4 * 100, cudaMemcpyHostToDevice);
cudaError_t status = cudaGetLastError();
if (status != CUDA_SUCCESS || (status = cudaGetLastError()) != CUDA_SUCCESS)
{
if (status == cudaErrorUnknown)
{
printf("An Unknown CUDA Error Occurred :(\n");
printf("Perhaps performing the same operation under the CUDA debugger with Memory Checker enabled could help!\n");
printf("If this error only occurs outside of NSight debugging sessions, or causes the system to lock up. It may be caused by not passing the required amount of shared memory to a kernal launch that uses runtime sized shared memory.\n");
printf("Also possible you have forgotten to allocate texture memory you are trying to read\n");
printf("Passing a buffer to 'cudaGraphicsSubResourceGetMappedArray' or a texture to 'cudaGraphicsResourceGetMappedPointer'.\n");
getchar();
exit(1);
}
printf(" CUDA Error Occurred;\n%s\n", cudaGetErrorString(status));
getchar();
exit(1);
}
free(tempData);
}
// Per-frame update: advances the global rotation angle and relaunches
// moveKernel over the 100-entity position buffer on the default stream.
void EntityScene::cuUpdate()
{
// Persistent rotation angle in degrees; decremented 0.05 per call and
// wrapped back to 359 when it goes negative.
static float i = 0;
i-=0.05f;
i = (i < 0) ? 359 : i;
// 1 block x 100 threads must match the 100 records moveKernel expects.
moveKernel << <1, 100 >> >(this->cuTexBuf->d_mappedPointer, i, this->cuTexBuf->cuTextureObj);
// Block until the kernel finishes so async execution faults surface in the
// error check below.
cudaDeviceSynchronize();
cudaError_t status = cudaGetLastError();
// NOTE(review): driver-API CUDA_SUCCESS compared to runtime status (both 0,
// so it works); second cudaGetLastError() re-checks/clears the sticky error.
if (status != CUDA_SUCCESS || (status = cudaGetLastError()) != CUDA_SUCCESS)
{
if (status == cudaErrorUnknown)
{
printf("An Unknown CUDA Error Occurred :(\n");
printf("Perhaps performing the same operation under the CUDA debugger with Memory Checker enabled could help!\n");
printf("If this error only occurs outside of NSight debugging sessions, or causes the system to lock up. It may be caused by not passing the required amount of shared memory to a kernal launch that uses runtime sized shared memory.\n");
printf("Also possible you have forgotten to allocate texture memory you are trying to read\n");
printf("Passing a buffer to 'cudaGraphicsSubResourceGetMappedArray' or a texture to 'cudaGraphicsResourceGetMappedPointer'.\n");
getchar();
exit(1);
}
printf(" CUDA Error Occurred;\n%s\n", cudaGetErrorString(status));
getchar();
exit(1);
}
}
|
6266941c1058e4dcef7d8233dfc0e2f97aca288f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include <stdio.h>
const char* version_name = "cuda oblivious version";
/*
 * Single-process setup: the "local" grid covers the whole global grid
 * (no domain decomposition, hence zero offsets) and a one-cell halo is
 * used on every axis.  stencil_type is unused here.
 */
void create_dist_grid(dist_grid_info_t *grid_info, int stencil_type) {
/* multi-threads in one machine */
grid_info->local_size_x = grid_info->global_size_x;
grid_info->local_size_y = grid_info->global_size_y;
grid_info->local_size_z = grid_info->global_size_z;
grid_info->offset_x = 0;
grid_info->offset_y = 0;
grid_info->offset_z = 0;
grid_info->halo_size_x = 1;
grid_info->halo_size_y = 1;
grid_info->halo_size_z = 1;
}
/* Nothing to release: create_dist_grid allocates no resources. */
void destroy_dist_grid(dist_grid_info_t *grid_info) {
}
/*
 * One Jacobi step of the 7-point stencil over a sub-box of the padded grid.
 * One thread per point: (nx, ny, nz) is the extent of the box to update,
 * (halo_x, halo_y, halo_z) its offset inside the padded array, and
 * (ldx, ldy) the padded leading dimensions consumed by INDEX().
 * Threads past the box extent simply return (the launch grid may overshoot).
 */
__global__ void stencil_7_naive_kernel_1step(cptr_t in, ptr_t out, \
int nx, int ny, int nz, \
int halo_x, int halo_y, int halo_z, int ldx, int ldy) {
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ty = threadIdx.y + blockDim.y * blockIdx.y;
int tz = threadIdx.z + blockDim.z * blockIdx.z;
if(tx < nx && ty < ny && tz < nz) {
int x = tx + halo_x;
int y = ty + halo_y;
int z = tz + halo_z;
out[INDEX(x, y, z, ldx, ldy)] \
= ALPHA_ZZZ * in[INDEX(x, y, z, ldx, ldy)] \
+ ALPHA_NZZ * in[INDEX(x-1, y, z, ldx, ldy)] \
+ ALPHA_PZZ * in[INDEX(x+1, y, z, ldx, ldy)] \
+ ALPHA_ZNZ * in[INDEX(x, y-1, z, ldx, ldy)] \
+ ALPHA_ZPZ * in[INDEX(x, y+1, z, ldx, ldy)] \
+ ALPHA_ZZN * in[INDEX(x, y, z-1, ldx, ldy)] \
+ ALPHA_ZZP * in[INDEX(x, y, z+1, ldx, ldy)];
}
}
/*
 * One Jacobi step of the 27-point stencil (full 3x3x3 neighborhood).
 * Same launch/parameter contract as stencil_7_naive_kernel_1step:
 * one thread per point of the (nx, ny, nz) box, offset by the halo,
 * with (ldx, ldy) the padded leading dimensions used by INDEX().
 */
__global__ void stencil_27_naive_kernel_1step(cptr_t in, ptr_t out, \
int nx, int ny, int nz, \
int halo_x, int halo_y, int halo_z, int ldx, int ldy) {
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ty = threadIdx.y + blockDim.y * blockIdx.y;
int tz = threadIdx.z + blockDim.z * blockIdx.z;
if(tx < nx && ty < ny && tz < nz) {
int x = tx + halo_x;
int y = ty + halo_y;
int z = tz + halo_z;
out[INDEX(x, y, z, ldx, ldy)] \
= ALPHA_ZZZ * in[INDEX(x, y, z, ldx, ldy)] \
+ ALPHA_NZZ * in[INDEX(x-1, y, z, ldx, ldy)] \
+ ALPHA_PZZ * in[INDEX(x+1, y, z, ldx, ldy)] \
+ ALPHA_ZNZ * in[INDEX(x, y-1, z, ldx, ldy)] \
+ ALPHA_ZPZ * in[INDEX(x, y+1, z, ldx, ldy)] \
+ ALPHA_ZZN * in[INDEX(x, y, z-1, ldx, ldy)] \
+ ALPHA_ZZP * in[INDEX(x, y, z+1, ldx, ldy)] \
+ ALPHA_NNZ * in[INDEX(x-1, y-1, z, ldx, ldy)] \
+ ALPHA_PNZ * in[INDEX(x+1, y-1, z, ldx, ldy)] \
+ ALPHA_NPZ * in[INDEX(x-1, y+1, z, ldx, ldy)] \
+ ALPHA_PPZ * in[INDEX(x+1, y+1, z, ldx, ldy)] \
+ ALPHA_NZN * in[INDEX(x-1, y, z-1, ldx, ldy)] \
+ ALPHA_PZN * in[INDEX(x+1, y, z-1, ldx, ldy)] \
+ ALPHA_NZP * in[INDEX(x-1, y, z+1, ldx, ldy)] \
+ ALPHA_PZP * in[INDEX(x+1, y, z+1, ldx, ldy)] \
+ ALPHA_ZNN * in[INDEX(x, y-1, z-1, ldx, ldy)] \
+ ALPHA_ZPN * in[INDEX(x, y+1, z-1, ldx, ldy)] \
+ ALPHA_ZNP * in[INDEX(x, y-1, z+1, ldx, ldy)] \
+ ALPHA_ZPP * in[INDEX(x, y+1, z+1, ldx, ldy)] \
+ ALPHA_NNN * in[INDEX(x-1, y-1, z-1, ldx, ldy)] \
+ ALPHA_PNN * in[INDEX(x+1, y-1, z-1, ldx, ldy)] \
+ ALPHA_NPN * in[INDEX(x-1, y+1, z-1, ldx, ldy)] \
+ ALPHA_PPN * in[INDEX(x+1, y+1, z-1, ldx, ldy)] \
+ ALPHA_NNP * in[INDEX(x-1, y-1, z+1, ldx, ldy)] \
+ ALPHA_PNP * in[INDEX(x+1, y-1, z+1, ldx, ldy)] \
+ ALPHA_NPP * in[INDEX(x-1, y+1, z+1, ldx, ldy)] \
+ ALPHA_PPP * in[INDEX(x+1, y+1, z+1, ldx, ldy)];
}
}
/* Integer ceiling division for positive operands: smallest q such that
 * q * den >= num.  Kept in integer arithmetic via the (num - 1) bias so
 * exact multiples are not rounded up an extra step. */
inline int ceiling(int num, int den) {
    const int biased = num - 1;
    return biased / den + 1;
}
#define BLOCK_SIZE 8
/*
 * Recursive space-time walk over the stencil region, in the style of a
 * cache-oblivious trapezoidal decomposition: the box [x0,x1)x[y0,y1)x[z0,z1)
 * is evolved from time t0 to t1, with each boundary moving dx0/dx1/... cells
 * per time step.  type selects the 7- or 27-point kernel; nx/ny are the
 * padded leading dimensions forwarded to INDEX().
 * Base case (single step, or region under 2^21 = 2097152 points): launch one
 * kernel per time step, ping-ponging between buffer[0] and buffer[1].
 * Recursive case: cut along z if tall enough, else along y (slope +/-1 space
 * cuts around the midpoint), else halve the time interval.
 */
void walk3(ptr_t buffer[], int nx, int ny, int nz, int t0, int t1,
int type, int x0, int dx0, int x1, int dx1,
int y0, int dy0, int y1, int dy1,
int z0, int dz0, int z1, int dz1){
int dt = t1 - t0;
if(dt == 1 || (x1-x0)*(y1-y0)*(z1-z0) < 2097152){
for(int t = t0; t < t1; t++){
// Box extents/offsets at step t, shifted by the boundary slopes.
dim3 grid_size (ceiling((x1 - x0) + (t - t0) * (dx1 - dx0), BLOCK_SIZE),
ceiling((y1 - y0) + (t - t0) * (dy1 - dy0), BLOCK_SIZE),
ceiling((z1 - z0) + (t - t0) * (dz1 - dz0), BLOCK_SIZE));
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
if(type == 7){
hipLaunchKernelGGL(( stencil_7_naive_kernel_1step), dim3(grid_size), dim3(block_size), 0, 0, \
buffer[t % 2], buffer[(t + 1) % 2], (x1 - x0) + (t - t0) * (dx1 - dx0),\
(y1 - y0) + (t - t0) * (dy1 - dy0), (z1 - z0) + (t - t0) * (dz1 - dz0), \
x0 + dx0 * (t - t0), y0 + dy0 * (t - t0),\
z0 + dz0 * (t - t0), nx, ny);
}
else{
hipLaunchKernelGGL(( stencil_27_naive_kernel_1step), dim3(grid_size), dim3(block_size), 0, 0, \
buffer[t % 2], buffer[(t + 1) % 2], (x1 - x0) + (t - t0) * (dx1 - dx0),\
(y1 - y0) + (t - t0) * (dy1 - dy0), (z1 - z0) + (t - t0) * (dz1 - dz0), \
x0 + dx0 * (t - t0), y0 + dy0 * (t - t0),\
z0 + dz0 * (t - t0), nx, ny);
}
// Synchronous wait after each step before launching the next one.
hipDeviceSynchronize();
}
}
else if (dt > 1) {
if (2 * (z1 - z0) + (dz1 - dz0) * dt >= 4 * dt) {
// z cut: lower trapezoid first (its data is ready), then upper.
int zm = (2 * (z0 + z1) + (2 + dz0 + dz1) * dt) / 4;
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
y0, dy0, y1, dy1,
z0, dz0, zm, -1);
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
y0, dy0, y1, dy1,
zm, -1, z1, dz1);
}
else if (2 * (y1 - y0) + (dy1 - dy0) * dt >= 4 * dt) {
// y cut, same structure as the z cut.
int ym = (2 * (y0 + y1) + (2 + dy0 + dy1) * dt) / 4;
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
y0, dy0, ym, -1,
z0, dz0, z1, dz1);
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
ym, -1, y1, dy1,
z0, dz0, z1, dz1);
}
else {
// Time cut: first half, then second half with shifted boundaries.
int s = dt / 2;
walk3(buffer, nx, ny, nz, t0, t0 + s, type,
x0, dx0, x1, dx1,
y0, dy0, y1, dy1,
z0, dz0, z1, dz1);
walk3(buffer, nx, ny, nz, t0 + s, t1, type,
x0 + dx0 * s, dx0, x1 + dx1 * s, dx1,
y0 + dy0 * s, dy0, y1 + dy1 * s, dy1,
z0 + dz0 * s, dz0, z1 + dz1 * s, dz1);
}
}
}
/*
 * Advance the 7-point stencil nt steps over the interior of the padded grid.
 * grid holds the initial state (buffer[0]); aux is scratch (buffer[1]).
 * Returns the buffer holding the final state: buffer[nt % 2].
 */
ptr_t stencil_7(ptr_t grid, ptr_t aux, const dist_grid_info_t *grid_info, int nt) {
ptr_t buffer[2] = {grid, aux};
// Interior bounds exclude the halo; ld* are the padded leading dimensions.
int x_start = grid_info->halo_size_x, x_end = grid_info->local_size_x + grid_info->halo_size_x;
int y_start = grid_info->halo_size_y, y_end = grid_info->local_size_y + grid_info->halo_size_y;
int z_start = grid_info->halo_size_z, z_end = grid_info->local_size_z + grid_info->halo_size_z;
int ldx = grid_info->local_size_x + 2 * grid_info->halo_size_x;
int ldy = grid_info->local_size_y + 2 * grid_info->halo_size_y;
int ldz = grid_info->local_size_z + 2 * grid_info->halo_size_z;
// All boundary slopes are 0: the updated box is the same at every step.
walk3(buffer, ldx, ldy, ldz, 0, nt, 7,
x_start, 0, x_end, 0,
y_start, 0, y_end, 0,
z_start, 0, z_end, 0);
return buffer[nt % 2];
}
ptr_t stencil_27(ptr_t grid, ptr_t aux, const dist_grid_info_t *grid_info, int nt) {
ptr_t buffer[2] = {grid, aux};
int x_start = grid_info->halo_size_x, x_end = grid_info->local_size_x + grid_info->halo_size_x;
int y_start = grid_info->halo_size_y, y_end = grid_info->local_size_y + grid_info->halo_size_y;
int z_start = grid_info->halo_size_z, z_end = grid_info->local_size_z + grid_info->halo_size_z;
int ldx = grid_info->local_size_x + 2 * grid_info->halo_size_x;
int ldy = grid_info->local_size_y + 2 * grid_info->halo_size_y;
int ldz = grid_info->local_size_z + 2 * grid_info->halo_size_z;
walk3(buffer, ldx, ldy, ldz, 0, nt, 27,
x_start, 0, x_end, 0,
y_start, 0, y_end, 0,
z_start, 0, z_end, 0);
return buffer[nt % 2];
} | 6266941c1058e4dcef7d8233dfc0e2f97aca288f.cu | #include "common.h"
#include <stdio.h>
const char* version_name = "cuda oblivious version";
/*
 * Single-process setup: the "local" grid covers the whole global grid
 * (no domain decomposition, hence zero offsets) and a one-cell halo is
 * used on every axis.  stencil_type is unused here.
 */
void create_dist_grid(dist_grid_info_t *grid_info, int stencil_type) {
/* multi-threads in one machine */
grid_info->local_size_x = grid_info->global_size_x;
grid_info->local_size_y = grid_info->global_size_y;
grid_info->local_size_z = grid_info->global_size_z;
grid_info->offset_x = 0;
grid_info->offset_y = 0;
grid_info->offset_z = 0;
grid_info->halo_size_x = 1;
grid_info->halo_size_y = 1;
grid_info->halo_size_z = 1;
}
/* Nothing to release: create_dist_grid allocates no resources. */
void destroy_dist_grid(dist_grid_info_t *grid_info) {
}
/*
 * One Jacobi step of the 7-point stencil over a sub-box of the padded grid.
 * One thread per point: (nx, ny, nz) is the extent of the box to update,
 * (halo_x, halo_y, halo_z) its offset inside the padded array, and
 * (ldx, ldy) the padded leading dimensions consumed by INDEX().
 * Threads past the box extent simply return (the launch grid may overshoot).
 */
__global__ void stencil_7_naive_kernel_1step(cptr_t in, ptr_t out, \
int nx, int ny, int nz, \
int halo_x, int halo_y, int halo_z, int ldx, int ldy) {
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ty = threadIdx.y + blockDim.y * blockIdx.y;
int tz = threadIdx.z + blockDim.z * blockIdx.z;
if(tx < nx && ty < ny && tz < nz) {
int x = tx + halo_x;
int y = ty + halo_y;
int z = tz + halo_z;
out[INDEX(x, y, z, ldx, ldy)] \
= ALPHA_ZZZ * in[INDEX(x, y, z, ldx, ldy)] \
+ ALPHA_NZZ * in[INDEX(x-1, y, z, ldx, ldy)] \
+ ALPHA_PZZ * in[INDEX(x+1, y, z, ldx, ldy)] \
+ ALPHA_ZNZ * in[INDEX(x, y-1, z, ldx, ldy)] \
+ ALPHA_ZPZ * in[INDEX(x, y+1, z, ldx, ldy)] \
+ ALPHA_ZZN * in[INDEX(x, y, z-1, ldx, ldy)] \
+ ALPHA_ZZP * in[INDEX(x, y, z+1, ldx, ldy)];
}
}
/*
 * One Jacobi step of the 27-point stencil (full 3x3x3 neighborhood).
 * Same launch/parameter contract as stencil_7_naive_kernel_1step:
 * one thread per point of the (nx, ny, nz) box, offset by the halo,
 * with (ldx, ldy) the padded leading dimensions used by INDEX().
 */
__global__ void stencil_27_naive_kernel_1step(cptr_t in, ptr_t out, \
int nx, int ny, int nz, \
int halo_x, int halo_y, int halo_z, int ldx, int ldy) {
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ty = threadIdx.y + blockDim.y * blockIdx.y;
int tz = threadIdx.z + blockDim.z * blockIdx.z;
if(tx < nx && ty < ny && tz < nz) {
int x = tx + halo_x;
int y = ty + halo_y;
int z = tz + halo_z;
out[INDEX(x, y, z, ldx, ldy)] \
= ALPHA_ZZZ * in[INDEX(x, y, z, ldx, ldy)] \
+ ALPHA_NZZ * in[INDEX(x-1, y, z, ldx, ldy)] \
+ ALPHA_PZZ * in[INDEX(x+1, y, z, ldx, ldy)] \
+ ALPHA_ZNZ * in[INDEX(x, y-1, z, ldx, ldy)] \
+ ALPHA_ZPZ * in[INDEX(x, y+1, z, ldx, ldy)] \
+ ALPHA_ZZN * in[INDEX(x, y, z-1, ldx, ldy)] \
+ ALPHA_ZZP * in[INDEX(x, y, z+1, ldx, ldy)] \
+ ALPHA_NNZ * in[INDEX(x-1, y-1, z, ldx, ldy)] \
+ ALPHA_PNZ * in[INDEX(x+1, y-1, z, ldx, ldy)] \
+ ALPHA_NPZ * in[INDEX(x-1, y+1, z, ldx, ldy)] \
+ ALPHA_PPZ * in[INDEX(x+1, y+1, z, ldx, ldy)] \
+ ALPHA_NZN * in[INDEX(x-1, y, z-1, ldx, ldy)] \
+ ALPHA_PZN * in[INDEX(x+1, y, z-1, ldx, ldy)] \
+ ALPHA_NZP * in[INDEX(x-1, y, z+1, ldx, ldy)] \
+ ALPHA_PZP * in[INDEX(x+1, y, z+1, ldx, ldy)] \
+ ALPHA_ZNN * in[INDEX(x, y-1, z-1, ldx, ldy)] \
+ ALPHA_ZPN * in[INDEX(x, y+1, z-1, ldx, ldy)] \
+ ALPHA_ZNP * in[INDEX(x, y-1, z+1, ldx, ldy)] \
+ ALPHA_ZPP * in[INDEX(x, y+1, z+1, ldx, ldy)] \
+ ALPHA_NNN * in[INDEX(x-1, y-1, z-1, ldx, ldy)] \
+ ALPHA_PNN * in[INDEX(x+1, y-1, z-1, ldx, ldy)] \
+ ALPHA_NPN * in[INDEX(x-1, y+1, z-1, ldx, ldy)] \
+ ALPHA_PPN * in[INDEX(x+1, y+1, z-1, ldx, ldy)] \
+ ALPHA_NNP * in[INDEX(x-1, y-1, z+1, ldx, ldy)] \
+ ALPHA_PNP * in[INDEX(x+1, y-1, z+1, ldx, ldy)] \
+ ALPHA_NPP * in[INDEX(x-1, y+1, z+1, ldx, ldy)] \
+ ALPHA_PPP * in[INDEX(x+1, y+1, z+1, ldx, ldy)];
}
}
/* Ceiling of num/den for positive integers: the (num - 1) bias keeps the
 * computation in integer arithmetic without bumping exact multiples. */
inline int ceiling(int num, int den) {
    int shifted = num - 1;
    int result = shifted / den + 1;
    return result;
}
#define BLOCK_SIZE 8
/*
 * Recursive space-time walk over the stencil region, in the style of a
 * cache-oblivious trapezoidal decomposition: the box [x0,x1)x[y0,y1)x[z0,z1)
 * is evolved from time t0 to t1, with each boundary moving dx0/dx1/... cells
 * per time step.  type selects the 7- or 27-point kernel; nx/ny are the
 * padded leading dimensions forwarded to INDEX().
 * Base case (single step, or region under 2^21 = 2097152 points): launch one
 * kernel per time step, ping-ponging between buffer[0] and buffer[1].
 * Recursive case: cut along z if tall enough, else along y (slope +/-1 space
 * cuts around the midpoint), else halve the time interval.
 */
void walk3(ptr_t buffer[], int nx, int ny, int nz, int t0, int t1,
int type, int x0, int dx0, int x1, int dx1,
int y0, int dy0, int y1, int dy1,
int z0, int dz0, int z1, int dz1){
int dt = t1 - t0;
if(dt == 1 || (x1-x0)*(y1-y0)*(z1-z0) < 2097152){
for(int t = t0; t < t1; t++){
// Box extents/offsets at step t, shifted by the boundary slopes.
dim3 grid_size (ceiling((x1 - x0) + (t - t0) * (dx1 - dx0), BLOCK_SIZE),
ceiling((y1 - y0) + (t - t0) * (dy1 - dy0), BLOCK_SIZE),
ceiling((z1 - z0) + (t - t0) * (dz1 - dz0), BLOCK_SIZE));
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
if(type == 7){
stencil_7_naive_kernel_1step<<<grid_size, block_size>>>(\
buffer[t % 2], buffer[(t + 1) % 2], (x1 - x0) + (t - t0) * (dx1 - dx0),\
(y1 - y0) + (t - t0) * (dy1 - dy0), (z1 - z0) + (t - t0) * (dz1 - dz0), \
x0 + dx0 * (t - t0), y0 + dy0 * (t - t0),\
z0 + dz0 * (t - t0), nx, ny);
}
else{
stencil_27_naive_kernel_1step<<<grid_size, block_size>>>(\
buffer[t % 2], buffer[(t + 1) % 2], (x1 - x0) + (t - t0) * (dx1 - dx0),\
(y1 - y0) + (t - t0) * (dy1 - dy0), (z1 - z0) + (t - t0) * (dz1 - dz0), \
x0 + dx0 * (t - t0), y0 + dy0 * (t - t0),\
z0 + dz0 * (t - t0), nx, ny);
}
// Synchronous wait after each step before launching the next one.
cudaDeviceSynchronize();
}
}
else if (dt > 1) {
if (2 * (z1 - z0) + (dz1 - dz0) * dt >= 4 * dt) {
// z cut: lower trapezoid first (its data is ready), then upper.
int zm = (2 * (z0 + z1) + (2 + dz0 + dz1) * dt) / 4;
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
y0, dy0, y1, dy1,
z0, dz0, zm, -1);
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
y0, dy0, y1, dy1,
zm, -1, z1, dz1);
}
else if (2 * (y1 - y0) + (dy1 - dy0) * dt >= 4 * dt) {
// y cut, same structure as the z cut.
int ym = (2 * (y0 + y1) + (2 + dy0 + dy1) * dt) / 4;
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
y0, dy0, ym, -1,
z0, dz0, z1, dz1);
walk3(buffer, nx, ny, nz, t0, t1, type,
x0, dx0, x1, dx1,
ym, -1, y1, dy1,
z0, dz0, z1, dz1);
}
else {
// Time cut: first half, then second half with shifted boundaries.
int s = dt / 2;
walk3(buffer, nx, ny, nz, t0, t0 + s, type,
x0, dx0, x1, dx1,
y0, dy0, y1, dy1,
z0, dz0, z1, dz1);
walk3(buffer, nx, ny, nz, t0 + s, t1, type,
x0 + dx0 * s, dx0, x1 + dx1 * s, dx1,
y0 + dy0 * s, dy0, y1 + dy1 * s, dy1,
z0 + dz0 * s, dz0, z1 + dz1 * s, dz1);
}
}
}
/*
 * Advance the 7-point stencil nt steps over the interior of the padded grid.
 * grid holds the initial state (buffer[0]); aux is scratch (buffer[1]).
 * Returns the buffer holding the final state: buffer[nt % 2].
 */
ptr_t stencil_7(ptr_t grid, ptr_t aux, const dist_grid_info_t *grid_info, int nt) {
ptr_t buffer[2] = {grid, aux};
// Interior bounds exclude the halo; ld* are the padded leading dimensions.
int x_start = grid_info->halo_size_x, x_end = grid_info->local_size_x + grid_info->halo_size_x;
int y_start = grid_info->halo_size_y, y_end = grid_info->local_size_y + grid_info->halo_size_y;
int z_start = grid_info->halo_size_z, z_end = grid_info->local_size_z + grid_info->halo_size_z;
int ldx = grid_info->local_size_x + 2 * grid_info->halo_size_x;
int ldy = grid_info->local_size_y + 2 * grid_info->halo_size_y;
int ldz = grid_info->local_size_z + 2 * grid_info->halo_size_z;
// All boundary slopes are 0: the updated box is the same at every step.
walk3(buffer, ldx, ldy, ldz, 0, nt, 7,
x_start, 0, x_end, 0,
y_start, 0, y_end, 0,
z_start, 0, z_end, 0);
return buffer[nt % 2];
}
ptr_t stencil_27(ptr_t grid, ptr_t aux, const dist_grid_info_t *grid_info, int nt) {
ptr_t buffer[2] = {grid, aux};
int x_start = grid_info->halo_size_x, x_end = grid_info->local_size_x + grid_info->halo_size_x;
int y_start = grid_info->halo_size_y, y_end = grid_info->local_size_y + grid_info->halo_size_y;
int z_start = grid_info->halo_size_z, z_end = grid_info->local_size_z + grid_info->halo_size_z;
int ldx = grid_info->local_size_x + 2 * grid_info->halo_size_x;
int ldy = grid_info->local_size_y + 2 * grid_info->halo_size_y;
int ldz = grid_info->local_size_z + 2 * grid_info->halo_size_z;
walk3(buffer, ldx, ldy, ldz, 0, nt, 27,
x_start, 0, x_end, 0,
y_start, 0, y_end, 0,
z_start, 0, z_end, 0);
return buffer[nt % 2];
} |
30430773b41ab64f87d4ac89e48c36b6d54a90c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "concat_attention_op.h"
#include <stdio.h>
namespace caffe2 {
namespace {
// Scatters one (N, C, pixels) input blob into its slot of the interleaved
// output: flat input position index maps to output channel
// c * num_classes + iter of the same sample, same pixel.  nthreads is the
// input element count.  Note: `channels` is unused in this kernel.
template <typename T>
__global__ void ConcatAttentionForward(const int nthreads, const T* bottom_data,
const int channels, const int num_classes, const int pixels,
const int iter, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int pidx = index % pixels;
// idx flattens (sample, channel) of the input: idx = n * C + c.
const int idx = index / pixels;
const int target_index = (idx * num_classes + iter) * pixels + pidx;
top_data[target_index] = bottom_data[index];
}
}
// Inverse of ConcatAttentionForward: gathers the gradient slice belonging to
// input `iter` out of the interleaved gradient blob.  nthreads is the size of
// the per-input output gradient.  Note: `channels` is unused in this kernel.
template <typename T>
__global__ void ConcatAttentionBackward(const int nthreads, const T* input_grad,
const int channels, const int num_classes, const int pixels,
const int iter, T* output_grad) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int pidx = index % pixels;
// idx flattens (sample, channel) of the per-input gradient: idx = n * C + c.
const int idx = index / pixels;
const int target_index = (idx * num_classes + iter) * pixels + pidx;
output_grad[index] = input_grad[target_index];
} // CUDA_1D_KERNEL_LOOP
} // ConcatAttentionBackward
} // namespace
// Concatenates all inputs along the channel axis with per-channel
// interleaving: output is (N, C * num_inputs, H, W) where output channel
// c * num_inputs + iter comes from input `iter`, channel c.
// NOTE(review): all shape/size values are taken from Input(0) only - assumes
// every input has the same (N, C, H, W); confirm a shape check exists upstream.
template<>
bool ConcatAttentionOp<float, CUDAContext>::RunOnDevice() {
// first calculate the final channel size
const int num_inputs = InputSize();
const int N = Input(0).dim32(0);
const int C = Input(0).dim32(1);
const int H = Input(0).dim32(2);
const int W = Input(0).dim32(3);
const int iter_size = Input(0).size();
const int CC = C * num_inputs;
const int pixels = H * W;
auto* Y = Output(0);
Y->Resize(N, CC, H, W);
// One scatter launch per input; each writes a disjoint set of Y elements.
for (int iter=0; iter<num_inputs; iter++) {
auto& X = Input(iter);
hipLaunchKernelGGL(( ConcatAttentionForward<float>)
, dim3(CAFFE_GET_BLOCKS(iter_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
iter_size,
X.data<float>(),
C, num_inputs, pixels, iter,
Y->mutable_data<float>());
}
return true;
}
// Gradient of the interleaved concat: Input(0) is dY (N, C * num_inputs,
// H, W); Inputs 1..num_inputs are the forward inputs, used only for their
// shapes.  Output(iter) receives the slice of dY belonging to input iter.
template<>
bool ConcatAttentionGradientOp<float, CUDAContext>::RunOnDevice() {
const int num_inputs = InputSize() - 1;
auto& dY = Input(0);
const int N = dY.dim32(0);
const int CC = dY.dim32(1);
const int H = dY.dim32(2);
const int W = dY.dim32(3);
// Per-input channel count; DCHECK guards against a non-divisible blob.
const int C = CC / num_inputs;
DCHECK_EQ(CC % num_inputs, 0);
const int pixels = H * W;
// Must zero-out dX before accumulating gradients
for (int iter=0; iter<num_inputs; iter++) {
auto& X = Input(iter+1);
auto* dX = Output(iter);
dX->ResizeLike(X);
int iter_size = X.size();
// no need to clean, as it is direct assignment to all the values
// math::Set<float, CUDAContext>(
// dX->size(), 0.f, dX->mutable_data<float>(), &context_);
hipLaunchKernelGGL(( ConcatAttentionBackward<float>)
, dim3(CAFFE_GET_BLOCKS(iter_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
iter_size,
dY.data<float>(),
C, num_inputs, pixels, iter,
dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(ConcatAttention,
ConcatAttentionOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ConcatAttentionGradient,
ConcatAttentionGradientOp<float, CUDAContext>);
} // namespace caffe2 | 30430773b41ab64f87d4ac89e48c36b6d54a90c5.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "concat_attention_op.h"
#include <stdio.h>
namespace caffe2 {
namespace {
// Scatters one (N, C, pixels) input blob into its slot of the interleaved
// output: flat input position index maps to output channel
// c * num_classes + iter of the same sample, same pixel.  nthreads is the
// input element count.  Note: `channels` is unused in this kernel.
template <typename T>
__global__ void ConcatAttentionForward(const int nthreads, const T* bottom_data,
const int channels, const int num_classes, const int pixels,
const int iter, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int pidx = index % pixels;
// idx flattens (sample, channel) of the input: idx = n * C + c.
const int idx = index / pixels;
const int target_index = (idx * num_classes + iter) * pixels + pidx;
top_data[target_index] = bottom_data[index];
}
}
// Inverse of ConcatAttentionForward: gathers the gradient slice belonging to
// input `iter` out of the interleaved gradient blob.  nthreads is the size of
// the per-input output gradient.  Note: `channels` is unused in this kernel.
template <typename T>
__global__ void ConcatAttentionBackward(const int nthreads, const T* input_grad,
const int channels, const int num_classes, const int pixels,
const int iter, T* output_grad) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int pidx = index % pixels;
// idx flattens (sample, channel) of the per-input gradient: idx = n * C + c.
const int idx = index / pixels;
const int target_index = (idx * num_classes + iter) * pixels + pidx;
output_grad[index] = input_grad[target_index];
} // CUDA_1D_KERNEL_LOOP
} // ConcatAttentionBackward
} // namespace
// Concatenates all inputs along the channel axis with per-channel
// interleaving: output is (N, C * num_inputs, H, W) where output channel
// c * num_inputs + iter comes from input `iter`, channel c.
// NOTE(review): all shape/size values are taken from Input(0) only - assumes
// every input has the same (N, C, H, W); confirm a shape check exists upstream.
template<>
bool ConcatAttentionOp<float, CUDAContext>::RunOnDevice() {
// first calculate the final channel size
const int num_inputs = InputSize();
const int N = Input(0).dim32(0);
const int C = Input(0).dim32(1);
const int H = Input(0).dim32(2);
const int W = Input(0).dim32(3);
const int iter_size = Input(0).size();
const int CC = C * num_inputs;
const int pixels = H * W;
auto* Y = Output(0);
Y->Resize(N, CC, H, W);
// One scatter launch per input; each writes a disjoint set of Y elements.
for (int iter=0; iter<num_inputs; iter++) {
auto& X = Input(iter);
ConcatAttentionForward<float>
<<<CAFFE_GET_BLOCKS(iter_size),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
iter_size,
X.data<float>(),
C, num_inputs, pixels, iter,
Y->mutable_data<float>());
}
return true;
}
// Gradient of the interleaved concat: Input(0) is dY (N, C * num_inputs,
// H, W); Inputs 1..num_inputs are the forward inputs, used only for their
// shapes.  Output(iter) receives the slice of dY belonging to input iter.
template<>
bool ConcatAttentionGradientOp<float, CUDAContext>::RunOnDevice() {
const int num_inputs = InputSize() - 1;
auto& dY = Input(0);
const int N = dY.dim32(0);
const int CC = dY.dim32(1);
const int H = dY.dim32(2);
const int W = dY.dim32(3);
// Per-input channel count; DCHECK guards against a non-divisible blob.
const int C = CC / num_inputs;
DCHECK_EQ(CC % num_inputs, 0);
const int pixels = H * W;
// Must zero-out dX before accumulating gradients
for (int iter=0; iter<num_inputs; iter++) {
auto& X = Input(iter+1);
auto* dX = Output(iter);
dX->ResizeLike(X);
int iter_size = X.size();
// no need to clean, as it is direct assignment to all the values
// math::Set<float, CUDAContext>(
// dX->size(), 0.f, dX->mutable_data<float>(), &context_);
ConcatAttentionBackward<float>
<<<CAFFE_GET_BLOCKS(iter_size),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
iter_size,
dY.data<float>(),
C, num_inputs, pixels, iter,
dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(ConcatAttention,
ConcatAttentionOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ConcatAttentionGradient,
ConcatAttentionGradientOp<float, CUDAContext>);
} // namespace caffe2 |
87149a854285882047f76693c3088eff1b69c9a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "symbols/NaN.cuh"
__global__ void biasKernel (
int batchSize,
int* lengths,
int numberEntriesPerInstance,
int numberRows,
int numberIterations,
float* input,
float* bias,
float* result) {
int indexInstance = blockIdx.x;
int indexColumn = blockIdx.y;
int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance;
int startColumnWithinInstance = indexColumn * numberRows;
int startRowWithinColumn = threadIdx.x * numberIterations;
int firstEntryWithinInstance = startColumnWithinInstance + startRowWithinColumn;
if(firstEntryWithinInstance < numberEntriesPerInstance) {
int startColumnWithinBatch = startInstanceWithinBatch + startColumnWithinInstance;
int firstEntryWithinBatch = startColumnWithinBatch + startRowWithinColumn;
int startNextColumnWithinBatch = startColumnWithinBatch + numberRows;
int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumnWithinBatch);
if(indexInstance < batchSize) {
int length = lengths[indexInstance];
if(indexColumn < length) {
for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) {
int indexColumn = indexEntry % numberRows;
result[indexEntry] = input[indexEntry] + bias[indexColumn];
}
}
else {
setToNan(result, firstEntryWithinBatch, lastEntryWithinBatch);
}
}
else {
setToNan(result, firstEntryWithinBatch, lastEntryWithinBatch);
}
}
} | 87149a854285882047f76693c3088eff1b69c9a0.cu | #include "symbols/NaN.cuh"
__global__ void biasKernel (
int batchSize,
int* lengths,
int numberEntriesPerInstance,
int numberRows,
int numberIterations,
float* input,
float* bias,
float* result) {
int indexInstance = blockIdx.x;
int indexColumn = blockIdx.y;
int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance;
int startColumnWithinInstance = indexColumn * numberRows;
int startRowWithinColumn = threadIdx.x * numberIterations;
int firstEntryWithinInstance = startColumnWithinInstance + startRowWithinColumn;
if(firstEntryWithinInstance < numberEntriesPerInstance) {
int startColumnWithinBatch = startInstanceWithinBatch + startColumnWithinInstance;
int firstEntryWithinBatch = startColumnWithinBatch + startRowWithinColumn;
int startNextColumnWithinBatch = startColumnWithinBatch + numberRows;
int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumnWithinBatch);
if(indexInstance < batchSize) {
int length = lengths[indexInstance];
if(indexColumn < length) {
for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) {
int indexColumn = indexEntry % numberRows;
result[indexEntry] = input[indexEntry] + bias[indexColumn];
}
}
else {
setToNan(result, firstEntryWithinBatch, lastEntryWithinBatch);
}
}
else {
setToNan(result, firstEntryWithinBatch, lastEntryWithinBatch);
}
}
} |
30680bb18417a060564c83e536aed70ad2ab5c10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/tile_op.h"
namespace caffe2 {
namespace {
// Repeats each "row" (inner_dim contiguous elements) of the input `tiles`
// times in the output.  For flat output index: col = index % inner_dim,
// source row = index / (inner_dim * tiles), so output rows
// r*tiles .. r*tiles+tiles-1 are all copies of input row r.
template <typename T>
__global__ void TileCopyKernel(
int outer_dim,
int inner_dim,
int tiles,
const T* input_data,
T* output_data) {
CUDA_1D_KERNEL_LOOP(index, outer_dim * inner_dim * tiles) {
int col = index % inner_dim;
int row = index / (inner_dim * tiles);
output_data[index] = input_data[row * inner_dim + col];
}
}
// Sums the `tiles` replicas of each element back into the untiled gradient.
// One block per (i, j) output element, grid-strided over outer_dim*inner_dim;
// the block's threads partition the tile axis and a BlockReduce produces the
// final sum, written by thread 0.
template <typename T>
__global__ void TileGradientAxpyKernel(
int outer_dim,
int inner_dim,
int tiles,
const T* input_data,
T* output_data) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
for (int idx = blockIdx.x; idx < outer_dim * inner_dim; idx += gridDim.x) {
int i = idx / inner_dim;
int j = idx % inner_dim;
T* output_ptr = output_data + inner_dim * i;
T x = 0.0;
// Each thread accumulates a strided subset of the tile replicas.
for (int t = threadIdx.x; t < tiles; t += blockDim.x) {
const T* input_ptr = input_data + (i * tiles + t) * inner_dim;
x += input_ptr[j];
}
__shared__ typename BlockReduce::TempStorage temp_storage;
T totx = BlockReduce(temp_storage).Sum(x);
if (threadIdx.x == 0) {
output_ptr[j] = totx;
}
// Guard temp_storage reuse across iterations of the grid-stride loop.
__syncthreads();
}
}
} // namespace
// Device dispatch for Tile: replicates each inner_dim-sized row tiles_ times.
// NOTE(review): always launches the float instantiation and reinterprets the
// byte pointers as float*, ignoring `meta` and `item_size`; only correct for
// float tensors - confirm callers guarantee that.
template <>
void TileOp<CUDAContext>::DoTile(
const TypeMeta& meta,
int item_size,
int outer_dim,
int inner_dim,
const char* input_data,
char* output_data) {
hipLaunchKernelGGL(( TileCopyKernel<float>)
, dim3(::min(outer_dim * inner_dim * tiles_, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
outer_dim,
inner_dim,
tiles_,
reinterpret_cast<const float*>(input_data),
reinterpret_cast<float*>(output_data));
}
// Device dispatch for TileGradient: reduces the tiles_ replicas of each
// element back into the untiled gradient (one block per output element).
// NOTE(review): float-only launch despite `meta`/`item_size`, mirroring
// DoTile - confirm callers restrict the op to float.
template <>
void TileGradientOp<float, CUDAContext>::DoTileGradient(
const TypeMeta& meta,
int item_size,
int outer_dim,
int inner_dim,
const char* input_data,
char* output_data) {
hipLaunchKernelGGL(( TileGradientAxpyKernel<float>),
dim3(::min(outer_dim * inner_dim, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
outer_dim,
inner_dim,
tiles_,
reinterpret_cast<const float*>(input_data),
reinterpret_cast<float*>(output_data));
}
REGISTER_CUDA_OPERATOR(Tile, TileOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(TileGradient, TileGradientOp<float, CUDAContext>);
} // namespace caffe2
| 30680bb18417a060564c83e536aed70ad2ab5c10.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/tile_op.h"
namespace caffe2 {
namespace {
// Repeats each "row" (inner_dim contiguous elements) of the input `tiles`
// times in the output.  For flat output index: col = index % inner_dim,
// source row = index / (inner_dim * tiles), so output rows
// r*tiles .. r*tiles+tiles-1 are all copies of input row r.
template <typename T>
__global__ void TileCopyKernel(
int outer_dim,
int inner_dim,
int tiles,
const T* input_data,
T* output_data) {
CUDA_1D_KERNEL_LOOP(index, outer_dim * inner_dim * tiles) {
int col = index % inner_dim;
int row = index / (inner_dim * tiles);
output_data[index] = input_data[row * inner_dim + col];
}
}
// Sums the `tiles` replicas of each element back into the untiled gradient.
// One block per (i, j) output element, grid-strided over outer_dim*inner_dim;
// the block's threads partition the tile axis and a BlockReduce produces the
// final sum, written by thread 0.
template <typename T>
__global__ void TileGradientAxpyKernel(
int outer_dim,
int inner_dim,
int tiles,
const T* input_data,
T* output_data) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
for (int idx = blockIdx.x; idx < outer_dim * inner_dim; idx += gridDim.x) {
int i = idx / inner_dim;
int j = idx % inner_dim;
T* output_ptr = output_data + inner_dim * i;
T x = 0.0;
// Each thread accumulates a strided subset of the tile replicas.
for (int t = threadIdx.x; t < tiles; t += blockDim.x) {
const T* input_ptr = input_data + (i * tiles + t) * inner_dim;
x += input_ptr[j];
}
__shared__ typename BlockReduce::TempStorage temp_storage;
T totx = BlockReduce(temp_storage).Sum(x);
if (threadIdx.x == 0) {
output_ptr[j] = totx;
}
// Guard temp_storage reuse across iterations of the grid-stride loop.
__syncthreads();
}
}
// Device dispatch for Tile: replicates each inner_dim-sized row tiles_ times.
// NOTE(review): always launches the float instantiation and reinterprets the
// byte pointers as float*, ignoring `meta` and `item_size`; only correct for
// float tensors - confirm callers guarantee that.
template <>
void TileOp<CUDAContext>::DoTile(
const TypeMeta& meta,
int item_size,
int outer_dim,
int inner_dim,
const char* input_data,
char* output_data) {
TileCopyKernel<float>
<<<std::min(outer_dim * inner_dim * tiles_, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
outer_dim,
inner_dim,
tiles_,
reinterpret_cast<const float*>(input_data),
reinterpret_cast<float*>(output_data));
}
// Device dispatch for TileGradient: reduces the tiles_ replicas of each
// element back into the untiled gradient (one block per output element).
// NOTE(review): float-only launch despite `meta`/`item_size`, mirroring
// DoTile - confirm callers restrict the op to float.
template <>
void TileGradientOp<float, CUDAContext>::DoTileGradient(
const TypeMeta& meta,
int item_size,
int outer_dim,
int inner_dim,
const char* input_data,
char* output_data) {
TileGradientAxpyKernel<float><<<
std::min(outer_dim * inner_dim, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
outer_dim,
inner_dim,
tiles_,
reinterpret_cast<const float*>(input_data),
reinterpret_cast<float*>(output_data));
}
REGISTER_CUDA_OPERATOR(TileGradient, TileGradientOp<float, CUDAContext>);
} // namespace caffe2
|
2c98a0cbde7668ff99df26b6069723b4df15a11b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void calculation( int *a, int *b, int *c, int constant, int vector_size ) {
// write your code here
// Declare shared memory
// Bring in the data from global memory into shared memory
// Synchronize
// Do calculation using the values in shared memory
// Write output
} | 2c98a0cbde7668ff99df26b6069723b4df15a11b.cu | #include "includes.h"
__global__ void calculation( int *a, int *b, int *c, int constant, int vector_size ) {
// write your code here
// Declare shared memory
// Bring in the data from global memory into shared memory
// Synchronize
// Do calculation using the values in shared memory
// Write output
} |
5e95a5d0e0cb5e334b86ca8ca113fd5be9236e24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <omp.h>
#include <stdio.h>
#include "AstroAccelerate/params.h"
#include "device_stretch_kernel.cu"
#include "helper_cuda.h"
//{{{ Dopler Stretch
/*
 * Launch the Doppler-stretch resampling kernel on `stream`, correcting a
 * constant-acceleration drift (599584916.0 = 2 * c in m/s).
 *   event  - waited on before the launch (ordering against a producer on
 *            another stream) and re-recorded afterwards for consumers.
 *   acc    - trial acceleration; samps/tsamp - sample count and interval.
 * One thread per sample with 32 threads per block.
 * NOTE(review): num_blocks_t truncates, so if samps is not a multiple of 32
 * the tail samples get no thread - confirm callers pad samps.
 */
void stretch_gpu(hipEvent_t event, hipStream_t stream, int acc, int samps, float tsamp, float *d_input, float *d_output)
{
//{{{ Simple corner turn on the GPU
int divisions_in_t = 32;
int num_blocks_t = samps / divisions_in_t;
// Per-sample resampling coefficients, computed once in double precision.
float t_zero = ( (double) tsamp ) / ( 1.0 + ( ( acc * samps * (double) tsamp ) / 599584916.0 ) );
float multiplier = ( t_zero * acc * (double) tsamp ) / 599584916.0;
float tsamp_inverse = 1.0 / tsamp;
// printf("\nStretch!");
// printf("\n%d %d", samps, acc);
// printf("\n%d %d", divisions_in_t, num_blocks_t);
dim3 threads_per_block(divisions_in_t);
dim3 num_blocks(num_blocks_t);
//double start_t, end_t;
//start_t = omp_get_wtime();
hipStreamWaitEvent(stream, event, 0);
hipLaunchKernelGGL(( stretch_kernel), dim3(num_blocks), dim3(threads_per_block), 0, stream, acc, samps, tsamp, d_input, d_output, t_zero, multiplier, tsamp_inverse);
getLastCudaError("stretch_kernel failed");
hipEventRecord(event, stream);
//end_t = omp_get_wtime();
//float time = (float)(end_t-start_t);
//printf("\nPerformed Stretch: %f (GPU estimate)", time);
//printf("\nCT Gops based on %.2f ops per channel per tsamp: %f",10.0,((10.0*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time))/1000000000.0);
//printf("\nCT Device memory bandwidth in GB/s: %f", ((sizeof(float)+sizeof(unsigned short))*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time)/1000000000.0);
//}}}
}
//}}}
| 5e95a5d0e0cb5e334b86ca8ca113fd5be9236e24.cu | #include <omp.h>
#include <stdio.h>
#include "AstroAccelerate/params.h"
#include "device_stretch_kernel.cu"
#include "helper_cuda.h"
//{{{ Dopler Stretch
/*
 * Launch the Doppler-stretch resampling kernel on `stream`, correcting a
 * constant-acceleration drift (599584916.0 = 2 * c in m/s).
 *   event  - waited on before the launch (ordering against a producer on
 *            another stream) and re-recorded afterwards for consumers.
 *   acc    - trial acceleration; samps/tsamp - sample count and interval.
 * One thread per sample with 32 threads per block.
 * NOTE(review): num_blocks_t truncates, so if samps is not a multiple of 32
 * the tail samples get no thread - confirm callers pad samps.
 */
void stretch_gpu(cudaEvent_t event, cudaStream_t stream, int acc, int samps, float tsamp, float *d_input, float *d_output)
{
//{{{ Simple corner turn on the GPU
int divisions_in_t = 32;
int num_blocks_t = samps / divisions_in_t;
// Per-sample resampling coefficients, computed once in double precision.
float t_zero = ( (double) tsamp ) / ( 1.0 + ( ( acc * samps * (double) tsamp ) / 599584916.0 ) );
float multiplier = ( t_zero * acc * (double) tsamp ) / 599584916.0;
float tsamp_inverse = 1.0 / tsamp;
// printf("\nStretch!");
// printf("\n%d %d", samps, acc);
// printf("\n%d %d", divisions_in_t, num_blocks_t);
dim3 threads_per_block(divisions_in_t);
dim3 num_blocks(num_blocks_t);
//double start_t, end_t;
//start_t = omp_get_wtime();
cudaStreamWaitEvent(stream, event, 0);
stretch_kernel<<<num_blocks, threads_per_block, 0, stream>>>(acc, samps, tsamp, d_input, d_output, t_zero, multiplier, tsamp_inverse);
getLastCudaError("stretch_kernel failed");
cudaEventRecord(event, stream);
//end_t = omp_get_wtime();
//float time = (float)(end_t-start_t);
//printf("\nPerformed Stretch: %f (GPU estimate)", time);
//printf("\nCT Gops based on %.2f ops per channel per tsamp: %f",10.0,((10.0*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time))/1000000000.0);
//printf("\nCT Device memory bandwidth in GB/s: %f", ((sizeof(float)+sizeof(unsigned short))*(divisions_in_t*divisions_in_f*num_blocks_t*num_blocks_f))/(time)/1000000000.0);
//}}}
}
//}}}
|
6189b5d8eadaf4383e0a944e960dae912274766f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include "cudaio.h"
#include "timer.h"
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
const long NUM_ITERATIONS = 1;
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
// Config-reader callback (passed to cuio_load_conf): parses the option count,
// a single long on the first line of the already-open config file, and stores
// it through `ctx`, which must point to a long. Exits the process on failure.
// Fixed: the read-failure message was missing its verb ("cannot option count").
static void
confer_load(FILE *fp, const char *fpath, void *ctx)
{
	char buf[1024];
	long *popt_n = (long *)ctx;

	if (fgets(buf, 1024, fp) == NULL) {
		fprintf(stderr, "cannot read option count: %s\n", fpath);
		exit(2);
	}
	if (sscanf(buf, "%ld", popt_n) != 1) {
		fprintf(stderr, "invalid format: %s\n", fpath);
		exit(3);
	}
}
// Black-Scholes benchmark driver (HIP port): loads option data from <folder>,
// prices European calls/puts on the GPU, and reports timings for the
// pre-transfer, kernel, and post-transfer phases.
int
main(int argc, char *argv[])
{
	long opt_n;                          /* number of options, from config */
	char *folder;
	cuio_ptr_t stockPrice, optionStrike, optionYears;
	cuio_ptr_t callResult, putResult;
	unsigned ticks_pre, ticks_kern, ticks_post;
	float resCall1, resCall2, resPut1, resPut2;
	long i;

	if (argc != 2) {
		fprintf(stderr, "Usage: %s <folder>\n", argv[0]);
		exit(EXIT_FAILURE);
	}
	folder = argv[1];

	cuio_init(CUIO_TYPE_NONE, folder);
	cuio_load_conf(confer_load, &opt_n);

	printf("Initializing data...\n");
	init_tickcount();
	/* Results are write-only outputs; market data buffers are read-only. */
	callResult = cuio_load_floats("CallResult.mem", opt_n, CUIO_MODE_WRITEONLY);
	putResult = cuio_load_floats("PutResult.mem", opt_n, CUIO_MODE_WRITEONLY);
	stockPrice = cuio_load_floats("StockPrice.mem", opt_n, CUIO_MODE_READONLY);
	optionStrike = cuio_load_floats("OptionStrike.mem", opt_n, CUIO_MODE_READONLY);
	optionYears = cuio_load_floats("OptionYears.mem", opt_n, CUIO_MODE_READONLY);
	cuio_memset_d(&callResult, 0);
	cuio_memset_d(&putResult, 0);
	cuio_memcpy_h2d(&stockPrice);
	cuio_memcpy_h2d(&optionStrike);
	cuio_memcpy_h2d(&optionYears);
	checkCudaErrors(hipDeviceSynchronize());
	ticks_pre = get_tickcount();

	printf("Executing Black-Scholes GPU kernel (%li iterations)...\n", NUM_ITERATIONS);
	init_tickcount();
	for (i = 0; i < NUM_ITERATIONS; i++) {
		/* Kernel prices two options per thread (float2 views), hence opt_n/2.
		 * Fixed: the hipify pass had shifted the "480, 128" comment so it
		 * swallowed the closing parenthesis of dim3(128), leaving the macro
		 * call unbalanced. */
		hipLaunchKernelGGL(BlackScholesGPU, dim3(DIV_UP((opt_n / 2), 128)), dim3(128 /*480, 128*/), 0, 0,
			(float2 *)CUIO_FLOATS_D(callResult), (float2 *)CUIO_FLOATS_D(putResult),
			(float2 *)CUIO_FLOATS_D(stockPrice), (float2 *)CUIO_FLOATS_D(optionStrike), (float2 *)CUIO_FLOATS_D(optionYears),
			RISKFREE, VOLATILITY, opt_n);
		getLastCudaError("BlackScholesGPU() execution failed\n");
		checkCudaErrors(hipDeviceSynchronize());
	}
	checkCudaErrors(hipDeviceSynchronize());
	ticks_kern = get_tickcount();

	printf("\nReading back GPU results...\n");
	//Read back GPU results to compare them to CPU results
	init_tickcount();
	cuio_memcpy_d2h(&callResult);
	cuio_memcpy_d2h(&putResult);
	resCall1 = CUIO_FLOATS_ITEM(callResult, 0);
	resCall2 = CUIO_FLOATS_ITEM(callResult, opt_n - 1);
	/* Fixed: put prices were previously sampled from callResult (copy-paste
	 * bug), so the "Put" line printed call prices. */
	resPut1 = CUIO_FLOATS_ITEM(putResult, 0);
	resPut2 = CUIO_FLOATS_ITEM(putResult, opt_n - 1);
	cuio_unload_floats("CallResult.mem", &callResult);
	cuio_unload_floats("PutResult.mem", &putResult);
	cuio_free_mem(&optionYears);
	cuio_free_mem(&optionStrike);
	cuio_free_mem(&stockPrice);
	ticks_post = get_tickcount();

	printf("Result: Call(%f,%f), Put(%f,%f)\n", resCall1, resCall2, resPut1, resPut2);
	printf("pre time(us): %u\n", ticks_pre);
	printf("kernel time(us): %u\n", ticks_kern);
	printf("post time(us): %u\n", ticks_post);
	exit(EXIT_SUCCESS);
}
| 6189b5d8eadaf4383e0a944e960dae912274766f.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include "cudaio.h"
#include "timer.h"
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
const long NUM_ITERATIONS = 1;
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
// Config-reader callback (passed to cuio_load_conf): parses the option count,
// a single long on the first line of the already-open config file, and stores
// it through `ctx`, which must point to a long (opt_n in main).
// Exits the process with distinct codes on read (2) vs. parse (3) failure.
static void
confer_load(FILE *fp, const char *fpath, void *ctx)
{
char buf[1024];
long *popt_n = (long *)ctx;
if (fgets(buf, 1024, fp) == NULL) {
fprintf(stderr, "cannot option count: %s\n", fpath);
exit(2);
}
if (sscanf(buf, "%ld", popt_n) != 1) {
fprintf(stderr, "invalid format: %s\n", fpath);
exit(3);
}
}
// Black-Scholes benchmark driver: loads option data from <folder>, prices
// European calls/puts on the GPU, and reports phase timings.
int
main(int argc, char *argv[])
{
	long opt_n;                          /* number of options, from config */
	char *folder;
	cuio_ptr_t stockPrice, optionStrike, optionYears;
	cuio_ptr_t callResult, putResult;
	unsigned ticks_pre, ticks_kern, ticks_post;
	float resCall1, resCall2, resPut1, resPut2;
	long i;

	if (argc != 2) {
		fprintf(stderr, "Usage: %s <folder>\n", argv[0]);
		exit(EXIT_FAILURE);
	}
	folder = argv[1];

	cuio_init(CUIO_TYPE_NONE, folder);
	cuio_load_conf(confer_load, &opt_n);

	printf("Initializing data...\n");
	init_tickcount();
	/* Results are write-only outputs; market data buffers are read-only. */
	callResult = cuio_load_floats("CallResult.mem", opt_n, CUIO_MODE_WRITEONLY);
	putResult = cuio_load_floats("PutResult.mem", opt_n, CUIO_MODE_WRITEONLY);
	stockPrice = cuio_load_floats("StockPrice.mem", opt_n, CUIO_MODE_READONLY);
	optionStrike = cuio_load_floats("OptionStrike.mem", opt_n, CUIO_MODE_READONLY);
	optionYears = cuio_load_floats("OptionYears.mem", opt_n, CUIO_MODE_READONLY);
	cuio_memset_d(&callResult, 0);
	cuio_memset_d(&putResult, 0);
	cuio_memcpy_h2d(&stockPrice);
	cuio_memcpy_h2d(&optionStrike);
	cuio_memcpy_h2d(&optionYears);
	checkCudaErrors(cudaDeviceSynchronize());
	ticks_pre = get_tickcount();

	printf("Executing Black-Scholes GPU kernel (%li iterations)...\n", NUM_ITERATIONS);
	init_tickcount();
	for (i = 0; i < NUM_ITERATIONS; i++) {
		/* Kernel prices two options per thread (float2 views), hence opt_n/2. */
		BlackScholesGPU<<<DIV_UP((opt_n/2), 128), 128/*480, 128*/>>>(
			(float2 *)CUIO_FLOATS_D(callResult), (float2 *)CUIO_FLOATS_D(putResult),
			(float2 *)CUIO_FLOATS_D(stockPrice), (float2 *)CUIO_FLOATS_D(optionStrike), (float2 *)CUIO_FLOATS_D(optionYears),
			RISKFREE, VOLATILITY, opt_n);
		getLastCudaError("BlackScholesGPU() execution failed\n");
		checkCudaErrors(cudaDeviceSynchronize());
	}
	checkCudaErrors(cudaDeviceSynchronize());
	ticks_kern = get_tickcount();

	printf("\nReading back GPU results...\n");
	//Read back GPU results to compare them to CPU results
	init_tickcount();
	cuio_memcpy_d2h(&callResult);
	cuio_memcpy_d2h(&putResult);
	resCall1 = CUIO_FLOATS_ITEM(callResult, 0);
	resCall2 = CUIO_FLOATS_ITEM(callResult, opt_n - 1);
	/* Fixed: put prices were previously sampled from callResult (copy-paste
	 * bug), so the "Put" line printed call prices. */
	resPut1 = CUIO_FLOATS_ITEM(putResult, 0);
	resPut2 = CUIO_FLOATS_ITEM(putResult, opt_n - 1);
	cuio_unload_floats("CallResult.mem", &callResult);
	cuio_unload_floats("PutResult.mem", &putResult);
	cuio_free_mem(&optionYears);
	cuio_free_mem(&optionStrike);
	cuio_free_mem(&stockPrice);
	ticks_post = get_tickcount();

	printf("Result: Call(%f,%f), Put(%f,%f)\n", resCall1, resCall2, resPut1, resPut2);
	printf("pre time(us): %u\n", ticks_pre);
	printf("kernel time(us): %u\n", ticks_kern);
	printf("post time(us): %u\n", ticks_post);
	exit(EXIT_SUCCESS);
}
|
daf9deb300b0c982c31f8ff91b15d02ee0309636.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
hipStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_unexp_distance_op, float, float, float, raft::identity_op, int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
| daf9deb300b0c982c31f8ff91b15d02ee0309636.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
cudaStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_unexp_distance_op, float, float, float, raft::identity_op, int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
|
4c8b3a49b907fb3ceaacea99a3ff1d5758fd4b8c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
* License: BSD 3
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
using namespace std;
/* Every thread gets exactly one value in the unsorted array. */
//#define THREADS 512 // 2^9
//#define BLOCKS 32768 // 2^15
//#define NUM_VALS THREADS*BLOCKS
// Prints the elapsed time between two clock() readings, in seconds.
void print_elapsed(clock_t start, clock_t stop)
{
	printf("Elapsed time: %.3fs\n", ((double)(stop - start)) / CLOCKS_PER_SEC);
}
// Returns a pseudo-random float in [0, 1], derived from rand().
float random_float()
{
	float numerator = (float)rand();
	return numerator / (float)RAND_MAX;
}
// Prints `length` floats, space-separated with three decimals, then a newline.
void array_print(float* arr, int length)
{
	for (int idx = 0; idx < length; ++idx)
		printf("%1.3f ", arr[idx]);
	printf("\n");
}
// Fills arr[0..length) with pseudo-random values in [0, 1].
// NOTE(review): reseeds rand() from the current time on every call, so two
// calls within the same second yield identical arrays — confirm intended.
void array_fill(float* arr, int length)
{
	srand(time(NULL));
	for (int idx = 0; idx < length; ++idx)
		arr[idx] = random_float();
}
// One compare-exchange pass of the bitonic sorting network (float variant).
// Each thread owns element `self` and pairs with `self ^ j`; only the
// lower-indexed thread of each pair performs the exchange. Bit (self & k)
// selects the direction: zero -> ascending, nonzero -> descending.
__global__ void bitonic_sort_step(float* dev_values, int j, int k)
{
	unsigned int self = threadIdx.x + blockDim.x * blockIdx.x;
	unsigned int partner = self ^ j;

	if (partner <= self)
		return; /* the higher-indexed thread of the pair does nothing */

	bool ascending = ((self & k) == 0);
	float lo = dev_values[self];
	float hi = dev_values[partner];
	bool out_of_order = ascending ? (lo > hi) : (lo < hi);
	if (out_of_order) {
		dev_values[self] = hi;
		dev_values[partner] = lo;
	}
}
// One compare-exchange pass of the bitonic sorting network (int overload,
// same structure as the float variant above). Each thread owns element i and
// its partner i^j; the lower-indexed thread performs the exchange, and bit
// (i & k) selects ascending (0) vs. descending (nonzero) order.
__global__ void bitonic_sort_step(int* dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
/* The threads with the lowest ids sort the array. */
if ((ixj) > i) {
if ((i & k) == 0) {
/* Sort ascending */
if (dev_values[i] > dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
/* Sort descending */
if (dev_values[i] < dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
/**
* Inplace bitonic sort using CUDA.
* values was created with hipMallocManaged
*/
// Sorts `values` ascending in place with a bitonic network (float variant).
// `values` must be device-accessible (the commented-out copies suggest it is
// allocated with managed memory — see the header comment above).
// NOTE(review): numElems must be a power of two for the bitonic network, and
// BLOCKS = numElems / 256 truncates, so numElems is assumed to be a multiple
// of 256 as well — otherwise the tail is never sorted. TODO confirm callers.
void bitonic_sort(float* values, long numElems)
{
float* dev_values = values;
size_t size = numElems * sizeof(float); // only used by the disabled copy path
//hipMalloc((void**)&dev_values, size);
//hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
const int THREADS = 256;
const int BLOCKS = numElems / THREADS;
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= numElems; k <<= 1) {
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1) {
// No error checking after launch; failures surface only at the sync below.
bitonic_sort_step << <blocks, threads >> > (dev_values, j, k);
}
}
hipDeviceSynchronize();
//hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
//hipFree(dev_values);
}
/**
* Inplace bitonic sort using CUDA.
* values was created with hipMallocManaged
*/
// Sorts `values` ascending in place with a bitonic network (int overload).
// Same constraints as the float variant: `values` must be device-accessible,
// numElems must be a power of two and (because of the truncating division)
// a multiple of 256 — TODO confirm callers.
void bitonic_sort(int* values, long numElems)
{
// NOTE(review): leftover debug trace; prints on every call.
cout << "calling int" << endl;
int* dev_values = values;
size_t size = numElems * sizeof(int); // only used by the disabled copy path
//hipMalloc((void**)&dev_values, size);
//hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
const int THREADS = 256;
const int BLOCKS = numElems / THREADS;
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= numElems; k <<= 1) {
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step << <blocks, threads >> > (dev_values, j, k);
}
}
hipDeviceSynchronize();
//hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
//hipFree(dev_values);
}
//int main(void)
//{
// clock_t start, stop;
//
// float* values = (float*)malloc(NUM_VALS * sizeof(float));
// array_fill(values, NUM_VALS);
//
// start = clock();
// bitonic_sort(values); /* Inplace */
// stop = clock();
//
// print_elapsed(start, stop);
//}
| 4c8b3a49b907fb3ceaacea99a3ff1d5758fd4b8c.cu | /*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
* License: BSD 3
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <curand.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
using namespace std;
/* Every thread gets exactly one value in the unsorted array. */
//#define THREADS 512 // 2^9
//#define BLOCKS 32768 // 2^15
//#define NUM_VALS THREADS*BLOCKS
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
float random_float()
{
return (float)rand() / (float)RAND_MAX;
}
void array_print(float* arr, int length)
{
int i;
for (i = 0; i < length; ++i) {
printf("%1.3f ", arr[i]);
}
printf("\n");
}
void array_fill(float* arr, int length)
{
srand(time(NULL));
int i;
for (i = 0; i < length; ++i) {
arr[i] = random_float();
}
}
__global__ void bitonic_sort_step(float* dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
/* The threads with the lowest ids sort the array. */
if ((ixj) > i) {
if ((i & k) == 0) {
/* Sort ascending */
if (dev_values[i] > dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
/* Sort descending */
if (dev_values[i] < dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
__global__ void bitonic_sort_step(int* dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
/* The threads with the lowest ids sort the array. */
if ((ixj) > i) {
if ((i & k) == 0) {
/* Sort ascending */
if (dev_values[i] > dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
/* Sort descending */
if (dev_values[i] < dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
/**
* Inplace bitonic sort using CUDA.
* values was created with cudaMallocManaged
*/
void bitonic_sort(float* values, long numElems)
{
float* dev_values = values;
size_t size = numElems * sizeof(float);
//cudaMalloc((void**)&dev_values, size);
//cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
const int THREADS = 256;
const int BLOCKS = numElems / THREADS;
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= numElems; k <<= 1) {
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step << <blocks, threads >> > (dev_values, j, k);
}
}
cudaDeviceSynchronize();
//cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
//cudaFree(dev_values);
}
/**
* Inplace bitonic sort using CUDA.
* values was created with cudaMallocManaged
*/
void bitonic_sort(int* values, long numElems)
{
cout << "calling int" << endl;
int* dev_values = values;
size_t size = numElems * sizeof(int);
//cudaMalloc((void**)&dev_values, size);
//cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
const int THREADS = 256;
const int BLOCKS = numElems / THREADS;
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= numElems; k <<= 1) {
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step << <blocks, threads >> > (dev_values, j, k);
}
}
cudaDeviceSynchronize();
//cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
//cudaFree(dev_values);
}
//int main(void)
//{
// clock_t start, stop;
//
// float* values = (float*)malloc(NUM_VALS * sizeof(float));
// array_fill(values, NUM_VALS);
//
// start = clock();
// bitonic_sort(values); /* Inplace */
// stop = clock();
//
// print_elapsed(start, stop);
//}
|
38a8d87c818494c0323ea9cd82bb4cec110a665e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "naive_histogram.h"
namespace naive_histogram {
using namespace ::dali;
/**
* Computes a histogram for a single-channel image.
*
* @param input One-channel image.
* @param input_size Size (in pixels) of the input image.
* @param n_bins Number of histogram bins.
* @param histogram Output array. Shall be allocated accordingly to `n_bins`.
*/
// One thread per input pixel: each thread reads its pixel value and
// atomically increments the matching histogram bin.
__global__ void naive_histogram_kernel(
const uint8_t *input, const int input_size, const int n_bins,
int32_t *histogram) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= input_size) return;  // guard the grid tail
auto value = input[tid];
// NOTE(review): the bin is chosen by modulo (values wrap around n_bins),
// not by range partitioning — confirm this is the intended binning.
int bin = value % n_bins;
// Contended global atomic; acceptable for a "naive" example, slow at scale.
// NOTE(review): assumes `histogram` was zero-initialized by the caller.
atomicAdd(&histogram[bin], 1);
}
// GPU backend: launches one histogram kernel per sample in the batch, all on
// the workspace stream (launches are asynchronous and stream-ordered).
template<>
void NaiveHistogram<GPUBackend>::RunImpl(Workspace &ws) {
const auto &input = ws.Input<GPUBackend>(0);  // Input is a batch of samples.
const auto &shape = input.shape();
auto &output = ws.Output<GPUBackend>(0);
for (int sample_idx = 0;
sample_idx < shape.num_samples(); sample_idx++) {  // Iterating over all samples in a batch.
dim3 block_size(32);
auto input_size = volume(input.tensor_shape(sample_idx));  // total pixel count
dim3 grid_size((input_size + block_size.x - 1) / block_size.x);  // ceil-div
// NOTE(review): the output bins are not zeroed here — presumably done
// elsewhere (e.g. in setup); verify before trusting accumulated counts.
hipLaunchKernelGGL(( naive_histogram_kernel), dim3(grid_size), dim3(block_size), 0, ws.stream(),
input[sample_idx].data<uint8_t>(),
input_size,
n_histogram_bins_,
output[sample_idx].mutable_data<int32_t>()
);
}
}
} // namespace naive_histogram
| 38a8d87c818494c0323ea9cd82bb4cec110a665e.cu | // Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "naive_histogram.h"
namespace naive_histogram {
using namespace ::dali;
/**
* Computes a histogram for a single-channel image.
*
* @param input One-channel image.
* @param input_size Size (in pixels) of the input image.
* @param n_bins Number of histogram bins.
* @param histogram Output array. Shall be allocated accordingly to `n_bins`.
*/
__global__ void naive_histogram_kernel(
const uint8_t *input, const int input_size, const int n_bins,
int32_t *histogram) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= input_size) return;
auto value = input[tid];
int bin = value % n_bins;
atomicAdd(&histogram[bin], 1);
}
template<>
void NaiveHistogram<GPUBackend>::RunImpl(Workspace &ws) {
const auto &input = ws.Input<GPUBackend>(0); // Input is a batch of samples.
const auto &shape = input.shape();
auto &output = ws.Output<GPUBackend>(0);
for (int sample_idx = 0;
sample_idx < shape.num_samples(); sample_idx++) { // Iterating over all samples in a batch.
dim3 block_size(32);
auto input_size = volume(input.tensor_shape(sample_idx));
dim3 grid_size((input_size + block_size.x - 1) / block_size.x);
naive_histogram_kernel<<<grid_size, block_size, 0, ws.stream()>>>(
input[sample_idx].data<uint8_t>(),
input_size,
n_histogram_bins_,
output[sample_idx].mutable_data<int32_t>()
);
}
}
} // namespace naive_histogram
|
497587fcb91ad713a510ddf197173cdd8eca6326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma region
// . ,
// . , , .
//
// : data.config
// dimension (n, size) - ;
// dimension - [a_i, b_i] ( )
// (initset_proections);
// discr (d) - , 2 ,
// ( (grid), . );
// skip_iter (N, skip, amt_skip_iter) - , ;
// max_iter (M, max_amt_iter) - ( );
// max_cycle_len (K, lim_cycle_len) - ;
// T (check) - ;
// eps - ( - 2 eps ,
// , );
// tau - ( 2
// tau , );
// name_math_mapping - ;
// math_mapping_params - , (
// - ).
//
// : all_cycles.txt ;
// different_cycles.txt .
#pragma endregion
#include "program.h" // .h
// Maps a mapping name read from the config file to its internal numeric id.
// The buffer comes from fgets(), so it normally carries a trailing newline;
// the original code unconditionally dropped the final character, which
// corrupted the name when no newline was present (e.g. last line of a file
// without a terminating '\n'). Trailing '\n'/'\r' are now stripped only when
// actually present.
// Returns 1..5 for known mappings, 0 for an unrecognized name.
int NumberMathMapping(char name_math_mapping[MAX_ARRAY_SIZE]) {
	string string_math_mapping(name_math_mapping);
	while (!string_math_mapping.empty() &&
	       (string_math_mapping.back() == '\n' || string_math_mapping.back() == '\r'))
		string_math_mapping.pop_back();
	if (string_math_mapping == "ROTATION")
		return 1;
	if (string_math_mapping == "BERNULLI")
		return 2;
	if (string_math_mapping == "LOGICAL")
		return 3;
	if (string_math_mapping == "FILTERING")
		return 4;
	if (string_math_mapping == "RELAX")
		return 5;
	return 0;
}
// Maps a grid-kind name from the config file to its numeric id.
// Same fix as NumberMathMapping: strip trailing '\n'/'\r' only when present
// instead of unconditionally dropping the last character.
// Returns 1 for UNIFORM, 2 for RANDOM, 0 for an unrecognized name.
int NumberGridKind(char name_grid[MAX_ARRAY_SIZE]) {
	string string_name_grid(name_grid);
	while (!string_name_grid.empty() &&
	       (string_name_grid.back() == '\n' || string_name_grid.back() == '\r'))
		string_name_grid.pop_back();
	if (string_name_grid == "UNIFORM")
		return 1;
	if (string_name_grid == "RANDOM")
		return 2;
	return 0;
}
// Runs the external Python parser over `configfile`/`readsection` via _popen
// (Windows-specific pipe) and relays the script's stdout to our stdout.
// Returns false only when the pipe cannot be opened. The script presumably
// produces the "input.txt" file that ReadData() consumes — TODO confirm.
bool AreParseDataFromConfig(string configfile, string readsection) {
FILE *script_file;
char buffer[MAX_ARRAY_SIZE]; // line buffer for the script's output
string callcommand; // command line handed to _popen
callcommand = "python " + pyscriptname + " " + configfile + " " + readsection;
if (!(script_file = _popen(callcommand.c_str(), "r"))) { // pipe open failed
printf("Error in launch Python script!");
return false;
}
while (fgets(buffer, sizeof(buffer), script_file) != NULL) // relay output
printf("%s", buffer);
_pclose(script_file);
return true;
}
//
// Reads the run configuration: first has the Python helper convert the config
// into "input.txt", then parses all run parameters from it (dimension,
// per-dimension [a_i, b_i] bounds, point-source kind, discretization, the
// iteration limits, tolerances eps/tau, and the mapping id plus parameters).
// Fixed: early-return paths now close input_file (handle leak), and the
// memset of the float parameter array now zeroes MAX_ARRAY_SIZE floats, not
// MAX_ARRAY_SIZE bytes (which covered only a quarter of the array).
// Returns false when a configured limit is exceeded.
bool ReadData(string configfile, string readsection, int *dimension, float *a, float *b, int *num_points_source, float *discr, int *skip_iter, int *max_iter, int *max_cycle_len, int *T, float *eps, float *tau, int *num_math_mapping, float *math_mapping_params) {
	if (!AreParseDataFromConfig(configfile, readsection))
		return false;
	FILE *input_file;   // temp file produced by the Python helper
	input_file = fopen("input.txt", "rt");
	fscanf(input_file, "%d", dimension);
	if (*dimension > MAX_DIMENSION) {
		printf("Too much dimension of the multidimensional cube! Please reduce it...\n");
		fclose(input_file);
		return false;
	}
	for (int i = 0; i < MAX_DIMENSION; i++)
		a[i] = b[i] = 0.;
	for (int i = 0; i < *dimension; i++)
		fscanf(input_file, "%f %f", &a[i], &b[i]);
	char ch = NULL;   // consumes the separator before a text field
	fscanf(input_file, "%c", &ch);
	char name_points_source[MAX_ARRAY_SIZE];
	fgets(name_points_source, MAX_ARRAY_SIZE, input_file);
	*num_points_source = NumberGridKind(name_points_source);
	fscanf(input_file, "%f", discr);
	if (*discr > MAX_DISCR) {
		printf("Too high a value of the discretization! Please reduce it...\n");
		fclose(input_file);
		return false;
	}
	fscanf(input_file, "%d %d %d %d", skip_iter, max_iter , max_cycle_len, T);
	if (*max_cycle_len > LIM_MAX_CYCLE_LENGTH) {
		printf("Too high a value of cycle length! Please reduce it...\n");
		fclose(input_file);
		return false;
	}
	fscanf(input_file,"%f %f", eps, tau);
	memset(math_mapping_params, 0, MAX_ARRAY_SIZE * sizeof(float));
	fscanf(input_file, "%c", &ch);
	char name_math_mapping[MAX_ARRAY_SIZE];
	fgets(name_math_mapping, MAX_ARRAY_SIZE, input_file);
	*num_math_mapping = NumberMathMapping(name_math_mapping);
	int param_count = 0;   // number of mapping parameters that follow
	fscanf(input_file, "%d", &param_count);
	for (int i = 0; i < param_count; i++)
		fscanf(input_file, "%f", &math_mapping_params[i]);
	printf("Program data is READ!\n");
	fclose(input_file);
	remove("input.txt");   // clean up the helper's temp file
	return true;
}
// Writes "all_cycles.txt": for every starting grid point, its coordinates,
// the length of the cycle detected from it, and the cycle elements.
// The start point is reconstructed from the flat index i: x = a[0] + (i %
// grid.x) * discr, y = a[1] + (i / grid.y) * discr.
// NOTE(review): the y coordinate divides by grid.y; for row-major
// enumeration over a grid.x-wide lattice one would expect i / grid.x —
// confirm against the kernel's indexing.
void OutputAllCycles(float a[MAX_ARRAY_SIZE], float discr, dim3 grid, int start_points, CycleData *cycle_list_host) {
FILE *output_file;
output_file = fopen("all_cycles.txt", "wt");
fprintf(output_file, "Start point \t\t\t\t Cyclelength \t\t\t Cycle\n");
for (int i = 0; i < start_points; ++i) { // one row per starting point
fprintf (output_file, "\n(%f,%f) \t\t cyclelength: %d \t\t", a[0]+(i%grid.x)*discr, a[1]+i/grid.y*discr, cycle_list_host[i].length);
for (int j = 0; j < cycle_list_host[i].length; ++j) // cycle elements
fprintf (output_file, " (%f,%f) ", cycle_list_host[i].points[j].x, cycle_list_host[i].points[j].y) ;
}
fprintf (output_file, "\n\nAll start points: %d ", start_points) ;
fclose(output_file);
printf("Results of work program WRITE to output files!\n");
}
// Prints the textual description of the most recent CUDA/HIP error; note
// hipGetLastError() also clears the sticky error state as a side effect.
void CudaOutputErrors() {
	printf("CUDA errors: ");
	// Fixed: pass the message as an argument, never as the format string —
	// an error text containing '%' would otherwise be undefined behavior.
	printf("%s", hipGetErrorString(hipGetLastError()));
	printf("\n\n");
}
// Reports the program's final status on stdout; on success also dumps any
// pending CUDA error state.
void ResultsOfWorkProgram(bool state) {
	if (state) {
		printf("Work with GPU are OVER!\nProgram has successfully completed its work!\n");
		CudaOutputErrors();
	} else {
		printf("Program is interrupted!\n");
	}
}
// Verifies that a device allocation succeeded. On a null pointer, reports
// the failure (and the pending CUDA error state) and returns false.
bool IsCudaDataCreated(void *cuda_pointer) {
	if (cuda_pointer != NULL)
		return true;
	printf("\nData for device is not created!\n");
	CudaOutputErrors();
	return false;
}
//
// Releases all dynamic allocations at shutdown: the host-side cycle list
// (malloc'd) and the device-side buffers (hipFree).
// NOTE(review): hipFree return codes are ignored; consider checking them if
// shutdown diagnostics ever matter.
void FreeUsedDynamicObjects(CycleData *cycle_list_host, CycleData *cycle_list_device, float *a_device, float *math_mapping_params, float2 *point_seq_device) {
free(cycle_list_host);
hipFree(cycle_list_device);
hipFree(a_device);
hipFree(math_mapping_params);
hipFree(point_seq_device);
}
// Returns a pseudo-random float uniformly distributed over [p, q].
float RandomBetweenTwoValues(float p, float q) {
	float unit = ((float)rand()) / (float)RAND_MAX; /* in [0, 1] */
	return p + unit * (q - p);
}
// Builds the 2-D grid of starting points over [a, b].
// num_points_source: 0 = invalid name, 1 = UNIFORM (regular lattice with
// step `discr`), 2 = RANDOM.
// NOTE(review): in the RANDOM branch, random values from the coordinate
// ranges are assigned directly to the grid *dimensions* (truncated to int),
// not to point coordinates — confirm this is intended.
bool MakeGrid(int num_points_source, dim3 *grid, float a[MAX_ARRAY_SIZE], float b[MAX_ARRAY_SIZE], float discr) {
if (num_points_source == 0) { // unrecognized source name in the config
printf("\nGrid was not formed!\n");
CudaOutputErrors();
return false;
}
if (num_points_source == 1) {
grid->x = ((b[0] - a[0]) / discr) + 1;
grid->y = ((b[1] - a[1]) / discr) + 1;
}
if (num_points_source == 2) {
grid->x = RandomBetweenTwoValues(a[0], b[0]);
grid->y = RandomBetweenTwoValues(a[1], b[1]);
}
return true;
}
// Advances *point through `skip_iter` applications of the selected mapping,
// discarding transient iterations before cycle detection starts.
// `num_iter` is the running iteration index forwarded to MathMapping
// (presumably some mappings depend on it — TODO confirm).
__device__ void SkipIterations(float2 *point, int num_iter, int skip_iter, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) {
for (int i = 0; i < skip_iter; ++i)
*point = MathMapping(*point, num_iter+i, num_math_mapping, math_mapping_params);
}
// Component-wise closeness test: true when both coordinates of the two
// points differ by less than eps.
__device__ bool IsEqualPoints(float2 first_point, float2 second_point, float eps) {
	bool x_close = fabs(first_point.x - second_point.x) < eps;
	bool y_close = fabs(first_point.y - second_point.y) < eps;
	return x_close && y_close;
}
// Host-side Euclidean distance between two 2-D points.
// Fixed: the original called double-precision sqrt() in a float-returning
// function and squared via fabs()+powf(); squaring the differences directly
// and using sqrtf keeps the whole computation in float (the fabs was
// redundant under squaring). Callers only compare the result to thresholds.
float hostEuclideanDifference(float2 first_point, float2 second_point) {
	float dx = first_point.x - second_point.x;
	float dy = first_point.y - second_point.y;
	return sqrtf(dx * dx + dy * dy);
}
// Counts how many starting points produced a non-empty cycle.
int GetAmountAllCycles(int start_points, CycleData *cycle_list_host) {
	int found = 0;
	for (int idx = 0; idx < start_points; ++idx)
		found += (cycle_list_host[idx].length > 0) ? 1 : 0;
	return found;
}
//
// Groups the per-start-point cycles into equivalence classes: two cycles are
// considered the same when all corresponding elements are within `tau`
// (Euclidean distance). Each class in `differentCycles` keeps the cycle's
// elements plus every starting point that converged to it.
void FormDifferentCyclesList(float *a, float discr, dim3 grid, int start_points, vector <DifferingCycleInfo> *differentCycles, CycleData *cycle_list_host, float tau) {
bool is_add_cycle = false;
for (int i = 0; i < start_points; ++i) {
is_add_cycle = false;
for (int j = 0; j < differentCycles->size(); ++j) {
if (cycle_list_host[i].length == 0) { // no cycle found from this start point
is_add_cycle = true;
break;
}
// Compare element-wise against class j until the first mismatch.
// NOTE(review): the loop bound is elements.size() while points[] is
// indexed by the same h — confirm stored cycle lengths always match
// the class length, otherwise points[h] could run past the cycle.
int h = 0;
for ( ; h < (*differentCycles)[j].elements.size(); ++h)
if (hostEuclideanDifference((*differentCycles)[j].elements[h], cycle_list_host[i].points[h]) >= tau)
break;
if (h == cycle_list_host[i].length) { // full match: attach this start point
(*differentCycles)[j].startpoints.push_back(make_float2(a[0]+(i%grid.x)*discr, a[1]+i/grid.y*discr));
is_add_cycle = true;
break;
}
}
if (!is_add_cycle && cycle_list_host[i].length) { // unseen cycle: new class
DifferingCycleInfo ci;
for (int j = 0; j < cycle_list_host[i].length; ++j)
ci.elements.push_back(cycle_list_host[i].points[j]);
ci.startpoints.push_back(make_float2(a[0]+(i%grid.x)*discr, a[1]+i/grid.y*discr));
differentCycles->push_back(ci);
}
}
}
void OutputDifferentCycles(float *a, float d, dim3 grid, int start_points, CycleData *cycle_list_host, float tau) { //
vector <DifferingCycleInfo> differentCycles; // ,
int cycles = GetAmountAllCycles(start_points, cycle_list_host);
FormDifferentCyclesList(a, d, grid, start_points, &differentCycles, cycle_list_host, tau);
//
FILE *output_file; //
output_file = fopen("different_cycles.txt", "wt");
fprintf(output_file, "All start points: \t%d\n", start_points);
fprintf(output_file, "All cycles: \t\t%d\n", cycles);
fprintf(output_file, "\nDifferent cycles \t%d\t:\n", differentCycles.size());
for (int i = 0; i < differentCycles.size(); ++i) {
fprintf(output_file, "\n------------------------------------------------");
fprintf(output_file, "\nStart points \t%d\t:\t", differentCycles[i].startpoints.size());
for (int j = 0; j < differentCycles[i].startpoints.size(); ++j)
fprintf(output_file, "(%f, %f)\t", differentCycles[i].startpoints[j].x, differentCycles[i].startpoints[j].y);
fprintf(output_file, "\nCycle elements \t%d\t:\t", differentCycles[i].elements.size());
for (int j = 0; j < differentCycles[i].elements.size(); ++j)
fprintf(output_file, "(%f, %f)\t", (differentCycles[i].elements[j]).x, differentCycles[i].elements[j].y);
}
fclose(output_file);
}
bool IsPointSequenceOver(float2 point) { // -
float2 stop_point = make_float2(FLT_MAX, FLT_MAX); // , -
if (point.x != stop_point.x || point.y != stop_point.y )
return false;
else
return true;
}
void OutputAllPointSequences(float2 point_seq[MAX_ARRAY_SIZE], int start_points, int max_cycle_len) { // -
FILE *output_file;
output_file = fopen("all_pointsequences.txt", "w");
fprintf(output_file, "Start point \t\t\t\t Points of the sequence\n");
for (int i = 0; i < start_points; ++i) {
fprintf(output_file, "\n(%f, %f) \t\t", point_seq[i*max_cycle_len].x, point_seq[i*max_cycle_len].y);
for (int j = 1; j < max_cycle_len; ++j) {
if (!IsPointSequenceOver(point_seq[i*max_cycle_len+j]))
fprintf(output_file, "(%f, %f) ", point_seq[i*max_cycle_len+j].x, point_seq[i*max_cycle_len+j].y);
else
break;
}
}
fclose(output_file);
}
// -0.000000 ( - )
void DeleteNegativeZeros(int start_points, CycleData *cycle_list_host) {
for (int i = 0; i < start_points; ++i)
for (int j = 0; j < cycle_list_host[i].length; ++j) {
if (cycle_list_host[i].points[j].x == 0)
cycle_list_host[i].points[j].x = 0;
if (cycle_list_host[i].points[j].y == 0)
cycle_list_host[i].points[j].y = 0;
}
}
__device__ float deviceEuclideanDifference(float2 first_point, float2 second_point) { // hostEuclideanDifference, GPU
return __fsqrt_rn(__powf(fabs(first_point.x-second_point.x), 2) + __powf(fabs(first_point.y-second_point.y), 2));
}
// ""
__device__ bool IsCyclePersisting(float2 first_point, int num_iter, int cycle_len, int T, float tau, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) {
float2 second_point = first_point;
SkipIterations(&second_point, num_iter, cycle_len, num_math_mapping, math_mapping_params);
num_iter+=cycle_len;
for (int i = 0; i < T*cycle_len; ++i) {
float diff = deviceEuclideanDifference(first_point, second_point);
if (diff > tau)
return 0;
first_point = MathMapping(first_point, num_iter-cycle_len, num_math_mapping, math_mapping_params);
second_point = MathMapping(second_point, num_iter, num_math_mapping, math_mapping_params);
num_iter++;
}
return 1;
}
__device__ void AddCycleAtList(int idx, int cycle_len, float2 first_point, int num_iter, CycleData *cycle_list, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) {
// idx k-, first_point
for (int i = 0; i < cycle_len; ++i) {
cycle_list[idx].points[i] = first_point;
first_point = MathMapping(first_point, num_iter, num_math_mapping, math_mapping_params);
num_iter++;
}
cycle_list[idx].length = cycle_len;
}
__device__ void SearchMinPointInCycle(int idx, float2 *min, int *minPointIdx, CycleData *cycle_list) { //
for (int i = 0; i < cycle_list[idx].length ; ++i){
if (cycle_list[idx].points[i].x < min->x) {
*minPointIdx = i;
*min = cycle_list[idx].points[i];
}
if (cycle_list[idx].points[i].x == min->x && cycle_list[idx].points[i].y < min->y) {
*minPointIdx = i;
*min = cycle_list[idx].points[i];
}
}
}
// ()
__device__ void TurnCycleRelativeHisMinElement(CycleData *cycle_list, int idx, int minPointIdx) {
float2 v[128];
for (int i = 0; i < minPointIdx; ++i)
v[i] = cycle_list[idx].points[i];
for (int i = 0; i < cycle_list[idx].length-minPointIdx; ++i )
cycle_list[idx].points[i] = cycle_list[idx].points[minPointIdx+i];
for (int i = cycle_list[idx].length-minPointIdx; i < cycle_list[idx].length; ++i)
cycle_list[idx].points[i] = v[i-cycle_list[idx].length+minPointIdx];
}
__global__ void KernelShiftCycle(CycleData * cycle_list) { // , ()
int idx = blockIdx.x + blockIdx.y*gridDim.x;
float2 min;
if (cycle_list[idx].length != 0)
min = cycle_list[idx].points[0];
else
return;
int minPointIdx = 0; // ()
SearchMinPointInCycle(idx, &min, &minPointIdx, cycle_list);
TurnCycleRelativeHisMinElement(cycle_list, idx, minPointIdx);
}
// ,
__global__ void KernelSearchCycles(float2 *point_seq, float a[MAX_ARRAY_SIZE], float discr, int skip_iter, int max_cycle_len, int T, CycleData *cycle_list, float eps, float tau, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) {
float x = a[0] + blockIdx.x*discr; // x- ( )
float y = a[1] + blockIdx.y*discr; // y-
int idx = blockIdx.x + blockIdx.y*gridDim.x;
float2 first_point, second_point;
first_point = make_float2(x, y);
point_seq[idx*max_cycle_len+0]=first_point;
SkipIterations(&first_point, 0, skip_iter, num_math_mapping, math_mapping_params);
int num_iter=skip_iter+1;
second_point = MathMapping(first_point, skip_iter, num_math_mapping, math_mapping_params); // -
point_seq[idx*max_cycle_len+1]=second_point;
if( num_math_mapping==5 )
max_cycle_len=math_mapping_params[4]+1;
for (int i = 0; i < max_cycle_len-2; ++i) {
if (IsEqualPoints(first_point, second_point, eps)) // - first second
if (IsCyclePersisting(first_point, skip_iter, i+1, T, tau, num_math_mapping, math_mapping_params)) { // - .
if( num_math_mapping==5 )
AddCycleAtList(idx, 1, first_point, skip_iter, cycle_list, num_math_mapping, math_mapping_params);
else
AddCycleAtList(idx, i+1, first_point, skip_iter, cycle_list, num_math_mapping, math_mapping_params);
// CUDA , vector, -,
// ,
// -
point_seq[idx*max_cycle_len+2+i]=make_float2(FLT_MAX,FLT_MAX);
return;
} // return, .
if( num_math_mapping==5 )
first_point=second_point;
second_point = MathMapping(second_point, num_iter+i, num_math_mapping, math_mapping_params);
point_seq[idx*max_cycle_len+2+i]=second_point;
}
cycle_list[idx].length = 0; //
}
int main() {
printf("Stages of program:\n\n"); //
int num_math_mapping = 0; // ,
int num_points_source = 0; // ,
int dimension = 0, skip_iter = 0, max_iter = 0, max_cycle_len = 0, T = 0;
float eps = 0.0, tau = 0.0, discr = 0.0;
float a[MAX_ARRAY_SIZE], b[MAX_ARRAY_SIZE]; // -
float math_mapping_params[MAX_ARRAY_SIZE]; //
float2 point_seq[MAX_ARRAY_SIZE]; // -
string configfile = "data.config"; // ,
string readsection = "data_program"; // ,
if (!ReadData(configfile, readsection, &dimension, a, b, &num_points_source, &discr, &skip_iter, &max_iter, &max_cycle_len, &T, &eps, &tau, &num_math_mapping, math_mapping_params)) { //
ResultsOfWorkProgram(false);
return 0;
}
dim3 grid; // ,
if (!MakeGrid(num_points_source, &grid, a, b, discr)) {
ResultsOfWorkProgram(false);
return 0;
}
int start_points = grid.x*grid.y; // ,
CycleData *cycle_list_device = NULL, *cycle_list_host = NULL;
float2 *point_seq_device = NULL;
cycle_list_host = (CycleData*) malloc (start_points*sizeof(CycleData));
hipMalloc(&cycle_list_device, start_points*sizeof(CycleData));
if (!IsCudaDataCreated((void*) cycle_list_device))
return 0;
float *a_device = NULL, *math_mapping_params_device = NULL;
hipMalloc(&a_device, dimension*sizeof(float));
hipMalloc(&math_mapping_params_device, MAX_ARRAY_SIZE*sizeof(float));
hipMalloc( &point_seq_device, MAX_ARRAY_SIZE*sizeof(float2));
if (!IsCudaDataCreated((void*) a_device))
return 0;
if (!IsCudaDataCreated((void*) math_mapping_params_device))
return 0;
hipEvent_t start_time, finish_time;
hipEventCreate(&start_time);
hipEventCreate(&finish_time);
hipEventRecord(start_time, 0);
hipMemcpy(a_device, a, dimension*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(math_mapping_params_device, math_mapping_params, MAX_ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( KernelSearchCycles), dim3(grid), dim3(1), 0, 0, point_seq_device, a_device, discr, skip_iter, max_cycle_len, T, cycle_list_device, eps, tau, num_math_mapping, math_mapping_params_device);
hipDeviceSynchronize(); // ( , )
hipLaunchKernelGGL(( KernelShiftCycle), dim3(grid), dim3(1), 0, 0, cycle_list_device);
hipDeviceSynchronize();
hipMemcpy(cycle_list_host, cycle_list_device, start_points*sizeof(CycleData), hipMemcpyDeviceToHost);
hipMemcpy(point_seq, point_seq_device, MAX_ARRAY_SIZE*sizeof(float2), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(finish_time, 0);
hipEventSynchronize(finish_time);
float work_time = 0.0;
hipEventElapsedTime(&work_time, start_time, finish_time);
printf("Work time on GPU: %4.0f ms\n", work_time); // GPU
hipEventDestroy(start_time);
hipEventDestroy(finish_time);
OutputAllPointSequences(point_seq, start_points, max_cycle_len);
DeleteNegativeZeros(start_points, cycle_list_host);
OutputAllCycles(a, discr, grid, start_points, cycle_list_host);
OutputDifferentCycles(a, discr, grid, start_points, cycle_list_host, tau);
ResultsOfWorkProgram(true);
FreeUsedDynamicObjects(cycle_list_host, cycle_list_device, a_device, math_mapping_params_device, point_seq_device);
return 0;
} | 497587fcb91ad713a510ddf197173cdd8eca6326.cu | #pragma region Формулировка задачи
// Дан многомерный кубик. С помощью определенных входных параметров опредляются точки, из которых будут испускаться последовательности решений математического
// отображения. Требуется найти всевозможные цикловые решения без повторений, не превышающие определенной длины, в многомерном кубике.
//
// Вход: в файле data.config содержатся данные для работы программы
// dimension (n, size) - размерность многомерного кубика;
// Далее идут dimension отрезков-проекций вида [a_i, b_i] многомерного куба на оси координат (полностью определяют расположение куба в пространстве)
// (initset_proections);
// discr (d) - параметр дискретизации, расстояние между 2 соседними точками, из которых будут испускаться последовательности решений
// (параметр дискретизации определяет сетку (grid), которая накладывается на многомерный куб. Из каждой точки сетки испускаются решения);
// skip_iter (N, skip, amt_skip_iter) - количество пропускаемых значений для того, чтобы приблизиться к месту нахождения возможных циклов;
// max_iter (M, max_amt_iter) - ограничение сверху на количество итераций (максимальное количество подсчитанных значений в последовательности решений);
// max_cycle_len (K, lim_cycle_len) - максимально возможное значение длины цикла;
// T (check) - количество проверок цикла на устойчивость;
// eps - точность цикла (если какие-нибудь 2 значения в последовательности решений отличаются друг от друг не более чем на eps по модулю,
// то существует подозрение, что в этой последовательности решений есть цикл);
// tau - точность для проверки циклов на равенства (если 2 цикла одинаковой длины отличаются друг от друга по определенной выбранной норме не более чем на
// tau по модулю, то эти циклы считаются равными);
// name_math_mapping - использумое математическое отображение;
// math_mapping_params - входные параметры математического изображения, НЕОБЯЗАТЕЛЬНЫЙ ПАРАМЕТРЫ (по умолчанию все входные параметры для математического
// отображения - нулевые).
//
// Выход: в файле all_cycles.txt содержатся все возможные циклы в данном многомерном кубе;
// в файле different_cycles.txt содержатся все различные циклы.
#pragma endregion
#include "program.h" // вспомогательный заголовочный .h файл
int NumberMathMapping(char name_math_mapping[MAX_ARRAY_SIZE]) {
string string_math_mapping(name_math_mapping, 0, strlen(name_math_mapping)-1);
if (string_math_mapping == "ROTATION")
return 1;
if (string_math_mapping == "BERNULLI")
return 2;
if (string_math_mapping == "LOGICAL")
return 3;
if (string_math_mapping == "FILTERING")
return 4;
if (string_math_mapping == "RELAX")
return 5;
return 0;
}
int NumberGridKind(char name_grid[MAX_ARRAY_SIZE]) {
string string_name_grid(name_grid, 0, strlen(name_grid)-1);
if (string_name_grid == "UNIFORM")
return 1;
if (string_name_grid == "RANDOM")
return 2;
return 0;
}
bool AreParseDataFromConfig(string configfile, string readsection) { // распарсивание данных из файла конфигураций с помощью скрипта, написанного на Питоне
FILE *script_file;
char buffer[MAX_ARRAY_SIZE]; // выходящая информация из скрипта
string callcommand; // строка вызова для функции _popen
callcommand = "python " + pyscriptname + " " + configfile + " " + readsection; // формирование вызываемой в функции _popen строки
if (!(script_file = _popen(callcommand.c_str(), "r"))) { // в том случае, если скрипт не выполнился
printf("Error in launch Python script!");
return false;
}
while (fgets(buffer, sizeof(buffer), script_file) != NULL) // выгрузка выходящей из скрипта информации о его работе
printf("%s", buffer);
_pclose(script_file);
return true;
}
// функция считывания входных параметров для решения задачи
bool ReadData(string configfile, string readsection, int *dimension, float *a, float *b, int *num_points_source, float *discr, int *skip_iter, int *max_iter, int *max_cycle_len, int *T, float *eps, float *tau, int *num_math_mapping, float *math_mapping_params) {
if (!AreParseDataFromConfig(configfile, readsection))
return false;
FILE *input_file; // вспомогательный файл, с которого будет считываться информация
input_file = fopen("input.txt", "rt");
// считывание данных и проверка правильности считанных значений относительно ограничений
fscanf(input_file, "%d", dimension);
if (*dimension > MAX_DIMENSION) {
printf("Too much dimension of the multidimensional cube! Please reduce it...\n");
return false;
}
for (int i=0; i<MAX_DIMENSION; i++)
a[i] = b[i] = 0.;
for (int i = 0; i < *dimension; i++)
fscanf(input_file, "%f %f", &a[i], &b[i]);
char ch = NULL;
fscanf(input_file, "%c", &ch);
char name_points_source[MAX_ARRAY_SIZE];
fgets(name_points_source, MAX_ARRAY_SIZE, input_file);
*num_points_source = NumberGridKind(name_points_source);
fscanf(input_file, "%f", discr);
if (*discr > MAX_DISCR) {
printf("Too high a value of the discretization! Please reduce it...\n");
return false;
}
fscanf(input_file, "%d %d %d %d", skip_iter, max_iter , max_cycle_len, T);
if (*max_cycle_len > LIM_MAX_CYCLE_LENGTH) {
printf("Too high a value of cycle length! Please reduce it...\n");
return false;
}
fscanf(input_file,"%f %f", eps, tau);
memset(math_mapping_params, 0, MAX_ARRAY_SIZE);
fscanf(input_file, "%c", &ch);
char name_math_mapping[MAX_ARRAY_SIZE];
fgets(name_math_mapping, MAX_ARRAY_SIZE, input_file);
*num_math_mapping = NumberMathMapping(name_math_mapping);
int idx = 0;
fscanf(input_file, "%d", &idx);
for (int i=0; i<idx; i++)
fscanf(input_file, "%f", &math_mapping_params[i]);
printf("Program data is READ!\n"); // вывод сообщения об удачном считывании входных данных для работы программы
fclose(input_file);
remove("input.txt"); // удаление вспомогательного файла
return true;
}
void OutputAllCycles(float a[MAX_ARRAY_SIZE], float discr, dim3 grid, int start_points, CycleData *cycle_list_host) { // вывод всех циклов в файл
FILE *output_file; // файл для вывода информации
output_file = fopen("all_cycles.txt", "wt");
fprintf(output_file, "Start point \t\t\t\t Cyclelength \t\t\t Cycle\n");
for (int i = 0; i < start_points; ++i) { // вывод координат точек, из которых испускались решения
fprintf (output_file, "\n(%f,%f) \t\t cyclelength: %d \t\t", a[0]+(i%grid.x)*discr, a[1]+i/grid.y*discr, cycle_list_host[i].length);
for (int j = 0; j < cycle_list_host[i].length; ++j) // вывод координат цикла
fprintf (output_file, " (%f,%f) ", cycle_list_host[i].points[j].x, cycle_list_host[i].points[j].y) ;
}
fprintf (output_file, "\n\nAll start points: %d ", start_points) ;
fclose(output_file);
printf("Results of work program WRITE to output files!\n"); // вывод сообщения об удачном выводе данных в файл
}
void CudaOutputErrors() { // функция, выводящая информацию об ошибках при работе с инструментом CUDA
printf("CUDA errors: ");
printf(cudaGetErrorString(cudaGetLastError()));
printf("\n\n");
}
void ResultsOfWorkProgram(bool state) { // вывод информации о завершающих этапах работы программы
if (!state)
printf("Program is interrupted!\n");
else {
printf("Work with GPU are OVER!\nProgram has successfully completed its work!\n");
CudaOutputErrors();
}
}
bool IsCudaDataCreated(void *cuda_pointer) { // функция, проверяющая была ли выделена память на GPU
if (cuda_pointer == NULL) { // если память под указатель не была выделена на GPU
printf("\nData for device is not created!\n");
CudaOutputErrors();
return 0;
}
return 1;
}
// функция очистки памяти от динамических объектов
void FreeUsedDynamicObjects(CycleData *cycle_list_host, CycleData *cycle_list_device, float *a_device, float *math_mapping_params, float2 *point_seq_device) {
free(cycle_list_host);
cudaFree(cycle_list_device);
cudaFree(a_device);
cudaFree(math_mapping_params);
cudaFree(point_seq_device);
}
float RandomBetweenTwoValues(float p, float q) {
float rand_num = ((float)rand()) / (float)RAND_MAX;
float diff = q - p;
float r = rand_num * diff;
return p + r;
}
bool MakeGrid(int num_points_source, dim3 *grid, float a[MAX_ARRAY_SIZE], float b[MAX_ARRAY_SIZE], float discr) { // формирование сетки для GPU
if (num_points_source == 0) { // не был определён способ формирования сетки с начальными точками
printf("\nGrid was not formed!\n");
CudaOutputErrors();
return false;
}
if (num_points_source == 1) {
grid->x = ((b[0] - a[0]) / discr) + 1;
grid->y = ((b[1] - a[1]) / discr) + 1;
}
if (num_points_source == 2) {
grid->x = RandomBetweenTwoValues(a[0], b[0]);
grid->y = RandomBetweenTwoValues(a[1], b[1]);
}
return true;
}
__device__ void SkipIterations(float2 *point, int num_iter, int skip_iter, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) { // пропуск первых N итераций
for (int i = 0; i < skip_iter; ++i)
*point = MathMapping(*point, num_iter+i, num_math_mapping, math_mapping_params);
}
__device__ bool IsEqualPoints(float2 first_point, float2 second_point, float eps) { // эквивалентные точки с точностью до eps
if ((fabs(first_point.x - second_point.x) < eps) && (fabs(first_point.y - second_point.y) < eps))
return 1;
return 0;
}
float hostEuclideanDifference(float2 first_point, float2 second_point) { // разница между 2 циклами, посчитанная с помощью второй (евклидовой) нормы
return sqrt(powf(fabs(first_point.x - second_point.x), 2) + powf((fabs(first_point.y - second_point.y)), 2));
}
int GetAmountAllCycles(int start_points, CycleData *cycle_list_host) {
int cycles = 0; // количество циклов
for (int i = 0; i < start_points; i++) {
if (cycle_list_host[i].length > 0)
cycles++;
}
return cycles;
}
// формирование списка различных циклов
void FormDifferentCyclesList(float *a, float discr, dim3 grid, int start_points, vector <DifferingCycleInfo> *differentCycles, CycleData *cycle_list_host, float tau) {
bool is_add_cycle = false;
for (int i = 0; i < start_points; ++i) {
is_add_cycle = false;
for (int j = 0; j < differentCycles->size(); ++j) {
if (cycle_list_host[i].length == 0) { // циклы длины 0 не добавляются в вектор различных циклов
is_add_cycle = true;
break;
}
// добавление циклов в зависимости от значения евклидовой нормы разности точек
int h = 0;
for ( ; h < (*differentCycles)[j].elements.size(); ++h)
if (hostEuclideanDifference((*differentCycles)[j].elements[h], cycle_list_host[i].points[h]) >= tau)
break;
if (h == cycle_list_host[i].length) {
(*differentCycles)[j].startpoints.push_back(make_float2(a[0]+(i%grid.x)*discr, a[1]+i/grid.y*discr));
is_add_cycle = true;
break;
}
}
if (!is_add_cycle && cycle_list_host[i].length) { // добавление цикла в вектор
DifferingCycleInfo ci;
for (int j = 0; j < cycle_list_host[i].length; ++j)
ci.elements.push_back(cycle_list_host[i].points[j]);
ci.startpoints.push_back(make_float2(a[0]+(i%grid.x)*discr, a[1]+i/grid.y*discr));
differentCycles->push_back(ci);
}
}
}
void OutputDifferentCycles(float *a, float d, dim3 grid, int start_points, CycleData *cycle_list_host, float tau) { // вывод различных циклов в файл
vector <DifferingCycleInfo> differentCycles; // вектор, содержащий различные циклы
int cycles = GetAmountAllCycles(start_points, cycle_list_host);
FormDifferentCyclesList(a, d, grid, start_points, &differentCycles, cycle_list_host, tau);
// вывод различных циклов
FILE *output_file; // файл для вывода информации
output_file = fopen("different_cycles.txt", "wt");
fprintf(output_file, "All start points: \t%d\n", start_points);
fprintf(output_file, "All cycles: \t\t%d\n", cycles);
fprintf(output_file, "\nDifferent cycles \t%d\t:\n", differentCycles.size());
for (int i = 0; i < differentCycles.size(); ++i) {
fprintf(output_file, "\n------------------------------------------------");
fprintf(output_file, "\nStart points \t%d\t:\t", differentCycles[i].startpoints.size());
for (int j = 0; j < differentCycles[i].startpoints.size(); ++j)
fprintf(output_file, "(%f, %f)\t", differentCycles[i].startpoints[j].x, differentCycles[i].startpoints[j].y);
fprintf(output_file, "\nCycle elements \t%d\t:\t", differentCycles[i].elements.size());
for (int j = 0; j < differentCycles[i].elements.size(); ++j)
fprintf(output_file, "(%f, %f)\t", (differentCycles[i].elements[j]).x, differentCycles[i].elements[j].y);
}
fclose(output_file);
}
bool IsPointSequenceOver(float2 point) { // проверка на окончание последовательности-решения
float2 stop_point = make_float2(FLT_MAX, FLT_MAX); // точка с очень большими координатами, символизирующими конец последовательности-решения
if (point.x != stop_point.x || point.y != stop_point.y )
return false;
else
return true;
}
void OutputAllPointSequences(float2 point_seq[MAX_ARRAY_SIZE], int start_points, int max_cycle_len) { // вывод всех последовательностей-решений в файл
FILE *output_file;
output_file = fopen("all_pointsequences.txt", "w");
fprintf(output_file, "Start point \t\t\t\t Points of the sequence\n");
for (int i = 0; i < start_points; ++i) {
fprintf(output_file, "\n(%f, %f) \t\t", point_seq[i*max_cycle_len].x, point_seq[i*max_cycle_len].y);
for (int j = 1; j < max_cycle_len; ++j) {
if (!IsPointSequenceOver(point_seq[i*max_cycle_len+j]))
fprintf(output_file, "(%f, %f) ", point_seq[i*max_cycle_len+j].x, point_seq[i*max_cycle_len+j].y);
else
break;
}
}
fclose(output_file);
}
// избавление от значений вида -0.000000 при выводе в файл (подобного рода значения возникают из-за машинного нуля)
void DeleteNegativeZeros(int start_points, CycleData *cycle_list_host) {
for (int i = 0; i < start_points; ++i)
for (int j = 0; j < cycle_list_host[i].length; ++j) {
if (cycle_list_host[i].points[j].x == 0)
cycle_list_host[i].points[j].x = 0;
if (cycle_list_host[i].points[j].y == 0)
cycle_list_host[i].points[j].y = 0;
}
}
__device__ float deviceEuclideanDifference(float2 first_point, float2 second_point) { // аналог функции hostEuclideanDifference, выполняется на GPU
return __fsqrt_rn(__powf(fabs(first_point.x-second_point.x), 2) + __powf(fabs(first_point.y-second_point.y), 2));
}
// Проверка цикла на "устойчивость"
__device__ bool IsCyclePersisting(float2 first_point, int num_iter, int cycle_len, int T, float tau, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) {
float2 second_point = first_point;
SkipIterations(&second_point, num_iter, cycle_len, num_math_mapping, math_mapping_params);
num_iter+=cycle_len;
for (int i = 0; i < T*cycle_len; ++i) {
float diff = deviceEuclideanDifference(first_point, second_point);
if (diff > tau)
return 0;
first_point = MathMapping(first_point, num_iter-cycle_len, num_math_mapping, math_mapping_params);
second_point = MathMapping(second_point, num_iter, num_math_mapping, math_mapping_params);
num_iter++;
}
return 1;
}
__device__ void AddCycleAtList(int idx, int cycle_len, float2 first_point, int num_iter, CycleData *cycle_list, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) {
// добавить в список по индексу idx k-элементов, начиная с first_point
for (int i = 0; i < cycle_len; ++i) {
cycle_list[idx].points[i] = first_point;
first_point = MathMapping(first_point, num_iter, num_math_mapping, math_mapping_params);
num_iter++;
}
cycle_list[idx].length = cycle_len;
}
__device__ void SearchMinPointInCycle(int idx, float2 *min, int *minPointIdx, CycleData *cycle_list) { // поиск минимальной точки в цикле и ее индекса
for (int i = 0; i < cycle_list[idx].length ; ++i){
if (cycle_list[idx].points[i].x < min->x) {
*minPointIdx = i;
*min = cycle_list[idx].points[i];
}
if (cycle_list[idx].points[i].x == min->x && cycle_list[idx].points[i].y < min->y) {
*minPointIdx = i;
*min = cycle_list[idx].points[i];
}
}
}
// циклический поворот (сдвиг) цикла относительно его минимального элемента
__device__ void TurnCycleRelativeHisMinElement(CycleData *cycle_list, int idx, int minPointIdx) {
float2 v[128];
for (int i = 0; i < minPointIdx; ++i)
v[i] = cycle_list[idx].points[i];
for (int i = 0; i < cycle_list[idx].length-minPointIdx; ++i )
cycle_list[idx].points[i] = cycle_list[idx].points[minPointIdx+i];
for (int i = cycle_list[idx].length-minPointIdx; i < cycle_list[idx].length; ++i)
cycle_list[idx].points[i] = v[i-cycle_list[idx].length+minPointIdx];
}
__global__ void KernelShiftCycle(CycleData * cycle_list) { // ядро, осуществляющее смещение (поворот) цикла
int idx = blockIdx.x + blockIdx.y*gridDim.x;
float2 min;
if (cycle_list[idx].length != 0)
min = cycle_list[idx].points[0];
else
return;
int minPointIdx = 0; // индекс (номер) минимального элемента в цикле
SearchMinPointInCycle(idx, &min, &minPointIdx, cycle_list);
TurnCycleRelativeHisMinElement(cycle_list, idx, minPointIdx);
}
// ядро, осуществляющее поиск циклов
__global__ void KernelSearchCycles(float2 *point_seq, float a[MAX_ARRAY_SIZE], float discr, int skip_iter, int max_cycle_len, int T, CycleData *cycle_list, float eps, float tau, int num_math_mapping, float math_mapping_params[MAX_ARRAY_SIZE]) {
float x = a[0] + blockIdx.x*discr; // x-координата текущей стартовой точки (из начального множества)
float y = a[1] + blockIdx.y*discr; // y-координата
int idx = blockIdx.x + blockIdx.y*gridDim.x;
float2 first_point, second_point;
first_point = make_float2(x, y);
point_seq[idx*max_cycle_len+0]=first_point;
SkipIterations(&first_point, 0, skip_iter, num_math_mapping, math_mapping_params);
int num_iter=skip_iter+1;
second_point = MathMapping(first_point, skip_iter, num_math_mapping, math_mapping_params); // следующая точка из последовательности-решения
point_seq[idx*max_cycle_len+1]=second_point;
if( num_math_mapping==5 )
max_cycle_len=math_mapping_params[4]+1;
for (int i = 0; i < max_cycle_len-2; ++i) {
if (IsEqualPoints(first_point, second_point, eps)) // сработало условие - значит first и second близки
if (IsCyclePersisting(first_point, skip_iter, i+1, T, tau, num_math_mapping, math_mapping_params)) { // если цикл сохраняется - добавляем элементы в список и выходим.
if( num_math_mapping==5 )
AddCycleAtList(idx, 1, first_point, skip_iter, cycle_list, num_math_mapping, math_mapping_params);
else
AddCycleAtList(idx, i+1, first_point, skip_iter, cycle_list, num_math_mapping, math_mapping_params);
// поскольку в CUDA невозможно ни хранить зубчатые массивы, ни работать с vector, для окончания последовательности-решения, исходящего из данной
// начальной точки в её конец специально добавляется точка с очень большими координатами, символизирующее окончание работы с данной
// последовательностью-решением
point_seq[idx*max_cycle_len+2+i]=make_float2(FLT_MAX,FLT_MAX);
return;
} // не сработал return, значит ищем цикл большей длины.
if( num_math_mapping==5 )
first_point=second_point;
second_point = MathMapping(second_point, num_iter+i, num_math_mapping, math_mapping_params);
point_seq[idx*max_cycle_len+2+i]=second_point;
}
cycle_list[idx].length = 0; // в случае если цикл так и не нашли
}
int main() {
printf("Stages of program:\n\n"); // этапы выполнения программы
int num_math_mapping = 0; // число, соответствующее математическому отображению
int num_points_source = 0; // число, соответствующее разновидности генерации сетки
int dimension = 0, skip_iter = 0, max_iter = 0, max_cycle_len = 0, T = 0;
float eps = 0.0, tau = 0.0, discr = 0.0;
float a[MAX_ARRAY_SIZE], b[MAX_ARRAY_SIZE]; // массивы для хранения концов отрезков-проекций многомерного куба на оси координат
float math_mapping_params[MAX_ARRAY_SIZE]; // массив для хранения параметров математического отображения
float2 point_seq[MAX_ARRAY_SIZE]; // массив для хранения последовательности-решения для каждой итерации
string configfile = "data.config"; // конфигурационный файл, в котором хранятся входные параметры программы
string readsection = "data_program"; // секция, определяющая какие данные следует считывать из конфига
if (!ReadData(configfile, readsection, &dimension, a, b, &num_points_source, &discr, &skip_iter, &max_iter, &max_cycle_len, &T, &eps, &tau, &num_math_mapping, math_mapping_params)) { // в случае ошибки при считывании входных параметров программы
ResultsOfWorkProgram(false);
return 0;
}
dim3 grid; // сетка из начальных точек, поступающих на вход ядру
if (!MakeGrid(num_points_source, &grid, a, b, discr)) {
ResultsOfWorkProgram(false);
return 0;
}
int start_points = grid.x*grid.y; // количество начальных точек, из которых будут испускаться решения
CycleData *cycle_list_device = NULL, *cycle_list_host = NULL;
float2 *point_seq_device = NULL;
cycle_list_host = (CycleData*) malloc (start_points*sizeof(CycleData));
cudaMalloc(&cycle_list_device, start_points*sizeof(CycleData));
if (!IsCudaDataCreated((void*) cycle_list_device))
return 0;
float *a_device = NULL, *math_mapping_params_device = NULL;
cudaMalloc(&a_device, dimension*sizeof(float));
cudaMalloc(&math_mapping_params_device, MAX_ARRAY_SIZE*sizeof(float));
cudaMalloc( &point_seq_device, MAX_ARRAY_SIZE*sizeof(float2));
if (!IsCudaDataCreated((void*) a_device))
return 0;
if (!IsCudaDataCreated((void*) math_mapping_params_device))
return 0;
cudaEvent_t start_time, finish_time;
cudaEventCreate(&start_time);
cudaEventCreate(&finish_time);
cudaEventRecord(start_time, 0);
cudaMemcpy(a_device, a, dimension*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(math_mapping_params_device, math_mapping_params, MAX_ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
KernelSearchCycles<<<grid, 1>>>(point_seq_device, a_device, discr, skip_iter, max_cycle_len, T, cycle_list_device, eps, tau, num_math_mapping, math_mapping_params_device);
cudaThreadSynchronize(); // синхронизация всех нитей (следующая функция программы не будет выполнена до тех пор, пока последняя нить не окончит свою работу)
KernelShiftCycle<<<grid, 1>>>(cycle_list_device);
cudaThreadSynchronize();
cudaMemcpy(cycle_list_host, cycle_list_device, start_points*sizeof(CycleData), cudaMemcpyDeviceToHost);
cudaMemcpy(point_seq, point_seq_device, MAX_ARRAY_SIZE*sizeof(float2), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaEventRecord(finish_time, 0);
cudaEventSynchronize(finish_time);
float work_time = 0.0;
cudaEventElapsedTime(&work_time, start_time, finish_time);
printf("Work time on GPU: %4.0f ms\n", work_time); // вывод времени работы программы на GPU
cudaEventDestroy(start_time);
cudaEventDestroy(finish_time);
OutputAllPointSequences(point_seq, start_points, max_cycle_len);
DeleteNegativeZeros(start_points, cycle_list_host);
OutputAllCycles(a, discr, grid, start_points, cycle_list_host);
OutputDifferentCycles(a, discr, grid, start_points, cycle_list_host, tau);
ResultsOfWorkProgram(true);
FreeUsedDynamicObjects(cycle_list_host, cycle_list_device, a_device, math_mapping_params_device, point_seq_device);
return 0;
} |
f1c781f632b783f172cdda39b5da422f6a92f62a.hip | // !!! This is a file automatically generated by hipify!!!
// ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include <mpi.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
#include "unit_tests.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// ==============================================================================
// Write field `u` into `fname` as a raw binary brick of doubles using
// collective MPI-IO: every rank writes its own (nx x ny) patch into its
// slice of the global (options.nx x options.ny) array through an MPI
// subarray view, so all ranks call this collectively.
// NOTE(review): error handling relies on assert(), which is compiled out
// under NDEBUG - confirm unchecked IO is acceptable for release builds.
void write_binary(std::string fname, Field &u, SubDomain &domain, Discretization &options)
{
    MPI_Offset disp = 0;
    MPI_File filehandle;
    MPI_Datatype filetype;
    int result =
        MPI_File_open(
            MPI_COMM_WORLD,
            fname.c_str(),
            MPI_MODE_CREATE | MPI_MODE_WRONLY,
            MPI_INFO_NULL,
            &filehandle
        );
    assert(result==MPI_SUCCESS);
    // this rank's patch inside the global array (startx/starty are 1-based,
    // hence the -1)
    int ustart[] = {domain.startx-1, domain.starty-1};
    int ucount[] = {domain.nx, domain.ny};
    int dimuids[] = {options.nx, options.ny};
    result = MPI_Type_create_subarray(2, dimuids, ucount, ustart, MPI_ORDER_FORTRAN, MPI_DOUBLE, &filetype);
    assert(result==MPI_SUCCESS);
    result = MPI_Type_commit(&filetype);
    assert(result==MPI_SUCCESS);
    result = MPI_File_set_view(filehandle, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
    assert(result==MPI_SUCCESS);
    // update the host values, before writing to file
    u.update_host();
    result = MPI_File_write_all(filehandle, u.host_data(), domain.N, MPI_DOUBLE, MPI_STATUS_IGNORE);
    assert(result==MPI_SUCCESS);
    result = MPI_Type_free(&filetype);
    assert(result==MPI_SUCCESS);
    result = MPI_File_close(&filehandle);
    assert(result==MPI_SUCCESS);
}
// read command line arguments
// Parse and validate the command line (main nx ny nt t [v]) into `options`,
// then derive the timestep size dt, grid spacing dx and coefficient alpha.
// Exits the process with a diagnostic on any invalid argument.
static void readcmdline(Discretization& options, int argc, char* argv[])
{
    if (argc<5 || argc>6 ) {
        std::cerr << "Usage: main nx ny nt t\n";
        std::cerr << " nx number of gridpoints in x-direction\n";
        std::cerr << " ny number of gridpoints in y-direction\n";
        std::cerr << " nt number of timesteps\n";
        std::cerr << " t total time\n";
        std::cerr << " v [optional] turn on verbose output\n";
        exit(1);
    }
    // read nx
    options.nx = atoi(argv[1]);
    if (options.nx < 1) {
        std::cerr << "nx must be positive integer\n";
        exit(-1);
    }
    // read ny
    options.ny = atoi(argv[2]);
    if (options.ny < 1) {
        std::cerr << "ny must be positive integer\n";
        exit(-1);
    }
    // read nt
    options.nt = atoi(argv[3]);
    if (options.nt < 1) {
        std::cerr << "nt must be positive integer\n";
        exit(-1);
    }
    // read total time
    double t = atof(argv[4]);
    // reject t == 0 as well: it would make dt zero and alpha divide by zero
    if (t <= 0) {
        std::cerr << "t must be positive real value\n";
        exit(-1);
    }
    // any 6th argument enables verbose output on the root rank only
    // NOTE(review): main() calls this before MPI_Init/domain.init, so
    // domain.rank here still holds the global's default value - confirm.
    verbose_output = false;
    if( argc==6 ) {
        verbose_output = (domain.rank==0);
    }
    // compute timestep size
    options.dt = t / options.nt;
    // compute the distance between grid points
    // assume that x dimension has length 1.0
    options.dx = 1. / (options.nx - 1);
    // set alpha, assume diffusion coefficient D is 1
    options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
// ==============================================================================
// Program entry point: parse the run configuration, initialize the GPU and
// MPI, decompose the domain, run the implicit time-stepping loop (Newton
// outer iteration with a CG linear solve per iteration), then dump the
// solution as a BOV brick and print timing statistics.
int main(int argc, char* argv[])
{
    // read command line arguments
    // NOTE(review): this runs before MPI_Init/domain.init below, so the
    // domain.rank consulted inside readcmdline (for verbose output) still
    // holds the global's default value - confirm this is intended.
    readcmdline(options, argc, argv);
    // initialize cuda
    // assert that there is exactly one GPU per node, i.e. there should only be 1 GPU
    // visible to each MPI rank
    int device_count;
    cuda_api_call( hipGetDeviceCount(&device_count) );
    if(device_count != 1) {
        std::cerr << "error: there should be one device per node" << std::endl;
        exit(-1);
    }
    cuda_api_call( hipSetDevice(0) );
    // get the cublas handle to force cublas initialization outside the main time
    // stepping loop, to ensure that the timing doesn't count initialization costs
    auto handle = cublas_handle();
    // initialize MPI
    int mpi_rank, mpi_size;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    bool is_root = mpi_rank==0;
    // initialize subdomain
    domain.init(mpi_rank, mpi_size, options);
    domain.print();
    int nx = domain.nx;
    int ny = domain.ny;
    int nt = options.nt;
    // set iteration parameters
    int max_cg_iters = 200;       // cap on inner (linear) CG iterations
    int max_newton_iters = 50;    // cap on outer (nonlinear) Newton iterations
    double tolerance = 1.e-6;
    if( domain.rank == 0 ) {
        std::cout << "\n========================================================================" << std::endl;
        std::cout << " Welcome to mini-stencil!" << std::endl;
        std::cout << "version :: C++ with MPI : " << domain.size << " MPI ranks" << std::endl;
        std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
        std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;;
        std::cout << "iteration :: " << "CG " << max_cg_iters
                  << ", Newton " << max_newton_iters
                  << ", tolerance " << tolerance << std::endl;;
        std::cout << "========================================================================\n" << std::endl;
    }
    // allocate global fields
    x_new.init(nx,ny);
    x_old.init(nx,ny);
    bndN.init(nx,1);
    bndS.init(nx,1);
    bndE.init(ny,1);
    bndW.init(ny,1);
    buffN.init(nx,1);
    buffS.init(nx,1);
    buffE.init(ny,1);
    buffW.init(ny,1);
    Field b(nx,ny);        // Newton residual / CG right-hand side
    Field deltax(nx,ny);   // Newton update solved for by CG
    // create 1-sided windows with boundaries
    // TODO: put unit tests here because:
    // * they can then use the buffers and fields allocated for the main application
    // * they won't interfere with the initial conditions, set below
    if (!unit_tests()) {
        return 1;
    }
    // set dirichlet boundary conditions to 0 all around
    ss_fill(bndN, 0);
    ss_fill(bndS, 0);
    ss_fill(bndE, 0);
    ss_fill(bndW, 0);
    // set the initial condition
    // a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
    // no larger than 1/8 of both xdim and ydim
    ss_fill(x_new, 0.);
    double xc = 1.0 / 4.0;
    double yc = (options.ny - 1) * options.dx / 4;
    // NOTE(review): `::min` looks like an automated-port (hipify) artifact
    // for std::min - verify it resolves to the intended overload.
    double radius = ::min(xc, yc) / 2.0;
    for (int j = domain.starty-1; j < domain.endy; j++)
    {
        double y = (j - 1) * options.dx;
        for (int i = domain.startx-1; i < domain.endx; i++)
        {
            double x = (i - 1) * options.dx;
            if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
                x_new(i-domain.startx+1, j-domain.starty+1) = 0.1;
        }
    }
    // update initial conditions on the device
    x_new.update_device();
    iters_cg = 0;
    iters_newton = 0;
    // start timer
    double timespent = -omp_get_wtime();
    // main timeloop
    for (int timestep = 1; timestep <= nt; timestep++)
    {
        // set x_new and x_old to be the solution
        ss_copy(x_old, x_new);
        double residual;
        bool converged = false;
        int it;
        for (it=0; it<max_newton_iters; it++)
        {
            // compute residual : requires both x_new and x_old
            diffusion(x_new, b);
            residual = ss_norm2(b);
            // check for convergence
            if (residual < tolerance)
            {
                converged = true;
                break;
            }
            // solve linear system to get -deltax
            bool cg_converged = false;
            ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
            // check that the CG solver converged
            if (!cg_converged) break;
            // update solution
            ss_axpy(x_new, -1.0, deltax);
        }
        iters_newton += it+1;
        // output some statistics
        if (converged && verbose_output && is_root) {
            std::cout << "step " << timestep
                      << " required " << it
                      << " iterations for residual " << residual
                      << std::endl;
        }
        if (!converged) {
            if(!domain.rank) {
                std::cerr << "step " << timestep
                          << " ERROR : nonlinear iterations failed to converge" << std::endl;;
            }
            break;
        }
    }
    // get times
    timespent += omp_get_wtime();
    ////////////////////////////////////////////////////////////////////
    // write final solution to BOV file for visualization
    ////////////////////////////////////////////////////////////////////
    // binary data
    // NOTE(review): this writes x_old (the state at the start of the last
    // step), while x_new holds the most recent Newton update - confirm
    // which field is meant to be the "final solution".
    write_binary("output.bin", x_old, domain, options);
    // metadata
    if (is_root) {
        std::ofstream fid("output.bov");
        fid << "TIME: 0.0" << std::endl;
        fid << "DATA_FILE: output.bin" << std::endl;
        fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;;
        fid << "DATA_FORMAT: DOUBLE" << std::endl;
        fid << "VARIABLE: phi" << std::endl;
        fid << "DATA_ENDIAN: LITTLE" << std::endl;
        fid << "CENTERING: nodal" << std::endl;
        fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
    }
    // print table sumarizing results
    if (is_root) {
        std::cout << "--------------------------------------------------------------------------------"
                  << std::endl;
        std::cout << "simulation took " << timespent << " seconds" << std::endl;
        std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
                  << float(iters_cg)/timespent << " iters/second" << std::endl;
        std::cout << iters_newton << " newton iterations" << std::endl;
        std::cout << "--------------------------------------------------------------------------------"
                  << std::endl;
    }
    if (is_root) std::cout << "Goodbye!" << std::endl;
    // clean windows, communicator and do finalize
    MPI_Finalize();
    return 0;
}
| f1c781f632b783f172cdda39b5da422f6a92f62a.cu | // ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include <mpi.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
#include "unit_tests.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// ==============================================================================
// Write field `u` into `fname` as a raw binary brick of doubles using
// collective MPI-IO: every rank writes its own (nx x ny) patch into its
// slice of the global (options.nx x options.ny) array through an MPI
// subarray view, so all ranks call this collectively.
// NOTE(review): error handling relies on assert(), which is compiled out
// under NDEBUG - confirm unchecked IO is acceptable for release builds.
void write_binary(std::string fname, Field &u, SubDomain &domain, Discretization &options)
{
    MPI_Offset disp = 0;
    MPI_File filehandle;
    MPI_Datatype filetype;
    int result =
        MPI_File_open(
            MPI_COMM_WORLD,
            fname.c_str(),
            MPI_MODE_CREATE | MPI_MODE_WRONLY,
            MPI_INFO_NULL,
            &filehandle
        );
    assert(result==MPI_SUCCESS);
    // this rank's patch inside the global array (startx/starty are 1-based,
    // hence the -1)
    int ustart[] = {domain.startx-1, domain.starty-1};
    int ucount[] = {domain.nx, domain.ny};
    int dimuids[] = {options.nx, options.ny};
    result = MPI_Type_create_subarray(2, dimuids, ucount, ustart, MPI_ORDER_FORTRAN, MPI_DOUBLE, &filetype);
    assert(result==MPI_SUCCESS);
    result = MPI_Type_commit(&filetype);
    assert(result==MPI_SUCCESS);
    result = MPI_File_set_view(filehandle, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
    assert(result==MPI_SUCCESS);
    // update the host values, before writing to file
    u.update_host();
    result = MPI_File_write_all(filehandle, u.host_data(), domain.N, MPI_DOUBLE, MPI_STATUS_IGNORE);
    assert(result==MPI_SUCCESS);
    result = MPI_Type_free(&filetype);
    assert(result==MPI_SUCCESS);
    result = MPI_File_close(&filehandle);
    assert(result==MPI_SUCCESS);
}
// read command line arguments
// Parse and validate the command line (main nx ny nt t [v]) into `options`,
// then derive the timestep size dt, grid spacing dx and coefficient alpha.
// Exits the process with a diagnostic on any invalid argument.
static void readcmdline(Discretization& options, int argc, char* argv[])
{
    if (argc<5 || argc>6 ) {
        std::cerr << "Usage: main nx ny nt t\n";
        std::cerr << " nx number of gridpoints in x-direction\n";
        std::cerr << " ny number of gridpoints in y-direction\n";
        std::cerr << " nt number of timesteps\n";
        std::cerr << " t total time\n";
        std::cerr << " v [optional] turn on verbose output\n";
        exit(1);
    }
    // read nx
    options.nx = atoi(argv[1]);
    if (options.nx < 1) {
        std::cerr << "nx must be positive integer\n";
        exit(-1);
    }
    // read ny
    options.ny = atoi(argv[2]);
    if (options.ny < 1) {
        std::cerr << "ny must be positive integer\n";
        exit(-1);
    }
    // read nt
    options.nt = atoi(argv[3]);
    if (options.nt < 1) {
        std::cerr << "nt must be positive integer\n";
        exit(-1);
    }
    // read total time
    double t = atof(argv[4]);
    // reject t == 0 as well: it would make dt zero and alpha divide by zero
    if (t <= 0) {
        std::cerr << "t must be positive real value\n";
        exit(-1);
    }
    // any 6th argument enables verbose output on the root rank only
    // NOTE(review): main() calls this before MPI_Init/domain.init, so
    // domain.rank here still holds the global's default value - confirm.
    verbose_output = false;
    if( argc==6 ) {
        verbose_output = (domain.rank==0);
    }
    // compute timestep size
    options.dt = t / options.nt;
    // compute the distance between grid points
    // assume that x dimension has length 1.0
    options.dx = 1. / (options.nx - 1);
    // set alpha, assume diffusion coefficient D is 1
    options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
// ==============================================================================
// Program entry point: parse the run configuration, initialize the GPU and
// MPI, decompose the domain, run the implicit time-stepping loop (Newton
// outer iteration with a CG linear solve per iteration), then dump the
// solution as a BOV brick and print timing statistics.
int main(int argc, char* argv[])
{
    // read command line arguments
    // NOTE(review): this runs before MPI_Init/domain.init below, so the
    // domain.rank consulted inside readcmdline (for verbose output) still
    // holds the global's default value - confirm this is intended.
    readcmdline(options, argc, argv);
    // initialize cuda
    // assert that there is exactly one GPU per node, i.e. there should only be 1 GPU
    // visible to each MPI rank
    int device_count;
    cuda_api_call( cudaGetDeviceCount(&device_count) );
    if(device_count != 1) {
        std::cerr << "error: there should be one device per node" << std::endl;
        exit(-1);
    }
    cuda_api_call( cudaSetDevice(0) );
    // get the cublas handle to force cublas initialization outside the main time
    // stepping loop, to ensure that the timing doesn't count initialization costs
    auto handle = cublas_handle();
    // initialize MPI
    int mpi_rank, mpi_size;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    bool is_root = mpi_rank==0;
    // initialize subdomain
    domain.init(mpi_rank, mpi_size, options);
    domain.print();
    int nx = domain.nx;
    int ny = domain.ny;
    int nt = options.nt;
    // set iteration parameters
    int max_cg_iters = 200;       // cap on inner (linear) CG iterations
    int max_newton_iters = 50;    // cap on outer (nonlinear) Newton iterations
    double tolerance = 1.e-6;
    if( domain.rank == 0 ) {
        std::cout << "\n========================================================================" << std::endl;
        std::cout << " Welcome to mini-stencil!" << std::endl;
        std::cout << "version :: C++ with MPI : " << domain.size << " MPI ranks" << std::endl;
        std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
        std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;;
        std::cout << "iteration :: " << "CG " << max_cg_iters
                  << ", Newton " << max_newton_iters
                  << ", tolerance " << tolerance << std::endl;;
        std::cout << "========================================================================\n" << std::endl;
    }
    // allocate global fields
    x_new.init(nx,ny);
    x_old.init(nx,ny);
    bndN.init(nx,1);
    bndS.init(nx,1);
    bndE.init(ny,1);
    bndW.init(ny,1);
    buffN.init(nx,1);
    buffS.init(nx,1);
    buffE.init(ny,1);
    buffW.init(ny,1);
    Field b(nx,ny);        // Newton residual / CG right-hand side
    Field deltax(nx,ny);   // Newton update solved for by CG
    // create 1-sided windows with boundaries
    // TODO: put unit tests here because:
    // * they can then use the buffers and fields allocated for the main application
    // * they won't interfere with the initial conditions, set below
    if (!unit_tests()) {
        return 1;
    }
    // set dirichlet boundary conditions to 0 all around
    ss_fill(bndN, 0);
    ss_fill(bndS, 0);
    ss_fill(bndE, 0);
    ss_fill(bndW, 0);
    // set the initial condition
    // a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
    // no larger than 1/8 of both xdim and ydim
    ss_fill(x_new, 0.);
    double xc = 1.0 / 4.0;
    double yc = (options.ny - 1) * options.dx / 4;
    double radius = std::min(xc, yc) / 2.0;
    for (int j = domain.starty-1; j < domain.endy; j++)
    {
        double y = (j - 1) * options.dx;
        for (int i = domain.startx-1; i < domain.endx; i++)
        {
            double x = (i - 1) * options.dx;
            if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
                x_new(i-domain.startx+1, j-domain.starty+1) = 0.1;
        }
    }
    // update initial conditions on the device
    x_new.update_device();
    iters_cg = 0;
    iters_newton = 0;
    // start timer
    double timespent = -omp_get_wtime();
    // main timeloop
    for (int timestep = 1; timestep <= nt; timestep++)
    {
        // set x_new and x_old to be the solution
        ss_copy(x_old, x_new);
        double residual;
        bool converged = false;
        int it;
        for (it=0; it<max_newton_iters; it++)
        {
            // compute residual : requires both x_new and x_old
            diffusion(x_new, b);
            residual = ss_norm2(b);
            // check for convergence
            if (residual < tolerance)
            {
                converged = true;
                break;
            }
            // solve linear system to get -deltax
            bool cg_converged = false;
            ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
            // check that the CG solver converged
            if (!cg_converged) break;
            // update solution
            ss_axpy(x_new, -1.0, deltax);
        }
        iters_newton += it+1;
        // output some statistics
        if (converged && verbose_output && is_root) {
            std::cout << "step " << timestep
                      << " required " << it
                      << " iterations for residual " << residual
                      << std::endl;
        }
        if (!converged) {
            if(!domain.rank) {
                std::cerr << "step " << timestep
                          << " ERROR : nonlinear iterations failed to converge" << std::endl;;
            }
            break;
        }
    }
    // get times
    timespent += omp_get_wtime();
    ////////////////////////////////////////////////////////////////////
    // write final solution to BOV file for visualization
    ////////////////////////////////////////////////////////////////////
    // binary data
    // NOTE(review): this writes x_old (the state at the start of the last
    // step), while x_new holds the most recent Newton update - confirm
    // which field is meant to be the "final solution".
    write_binary("output.bin", x_old, domain, options);
    // metadata
    if (is_root) {
        std::ofstream fid("output.bov");
        fid << "TIME: 0.0" << std::endl;
        fid << "DATA_FILE: output.bin" << std::endl;
        fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;;
        fid << "DATA_FORMAT: DOUBLE" << std::endl;
        fid << "VARIABLE: phi" << std::endl;
        fid << "DATA_ENDIAN: LITTLE" << std::endl;
        fid << "CENTERING: nodal" << std::endl;
        fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
    }
    // print table sumarizing results
    if (is_root) {
        std::cout << "--------------------------------------------------------------------------------"
                  << std::endl;
        std::cout << "simulation took " << timespent << " seconds" << std::endl;
        std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
                  << float(iters_cg)/timespent << " iters/second" << std::endl;
        std::cout << iters_newton << " newton iterations" << std::endl;
        std::cout << "--------------------------------------------------------------------------------"
                  << std::endl;
    }
    if (is_root) std::cout << "Goodbye!" << std::endl;
    // clean windows, communicator and do finalize
    MPI_Finalize();
    return 0;
}
|
7cd3875154c2255e5f3f6c7bb9b74c8fdeb0f6a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// Shell-sorts a[0..N-1] in place (ascending).
// NOTE(review): the loops below immediately overwrite the thread indices, so
// every thread executes the FULL sequential sort on the same array - that is
// a data race unless the kernel is launched with exactly one thread.
__global__ void ShellSortKernel( int *a, int N) // kernel definition
{
    int i = threadIdx.x; // thread index within the block (overwritten below)
    int j = threadIdx.y;
    int temp;
    int dim; // current gap size
    for (dim = N / 2; dim > 0; dim /= 2)        // halve the gap each pass
        for (i = dim; i < N; i++)               // walk elements past the gap
            for (j = i - dim; j >= 0 && a[j]>a[j + dim]; j -= dim) {
                temp = a[j];                    // swap the out-of-order pair,
                a[j] = a[j + dim];              // bubbling back by `dim`
                a[j + dim] = temp;
            }
}
int main()
{
size_t N = 10;
int array[] = { 9, 6, 1, 2, 7, 3, 8, 4, 10, 5 };
int i;
int *d_a; //vectorul buffer alocat
hipMalloc(&d_a, N); //alocam memorie liniara // se aloca vectorul in memoria dispozitivului si dimensiunea acestuia
memcpy(d_a, array, sizeof(int)*N); //transferul de date ntre memoria gazd i memoria dispozitivului
//unde sa se puna, ce sa se puna si dimensiunea
int numBlocks = 1;
// Lansai un kernel pe GPU cu fir N pentru fiecare element.
for (int k = 0; k < N; k++) {
ShellSortKernel << <numBlocks, N >> >(d_a, N);
//Fiecare fir care execut nucleul are un ID de thread unic, accesibil n cadrul kernelului, prin intermediul variabilei threadIdx ncorporate.
//__global__ void VecAdd(float* A, float* B, float* C, int N)
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
}
for (i = 0; i < N; i++)
printf("%d ", array[i]);
hipFree(d_a); //eliberam memoria liniara
return 0;
}
| 7cd3875154c2255e5f3f6c7bb9b74c8fdeb0f6a4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// Shell-sorts a[0..N-1] in place (ascending).
// NOTE(review): the loops below immediately overwrite the thread indices, so
// every thread executes the FULL sequential sort on the same array - that is
// a data race unless the kernel is launched with exactly one thread.
__global__ void ShellSortKernel( int *a, int N) // kernel definition
{
    int i = threadIdx.x; // thread index within the block (overwritten below)
    int j = threadIdx.y;
    int temp;
    int dim; // current gap size
    for (dim = N / 2; dim > 0; dim /= 2)        // halve the gap each pass
        for (i = dim; i < N; i++)               // walk elements past the gap
            for (j = i - dim; j >= 0 && a[j]>a[j + dim]; j -= dim) {
                temp = a[j];                    // swap the out-of-order pair,
                a[j] = a[j + dim];              // bubbling back by `dim`
                a[j + dim] = temp;
            }
}
// Sorts a small fixed array on the device with shell sort and prints it.
int main()
{
    size_t N = 10;
    int array[] = { 9, 6, 1, 2, 7, 3, 8, 4, 10, 5 };
    int i;
    int *d_a; // device buffer
    // The original allocated only N BYTES; the buffer must hold N ints.
    cudaMalloc(&d_a, N * sizeof(int));
    // A device buffer cannot be filled with host memcpy(); use cudaMemcpy
    // with an explicit host-to-device direction.
    cudaMemcpy(d_a, array, sizeof(int) * N, cudaMemcpyHostToDevice);
    // The kernel runs the whole shell sort sequentially (it overwrites its
    // thread indices), so launching it with more than one thread - or more
    // than once, as the original N-iteration loop did - only creates data
    // races.  One launch, one thread.
    ShellSortKernel << <1, 1 >> >(d_a, N);
    // Copy the sorted data back before printing; the original printed the
    // never-updated host array.
    cudaMemcpy(array, d_a, sizeof(int) * N, cudaMemcpyDeviceToHost);
    for (i = 0; i < N; i++)
        printf("%d ", array[i]);
    cudaFree(d_a); // release the device buffer
    return 0;
}
|
e6d6b99fd5005ba72f6a33cf0625e09c25c4c590.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <string>
using namespace std;
#define BLOCK_SIZE 10
// Checks whether the square row-major matrices A and B (size x size) commute,
// i.e. whether A*B == B*A elementwise (exact float comparison).
// Each thread computes element (row, col) of both products and raises the
// flag via atomicOr on a mismatch.
// Precondition: *result must be zeroed by the host before launch.
__global__ void check(float* A, float* B, int* result, int size)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    // The guard must be strict '<': with the original '<=', threads at
    // row==size or col==size read past the end of the matrices.
    if (row < size && col < size)
    {
        float sumAB = 0.0f;
        float sumBA = 0.0f;
        for (int i = 0; i < size; i++)
        {
            sumAB += A[row * size + i] * B[i * size + col];
            sumBA += B[row * size + i] * A[i * size + col];
        }
        // atomicOr already updates *result atomically; writing its return
        // value back (as the original `*result = atomicOr(...)` did)
        // reintroduces a data race on *result.
        if (sumAB != sumBA)
            atomicOr(result, 1);
    }
}
int main()
{
ifstream file;
file.open("Input.txt");
vector<float> data;
int length = 0;
while (!file.eof())
{
float a;
file >> a;
data.push_back(a);
length++;
}
file.close();
length /= 2;
int size = sqrt(length);
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid = dim3(size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1), size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1));
float* h_A = &data[0];
float* h_B = &data[length];
float* d_A;
float* d_B;
int* h_result = new int;
int* d_result;
hipMalloc((void**)&d_A, sizeof(float) * length);
hipMalloc((void**)&d_B, sizeof(float) * length);
hipMemcpy(d_A, h_A, sizeof(float) * length, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, sizeof(float) * length, hipMemcpyHostToDevice);
hipMalloc((void**)&d_result, sizeof(int));
check << <blocksPerGrid, threadsPerBlock >> > (d_A, d_B, d_result, size);
hipDeviceSynchronize();
hipMemcpy(h_result, d_result, sizeof(int), hipMemcpyDeviceToHost);
if (*h_result == 1)
printf("Matrixes are non commuting");
else
printf("Matrixes are commuting");
delete h_result;
hipFree(d_result);
hipFree(d_A);
hipFree(d_B);
data.clear();
return 0;
} | e6d6b99fd5005ba72f6a33cf0625e09c25c4c590.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <string>
using namespace std;
#define BLOCK_SIZE 10
// Checks whether the square row-major matrices A and B (size x size) commute,
// i.e. whether A*B == B*A elementwise (exact float comparison).
// Each thread computes element (row, col) of both products and raises the
// flag via atomicOr on a mismatch.
// Precondition: *result must be zeroed by the host before launch.
__global__ void check(float* A, float* B, int* result, int size)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    // The guard must be strict '<': with the original '<=', threads at
    // row==size or col==size read past the end of the matrices.
    if (row < size && col < size)
    {
        float sumAB = 0.0f;
        float sumBA = 0.0f;
        for (int i = 0; i < size; i++)
        {
            sumAB += A[row * size + i] * B[i * size + col];
            sumBA += B[row * size + i] * A[i * size + col];
        }
        // atomicOr already updates *result atomically; writing its return
        // value back (as the original `*result = atomicOr(...)` did)
        // reintroduces a data race on *result.
        if (sumAB != sumBA)
            atomicOr(result, 1);
    }
}
// Reads two size x size matrices from Input.txt (A first, then B, whitespace
// separated floats) and reports whether they commute (A*B == B*A).
int main()
{
    ifstream file;
    file.open("Input.txt");
    vector<float> data;
    int length = 0;
    // Loop on extraction success: the original `while (!file.eof())` pattern
    // pushes the last value a second time, corrupting the element count.
    float a;
    while (file >> a)
    {
        data.push_back(a);
        length++;
    }
    file.close();
    length /= 2;            // first half of `data` is A, second half is B
    int size = sqrt(length);
    dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid = dim3(size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1), size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1));
    float* h_A = &data[0];
    float* h_B = &data[length];
    float* d_A;
    float* d_B;
    int* h_result = new int;
    int* d_result;
    cudaMalloc((void**)&d_A, sizeof(float) * length);
    cudaMalloc((void**)&d_B, sizeof(float) * length);
    cudaMemcpy(d_A, h_A, sizeof(float) * length, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(float) * length, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_result, sizeof(int));
    // The kernel only ever ORs the flag in, so it must start at 0; the
    // original left d_result uninitialized and read back garbage.
    cudaMemset(d_result, 0, sizeof(int));
    check << <blocksPerGrid, threadsPerBlock >> > (d_A, d_B, d_result, size);
    cudaDeviceSynchronize();
    cudaMemcpy(h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    if (*h_result == 1)
        printf("Matrixes are non commuting");
    else
        printf("Matrixes are commuting");
    delete h_result;
    cudaFree(d_result);
    cudaFree(d_A);
    cudaFree(d_B);
    data.clear();
    return 0;
}
0eb0b2902918d26d86a26ce0e5886b3c3774fc76.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_basic.cu
* \brief GPU Implementation of unary functions.
*/
#include "./elemwise_binary_op.h"
#include "./elemwise_unary_op.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
// erf
NNVM_REGISTER_OP(erf)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>);
NNVM_REGISTER_OP(_backward_erf)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>);
// erfinv
NNVM_REGISTER_OP(erfinv)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erfinv>);
NNVM_REGISTER_OP(_backward_erfinv)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erfinv_grad>>);
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_reshape)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrainted to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
// FCompute<gpu> for `shape_array`: writes the input tensor's shape (one
// int64 per dimension) into the device-resident output tensor with an
// asynchronous host-to-device copy on the op's stream.
// NOTE(review): the async copy reads the host-side shape_ buffer; this
// assumes in_data's shape outlives the copy on this stream, and that
// shape_'s element type is 8 bytes wide to match the sizeof(int64_t)
// byte count - confirm both.
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  hipMemcpyAsync(out_data.dptr_,
                 in_data.shape_.data(),
                 in_data.ndim() * sizeof(int64_t),
                 hipMemcpyHostToDevice,
                 mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
// FCompute<gpu> for `size_array`: writes the input tensor's total element
// count into the device-resident scalar int64 output.
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  // Pin the value to int64_t so exactly sizeof(int64_t) valid bytes exist to
  // copy, regardless of how index_t is defined.
  const int64_t size_var = static_cast<int64_t>(in_data.Size());
  hipMemcpyAsync(out_data.dptr_,
                 &size_var,
                 sizeof(int64_t),
                 hipMemcpyHostToDevice,
                 mshadow::Stream<gpu>::GetStream(s));
  // size_var lives on this stack frame: the async H2D copy must finish
  // before we return, otherwise the device may read freed stack memory
  // (the original returned immediately after enqueueing the copy).
  hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
// GPU kernel registrations for the remaining elementwise unary ops.
// Each op binds an FCompute ("FCompute<gpu>") kernel and, where present,
// an FComputeEx ("FComputeEx<gpu>") kernel built from the corresponding
// mshadow_op functor.
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// gamma (backward pairs register only the dense FCompute path)
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
| 0eb0b2902918d26d86a26ce0e5886b3c3774fc76.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_basic.cu
* \brief GPU Implementation of unary functions.
*/
#include "./elemwise_binary_op.h"
#include "./elemwise_unary_op.h"
namespace mxnet {
namespace op {
// GPU kernel registrations for basic elementwise unary ops.
// Each op binds an FCompute ("FCompute<gpu>") kernel built from the matching
// mshadow_op functor; backward ops wrap the gradient functor via unary_bwd.
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::softsign_grad>>);
// erf
NNVM_REGISTER_OP(erf)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>);
NNVM_REGISTER_OP(_backward_erf)
.set_attr<FCompute>("FCompute<gpu>",
                    ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>);
// erfinv
NNVM_REGISTER_OP(erfinv)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erfinv>);
NNVM_REGISTER_OP(_backward_erfinv)
.set_attr<FCompute>("FCompute<gpu>",
                    ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erfinv_grad>>);
// copy (identity) family: forward and backward are both identity kernels
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_reshape)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrained to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
// Writes the input tensor's shape into the device output buffer, one
// int64_t per dimension (GPU implementation of the `shape_array` op).
// NOTE(review): the copy source (`in_data.shape_.data()`) is host-side
// memory; this relies on cudaMemcpyAsync being synchronous with respect to
// the host for pageable sources (it stages the data before returning).
// Verify this if the shape storage is ever moved to pinned memory. The
// return code of the copy is not checked.
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  // Exactly one input tensor and one output tensor are expected.
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  // Copy ndim() shape entries host->device on this op's stream.
  cudaMemcpyAsync(out_data.dptr_,
                  in_data.shape_.data(),
                  in_data.ndim() * sizeof(int64_t),
                  cudaMemcpyHostToDevice,
                  mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
// Writes the input tensor's total element count into the device output
// buffer as a single int64_t (GPU implementation of the `size_array` op).
// NOTE(review): `size_var` is a stack local used as the source of an async
// copy; this is safe only because cudaMemcpyAsync stages pageable host
// memory before returning — confirm this assumption if the code is changed.
// NOTE(review): the copy assumes sizeof(index_t) == sizeof(int64_t) —
// verify against the build's tensor-size configuration. The copy's return
// code is not checked.
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  // Exactly one input tensor and one output tensor are expected.
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  const index_t size_var = in_data.Size();
  // Copy the single element count host->device on this op's stream.
  cudaMemcpyAsync(out_data.dptr_,
                  &size_var,
                  1U * sizeof(int64_t),
                  cudaMemcpyHostToDevice,
                  mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
// GPU kernel registrations for cast and the remaining elementwise unary ops.
// Ops with an FComputeEx ("FComputeEx<gpu>") binding also register a
// non-default-storage kernel alongside the dense FCompute one.
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// gamma
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
  gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
|
393dba2d2413fcd6bb9ae7a5060fda71620667a4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Jennifer Chih Wen Lin 2016
*/
#include <cstdio>
#include <hip/hip_runtime.h>
#include "blur_device.cuh"
__global__
void cudaBlurKernel(const float *raw_data, const float *blur_v, float *out_data,
        int n_frames, int blur_v_size) {
    // One output frame per thread, walked with a grid-stride loop so any
    // <<<blocks, threads>>> configuration covers all n_frames samples.
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int frame = blockIdx.x * blockDim.x + threadIdx.x;
            frame < n_frames; frame += stride) {
        // Dot product of the impulse response with the input window (input
        // is read at a fixed blur_v_size offset from the output index).
        // Accumulate in a register, then fold into out_data with a single
        // write — the net result matches accumulating in place.
        float acc = 0.0f;
        for (int tap = 0; tap < blur_v_size; ++tap) {
            acc += raw_data[blur_v_size + frame - tap] * blur_v[tap];
        }
        out_data[frame] += acc;
    }
}
// Host-side wrapper: launches cudaBlurKernel on the default stream with the
// requested grid/block configuration. The launch is asynchronous — callers
// must synchronize before reading out_data on the host. The kernel
// accumulates into out_data (`+=`), so out_data is presumably zeroed by the
// caller first — confirm at the call site.
void cudaCallBlurKernel(const unsigned int blocks,
        const unsigned int threadsPerBlock,
        const float *raw_data,
        const float *blur_v,
        float *out_data,
        const unsigned int n_frames,
        const unsigned int blur_v_size) {
    hipLaunchKernelGGL(( cudaBlurKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data,blur_v, out_data,\
        n_frames, blur_v_size);
}
| 393dba2d2413fcd6bb9ae7a5060fda71620667a4.cu | /*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Jennifer Chih Wen Lin 2016
*/
#include <cstdio>
#include <cuda_runtime.h>
#include "blur_device.cuh"
__global__
void cudaBlurKernel(const float *raw_data, const float *blur_v, float *out_data,
        int n_frames, int blur_v_size) {
    // Each thread convolves one output sample at a time; the grid-stride
    // walk makes the kernel correct for any launch configuration.
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    while (idx < n_frames) {
        // blur_v holds the impulse response; raw_data is sampled at a fixed
        // blur_v_size offset from the output index.
        float sum = 0.0f;
        for (int k = 0; k < blur_v_size; ++k) {
            sum += raw_data[blur_v_size + idx - k] * blur_v[k];
        }
        // Single write per frame; same net effect as accumulating in place.
        out_data[idx] += sum;
        idx += step;
    }
}
// Host-side wrapper: launches cudaBlurKernel on the default stream with the
// requested grid/block configuration. The launch is asynchronous — callers
// must synchronize before reading out_data on the host. The kernel
// accumulates into out_data (`+=`), so out_data is presumably zeroed by the
// caller first — confirm at the call site.
void cudaCallBlurKernel(const unsigned int blocks,
        const unsigned int threadsPerBlock,
        const float *raw_data,
        const float *blur_v,
        float *out_data,
        const unsigned int n_frames,
        const unsigned int blur_v_size) {
    cudaBlurKernel<<<blocks, threadsPerBlock>>>(raw_data, blur_v, out_data,
        n_frames, blur_v_size);
    // Kernel launches fail silently; surface bad launch configurations
    // immediately instead of letting a later API call report them.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaBlurKernel launch failed: %s\n",
                cudaGetErrorString(err));
    }
}
|
28714e13f7f1513d813beb05c6d5d5f59fafc65c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2022.
// All rights reserved.
// @author heyanguang
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <fstream>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m, n) (((m) / (n)) + ((m) % (n) > 0))
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
namespace memory_test {
template <typename T = float>
__global__ void GlobalToDynamicShared(const T *input, T *output) {
extern __shared__ float shared_memory_pool[];
T *shared_memory_addr = reinterpret_cast<T*>(shared_memory_pool);
int32_t block_thread_count = blockDim.x * blockDim.y;
uint64_t shared_memory_element_count = gridDim.y * block_thread_count;
uint64_t global_addr = blockIdx.x * shared_memory_element_count + blockIdx.y * block_thread_count
+ threadIdx.y * blockDim.x + threadIdx.x;
uint64_t shared_addr = blockIdx.y * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
shared_memory_addr[shared_addr] = input[global_addr];
// shared_memory_addr[shared_addr] = 0.5;
// T var;
// var = shared_memory_addr[shared_addr];
// var = input[global_addr];
// shared_memory_addr[shared_addr] = var;
}
// One-element-per-thread device-to-device copy used to measure global-memory
// bandwidth. Flat layout: gridDim.x blocks of blockDim.x*blockDim.y threads
// form one "row" of elements; blockIdx.y selects the row.
template <typename T = float>
__global__ void GlobalToGlobal(const T *input, T *output) {
    const int32_t threads_per_block = blockDim.x * blockDim.y;
    const uint64_t row_elements = gridDim.x * threads_per_block;
    const uint64_t idx = blockIdx.y * row_elements
            + blockIdx.x * threads_per_block
            + threadIdx.y * blockDim.x + threadIdx.x;
    output[idx] = input[idx];
}
template <typename T = float>
__global__ void GlobalToGlobalV4(const T *input, T *output) {
int32_t block_thread_count = blockDim.x * blockDim.y;
uint64_t shared_memory_element_count = gridDim.x * block_thread_count;
uint64_t global_addr = blockIdx.y * shared_memory_element_count + blockIdx.x * block_thread_count
+ threadIdx.y * blockDim.x + threadIdx.x;
// output[global_addr * 4 + 0] = input[global_addr * 4 + 0];
// output[global_addr * 4 + 1] = input[global_addr * 4 + 1];
// output[global_addr * 4 + 2] = input[global_addr * 4 + 2];
// output[global_addr * 4 + 3] = input[global_addr * 4 + 3];
asm volatile(
"{\n\t"
".reg.f32 a<4>;\n\t"
".reg.u64 rd, wr;\n\t"
"add.u64 rd, %0, %2;\n\t"
"add.u64 wr, %1, %2;\n\t"
"ld.global.v4.f32 { a0, a1, a2, a3 }, [rd];\n\t"
"st.global.v4.f32 [wr], { a0, a1, a2, a3 };\n\t"
"}"
:
: "l"(input), "l"(output), "l"(global_addr * 16)
: "memory"
);
}
template <typename T = float>
__global__ void GlobalToDynamicSharedToGlobal(const T *input, const int32_t sm_element_count, T *output) {
extern __shared__ float shared_memory_pool[];
// __shared__ float other[32 * 1024];
T *shared_memory_addr = reinterpret_cast<T*>(shared_memory_pool);
int32_t block_thread_count = blockDim.x * blockDim.y;
uint64_t shared_memory_element_count = gridDim.x * block_thread_count;
uint64_t global_addr = blockIdx.y * shared_memory_element_count + blockIdx.x * block_thread_count
+ threadIdx.y * blockDim.x + threadIdx.x;
uint64_t shared_addr = blockIdx.x * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
// shared_memory_addr[shared_addr] = input[global_addr];
// output[global_addr] = shared_memory_addr[shared_addr];
if (shared_addr < sm_element_count) {
// T val = input[global_addr];
// shared_memory_addr[shared_addr] = val;
// other[shared_addr] = T(1.0) - val;
// output[global_addr] = other[shared_addr] + shared_memory_addr[shared_addr];
shared_memory_addr[shared_addr] = input[global_addr];
output[global_addr] = shared_memory_addr[shared_addr];
} else {
output[global_addr] = input[global_addr];
}
}
template <typename T = float>
__global__ void SharedToShared(const T *input, T *output) {
extern __shared__ float shared_memory_pool[];
int32_t block_thread_count = blockDim.x * blockDim.y;
uint64_t shared_memory_element_count = gridDim.y * block_thread_count;
T *shared_memory_addr1 = reinterpret_cast<T*>(shared_memory_pool);
T *shared_memory_addr2 = shared_memory_addr1 + shared_memory_element_count;
uint64_t shared_addr = blockIdx.y * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
shared_memory_addr2[shared_addr] = shared_memory_addr1[shared_addr];
// shared_memory_addr2[shared_addr] = 0.5;
}
template <typename T = float>
__global__ void SharedToSharedV4(const T *input, T *output) {
extern __shared__ float shared_memory_pool[];
int32_t block_thread_count = blockDim.x * blockDim.y;
uint64_t shared_memory_element_count = gridDim.y * block_thread_count;
T *shared_memory_addr1 = reinterpret_cast<T*>(shared_memory_pool);
T *shared_memory_addr2 = shared_memory_addr1 + shared_memory_element_count;
uint64_t shared_addr = blockIdx.y * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
// shared_memory_addr2[shared_addr] = shared_memory_addr1[shared_addr];
// shared_memory_addr2[shared_addr] = 0.5;
// asm volatile (
// "{\n\t"
// ".reg.f32 a<4>;\n\t"
// ".reg.u32 smem_ptr32_0, smem_ptr32_1, rd, wr;\n\t"
// ".reg.u64 smem_ptr64_0, smem_ptr64_1;\n\t"
// "cvta.to.shared.u64 smem_ptr64_0, %0;\n\t"
// "cvta.to.shared.u64 smem_ptr64_1, %1;\n\t"
// "cvt.u32.u64 smem_ptr32_0, smem_ptr64_0;\n\t"
// "cvt.u32.u64 smem_ptr32_1, smem_ptr64_1;\n\t"
// "add.u32 smem_ptr32_0, smem_ptr32_0, %2;\n\t"
// "add.u32 smem_ptr32_1, smem_ptr32_1, %2;\n\t"
// "ld.shared.v4.f32 { a0, a1, a2, a3 }, [smem_ptr32_0];\n\t"
// "st.shared.v4.f32 [smem_ptr32_1], { a0, a1, a2, a3 };\n\t"
// "}"
// :
// : "l"(shared_memory_addr1), "l"(shared_memory_addr2), "r"(uint32_t(shared_addr * 16))
// : "memory"
// );
asm volatile (
"{\n\t"
".reg.f32 a<4>;\n\t"
".reg.u64 smem_ptr64_0, smem_ptr64_1;\n\t"
"cvta.to.shared.u64 smem_ptr64_0, %0;\n\t"
"cvta.to.shared.u64 smem_ptr64_1, %1;\n\t"
"add.u64 smem_ptr64_0, smem_ptr64_0, %2;\n\t"
"add.u64 smem_ptr64_1, smem_ptr64_1, %2;\n\t"
"ld.shared.v4.f32 { a0, a1, a2, a3 }, [smem_ptr64_0];\n\t"
"st.shared.v4.f32 [smem_ptr64_1], { a0, a1, a2, a3 };\n\t"
"}"
:
: "l"(shared_memory_addr1), "l"(shared_memory_addr2), "l"(shared_addr * 16)
: "memory"
);
// printf("addr1=%p, addr2=%p\n", shared_memory_addr1, shared_memory_addr2);
}
namespace functor {
// Launches GlobalToDynamicShared: each 32x32 block stages 1024 elements of
// its blockIdx.y slice from global into dynamic shared memory;
// `cycle_count` independent repetitions run along gridDim.x.
// Always returns 1 (status value kept for interface compatibility).
template <typename T>
int32_t LaunchGlobalToDynamicShared(hipStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count) {
    int32_t block_count = shared_memory_size / (1024 * sizeof(T));
    dim3 block(32, 32);
    dim3 grid(cycle_count, block_count);
    // Fix: dynamic shared memory above the default 48 KB limit requires an
    // explicit opt-in, exactly as LaunchGlobalToDynamicSharedToGlobal
    // already does; without it large requests make the launch fail.
    if (shared_memory_size > 48 * 1024) {
        hipFuncSetAttribute(GlobalToDynamicShared<T>, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
    }
    hipLaunchKernelGGL(( GlobalToDynamicShared<T>), dim3(grid), dim3(block), shared_memory_size, stream, input, output);
    return 1;
}
template <typename T>
int32_t LaunchGlobalToGlobal(hipStream_t stream, const T *input, T *output,
const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y) {
int32_t sm_element_count = shared_memory_size / sizeof(T);
int32_t grid_x = (sm_element_count + block_size - 1) / block_size;
dim3 block(1024);
// dim3 block(32, 32);
// dim3 grid(grid_x, grid_y);
dim3 grid(grid_y, grid_x);
hipLaunchKernelGGL(( GlobalToGlobal<T>), dim3(grid), dim3(block), 0, stream, input, output);
return 1;
}
template <typename T>
int32_t LaunchGlobalToGlobalV4(hipStream_t stream, const T *input, T *output,
const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y) {
int32_t sm_element_count = shared_memory_size / sizeof(T);
int32_t grid_x = (sm_element_count + block_size - 1) / block_size;
dim3 block(1024);
// dim3 block(32, 32);
dim3 grid(grid_x, grid_y / 4);
hipLaunchKernelGGL(( GlobalToGlobalV4<T>), dim3(grid), dim3(block), 0, stream, input, output);
return 1;
}
template <typename T>
int32_t LaunchGlobalToDynamicSharedToGlobal(hipStream_t stream, const T *input, T *output,
const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y) {
int32_t sm_element_count = shared_memory_size / sizeof(T);
int32_t grid_x = (sm_element_count + block_size - 1) / block_size;
dim3 block(32, 32);
dim3 grid(grid_x, grid_y);
if (shared_memory_size > 48 * 1024) {
hipFuncSetAttribute(GlobalToDynamicSharedToGlobal<T>, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
}
hipLaunchKernelGGL(( GlobalToDynamicSharedToGlobal<T>), dim3(grid), dim3(block), shared_memory_size, stream, input, sm_element_count, output);
return 1;
}
// Launches SharedToShared: each 32x32 block copies 1024 elements between the
// two halves of its dynamic shared-memory allocation (no global traffic in
// the kernel body). Always returns 1.
template <typename T>
int32_t LaunchSharedToShared(hipStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count) {
    int32_t block_count = shared_memory_size / (2 * 1024 * sizeof(T));
    dim3 block(32, 32);
    dim3 grid(cycle_count, block_count);
    // dim3 grid(block_count, cycle_count);
    // Fix: opt in to >48 KB dynamic shared memory, matching
    // LaunchGlobalToDynamicSharedToGlobal; otherwise large requests fail.
    if (shared_memory_size > 48 * 1024) {
        hipFuncSetAttribute(SharedToShared<T>, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
    }
    hipLaunchKernelGGL(( SharedToShared<T>), dim3(grid), dim3(block), shared_memory_size, stream, input, output);
    return 1;
}
// Launches SharedToSharedV4: vectorized variant moving 16-byte vectors, hence
// the extra factor of 4 in block_count and the 128x8 block shape.
// Always returns 1.
template <typename T>
int32_t LaunchSharedToSharedV4(hipStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count) {
    int32_t block_count = shared_memory_size / (2 * 1024 * sizeof(T) * 4);
    dim3 block(128, 8);
    dim3 grid(cycle_count, block_count);
    // dim3 grid(block_count, cycle_count);
    // Fix: same >48 KB dynamic shared-memory opt-in as above.
    if (shared_memory_size > 48 * 1024) {
        hipFuncSetAttribute(SharedToSharedV4<T>, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
    }
    hipLaunchKernelGGL(( SharedToSharedV4<T>), dim3(grid), dim3(block), shared_memory_size, stream, input, output);
    return 1;
}
template int32_t LaunchGlobalToDynamicShared(hipStream_t stream, const float *input, float *output,
const uint64_t shared_memory_size, const uint64_t cycle_count);
template int32_t LaunchGlobalToGlobal(hipStream_t stream, const float *input, float *output,
const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y);
template int32_t LaunchGlobalToGlobalV4(hipStream_t stream, const float *input, float *output,
const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y);
template int32_t LaunchGlobalToDynamicSharedToGlobal(hipStream_t stream, const float *input, float *output,
const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y);
template int32_t LaunchSharedToShared(hipStream_t stream, const float *input, float *output,
const uint64_t shared_memory_size, const uint64_t cycle_count);
template int32_t LaunchSharedToSharedV4(hipStream_t stream, const float *input, float *output,
const uint64_t shared_memory_size, const uint64_t cycle_count);
}
}
| 28714e13f7f1513d813beb05c6d5d5f59fafc65c.cu | // Copyright 2022.
// All rights reserved.
// @author heyanguang
#include <cuda.h>
#include <cuda_fp16.h>
#include <fstream>
#include <iostream>
// Error-checking wrapper for CUDA runtime calls: prints the error string on
// failure. NOTE(review): it does not abort, so execution continues after a
// sticky error — confirm that is intended for this benchmark harness.
#define CUDA_CHECK(condition) \
    /* Code block avoids redefinition of cudaError_t error */ \
    do { \
        cudaError_t error = condition; \
        if (error != cudaSuccess) { \
            std::cout << cudaGetErrorString(error) << std::endl; \
        } \
    } while (0)
// Integer ceiling division: DIVUP(m, n) == ceil(m / n) for positive values.
#define DIVUP(m, n) (((m) / (n)) + ((m) % (n) > 0))
// Canonical grid-stride loop over indices [0, n).
#define CUDA_1D_KERNEL_LOOP(i, n) \
    for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
namespace memory_test {
// Global->shared staging microbenchmark: each thread loads one element from
// global memory into the block's dynamic shared allocation. Every block
// receives the full shared_memory_size allocation (set by the launcher) but
// writes only the slice selected by blockIdx.y. `output` is unused.
// NOTE(review): nothing is ever written back to global memory, so the
// compiler may dead-code-eliminate the shared store — verify in SASS that
// the load/store actually survives.
template <typename T = float>
__global__ void GlobalToDynamicShared(const T *input, T *output) {
    extern __shared__ float shared_memory_pool[];
    T *shared_memory_addr = reinterpret_cast<T*>(shared_memory_pool);
    int32_t block_thread_count = blockDim.x * blockDim.y;
    uint64_t shared_memory_element_count = gridDim.y * block_thread_count;
    // Flat global index: blockIdx.x selects the repetition, blockIdx.y the
    // slice within one repetition.
    uint64_t global_addr = blockIdx.x * shared_memory_element_count + blockIdx.y * block_thread_count
            + threadIdx.y * blockDim.x + threadIdx.x;
    uint64_t shared_addr = blockIdx.y * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
    shared_memory_addr[shared_addr] = input[global_addr];
    // shared_memory_addr[shared_addr] = 0.5;
    // T var;
    // var = shared_memory_addr[shared_addr];
    // var = input[global_addr];
    // shared_memory_addr[shared_addr] = var;
}
// Device-to-device copy microbenchmark: every thread moves exactly one
// element of `input` to the same flat position in `output`.
// Layout: gridDim.x blocks of blockDim.x*blockDim.y threads form one grid
// row of elements; blockIdx.y selects the row.
template <typename T = float>
__global__ void GlobalToGlobal(const T *input, T *output) {
    const int32_t block_threads = blockDim.x * blockDim.y;
    const uint64_t elements_per_grid_row = gridDim.x * block_threads;
    const uint64_t flat_idx = blockIdx.y * elements_per_grid_row
            + blockIdx.x * block_threads
            + threadIdx.y * blockDim.x + threadIdx.x;
    output[flat_idx] = input[flat_idx];
}
// Vectorized device-to-device copy: each thread moves one 16-byte vector
// (four f32 lanes) at byte offset global_addr * 16 using PTX
// ld.global.v4.f32 / st.global.v4.f32. The equivalent scalar form is kept
// above in comments. Requires both pointers to be 16-byte aligned and is
// NVPTX-specific (inline PTX). The launcher divides gridDim.y by 4 to keep
// total bytes equal to the scalar variant.
template <typename T = float>
__global__ void GlobalToGlobalV4(const T *input, T *output) {
    int32_t block_thread_count = blockDim.x * blockDim.y;
    uint64_t shared_memory_element_count = gridDim.x * block_thread_count;
    uint64_t global_addr = blockIdx.y * shared_memory_element_count + blockIdx.x * block_thread_count
            + threadIdx.y * blockDim.x + threadIdx.x;
    // output[global_addr * 4 + 0] = input[global_addr * 4 + 0];
    // output[global_addr * 4 + 1] = input[global_addr * 4 + 1];
    // output[global_addr * 4 + 2] = input[global_addr * 4 + 2];
    // output[global_addr * 4 + 3] = input[global_addr * 4 + 3];
    asm volatile(
        "{\n\t"
        ".reg.f32 a<4>;\n\t"
        ".reg.u64 rd, wr;\n\t"
        "add.u64 rd, %0, %2;\n\t"
        "add.u64 wr, %1, %2;\n\t"
        "ld.global.v4.f32 { a0, a1, a2, a3 }, [rd];\n\t"
        "st.global.v4.f32 [wr], { a0, a1, a2, a3 };\n\t"
        "}"
        :
        : "l"(input), "l"(output), "l"(global_addr * 16)
        : "memory"
    );
}
// Staged copy benchmark: threads whose flat per-repetition index fits inside
// the dynamic shared allocation (`sm_element_count` elements) stage the value
// through shared memory; the rest copy global->global directly. The launcher
// opts in to >48 KB dynamic shared memory when required.
template <typename T = float>
__global__ void GlobalToDynamicSharedToGlobal(const T *input, const int32_t sm_element_count, T *output) {
    extern __shared__ float shared_memory_pool[];
    // __shared__ float other[32 * 1024];
    T *shared_memory_addr = reinterpret_cast<T*>(shared_memory_pool);
    int32_t block_thread_count = blockDim.x * blockDim.y;
    uint64_t shared_memory_element_count = gridDim.x * block_thread_count;
    // blockIdx.y selects the repetition; blockIdx.x the slice within it.
    uint64_t global_addr = blockIdx.y * shared_memory_element_count + blockIdx.x * block_thread_count
            + threadIdx.y * blockDim.x + threadIdx.x;
    uint64_t shared_addr = blockIdx.x * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
    // shared_memory_addr[shared_addr] = input[global_addr];
    // output[global_addr] = shared_memory_addr[shared_addr];
    if (shared_addr < sm_element_count) {
        // T val = input[global_addr];
        // shared_memory_addr[shared_addr] = val;
        // other[shared_addr] = T(1.0) - val;
        // output[global_addr] = other[shared_addr] + shared_memory_addr[shared_addr];
        shared_memory_addr[shared_addr] = input[global_addr];
        output[global_addr] = shared_memory_addr[shared_addr];
    } else {
        output[global_addr] = input[global_addr];
    }
}
// Shared-to-shared bandwidth microbenchmark: the dynamic allocation is split
// into two equal regions and each thread copies one element from region 1 to
// region 2. `input`/`output` are unused. NOTE(review): region 1 is never
// initialized, so the values copied are indeterminate — fine for bandwidth
// measurement, but nothing observable leaves the kernel; verify the copy is
// not optimized away.
template <typename T = float>
__global__ void SharedToShared(const T *input, T *output) {
    extern __shared__ float shared_memory_pool[];
    int32_t block_thread_count = blockDim.x * blockDim.y;
    uint64_t shared_memory_element_count = gridDim.y * block_thread_count;
    T *shared_memory_addr1 = reinterpret_cast<T*>(shared_memory_pool);
    T *shared_memory_addr2 = shared_memory_addr1 + shared_memory_element_count;
    uint64_t shared_addr = blockIdx.y * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
    shared_memory_addr2[shared_addr] = shared_memory_addr1[shared_addr];
    // shared_memory_addr2[shared_addr] = 0.5;
}
// Vectorized shared-to-shared copy: each thread moves one 16-byte vector
// between the two halves of the dynamic allocation using PTX
// ld.shared.v4.f32 / st.shared.v4.f32. Generic pointers are converted to
// shared-window addresses with cvta.to.shared and offset by
// shared_addr * 16 bytes. A 32-bit-address variant is kept above in
// comments. NVPTX-specific; source values are uninitialized (bandwidth
// measurement only).
template <typename T = float>
__global__ void SharedToSharedV4(const T *input, T *output) {
    extern __shared__ float shared_memory_pool[];
    int32_t block_thread_count = blockDim.x * blockDim.y;
    uint64_t shared_memory_element_count = gridDim.y * block_thread_count;
    T *shared_memory_addr1 = reinterpret_cast<T*>(shared_memory_pool);
    T *shared_memory_addr2 = shared_memory_addr1 + shared_memory_element_count;
    uint64_t shared_addr = blockIdx.y * block_thread_count + threadIdx.y * blockDim.x + threadIdx.x;
    // shared_memory_addr2[shared_addr] = shared_memory_addr1[shared_addr];
    // shared_memory_addr2[shared_addr] = 0.5;
    // asm volatile (
    //     "{\n\t"
    //     ".reg.f32 a<4>;\n\t"
    //     ".reg.u32 smem_ptr32_0, smem_ptr32_1, rd, wr;\n\t"
    //     ".reg.u64 smem_ptr64_0, smem_ptr64_1;\n\t"
    //     "cvta.to.shared.u64 smem_ptr64_0, %0;\n\t"
    //     "cvta.to.shared.u64 smem_ptr64_1, %1;\n\t"
    //     "cvt.u32.u64 smem_ptr32_0, smem_ptr64_0;\n\t"
    //     "cvt.u32.u64 smem_ptr32_1, smem_ptr64_1;\n\t"
    //     "add.u32 smem_ptr32_0, smem_ptr32_0, %2;\n\t"
    //     "add.u32 smem_ptr32_1, smem_ptr32_1, %2;\n\t"
    //     "ld.shared.v4.f32 { a0, a1, a2, a3 }, [smem_ptr32_0];\n\t"
    //     "st.shared.v4.f32 [smem_ptr32_1], { a0, a1, a2, a3 };\n\t"
    //     "}"
    //     :
    //     : "l"(shared_memory_addr1), "l"(shared_memory_addr2), "r"(uint32_t(shared_addr * 16))
    //     : "memory"
    // );
    asm volatile (
        "{\n\t"
        ".reg.f32 a<4>;\n\t"
        ".reg.u64 smem_ptr64_0, smem_ptr64_1;\n\t"
        "cvta.to.shared.u64 smem_ptr64_0, %0;\n\t"
        "cvta.to.shared.u64 smem_ptr64_1, %1;\n\t"
        "add.u64 smem_ptr64_0, smem_ptr64_0, %2;\n\t"
        "add.u64 smem_ptr64_1, smem_ptr64_1, %2;\n\t"
        "ld.shared.v4.f32 { a0, a1, a2, a3 }, [smem_ptr64_0];\n\t"
        "st.shared.v4.f32 [smem_ptr64_1], { a0, a1, a2, a3 };\n\t"
        "}"
        :
        : "l"(shared_memory_addr1), "l"(shared_memory_addr2), "l"(shared_addr * 16)
        : "memory"
    );
    // printf("addr1=%p, addr2=%p\n", shared_memory_addr1, shared_memory_addr2);
}
namespace functor {
// Launches GlobalToDynamicShared: each 32x32 block stages 1024 elements of
// its blockIdx.y slice from global into dynamic shared memory;
// `cycle_count` independent repetitions run along gridDim.x.
// Always returns 1 (status value kept for interface compatibility).
template <typename T>
int32_t LaunchGlobalToDynamicShared(cudaStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count) {
    int32_t block_count = shared_memory_size / (1024 * sizeof(T));
    dim3 block(32, 32);
    dim3 grid(cycle_count, block_count);
    // Fix: dynamic shared memory above the default 48 KB limit requires an
    // explicit opt-in, exactly as LaunchGlobalToDynamicSharedToGlobal
    // already does; without it large requests make the launch fail.
    if (shared_memory_size > 48 * 1024) {
        cudaFuncSetAttribute(GlobalToDynamicShared<T>, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
    }
    GlobalToDynamicShared<T><<<grid, block, shared_memory_size, stream>>>(input, output);
    return 1;
}
// Launches GlobalToGlobal over shared_memory_size bytes of elements per grid
// row, repeated `grid_y` times. Note the deliberate launch mapping: grid_y
// goes on gridDim.x and the element tiling on gridDim.y (the straightforward
// mapping was tried and left commented out in the original). `block_size`
// only sizes the tiling computation; the thread block is fixed at 1024.
// Always returns 1.
template <typename T>
int32_t LaunchGlobalToGlobal(cudaStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y) {
    const int32_t element_count = shared_memory_size / sizeof(T);
    const int32_t tile_count = (element_count + block_size - 1) / block_size;  // ceil-div
    const dim3 threads(1024);
    const dim3 blocks(grid_y, tile_count);
    GlobalToGlobal<T><<<blocks, threads, 0, stream>>>(input, output);
    return 1;
}
// Launches GlobalToGlobalV4: same traffic as LaunchGlobalToGlobal but each
// thread moves one 16-byte vector, so gridDim.y is divided by 4.
// NOTE(review): assumes grid_y is a multiple of 4 and the buffers are
// 16-byte aligned — confirm at the call site. Always returns 1.
template <typename T>
int32_t LaunchGlobalToGlobalV4(cudaStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y) {
    int32_t sm_element_count = shared_memory_size / sizeof(T);
    int32_t grid_x = (sm_element_count + block_size - 1) / block_size;
    dim3 block(1024);
    // dim3 block(32, 32);
    dim3 grid(grid_x, grid_y / 4);
    GlobalToGlobalV4<T><<<grid, block, 0, stream>>>(input, output);
    return 1;
}
// Launches the staged global->shared->global copy. Threads whose shared
// index exceeds sm_element_count fall back to a direct global->global copy
// inside the kernel. Opts in to >48 KB dynamic shared memory when required.
// Always returns 1.
template <typename T>
int32_t LaunchGlobalToDynamicSharedToGlobal(cudaStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y) {
    int32_t sm_element_count = shared_memory_size / sizeof(T);
    int32_t grid_x = (sm_element_count + block_size - 1) / block_size;
    dim3 block(32, 32);
    dim3 grid(grid_x, grid_y);
    if (shared_memory_size > 48 * 1024) {
        cudaFuncSetAttribute(GlobalToDynamicSharedToGlobal<T>, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
    }
    GlobalToDynamicSharedToGlobal<T><<<grid, block, shared_memory_size, stream>>>(input, sm_element_count, output);
    return 1;
}
// Launches SharedToShared: each 32x32 block copies 1024 elements between the
// two halves of its dynamic shared-memory allocation (no global traffic in
// the kernel body). Always returns 1.
template <typename T>
int32_t LaunchSharedToShared(cudaStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count) {
    int32_t block_count = shared_memory_size / (2 * 1024 * sizeof(T));
    dim3 block(32, 32);
    dim3 grid(cycle_count, block_count);
    // dim3 grid(block_count, cycle_count);
    // Fix: opt in to >48 KB dynamic shared memory, matching
    // LaunchGlobalToDynamicSharedToGlobal; otherwise large requests fail.
    if (shared_memory_size > 48 * 1024) {
        cudaFuncSetAttribute(SharedToShared<T>, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
    }
    SharedToShared<T><<<grid, block, shared_memory_size, stream>>>(input, output);
    return 1;
}
// Launches SharedToSharedV4: vectorized variant moving 16-byte vectors, hence
// the extra factor of 4 in block_count and the 128x8 block shape.
// Always returns 1.
template <typename T>
int32_t LaunchSharedToSharedV4(cudaStream_t stream, const T *input, T *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count) {
    int32_t block_count = shared_memory_size / (2 * 1024 * sizeof(T) * 4);
    dim3 block(128, 8);
    dim3 grid(cycle_count, block_count);
    // dim3 grid(block_count, cycle_count);
    // Fix: same >48 KB dynamic shared-memory opt-in as above.
    if (shared_memory_size > 48 * 1024) {
        cudaFuncSetAttribute(SharedToSharedV4<T>, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size);
    }
    SharedToSharedV4<T><<<grid, block, shared_memory_size, stream>>>(input, output);
    return 1;
}
// Explicit float instantiations so the template definitions above can be
// linked from other translation units.
template int32_t LaunchGlobalToDynamicShared(cudaStream_t stream, const float *input, float *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count);
template int32_t LaunchGlobalToGlobal(cudaStream_t stream, const float *input, float *output,
        const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y);
template int32_t LaunchGlobalToGlobalV4(cudaStream_t stream, const float *input, float *output,
        const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y);
template int32_t LaunchGlobalToDynamicSharedToGlobal(cudaStream_t stream, const float *input, float *output,
        const uint64_t shared_memory_size, const uint64_t block_size, const uint64_t grid_y);
template int32_t LaunchSharedToShared(cudaStream_t stream, const float *input, float *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count);
template int32_t LaunchSharedToSharedV4(cudaStream_t stream, const float *input, float *output,
        const uint64_t shared_memory_size, const uint64_t cycle_count);
}
}
|
722c72efbd870336ed88c3cca8bd4c1954d1a57f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipfft.h>
#include <iostream>
#include <complex>
// #define DATA_LEN 1024
// #define ITERATION 100000
// Benchmark: time ITERATION forward 1D C2C FFTs of length DATA_LEN with hipFFT.
// Usage: <prog> [DATA_LEN] [ITERATION]
int main(int argc, char **argv)
{
    if (argc != 3)
    {
        std::cout << "Usage: " << argv[0] << " [DATA_LEN] [ITERATION]" << std::endl;
        return 1;
    }
    int DATA_LEN = atoi(argv[1]);
    int ITERATION = atoi(argv[2]);
    // atoi() returns 0 on garbage; reject non-positive sizes up front.
    if (DATA_LEN <= 0 || ITERATION <= 0)
    {
        std::cout << "DATA_LEN and ITERATION must be positive integers" << std::endl;
        return 1;
    }
    const size_t bytes = sizeof(hipfftComplex) * DATA_LEN;
    // Pinned host buffer for the input data (faster transfers than malloc).
    hipfftComplex *t_HostInputData;
    hipError_t status = hipHostMalloc((void**)&t_HostInputData, bytes);
    // BUG FIX: the original assigned `status` but never checked it.
    if (status != hipSuccess)
    {
        std::cout << "Cuda error: Failed to allocate" << std::endl;
        return 1;
    }
    for(int i=0; i < DATA_LEN; i++)
    {
        t_HostInputData[i].x = 1.0;
        t_HostInputData[i].y = 1.0;
    }
    // Device input/output buffers.
    hipfftComplex *t_InputData;
    hipfftComplex *t_OutputData;
    hipMalloc((void**)&t_InputData, bytes);
    hipMalloc((void**)&t_OutputData, bytes);
    if (hipGetLastError() != hipSuccess)
    {
        std::cout << "Cuda error: Failed to allocate" << std::endl;
        return 1;
    }
    // BUG FIX: the original cleared only DATA_LEN *bytes*; clear the full
    // buffer of DATA_LEN complex elements.
    hipMemset(t_InputData, 0, bytes);
    hipMemcpy(t_InputData, t_HostInputData, bytes, hipMemcpyHostToDevice);
    hipMemset(t_OutputData, 0, bytes);
    // Single-batch 1D complex-to-complex FFT plan.
    hipfftHandle t_Plan;
    if (hipfftPlan1d(&t_Plan, DATA_LEN, HIPFFT_C2C, 1) != HIPFFT_SUCCESS)
    {
        std::cout << "CUFFT error: Plan creation failed" << std::endl;
        return 1;
    }
    // Time ITERATION forward transforms with GPU events.
    hipEvent_t start, end;
    hipEventCreate(&start);
    hipEventCreate(&end);
    float elapsedTime;
    hipEventRecord(start, 0);
    for(int i=0; i < ITERATION; i++)
    {
        if (hipfftExecC2C(t_Plan, t_InputData, t_OutputData, HIPFFT_FORWARD) != HIPFFT_SUCCESS)
        {
            std::cout << "CUFFT error: ExecC2C Forward failed" << std::endl;
            return 1;
        }
    }
    hipEventRecord(end, 0);
    hipEventSynchronize(end);
    hipEventElapsedTime(&elapsedTime, start, end);
    printf("%d times for the FFT: %fms\n", ITERATION, elapsedTime);
    if (hipDeviceSynchronize() != hipSuccess)
    {
        std::cout << "Cuda error: Failed to synchronize" << std::endl;
        return 1;
    }
    // Copy the (unused) FFT result back to the host to force completion.
    hipfftComplex *t_HostData = (hipfftComplex*)malloc(bytes);
    hipMemcpy(t_HostData, t_OutputData, bytes, hipMemcpyDeviceToHost);
    // Cleanup. BUG FIX: the original leaked t_HostData and both events.
    free(t_HostData);
    hipEventDestroy(start);
    hipEventDestroy(end);
    hipfftDestroy(t_Plan);
    hipFree(t_InputData);
    hipFree(t_OutputData);
    hipHostFree(t_HostInputData);
    return 0;
}
| 722c72efbd870336ed88c3cca8bd4c1954d1a57f.cu | #include <cufft.h>
#include <iostream>
#include <complex>
// #define DATA_LEN 1024
// #define ITERATION 100000
// Benchmark: time ITERATION forward 1D C2C FFTs of length DATA_LEN with cuFFT.
// Usage: <prog> [DATA_LEN] [ITERATION]
int main(int argc, char **argv)
{
    if (argc != 3)
    {
        std::cout << "Usage: " << argv[0] << " [DATA_LEN] [ITERATION]" << std::endl;
        return 1;
    }
    int DATA_LEN = atoi(argv[1]);
    int ITERATION = atoi(argv[2]);
    // atoi() returns 0 on garbage; reject non-positive sizes up front.
    if (DATA_LEN <= 0 || ITERATION <= 0)
    {
        std::cout << "DATA_LEN and ITERATION must be positive integers" << std::endl;
        return 1;
    }
    const size_t bytes = sizeof(cufftComplex) * DATA_LEN;
    // Pinned host buffer for the input data (faster transfers than malloc).
    cufftComplex *t_HostInputData;
    cudaError_t status = cudaMallocHost((void**)&t_HostInputData, bytes);
    // BUG FIX: the original assigned `status` but never checked it.
    if (status != cudaSuccess)
    {
        std::cout << "Cuda error: Failed to allocate" << std::endl;
        return 1;
    }
    for(int i=0; i < DATA_LEN; i++)
    {
        t_HostInputData[i].x = 1.0;
        t_HostInputData[i].y = 1.0;
    }
    // Device input/output buffers.
    cufftComplex *t_InputData;
    cufftComplex *t_OutputData;
    cudaMalloc((void**)&t_InputData, bytes);
    cudaMalloc((void**)&t_OutputData, bytes);
    if (cudaGetLastError() != cudaSuccess)
    {
        std::cout << "Cuda error: Failed to allocate" << std::endl;
        return 1;
    }
    // BUG FIX: the original cleared only DATA_LEN *bytes*; clear the full
    // buffer of DATA_LEN complex elements.
    cudaMemset(t_InputData, 0, bytes);
    cudaMemcpy(t_InputData, t_HostInputData, bytes, cudaMemcpyHostToDevice);
    cudaMemset(t_OutputData, 0, bytes);
    // Single-batch 1D complex-to-complex FFT plan.
    cufftHandle t_Plan;
    if (cufftPlan1d(&t_Plan, DATA_LEN, CUFFT_C2C, 1) != CUFFT_SUCCESS)
    {
        std::cout << "CUFFT error: Plan creation failed" << std::endl;
        return 1;
    }
    // Time ITERATION forward transforms with GPU events.
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    float elapsedTime;
    cudaEventRecord(start, 0);
    for(int i=0; i < ITERATION; i++)
    {
        if (cufftExecC2C(t_Plan, t_InputData, t_OutputData, CUFFT_FORWARD) != CUFFT_SUCCESS)
        {
            std::cout << "CUFFT error: ExecC2C Forward failed" << std::endl;
            return 1;
        }
    }
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsedTime, start, end);
    printf("%d times for the FFT: %fms\n", ITERATION, elapsedTime);
    if (cudaDeviceSynchronize() != cudaSuccess)
    {
        std::cout << "Cuda error: Failed to synchronize" << std::endl;
        return 1;
    }
    // Copy the (unused) FFT result back to the host to force completion.
    cufftComplex *t_HostData = (cufftComplex*)malloc(bytes);
    cudaMemcpy(t_HostData, t_OutputData, bytes, cudaMemcpyDeviceToHost);
    // Cleanup. BUG FIX: the original leaked t_HostData and both events.
    free(t_HostData);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cufftDestroy(t_Plan);
    cudaFree(t_InputData);
    cudaFree(t_OutputData);
    cudaFreeHost(t_HostInputData);
    return 0;
}
|
7c26dcc07b5a3d7d67c1167b3da9e067a5751229.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CLOCKS_PAR_SEC 1000000l
typedef int vector_t;
#define N 8192
/************************************************************************/
/* Example */
/************************************************************************/
// Element-wise vector addition: c[i] = a[i] + b[i].
// NOTE(review): there is no bounds guard, so the launch must cover exactly the
// number of elements; main launches 512 blocks x (N/512 = 16) threads = N, so
// coverage is exact here, but any larger launch would write out of bounds.
__global__ void add_matrix(vector_t *a, vector_t *b, vector_t *c)
{
    int tx = threadIdx.x + blockIdx.x * blockDim.x;
    c[tx] = a[tx] + b[tx];
}
/************************************************************************/
/* Main */
/************************************************************************/
// Adds two length-N integer vectors on the GPU, verifies the result, then
// times the same operation on the CPU for comparison.
int main(int argc, char* argv[])
{
    vector_t *a = new vector_t[N];
    vector_t *b = new vector_t[N];
    vector_t *c = new vector_t[N];
    // Constant inputs so the expected output is known (3 + 2 = 5 everywhere).
    for ( int i = 0; i < N; ++i )
    {
        a[i] = 3;
        b[i] = 2;
    }
    vector_t *ad, *bd, *cd;
    const int size = N*sizeof(vector_t);
    hipMalloc( (vector_t**)&ad, size );
    hipMalloc( (vector_t**)&bd, size );
    hipMalloc( (vector_t**)&cd, size );
    /* GPU execution-time measurement via events (includes the copies below) */
    hipEvent_t start, stop;
    float time;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    /* Copy the input data to the GPU */
    hipMemcpy( ad, a, size, hipMemcpyHostToDevice );
    hipMemcpy( bd, b, size, hipMemcpyHostToDevice );
    // NOTE(review): dimBlock = N/512 = 16 threads per block, dimGrid = 512
    // blocks; 16 * 512 == N so the unguarded kernel covers exactly N elements,
    // but 16 threads/block is below one warp (32) and wastes occupancy.
    dim3 dimBlock ( N/512, 1 );
    dim3 dimGrid ( 512, 1 );
    /* Launch the element-wise addition on the GPU */
    hipLaunchKernelGGL(( add_matrix), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, cd);
    hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
    /* Stop the GPU timing */
    hipEventRecord(stop, 0);
    hipEventSynchronize( stop );
    hipEventElapsedTime(&time, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree( ad );
    hipFree( bd );
    hipFree( cd );
    /* Verify the result: every element must be 3 + 2 = 5 */
    for (int i=0; i<N; i++)
    {
        if (c[i] != 5)
        {
            printf("erreur l'adresse %d \n", i);
            printf("c[%d] = %d \n", i, c[i] );
            getchar();
            return 0;
        }
    }
    /* Print the GPU elapsed time */
    printf("temps coule sur GPU : %f ms \n", time);
    /**********************************************
       run the same operation on the CPU, 1000x
    **********************************************/
    int j=0;
    clock_t t1, t2;
    double tempsCPU;
    t1 = clock();
    /* CPU version of the addition */
    for (j=0; j<1000; j++)
    {
        for (int i=0; i<N; i++)
            c[i] = a[i] + b[i];
    }
    t2 = clock();
    // Uses the locally-defined CLOCKS_PAR_SEC (1e6); assumes POSIX-like
    // CLOCKS_PER_SEC — TODO confirm on non-POSIX platforms.
    tempsCPU = (double)difftime(t2, t1)/(double)CLOCKS_PAR_SEC;
    /* Print the average CPU time per pass, in ms */
    printf("temps coule sur CPU: %f ms \n", tempsCPU * 1000.0 / j);
    getchar();
    delete[] a;
    delete[] b;
    delete[] c;
    return EXIT_SUCCESS;
}
| 7c26dcc07b5a3d7d67c1167b3da9e067a5751229.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CLOCKS_PAR_SEC 1000000l
typedef int vector_t;
#define N 8192
/************************************************************************/
/* Example */
/************************************************************************/
// Element-wise vector addition: c[i] = a[i] + b[i].
// NOTE(review): there is no bounds guard, so the launch must cover exactly the
// number of elements; main launches 512 blocks x (N/512 = 16) threads = N, so
// coverage is exact here, but any larger launch would write out of bounds.
__global__ void add_matrix(vector_t *a, vector_t *b, vector_t *c)
{
    int tx = threadIdx.x + blockIdx.x * blockDim.x;
    c[tx] = a[tx] + b[tx];
}
/************************************************************************/
/* Main */
/************************************************************************/
// Adds two length-N integer vectors on the GPU, verifies the result, then
// times the same operation on the CPU for comparison.
int main(int argc, char* argv[])
{
    vector_t *a = new vector_t[N];
    vector_t *b = new vector_t[N];
    vector_t *c = new vector_t[N];
    // Constant inputs so the expected output is known (3 + 2 = 5 everywhere).
    for ( int i = 0; i < N; ++i )
    {
        a[i] = 3;
        b[i] = 2;
    }
    vector_t *ad, *bd, *cd;
    const int size = N*sizeof(vector_t);
    cudaMalloc( (vector_t**)&ad, size );
    cudaMalloc( (vector_t**)&bd, size );
    cudaMalloc( (vector_t**)&cd, size );
    /* GPU execution-time measurement via events (includes the copies below) */
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /* Copy the input data to the GPU */
    cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
    // NOTE(review): dimBlock = N/512 = 16 threads per block, dimGrid = 512
    // blocks; 16 * 512 == N so the unguarded kernel covers exactly N elements,
    // but 16 threads/block is below one warp (32) and wastes occupancy.
    dim3 dimBlock ( N/512, 1 );
    dim3 dimGrid ( 512, 1 );
    /* Launch the element-wise addition on the GPU */
    add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd);
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    /* Stop the GPU timing */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize( stop );
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree( ad );
    cudaFree( bd );
    cudaFree( cd );
    /* Verify the result: every element must be 3 + 2 = 5 */
    for (int i=0; i<N; i++)
    {
        if (c[i] != 5)
        {
            printf("erreur à l'adresse %d \n", i);
            printf("c[%d] = %d \n", i, c[i] );
            getchar();
            return 0;
        }
    }
    /* Print the GPU elapsed time */
    printf("temps écoule sur GPU : %f ms \n", time);
    /**********************************************
       run the same operation on the CPU, 1000x
    **********************************************/
    int j=0;
    clock_t t1, t2;
    double tempsCPU;
    t1 = clock();
    /* CPU version of the addition */
    for (j=0; j<1000; j++)
    {
        for (int i=0; i<N; i++)
            c[i] = a[i] + b[i];
    }
    t2 = clock();
    // Uses the locally-defined CLOCKS_PAR_SEC (1e6); assumes POSIX-like
    // CLOCKS_PER_SEC — TODO confirm on non-POSIX platforms.
    tempsCPU = (double)difftime(t2, t1)/(double)CLOCKS_PAR_SEC;
    /* Print the average CPU time per pass, in ms */
    printf("temps écoule sur CPU: %f ms \n", tempsCPU * 1000.0 / j);
    getchar();
    delete[] a;
    delete[] b;
    delete[] c;
    return EXIT_SUCCESS;
}
|
38543fb181630ce5578c37e4e5cb91cdf7333eaa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/pair.h>
#include <thrust/transform_reduce.h>
#include <type_traits>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Basic element for the minmax reduce operation.
*
* Stores the minimum and maximum values that have been encountered so far
*/
template <typename T>
struct minmax_pair {
  T min_val;  // running minimum encountered so far
  T max_val;  // running maximum encountered so far
  // Default-constructed pair is the reduction identity: min starts at the
  // type's max-identity and max at its min-identity, so it never "wins".
  __host__ __device__ minmax_pair()
    : min_val(cudf::DeviceMin::identity<T>()), max_val(cudf::DeviceMax::identity<T>()){};
  // A single value is simultaneously its own min and max.
  __host__ __device__ minmax_pair(T val) : min_val(val), max_val(val){};
  __host__ __device__ minmax_pair(T min_val_, T max_val_) : min_val(min_val_), max_val(max_val_){};
};
/**
* @brief Reduce for the minmax operation and return a device scalar.
*
* @tparam Op Binary operator functor
* @tparam InputIterator Input iterator Type
* @tparam OutputType Output scalar type
* @param d_in input iterator
* @param num_items number of items to reduce
* @param binary_op binary operator used to reduce
* @param stream CUDA stream to run kernels on.
* @return rmm::device_scalar<OutputType>
*/
template <typename Op,
          typename InputIterator,
          typename OutputType = typename thrust::iterator_value<InputIterator>::type>
rmm::device_scalar<OutputType> reduce_device(InputIterator d_in,
                                             cudf::size_type num_items,
                                             Op binary_op,
                                             rmm::cuda_stream_view stream)
{
  OutputType identity{};
  // Result lives on the device; initialized to the identity element.
  rmm::device_scalar<OutputType> result{identity, stream};
  // Allocate temporary storage: the first call with a null temp pointer is
  // the standard CUB/hipCUB idiom that only queries the scratch size needed.
  size_t storage_bytes = 0;
  hipcub::DeviceReduce::Reduce(
    nullptr, storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value());
  auto temp_storage = rmm::device_buffer{storage_bytes, stream};
  // Run reduction (asynchronous on `stream`; result stays on the device).
  hipcub::DeviceReduce::Reduce(temp_storage.data(),
                               storage_bytes,
                               d_in,
                               result.data(),
                               num_items,
                               binary_op,
                               identity,
                               stream.value());
  return result;
}
/**
 * @brief Functor that accepts two minmax_pairs and returns a
 * minmax_pair whose minimum and maximum values are the min() and max()
 * respectively of the minimums and maximums of the input pairs.
 *
 * Deliberately does NOT inherit from thrust::binary_function: that base class
 * only supplied argument/result typedefs (unused here) and was deprecated and
 * later removed in Thrust 2.x / CCCL.
 */
template <typename T>
struct minmax_binary_op {
  __device__ minmax_pair<T> operator()(minmax_pair<T> const& lhs, minmax_pair<T> const& rhs) const
  {
    return minmax_pair<T>{thrust::min(lhs.min_val, rhs.min_val),
                          thrust::max(lhs.max_val, rhs.max_val)};
  }
};
/**
* @brief Creates a minmax_pair<T> from a T
*/
// Lifts a plain element into a minmax_pair whose min and max are both that
// element; used as the transform for non-null columns.
template <typename T>
struct create_minmax {
  __device__ minmax_pair<T> operator()(T e) { return minmax_pair<T>{e}; }
};
/**
* @brief Functor that takes a thrust::pair<T, bool> and produces a minmax_pair
* that is <T, T> for minimum and maximum or <cudf::DeviceMin::identity<T>(),
* cudf::DeviceMax::identity<T>()>
*/
template <typename T>
struct create_minmax_with_nulls {
  __device__ minmax_pair<T> operator()(thrust::pair<T, bool> i)
  {
    // i.second is the validity flag: valid elements become {value, value};
    // nulls become the identity pair so they cannot affect the reduction.
    return i.second ? minmax_pair<T>{i.first} : minmax_pair<T>{};
  }
};
/**
* @brief Dispatch functor for minmax operation.
*
* This uses the reduce function to compute the min and max values
* simultaneously for a column of data.
*
* @tparam T The input column's type
*/
struct minmax_functor {
  // Lists and structs are the only unsupported element types.
  template <typename T>
  static constexpr bool is_supported()
  {
    return !(std::is_same_v<T, cudf::list_view> || std::is_same_v<T, cudf::struct_view>);
  }
  // Shared reduction helper: picks the null-aware or null-free iterator and
  // reduces the column to a device-resident minmax_pair.
  template <typename T>
  auto reduce(column_view const& col, rmm::cuda_stream_view stream)
  {
    auto device_col = column_device_view::create(col, stream);
    // compute minimum and maximum values
    if (col.has_nulls()) {
      auto pair_to_minmax = thrust::make_transform_iterator(
        make_pair_iterator<T, true>(*device_col), create_minmax_with_nulls<T>{});
      return reduce_device(pair_to_minmax, col.size(), minmax_binary_op<T>{}, stream);
    } else {
      auto col_to_minmax =
        thrust::make_transform_iterator(device_col->begin<T>(), create_minmax<T>{});
      return reduce_device(col_to_minmax, col.size(), minmax_binary_op<T>{}, stream);
    }
  }
  /**
   * @brief Functor to copy a minmax_pair result to individual scalar instances.
   *
   * Runs as a single device thread (see device_single_thread below) so the
   * device-side result never has to round-trip through the host.
   *
   * @tparam T type of the data
   * @tparam ResultType result type to assign min, max to minmax_pair<T>
   */
  template <typename T, typename ResultType = minmax_pair<T>>
  struct assign_min_max {
    __device__ void operator()()
    {
      *min_data = result->min_val;
      *max_data = result->max_val;
    }
    ResultType* result;
    T* min_data;
    T* max_data;
  };
  // General fixed-width path (everything except strings and dictionaries).
  template <typename T,
            std::enable_if_t<is_supported<T>() and !std::is_same_v<T, cudf::string_view> and
                             !cudf::is_dictionary<T>()>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
    using storage_type = device_storage_type_t<T>;
    // compute minimum and maximum values
    auto dev_result = reduce<storage_type>(col, stream);
    // create output scalars
    using ScalarType = cudf::scalar_type_t<T>;
    auto minimum = new ScalarType(T{}, true, stream, mr);
    auto maximum = new ScalarType(T{}, true, stream, mr);
    // copy dev_result to the output scalars
    device_single_thread(
      assign_min_max<storage_type>{dev_result.data(), minimum->data(), maximum->data()}, stream);
    return {std::unique_ptr<scalar>(minimum), std::unique_ptr<scalar>(maximum)};
  }
  /**
   * @brief Specialization for strings column.
   */
  template <typename T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
    // compute minimum and maximum values
    auto dev_result = reduce<cudf::string_view>(col, stream);
    // copy the minmax_pair to the host; does not copy the strings
    using OutputType = minmax_pair<cudf::string_view>;
    OutputType host_result;
    CUDF_CUDA_TRY(hipMemcpyAsync(
      &host_result, dev_result.data(), sizeof(OutputType), hipMemcpyDeviceToHost, stream.value()));
    // strings are copied to create the scalars here
    return {std::make_unique<string_scalar>(host_result.min_val, true, stream, mr),
            std::make_unique<string_scalar>(host_result.max_val, true, stream, mr)};
  }
  /**
   * @brief Specialization for dictionary column.
   *
   * The reduction runs over dictionary indices; the winning indices are then
   * looked up in the keys column to produce the output scalars.
   */
  template <typename T, std::enable_if_t<cudf::is_dictionary<T>()>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
    // compute minimum and maximum values
    auto dev_result = reduce<T>(col, stream);
    // copy the minmax_pair to the host to call get_element
    using OutputType = minmax_pair<T>;
    OutputType host_result;
    CUDF_CUDA_TRY(hipMemcpyAsync(
      &host_result, dev_result.data(), sizeof(OutputType), hipMemcpyDeviceToHost, stream.value()));
    // get the keys for those indexes
    auto const keys = dictionary_column_view(col).keys();
    return {get_element(keys, static_cast<size_type>(host_result.min_val), stream, mr),
            get_element(keys, static_cast<size_type>(host_result.max_val), stream, mr)};
  }
  // Unsupported types (lists/structs) raise.
  template <typename T, std::enable_if_t<!is_supported<T>()>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
  {
    CUDF_FAIL("type not supported for minmax() operation");
  }
};
} // namespace
/**
 * @copydoc cudf::minmax
 *
 * @param stream CUDA stream used for device memory operations and kernel launches.
 */
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax(
  cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
  if (col.null_count() == col.size()) {
    // this handles empty and all-null columns
    // return scalars with valid==false
    return {make_default_constructed_scalar(col.type(), stream, mr),
            make_default_constructed_scalar(col.type(), stream, mr)};
  }
  // Dispatch on the column's runtime type to a minmax_functor specialization.
  return type_dispatcher(col.type(), minmax_functor{}, col, stream, mr);
}
} // namespace detail
// Public API entry point: forwards to detail::minmax on the default stream.
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax(
  const column_view& col, rmm::mr::device_memory_resource* mr)
{
  return detail::minmax(col, cudf::default_stream_value, mr);
}
} // namespace cudf
| 38543fb181630ce5578c37e4e5cb91cdf7333eaa.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/pair.h>
#include <thrust/transform_reduce.h>
#include <type_traits>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Basic element for the minmax reduce operation.
*
* Stores the minimum and maximum values that have been encountered so far
*/
template <typename T>
struct minmax_pair {
  T min_val;  // running minimum encountered so far
  T max_val;  // running maximum encountered so far
  // Default-constructed pair is the reduction identity: min starts at the
  // type's max-identity and max at its min-identity, so it never "wins".
  __host__ __device__ minmax_pair()
    : min_val(cudf::DeviceMin::identity<T>()), max_val(cudf::DeviceMax::identity<T>()){};
  // A single value is simultaneously its own min and max.
  __host__ __device__ minmax_pair(T val) : min_val(val), max_val(val){};
  __host__ __device__ minmax_pair(T min_val_, T max_val_) : min_val(min_val_), max_val(max_val_){};
};
/**
* @brief Reduce for the minmax operation and return a device scalar.
*
* @tparam Op Binary operator functor
* @tparam InputIterator Input iterator Type
* @tparam OutputType Output scalar type
* @param d_in input iterator
* @param num_items number of items to reduce
* @param binary_op binary operator used to reduce
* @param stream CUDA stream to run kernels on.
* @return rmm::device_scalar<OutputType>
*/
template <typename Op,
          typename InputIterator,
          typename OutputType = typename thrust::iterator_value<InputIterator>::type>
rmm::device_scalar<OutputType> reduce_device(InputIterator d_in,
                                             cudf::size_type num_items,
                                             Op binary_op,
                                             rmm::cuda_stream_view stream)
{
  OutputType identity{};
  // Result lives on the device; initialized to the identity element.
  rmm::device_scalar<OutputType> result{identity, stream};
  // Allocate temporary storage: the first call with a null temp pointer is
  // the standard CUB idiom that only queries the scratch size needed.
  size_t storage_bytes = 0;
  cub::DeviceReduce::Reduce(
    nullptr, storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value());
  auto temp_storage = rmm::device_buffer{storage_bytes, stream};
  // Run reduction (asynchronous on `stream`; result stays on the device).
  cub::DeviceReduce::Reduce(temp_storage.data(),
                            storage_bytes,
                            d_in,
                            result.data(),
                            num_items,
                            binary_op,
                            identity,
                            stream.value());
  return result;
}
/**
 * @brief Functor that accepts two minmax_pairs and returns a
 * minmax_pair whose minimum and maximum values are the min() and max()
 * respectively of the minimums and maximums of the input pairs.
 *
 * Deliberately does NOT inherit from thrust::binary_function: that base class
 * only supplied argument/result typedefs (unused here) and was deprecated and
 * later removed in Thrust 2.x / CCCL.
 */
template <typename T>
struct minmax_binary_op {
  __device__ minmax_pair<T> operator()(minmax_pair<T> const& lhs, minmax_pair<T> const& rhs) const
  {
    return minmax_pair<T>{thrust::min(lhs.min_val, rhs.min_val),
                          thrust::max(lhs.max_val, rhs.max_val)};
  }
};
/**
* @brief Creates a minmax_pair<T> from a T
*/
// Lifts a plain element into a minmax_pair whose min and max are both that
// element; used as the transform for non-null columns.
template <typename T>
struct create_minmax {
  __device__ minmax_pair<T> operator()(T e) { return minmax_pair<T>{e}; }
};
/**
* @brief Functor that takes a thrust::pair<T, bool> and produces a minmax_pair
* that is <T, T> for minimum and maximum or <cudf::DeviceMin::identity<T>(),
* cudf::DeviceMax::identity<T>()>
*/
template <typename T>
struct create_minmax_with_nulls {
  __device__ minmax_pair<T> operator()(thrust::pair<T, bool> i)
  {
    // i.second is the validity flag: valid elements become {value, value};
    // nulls become the identity pair so they cannot affect the reduction.
    return i.second ? minmax_pair<T>{i.first} : minmax_pair<T>{};
  }
};
/**
* @brief Dispatch functor for minmax operation.
*
* This uses the reduce function to compute the min and max values
* simultaneously for a column of data.
*
* @tparam T The input column's type
*/
struct minmax_functor {
  // Lists and structs are the only unsupported element types.
  template <typename T>
  static constexpr bool is_supported()
  {
    return !(std::is_same_v<T, cudf::list_view> || std::is_same_v<T, cudf::struct_view>);
  }
  // Shared reduction helper: picks the null-aware or null-free iterator and
  // reduces the column to a device-resident minmax_pair.
  template <typename T>
  auto reduce(column_view const& col, rmm::cuda_stream_view stream)
  {
    auto device_col = column_device_view::create(col, stream);
    // compute minimum and maximum values
    if (col.has_nulls()) {
      auto pair_to_minmax = thrust::make_transform_iterator(
        make_pair_iterator<T, true>(*device_col), create_minmax_with_nulls<T>{});
      return reduce_device(pair_to_minmax, col.size(), minmax_binary_op<T>{}, stream);
    } else {
      auto col_to_minmax =
        thrust::make_transform_iterator(device_col->begin<T>(), create_minmax<T>{});
      return reduce_device(col_to_minmax, col.size(), minmax_binary_op<T>{}, stream);
    }
  }
  /**
   * @brief Functor to copy a minmax_pair result to individual scalar instances.
   *
   * Runs as a single device thread (see device_single_thread below) so the
   * device-side result never has to round-trip through the host.
   *
   * @tparam T type of the data
   * @tparam ResultType result type to assign min, max to minmax_pair<T>
   */
  template <typename T, typename ResultType = minmax_pair<T>>
  struct assign_min_max {
    __device__ void operator()()
    {
      *min_data = result->min_val;
      *max_data = result->max_val;
    }
    ResultType* result;
    T* min_data;
    T* max_data;
  };
  // General fixed-width path (everything except strings and dictionaries).
  template <typename T,
            std::enable_if_t<is_supported<T>() and !std::is_same_v<T, cudf::string_view> and
                             !cudf::is_dictionary<T>()>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
    using storage_type = device_storage_type_t<T>;
    // compute minimum and maximum values
    auto dev_result = reduce<storage_type>(col, stream);
    // create output scalars
    using ScalarType = cudf::scalar_type_t<T>;
    auto minimum = new ScalarType(T{}, true, stream, mr);
    auto maximum = new ScalarType(T{}, true, stream, mr);
    // copy dev_result to the output scalars
    device_single_thread(
      assign_min_max<storage_type>{dev_result.data(), minimum->data(), maximum->data()}, stream);
    return {std::unique_ptr<scalar>(minimum), std::unique_ptr<scalar>(maximum)};
  }
  /**
   * @brief Specialization for strings column.
   */
  template <typename T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
    // compute minimum and maximum values
    auto dev_result = reduce<cudf::string_view>(col, stream);
    // copy the minmax_pair to the host; does not copy the strings
    using OutputType = minmax_pair<cudf::string_view>;
    OutputType host_result;
    CUDF_CUDA_TRY(cudaMemcpyAsync(
      &host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDeviceToHost, stream.value()));
    // strings are copied to create the scalars here
    return {std::make_unique<string_scalar>(host_result.min_val, true, stream, mr),
            std::make_unique<string_scalar>(host_result.max_val, true, stream, mr)};
  }
  /**
   * @brief Specialization for dictionary column.
   *
   * The reduction runs over dictionary indices; the winning indices are then
   * looked up in the keys column to produce the output scalars.
   */
  template <typename T, std::enable_if_t<cudf::is_dictionary<T>()>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
    // compute minimum and maximum values
    auto dev_result = reduce<T>(col, stream);
    // copy the minmax_pair to the host to call get_element
    using OutputType = minmax_pair<T>;
    OutputType host_result;
    CUDF_CUDA_TRY(cudaMemcpyAsync(
      &host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDeviceToHost, stream.value()));
    // get the keys for those indexes
    auto const keys = dictionary_column_view(col).keys();
    return {get_element(keys, static_cast<size_type>(host_result.min_val), stream, mr),
            get_element(keys, static_cast<size_type>(host_result.max_val), stream, mr)};
  }
  // Unsupported types (lists/structs) raise.
  template <typename T, std::enable_if_t<!is_supported<T>()>* = nullptr>
  std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
    cudf::column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
  {
    CUDF_FAIL("type not supported for minmax() operation");
  }
};
} // namespace
/**
 * @copydoc cudf::minmax
 *
 * @param stream CUDA stream used for device memory operations and kernel launches.
 */
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax(
  cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
  if (col.null_count() == col.size()) {
    // this handles empty and all-null columns
    // return scalars with valid==false
    return {make_default_constructed_scalar(col.type(), stream, mr),
            make_default_constructed_scalar(col.type(), stream, mr)};
  }
  // Dispatch on the column's runtime type to a minmax_functor specialization.
  return type_dispatcher(col.type(), minmax_functor{}, col, stream, mr);
}
} // namespace detail
// Public API entry point: forwards to detail::minmax on the default stream.
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax(
  const column_view& col, rmm::mr::device_memory_resource* mr)
{
  return detail::minmax(col, cudf::default_stream_value, mr);
}
} // namespace cudf
|
458fb44a0bb2992ab556389ff54d65f2ea2335d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/sparse_pool_kernel.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#include "paddle/phi/kernels/funcs/sparse/convolution.h"
#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT = int>
// Sparse max-pool gather/scatter kernel over (rulebook entry, channel) pairs.
// Rulebook layout (per the indexing below): the first rulebook_len entries
// are input row indices, the next rulebook_len entries the corresponding
// output row indices. The caller zero-fills the output first (see the
// thrust::fill in MaxPoolGPUKernel).
// NOTE(review): multiple rulebook entries may map to the same out_i; this
// assumes MaxPool::compute tolerates concurrent updates or that entries for
// one output are processed within one kernel slice — confirm upstream.
__global__ void MaxPoolCudaKernel(const T* in_features_ptr,
                                  const IntT* rulebook_ptr,
                                  const int n,
                                  const int rulebook_len,
                                  const int channels,
                                  T* out_features_ptr) {
  phi::funcs::MaxPool<T> max_pool_functor;
  // Paddle's grid-stride loop macro over n * channels total work items.
  CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) {
    int real_i = i / channels;              // which rulebook entry
    int channel_i = i - real_i * channels;  // which channel within that entry
    IntT in_i = rulebook_ptr[real_i];
    IntT out_i = rulebook_ptr[real_i + rulebook_len];
    max_pool_functor.compute(in_features_ptr[in_i * channels + channel_i],
                             &out_features_ptr[out_i * channels + channel_i]);
  }
}
/**
* x: (N, D, H, W, C)
* kernel: (D, H, W, C, OC)
* out: (N, D, H, W, OC)
**/
template <typename T, typename IntT = int>
void MaxPoolGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const std::vector<int>& kernel_sizes,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
SparseCooTensor* out,
DenseTensor* rulebook) {
const auto& x_dims = x.dims();
int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2];
const std::vector<int>& real_kernel_sizes =
phi::funcs::sparse::PoolResetKernel(kernel_sizes, x_dims[4], x_dims[4]);
DDim out_dims = {1, 1, 1, 1, 1};
phi::funcs::sparse::GetOutShape(
x_dims, real_kernel_sizes, paddings, dilations, strides, &out_dims);
const int in_channels = real_kernel_sizes[3];
std::vector<int> offsets(kernel_size + 1), counter(kernel_size);
DenseTensorMeta counter_meta(
DataType::INT32, {kernel_size}, DataLayout::NCHW);
DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta));
DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta));
DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW);
DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta));
DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta));
// 1. product rulebook
int rulebook_len = ProductRuleBook<T, GPUContext, IntT>(dev_ctx,
x,
real_kernel_sizes,
paddings,
dilations,
strides,
out_dims,
false,
rulebook,
&counter_per_kernel,
&offsets_per_kernel,
&out_index,
&unique_value,
out,
&counter,
&offsets);
const IntT* rulebook_ptr = rulebook->data<IntT>();
T* out_features_ptr = out->mutable_non_zero_elements()->data<T>();
const T* in_features_ptr = x.non_zero_elements().data<T>();
// 2. max pool
#ifdef PADDLE_WITH_HIP
thrust::fill(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::fill(thrust::hip::par.on(dev_ctx.stream()),
#endif
out_features_ptr,
out_features_ptr + out->non_zero_elements().numel(),
static_cast<T>(0));
// TODO(zhangkaihuo) Replacing multiple calls with one kernel may be faster
for (int i = 0; i < kernel_size; i++) {
if (counter[i] <= 0) {
continue;
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, counter[i] * in_channels, 1);
hipLaunchKernelGGL(( MaxPoolCudaKernel<T, IntT>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), in_features_ptr,
rulebook_ptr + offsets[i] + rulebook_len,
counter[i],
rulebook_len,
in_channels,
out_features_ptr);
}
}
template <typename T, typename Context>
void MaxPoolKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const std::vector<int>& kernel_sizes,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
SparseCooTensor* out,
DenseTensor* rulebook) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "MaxPoolGPUKernel", ([&] {
MaxPoolGPUKernel<T, data_t>(dev_ctx,
x,
kernel_sizes,
paddings,
dilations,
strides,
out,
rulebook);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_maxpool,
GPU,
ALL_LAYOUT,
phi::sparse::MaxPoolKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
| 458fb44a0bb2992ab556389ff54d65f2ea2335d8.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/sparse_pool_kernel.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#include "paddle/phi/kernels/funcs/sparse/convolution.h"
#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT = int>
__global__ void MaxPoolCudaKernel(const T* in_features_ptr,
const IntT* rulebook_ptr,
const int n,
const int rulebook_len,
const int channels,
T* out_features_ptr) {
phi::funcs::MaxPool<T> max_pool_functor;
CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) {
int real_i = i / channels;
int channel_i = i - real_i * channels;
IntT in_i = rulebook_ptr[real_i];
IntT out_i = rulebook_ptr[real_i + rulebook_len];
max_pool_functor.compute(in_features_ptr[in_i * channels + channel_i],
&out_features_ptr[out_i * channels + channel_i]);
}
}
/**
* x: (N, D, H, W, C)
* kernel: (D, H, W, C, OC)
* out: (N, D, H, W, OC)
**/
template <typename T, typename IntT = int>
void MaxPoolGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const std::vector<int>& kernel_sizes,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
SparseCooTensor* out,
DenseTensor* rulebook) {
const auto& x_dims = x.dims();
int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2];
const std::vector<int>& real_kernel_sizes =
phi::funcs::sparse::PoolResetKernel(kernel_sizes, x_dims[4], x_dims[4]);
DDim out_dims = {1, 1, 1, 1, 1};
phi::funcs::sparse::GetOutShape(
x_dims, real_kernel_sizes, paddings, dilations, strides, &out_dims);
const int in_channels = real_kernel_sizes[3];
std::vector<int> offsets(kernel_size + 1), counter(kernel_size);
DenseTensorMeta counter_meta(
DataType::INT32, {kernel_size}, DataLayout::NCHW);
DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta));
DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta));
DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW);
DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta));
DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta));
// 1. product rulebook
int rulebook_len = ProductRuleBook<T, GPUContext, IntT>(dev_ctx,
x,
real_kernel_sizes,
paddings,
dilations,
strides,
out_dims,
false,
rulebook,
&counter_per_kernel,
&offsets_per_kernel,
&out_index,
&unique_value,
out,
&counter,
&offsets);
const IntT* rulebook_ptr = rulebook->data<IntT>();
T* out_features_ptr = out->mutable_non_zero_elements()->data<T>();
const T* in_features_ptr = x.non_zero_elements().data<T>();
// 2. max pool
#ifdef PADDLE_WITH_HIP
thrust::fill(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::fill(thrust::cuda::par.on(dev_ctx.stream()),
#endif
out_features_ptr,
out_features_ptr + out->non_zero_elements().numel(),
static_cast<T>(0));
// TODO(zhangkaihuo) Replacing multiple calls with one kernel may be faster
for (int i = 0; i < kernel_size; i++) {
if (counter[i] <= 0) {
continue;
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, counter[i] * in_channels, 1);
MaxPoolCudaKernel<T, IntT>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(in_features_ptr,
rulebook_ptr + offsets[i] + rulebook_len,
counter[i],
rulebook_len,
in_channels,
out_features_ptr);
}
}
template <typename T, typename Context>
void MaxPoolKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const std::vector<int>& kernel_sizes,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
SparseCooTensor* out,
DenseTensor* rulebook) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "MaxPoolGPUKernel", ([&] {
MaxPoolGPUKernel<T, data_t>(dev_ctx,
x,
kernel_sizes,
paddings,
dilations,
strides,
out,
rulebook);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_maxpool,
GPU,
ALL_LAYOUT,
phi::sparse::MaxPoolKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
4355f335adb9ed3964434281e5ee725f3779e0a9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA Expectation Maximization with Gaussian Mixture Models
* Multi-GPU implemenetation using OpenMP
*
* Written By: Andrew Pangborn
* 09/2009
*
* Department of Computer Engineering
* Rochester Institute of Technology
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h> // for clock(), clock_t, CLOCKS_PER_SEC
#include <stdlib.h>
#include <float.h>
#include <chrono>
#include <iostream>
#include <fstream>
#include <vector>
#include <hip/hip_runtime.h>
#include "gaussian.h"
#include "gaussian_kernel.cu"
#include "cluster.cu"
#include "readData.cu"
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv) {
int original_num_clusters, desired_num_clusters, ideal_num_clusters;
// Validate the command-line arguments, parse # of clusters, etc
// Don't continue if we had a problem with the program arguments
if(validateArguments(argc,argv,&original_num_clusters,&desired_num_clusters))
return 1;
int num_dimensions;
int num_events;
// Read FCS data
PRINT("Parsing input file...");
// This stores the data in a 1-D array with consecutive values being the dimensions from a single event
// (num_events by num_dimensions matrix)
float* fcs_data_by_event = readData(argv[2],&num_dimensions,&num_events);
if(!fcs_data_by_event) {
printf("Error parsing input file. This could be due to an empty file ");
printf("or an inconsistent number of dimensions. Aborting.\n");
return 1;
}
auto start = std::chrono::steady_clock::now();
clusters_t* clusters = cluster(original_num_clusters, desired_num_clusters, &ideal_num_clusters,
num_dimensions, num_events, fcs_data_by_event);
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
clusters_t saved_clusters;
memcpy(&saved_clusters,clusters,sizeof(clusters_t));
const char* result_suffix = ".results";
const char* summary_suffix = ".summary";
int filenamesize1 = strlen(argv[3]) + strlen(result_suffix) + 1;
int filenamesize2 = strlen(argv[3]) + strlen(summary_suffix) + 1;
char* result_filename = (char*) malloc(filenamesize1);
char* summary_filename = (char*) malloc(filenamesize2);
strcpy(result_filename,argv[3]);
strcpy(summary_filename,argv[3]);
strcat(result_filename,result_suffix);
strcat(summary_filename,summary_suffix);
PRINT("Summary filename: %s\n",summary_filename);
PRINT("Results filename: %s\n",result_filename);
// Open up the output file for cluster summary
FILE* outf = fopen(summary_filename,"w");
if(!outf) {
printf("ERROR: Unable to open file '%s' for writing.\n",argv[3]);
return -1;
}
// Print the clusters with the lowest rissanen score to the console and output file
for(int c=0; c<ideal_num_clusters; c++) {
if(ENABLE_PRINT) {
PRINT("Cluster #%d\n",c);
printCluster(saved_clusters,c,num_dimensions);
PRINT("\n\n");
}
if(ENABLE_OUTPUT) {
fprintf(outf,"Cluster #%d\n",c);
writeCluster(outf,saved_clusters,c,num_dimensions);
fprintf(outf,"\n\n");
}
}
fclose(outf);
if(ENABLE_OUTPUT) {
// Open another output file for the event level clustering results
FILE* fresults = fopen(result_filename,"w");
char header[1000];
FILE* input_file = fopen(argv[2],"r");
fgets(header,1000,input_file);
fclose(input_file);
fprintf(fresults,"%s",header);
for(int i=0; i<num_events; i++) {
for(int d=0; d<num_dimensions-1; d++) {
fprintf(fresults,"%f,",fcs_data_by_event[i*num_dimensions+d]);
}
fprintf(fresults,"%f",fcs_data_by_event[i*num_dimensions+num_dimensions-1]);
fprintf(fresults,"\t");
for(int c=0; c<ideal_num_clusters-1; c++) {
fprintf(fresults,"%f,",saved_clusters.memberships[c*num_events+i]);
}
fprintf(fresults,"%f",saved_clusters.memberships[(ideal_num_clusters-1)*num_events+i]);
fprintf(fresults,"\n");
}
fclose(fresults);
}
// cleanup host memory
free(fcs_data_by_event);
freeCluster(&saved_clusters);
printf("Execution time of the cluster function %f (s)\n", time * 1e-9f);
return 0;
}
| 4355f335adb9ed3964434281e5ee725f3779e0a9.cu | /*
* CUDA Expectation Maximization with Gaussian Mixture Models
* Multi-GPU implemenetation using OpenMP
*
* Written By: Andrew Pangborn
* 09/2009
*
* Department of Computer Engineering
* Rochester Institute of Technology
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h> // for clock(), clock_t, CLOCKS_PER_SEC
#include <stdlib.h>
#include <float.h>
#include <chrono>
#include <iostream>
#include <fstream>
#include <vector>
#include <cuda.h>
#include "gaussian.h"
#include "gaussian_kernel.cu"
#include "cluster.cu"
#include "readData.cu"
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv) {
int original_num_clusters, desired_num_clusters, ideal_num_clusters;
// Validate the command-line arguments, parse # of clusters, etc
// Don't continue if we had a problem with the program arguments
if(validateArguments(argc,argv,&original_num_clusters,&desired_num_clusters))
return 1;
int num_dimensions;
int num_events;
// Read FCS data
PRINT("Parsing input file...");
// This stores the data in a 1-D array with consecutive values being the dimensions from a single event
// (num_events by num_dimensions matrix)
float* fcs_data_by_event = readData(argv[2],&num_dimensions,&num_events);
if(!fcs_data_by_event) {
printf("Error parsing input file. This could be due to an empty file ");
printf("or an inconsistent number of dimensions. Aborting.\n");
return 1;
}
auto start = std::chrono::steady_clock::now();
clusters_t* clusters = cluster(original_num_clusters, desired_num_clusters, &ideal_num_clusters,
num_dimensions, num_events, fcs_data_by_event);
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
clusters_t saved_clusters;
memcpy(&saved_clusters,clusters,sizeof(clusters_t));
const char* result_suffix = ".results";
const char* summary_suffix = ".summary";
int filenamesize1 = strlen(argv[3]) + strlen(result_suffix) + 1;
int filenamesize2 = strlen(argv[3]) + strlen(summary_suffix) + 1;
char* result_filename = (char*) malloc(filenamesize1);
char* summary_filename = (char*) malloc(filenamesize2);
strcpy(result_filename,argv[3]);
strcpy(summary_filename,argv[3]);
strcat(result_filename,result_suffix);
strcat(summary_filename,summary_suffix);
PRINT("Summary filename: %s\n",summary_filename);
PRINT("Results filename: %s\n",result_filename);
// Open up the output file for cluster summary
FILE* outf = fopen(summary_filename,"w");
if(!outf) {
printf("ERROR: Unable to open file '%s' for writing.\n",argv[3]);
return -1;
}
// Print the clusters with the lowest rissanen score to the console and output file
for(int c=0; c<ideal_num_clusters; c++) {
if(ENABLE_PRINT) {
PRINT("Cluster #%d\n",c);
printCluster(saved_clusters,c,num_dimensions);
PRINT("\n\n");
}
if(ENABLE_OUTPUT) {
fprintf(outf,"Cluster #%d\n",c);
writeCluster(outf,saved_clusters,c,num_dimensions);
fprintf(outf,"\n\n");
}
}
fclose(outf);
if(ENABLE_OUTPUT) {
// Open another output file for the event level clustering results
FILE* fresults = fopen(result_filename,"w");
char header[1000];
FILE* input_file = fopen(argv[2],"r");
fgets(header,1000,input_file);
fclose(input_file);
fprintf(fresults,"%s",header);
for(int i=0; i<num_events; i++) {
for(int d=0; d<num_dimensions-1; d++) {
fprintf(fresults,"%f,",fcs_data_by_event[i*num_dimensions+d]);
}
fprintf(fresults,"%f",fcs_data_by_event[i*num_dimensions+num_dimensions-1]);
fprintf(fresults,"\t");
for(int c=0; c<ideal_num_clusters-1; c++) {
fprintf(fresults,"%f,",saved_clusters.memberships[c*num_events+i]);
}
fprintf(fresults,"%f",saved_clusters.memberships[(ideal_num_clusters-1)*num_events+i]);
fprintf(fresults,"\n");
}
fclose(fresults);
}
// cleanup host memory
free(fcs_data_by_event);
freeCluster(&saved_clusters);
printf("Execution time of the cluster function %f (s)\n", time * 1e-9f);
return 0;
}
|
fb059aa8bead44bddc41fa33904e3ebdf00678f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
__global__ void convertToCaps(char *str,int length){
int index = threadIdx.x+blockIdx.x*blockDim.x;
if(index<length){
if(str[index]>=97&&str[index]<=122)
str[index]-=32;
}
}
__global__ void findMaxOccurence(char *str,int *count,int length){
int index = threadIdx.x+blockIdx.x*blockDim.x;
if(index<length){
atomicAdd(&count[(int)str[index]-65],1);
}
}
int countMax(int *count){
int max=0;
for(int i=1;i<26;i++){
if(count[i]>count[max]){
max=i;
}
}
return max;
}
int main(){
char *str;
int n;
char dummy;
printf("\nEnter length of string:");
scanf("%d",&n);
scanf("%c",&dummy);
str = (char*)malloc(n*sizeof(char));
printf("\nEnter the String:");
scanf("%[^\n]s",str);
int noOfBlocks = n/1024;
int noOfThreads;
noOfBlocks++;
if(noOfBlocks==1){
noOfThreads=n;
}
else{
noOfThreads=1024;
}
char *dev_str=NULL;int *count;
hipMallocManaged((void**)&dev_str,n*sizeof(char));
hipMallocManaged((void**)&count,26*sizeof(int));
for(int i=0;i<26;i++){
count[i]=0;
}
strcpy(dev_str,str);
hipLaunchKernelGGL(( convertToCaps), dim3(noOfBlocks),dim3(noOfThreads), 0, 0, dev_str,n);
hipDeviceSynchronize();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( findMaxOccurence), dim3(noOfBlocks),dim3(noOfThreads), 0, 0, dev_str,count,n);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
//printf("\n%s",dev_str);
int max = countMax(count);
printf("\nMaximum count = %d",count[max]);
printf("\nExecution Time = %f ms",milliseconds);
//printf("%s",str);
//printf("\n%d",findLen(str));
return 0;
} | fb059aa8bead44bddc41fa33904e3ebdf00678f2.cu | #include<stdio.h>
#include<cuda.h>
__global__ void convertToCaps(char *str,int length){
int index = threadIdx.x+blockIdx.x*blockDim.x;
if(index<length){
if(str[index]>=97&&str[index]<=122)
str[index]-=32;
}
}
__global__ void findMaxOccurence(char *str,int *count,int length){
int index = threadIdx.x+blockIdx.x*blockDim.x;
if(index<length){
atomicAdd(&count[(int)str[index]-65],1);
}
}
int countMax(int *count){
int max=0;
for(int i=1;i<26;i++){
if(count[i]>count[max]){
max=i;
}
}
return max;
}
int main(){
char *str;
int n;
char dummy;
printf("\nEnter length of string:");
scanf("%d",&n);
scanf("%c",&dummy);
str = (char*)malloc(n*sizeof(char));
printf("\nEnter the String:");
scanf("%[^\n]s",str);
int noOfBlocks = n/1024;
int noOfThreads;
noOfBlocks++;
if(noOfBlocks==1){
noOfThreads=n;
}
else{
noOfThreads=1024;
}
char *dev_str=NULL;int *count;
cudaMallocManaged((void**)&dev_str,n*sizeof(char));
cudaMallocManaged((void**)&count,26*sizeof(int));
for(int i=0;i<26;i++){
count[i]=0;
}
strcpy(dev_str,str);
convertToCaps<<<noOfBlocks,noOfThreads>>>(dev_str,n);
cudaDeviceSynchronize();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
findMaxOccurence<<<noOfBlocks,noOfThreads>>>(dev_str,count,n);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
//printf("\n%s",dev_str);
int max = countMax(count);
printf("\nMaximum count = %d",count[max]);
printf("\nExecution Time = %f ms",milliseconds);
//printf("%s",str);
//printf("\n%d",findLen(str));
return 0;
} |
3066b95dd443fded56e497f3e4326c7e021b5c79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void print_hello() {
printf("hello from thread %d of block %d\n", threadIdx.x, blockIdx.x);
}
int main() {
hipLaunchKernelGGL(( print_hello), dim3(3), dim3(5), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 3066b95dd443fded56e497f3e4326c7e021b5c79.cu | #include <stdio.h>
__global__ void print_hello() {
printf("hello from thread %d of block %d\n", threadIdx.x, blockIdx.x);
}
int main() {
print_hello<<<3, 5>>>();
cudaDeviceSynchronize();
return 0;
}
|
01f56f9d29a5aef4e2dbfe90f9e99a7bad6a2d6b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <inttypes.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <string.h>
__global__ void warmup(uint8_t *arr, size_t n)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
arr[tid] = 1U;
}
__global__ void test(uint8_t *arr, size_t n, size_t stride, uint64_t *timer)
{
size_t i;
size_t j;
uint64_t sumDeltas = 0;
uint64_t numReads = 0;
for (j = 0U; j < 1U; ++j)
{
for (i = 0U; i < n; i += stride)
{
uint64_t t1 = clock64();
arr[i] += 1U;
uint64_t t2 = clock64();
sumDeltas += (t2-t1);
++numReads;
}
}
sumDeltas /= numReads;
*timer = sumDeltas;
}
int main(void)
{
float ms;
size_t arraySize = 1024U * 512U;
uint8_t *gpuBuffer = NULL;
hipError_t err = hipMalloc(&gpuBuffer, arraySize);
if (err != hipSuccess)
{
printf("Failed to alloc gpu mem\n");
return -1;
}
uint64_t *gpuClock;
err = hipMalloc(&gpuClock, sizeof(*gpuClock));
if (err != hipSuccess)
{
printf("Failed to alloc clock timer\n");
return -1;
}
uint64_t gpuTimerOnCpu = 0U;
hipMemcpy(gpuClock, &gpuTimerOnCpu, sizeof(*gpuClock), hipMemcpyHostToDevice);
hipEvent_t startEvent, endEvent;
err = hipEventCreate(&startEvent);
if (err != hipSuccess)
{
printf("Failed to create start event\n");
}
err = hipEventCreate(&endEvent);
if (err != hipSuccess)
{
printf("Failed to create end event\n");
}
{
// warm-up gpu buffer
size_t threadBlockSize = 512U;
size_t numBlocks = (arraySize + threadBlockSize - 1U) / threadBlockSize;
printf("Launch warm-up kernel. Num blocks: %lu, Block size: %lu\n", numBlocks, threadBlockSize);
hipLaunchKernelGGL(( warmup), dim3(dim3(numBlocks,1,1)), dim3(dim3(threadBlockSize,1,1)), 0, 0, gpuBuffer, arraySize);
}
printf("Warm up gpu cache line kernel\n");
err = hipEventRecord(startEvent);
hipLaunchKernelGGL(( test), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, gpuBuffer, arraySize, 1U, gpuClock);
err = hipEventRecord(endEvent);
err = hipEventSynchronize(endEvent);
err = hipEventElapsedTime(&ms, startEvent, endEvent);
printf("Warm-up took %f\n", ms);
FILE *out = fopen("gpu_cache_line_size_data.txt", "w+");
const size_t maxStrideSize = 2048U;
for (size_t i = 1; i < maxStrideSize; ++i)
{
double totalms = .0f;
for (size_t q = 0U; q < 64U; ++q)
{
hipLaunchKernelGGL(( test), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, gpuBuffer, arraySize, i, gpuClock);
hipMemcpy(&gpuTimerOnCpu, gpuClock, sizeof(*gpuClock), hipMemcpyDeviceToHost);
totalms += (double)gpuTimerOnCpu;
}
fprintf(out, "%lu %f\n", i, i*totalms/(arraySize * 64U));
printf("Done %lu/%lu\n", i, maxStrideSize);
}
fclose(out);
hipEventDestroy(endEvent);
hipEventDestroy(startEvent);
hipFree(gpuBuffer);
return 0;
}
| 01f56f9d29a5aef4e2dbfe90f9e99a7bad6a2d6b.cu | #include <stdio.h>
#include <inttypes.h>
#include <cuda.h>
#include <stdlib.h>
#include <string.h>
__global__ void warmup(uint8_t *arr, size_t n)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
arr[tid] = 1U;
}
__global__ void test(uint8_t *arr, size_t n, size_t stride, uint64_t *timer)
{
size_t i;
size_t j;
uint64_t sumDeltas = 0;
uint64_t numReads = 0;
for (j = 0U; j < 1U; ++j)
{
for (i = 0U; i < n; i += stride)
{
uint64_t t1 = clock64();
arr[i] += 1U;
uint64_t t2 = clock64();
sumDeltas += (t2-t1);
++numReads;
}
}
sumDeltas /= numReads;
*timer = sumDeltas;
}
int main(void)
{
float ms;
size_t arraySize = 1024U * 512U;
uint8_t *gpuBuffer = NULL;
cudaError_t err = cudaMalloc(&gpuBuffer, arraySize);
if (err != cudaSuccess)
{
printf("Failed to alloc gpu mem\n");
return -1;
}
uint64_t *gpuClock;
err = cudaMalloc(&gpuClock, sizeof(*gpuClock));
if (err != cudaSuccess)
{
printf("Failed to alloc clock timer\n");
return -1;
}
uint64_t gpuTimerOnCpu = 0U;
cudaMemcpy(gpuClock, &gpuTimerOnCpu, sizeof(*gpuClock), cudaMemcpyHostToDevice);
cudaEvent_t startEvent, endEvent;
err = cudaEventCreate(&startEvent);
if (err != cudaSuccess)
{
printf("Failed to create start event\n");
}
err = cudaEventCreate(&endEvent);
if (err != cudaSuccess)
{
printf("Failed to create end event\n");
}
{
// warm-up gpu buffer
size_t threadBlockSize = 512U;
size_t numBlocks = (arraySize + threadBlockSize - 1U) / threadBlockSize;
printf("Launch warm-up kernel. Num blocks: %lu, Block size: %lu\n", numBlocks, threadBlockSize);
warmup<<<dim3(numBlocks,1,1), dim3(threadBlockSize,1,1)>>>(gpuBuffer, arraySize);
}
printf("Warm up gpu cache line kernel\n");
err = cudaEventRecord(startEvent);
test<<<dim3(1,1,1), dim3(1,1,1)>>>(gpuBuffer, arraySize, 1U, gpuClock);
err = cudaEventRecord(endEvent);
err = cudaEventSynchronize(endEvent);
err = cudaEventElapsedTime(&ms, startEvent, endEvent);
printf("Warm-up took %f\n", ms);
FILE *out = fopen("gpu_cache_line_size_data.txt", "w+");
const size_t maxStrideSize = 2048U;
for (size_t i = 1; i < maxStrideSize; ++i)
{
double totalms = .0f;
for (size_t q = 0U; q < 64U; ++q)
{
test<<<dim3(1,1,1), dim3(1,1,1)>>>(gpuBuffer, arraySize, i, gpuClock);
cudaMemcpy(&gpuTimerOnCpu, gpuClock, sizeof(*gpuClock), cudaMemcpyDeviceToHost);
totalms += (double)gpuTimerOnCpu;
}
fprintf(out, "%lu %f\n", i, i*totalms/(arraySize * 64U));
printf("Done %lu/%lu\n", i, maxStrideSize);
}
fclose(out);
cudaEventDestroy(endEvent);
cudaEventDestroy(startEvent);
cudaFree(gpuBuffer);
return 0;
}
|
287497e9b2b46f13e84af1da038a3fb5987446ab.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/datasets/make_regression.hpp>
#include <cuml/svm/linear.hpp>
#include <raft/core/handle.hpp>
#include <gtest/gtest.h>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <test_utils.h>
namespace ML {
namespace SVM {
struct LinearSVMTestParams {
int nRowsTrain;
int nRowsTest;
int nCols;
/** nClasses == 1 implies regression. */
int nClasses;
/** Standard deviation of clusters or noise. */
double errStd;
double bias;
double tolerance;
uint64_t seed;
LinearSVMParams modelParams;
};
template <typename T, typename ParamsReader>
struct LinearSVMTest : public ::testing::TestWithParam<typename ParamsReader::Params> {
const LinearSVMTestParams params;
const raft::handle_t handle;
hipStream_t stream;
LinearSVMTest()
: testing::TestWithParam<typename ParamsReader::Params>(),
params(
ParamsReader::read(::testing::TestWithParam<typename ParamsReader::Params>::GetParam())),
handle(rmm::cuda_stream_per_thread, std::make_shared<rmm::cuda_stream_pool>(8)),
stream(handle.get_stream())
{
}
bool isInputValid() const
{
/* Fail to fit data with bias. */
if (params.nClasses == 1 && params.bias != 0 && !params.modelParams.fit_intercept) return false;
/* This means we don't have enough dimensions to linearly separate every cluster
from the rest.
In such case, the error is always huge (fitting is impossible).
*/
if (params.nClasses > 1 && params.nClasses > (1 << min(30, params.nCols))) return false;
return true;
}
testing::AssertionResult errorRate()
{
auto [XBuf, yBuf] = genData(params.nRowsTrain + params.nRowsTest);
auto [XTrain, XTest] = splitData(XBuf, params.nRowsTrain, params.nCols);
auto [yTrain, yTest] = splitData(yBuf, params.nRowsTrain, 1);
auto model = LinearSVMModel<T>::fit(handle,
params.modelParams,
XTrain.data(),
params.nRowsTrain,
params.nCols,
yTrain.data(),
(const T*)nullptr);
rmm::device_uvector<T> yOut(yTest.size(), stream);
LinearSVMModel<T>::predict(
handle, params.modelParams, model, XTest.data(), params.nRowsTest, params.nCols, yOut.data());
rmm::device_scalar<T> errorBuf(stream);
if (params.nClasses == 1) // regression
raft::linalg::mapThenSumReduce(
errorBuf.data(),
params.nRowsTest,
[] __device__(const T yRef, const T yOut) {
T s = yRef * yRef + yOut * yOut;
T d = yRef - yOut;
return d * d / s;
},
stream,
yTest.data(),
yOut.data());
else // classification
raft::linalg::mapThenSumReduce(
errorBuf.data(),
params.nRowsTest,
[] __device__(const T yRef, const T yOut) { return T(yRef != yOut); },
stream,
yTest.data(),
yOut.data());
// getting the error value forces the stream synchronization
T error = errorBuf.value(stream) / T(params.nRowsTest);
LinearSVMModel<T>::free(handle, model);
if (error <= params.tolerance)
return testing::AssertionSuccess();
else
return testing::AssertionFailure()
<< "Error rate = " << error << " > tolerance = " << params.tolerance;
}
testing::AssertionResult probabilitySumsToOne()
{
if (!params.modelParams.probability)
return testing::AssertionFailure() << "Non-probabolistic model does not support this test.";
if (params.nClasses < 2)
return testing::AssertionFailure() << "Regression model does not support this test.";
auto [XBuf, yBuf] = genData(params.nRowsTrain + params.nRowsTest);
auto [XTrain, XTest] = splitData(XBuf, params.nRowsTrain, params.nCols);
auto [yTrain, yTest] = splitData(yBuf, params.nRowsTrain, 1);
auto model = LinearSVMModel<T>::fit(handle,
params.modelParams,
XTrain.data(),
params.nRowsTrain,
params.nCols,
yTrain.data(),
(const T*)nullptr);
rmm::device_scalar<T> errorBuf(stream);
rmm::device_uvector<T> yProbs(yTest.size() * params.nClasses, stream);
LinearSVMModel<T>::predictProba(handle,
params.modelParams,
model,
XTest.data(),
params.nRowsTest,
params.nCols,
false,
yProbs.data());
rmm::device_uvector<T> yOut(yTest.size(), stream);
raft::linalg::reduce<T, T, int>(
yOut.data(), yProbs.data(), params.nClasses, params.nRowsTest, 0, true, true, stream);
raft::linalg::mapThenReduce(
errorBuf.data(),
params.nRowsTest,
T(0),
[] __device__(const T yOut) { return raft::myAbs<T>(1.0 - yOut); },
hipcub::Max(),
stream,
yOut.data());
T error = errorBuf.value(stream);
LinearSVMModel<T>::free(handle, model);
if (error <= params.tolerance)
return testing::AssertionSuccess();
else
return testing::AssertionFailure()
<< "Sum of probabilities deviated from zero (error = " << error << ")";
}
/**
 * Accuracy check through the probabilistic interface: fits a model, computes
 * class probabilities on a held-out split with predictProba, recovers the
 * argmax class from the probabilities, and compares the misclassification
 * rate against params.tolerance.
 */
testing::AssertionResult probabilityErrorRate()
{
if (!params.modelParams.probability)
return testing::AssertionFailure() << "Non-probabolistic model does not support this test.";
if (params.nClasses < 2)
return testing::AssertionFailure() << "Regression model does not support this test.";
// Generate one dataset and split it into train/test parts (column-major).
auto [XBuf, yBuf] = genData(params.nRowsTrain + params.nRowsTest);
auto [XTrain, XTest] = splitData(XBuf, params.nRowsTrain, params.nCols);
auto [yTrain, yTest] = splitData(yBuf, params.nRowsTrain, 1);
auto model = LinearSVMModel<T>::fit(handle,
params.modelParams,
XTrain.data(),
params.nRowsTrain,
params.nCols,
yTrain.data(),
(const T*)nullptr);
rmm::device_scalar<T> errorBuf(stream);
rmm::device_uvector<T> yProbs(yTest.size() * params.nClasses, stream);
rmm::device_uvector<T> yOut(yTest.size(), stream);
LinearSVMModel<T>::predictProba(handle,
params.modelParams,
model,
XTest.data(),
params.nRowsTest,
params.nCols,
false,
yProbs.data());
// Argmax over classes via a single reduce: the map lambda encodes each
// (class i, probability p) as 2*i + p + 0.5, so the even integer part
// carries the class index and fmod(., 2) carries p + 0.5; the reduce lambda
// keeps the encoding with the larger fractional part, i.e. the most
// probable class for the row.
raft::linalg::reduce<T, T, int>(
yOut.data(),
yProbs.data(),
params.nClasses,
params.nRowsTest,
0,
true,
true,
stream,
false,
[] __device__(const T p, const int i) { return T(i * 2) + p + 0.5; },
[] __device__(const T a, const T b) { return fmod(a, 2) >= fmod(b, 2) ? a : b; });
// Count mispredictions: when the decoded class equals yRef, yOut - 2*yRef
// falls strictly inside (0, 2); anything outside that interval is an error.
raft::linalg::mapThenSumReduce(
errorBuf.data(),
params.nRowsTest,
[] __device__(const T yRef, const T yOut) {
T p = yOut - 2 * yRef;
return T(p <= 0 || p >= 2);
},
stream,
yTest.data(),
yOut.data());
// getting the error value forces the stream synchronization
T error = errorBuf.value(stream) / T(params.nRowsTest);
LinearSVMModel<T>::free(handle, model);
if (error <= params.tolerance)
return testing::AssertionSuccess();
else
return testing::AssertionFailure()
<< "Error rate = " << error << " > tolerance = " << params.tolerance;
}
/**
 * Generate a required amount of (X, y) data at once.
 *
 * Regression (nClasses == 1): make_regression + transpose so X comes out
 * column-major. Classification: make_blobs around randomly drawn centers,
 * some of whose coordinates are overridden to guarantee cluster separation;
 * integer labels are converted to T.
 *
 * @param nRows number of samples to generate.
 * @return tuple (X, y); X is nRows x nCols, column-major, on device.
 */
std::tuple<rmm::device_uvector<T>, rmm::device_uvector<T>> genData(const int nRows)
{
rmm::device_uvector<T> X(nRows * params.nCols, stream);
// NOTE(review): y is sized nRows * nClasses, but downstream it is split as a
// single column (splitData(yBuf, ..., 1)) — presumably only nRows entries
// are meaningful; confirm the extra space is intentional.
rmm::device_uvector<T> y(nRows * params.nClasses, stream);
if (params.nClasses == 1) // regression
{
// At least 5 informative columns (capped by nCols), or a third of them.
int nInformative = max(params.nCols / 3, min(params.nCols, 5));
rmm::device_uvector<T> Xt(nRows * params.nCols, stream);
ML::Datasets::make_regression(handle,
Xt.data(),
y.data(),
nRows,
params.nCols,
nInformative,
nullptr,
1,
params.bias,
-1,
T(0),
T(params.errStd),
true,
params.seed);
// make_regression emits row-major data; transpose into the column-major X.
raft::linalg::transpose(handle, Xt.data(), X.data(), params.nCols, nRows, stream);
} else // classification
{
rmm::device_uvector<int> labels(nRows * params.nClasses, stream);
raft::random::Rng r(params.seed);
rmm::device_uvector<T> centers(params.nCols * params.nClasses, stream);
r.uniform(centers.data(), params.nCols * params.nClasses, T(0), T(1), stream);
// override manually some of the cluster coordinates to ensure
// the distance between any of them is large enough.
// d distinct values per coordinate over modCols coordinates is enough to
// give every class a unique, well-separated code (a base-d encoding of i).
int d = max(2, int(::ceil(::pow(double(params.nClasses), 1.0 / double(params.nCols)))));
int modCols = int(::ceil(std::log2(double(params.nClasses)) / std::log2(double(d))));
for (int i = 0; i < params.nClasses; i++) {
int r = i;
for (int j = 0; j < modCols; j++) {
T value = T((r % d) * params.nClasses) + T(params.bias);
centers.set_element_async(j * params.nClasses + i, value, stream);
r /= d;
}
}
ML::Datasets::make_blobs(handle,
X.data(),
labels.data(),
nRows,
params.nCols,
params.nClasses,
false,
centers.data(),
nullptr,
T(params.errStd),
true,
0,
0,
params.seed);
// Convert integer labels to the floating-point type used everywhere else.
raft::linalg::unaryOp(
y.data(), labels.data(), labels.size(), [] __device__(int x) { return T(x); }, stream);
}
return std::make_tuple(std::move(X), std::move(y));
}
/**
 * Split a column-major matrix in two along the rows.
 *
 * Each 2D async copy treats a column as one "row" of the transfer: the source
 * pitch is sizeof(T) * nRows (one full column), the copied width is the byte
 * size of the kept row-chunk, and the height is nCols. x1 receives the first
 * takeNRows rows of every column, x2 the remaining rows.
 *
 * @param x         source matrix (nRows x nCols, column-major); not consumed.
 * @param takeNRows number of leading rows that go into the first output.
 * @param nCols     number of columns of x.
 * @return tuple (x1: takeNRows x nCols, x2: (nRows - takeNRows) x nCols).
 */
std::tuple<rmm::device_uvector<T>, rmm::device_uvector<T>> splitData(rmm::device_uvector<T>& x,
const int takeNRows,
const int nCols)
{
const int nRows = x.size() / nCols;
const int dropNRows = nRows - takeNRows;
rmm::device_uvector<T> x1(takeNRows * nCols, stream);
rmm::device_uvector<T> x2(dropNRows * nCols, stream);
RAFT_CUDA_TRY(hipMemcpy2DAsync(x1.data(),
sizeof(T) * takeNRows,
x.data(),
sizeof(T) * nRows,
sizeof(T) * takeNRows,
nCols,
hipMemcpyDeviceToDevice,
stream));
// Second copy starts takeNRows elements into each column.
RAFT_CUDA_TRY(hipMemcpy2DAsync(x2.data(),
sizeof(T) * dropNRows,
x.data() + takeNRows,
sizeof(T) * nRows,
sizeof(T) * dropNRows,
nCols,
hipMemcpyDeviceToDevice,
stream));
return std::make_tuple(std::move(x1), std::move(x2));
}
};
#define TEST_SVM(fun, TestClass, ElemType) \
typedef LinearSVMTest<ElemType, TestClass> TestClass##_##ElemType; \
TEST_P(TestClass##_##ElemType, fun) \
{ \
if (!isInputValid()) GTEST_SKIP(); \
ASSERT_TRUE(fun()); \
} \
INSTANTIATE_TEST_SUITE_P(LinearSVM, TestClass##_##ElemType, TestClass##Params)
auto TestClasTargetsParams =
::testing::Combine(::testing::Values(LinearSVMParams::HINGE, LinearSVMParams::SQUARED_HINGE),
::testing::Values(LinearSVMParams::L1, LinearSVMParams::L2),
::testing::Values(2, 3, 8),
::testing::Values(1, 50));
struct TestClasTargets {
typedef std::tuple<LinearSVMParams::Loss, LinearSVMParams::Penalty, int, int> Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.penalty = std::get<1>(ps);
mp.loss = std::get<0>(ps);
return {/* .nRowsTrain */ 100,
/* .nRowsTest */ 100,
/* .nCols */ std::get<3>(ps),
/* .nClasses */ std::get<2>(ps),
/* .errStd */ 0.4,
/* .bias */ 0.0,
/* .tolerance */ 0.05,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
auto TestClasBiasParams = ::testing::Combine(::testing::Bool(),
::testing::Bool(),
::testing::Values(2, 3),
::testing::Values(10, 50),
::testing::Values(0.0, -10.0));
struct TestClasBias {
typedef std::tuple<bool, bool, int, int, double> Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.fit_intercept = std::get<0>(ps);
mp.penalized_intercept = std::get<1>(ps);
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 100,
/* .nCols */ std::get<3>(ps),
/* .nClasses */ std::get<2>(ps),
/* .errStd */ 0.2,
/* .bias */ std::get<4>(ps),
/* .tolerance */ 0.05,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
auto TestClasManyClassesParams = ::testing::Values(2, 3, 16, 31, 32, 33, 67);
struct TestClasManyClasses {
typedef int Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 1000,
/* .nCols */ 200,
/* .nClasses */ ps,
/* .errStd */ 1.0,
/* .bias */ 0,
/* .tolerance */ 0.01,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
auto TestClasProbsSumParams = ::testing::Values(2, 3, 16, 31, 32, 33, 67);
struct TestClasProbsSum {
typedef int Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.probability = true;
mp.max_iter = 100;
return {/* .nRowsTrain */ 100,
/* .nRowsTest */ 100,
/* .nCols */ 80,
/* .nClasses */ ps,
/* .errStd */ 1.0,
/* .bias */ 0,
/* .tolerance */ 1e-5,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
auto TestClasProbsParams = ::testing::Values(2, 3, 16, 31, 32, 33, 67);
struct TestClasProbs {
typedef int Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.probability = true;
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 1000,
/* .nCols */ 200,
/* .nClasses */ ps,
/* .errStd */ 0.9,
/* .bias */ 0,
/* .tolerance */ 0.01,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
auto TestRegTargetsParams =
::testing::Combine(::testing::Values(LinearSVMParams::EPSILON_INSENSITIVE,
LinearSVMParams::SQUARED_EPSILON_INSENSITIVE),
::testing::Values(LinearSVMParams::L1, LinearSVMParams::L2),
::testing::Bool(),
::testing::Values(1, 50),
::testing::Values(0.0, -10.0),
::testing::Values(0.0, 0.01));
struct TestRegTargets {
typedef std::tuple<LinearSVMParams::Loss, LinearSVMParams::Penalty, bool, int, double, double>
Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.loss = std::get<0>(ps);
mp.penalty = std::get<1>(ps);
mp.fit_intercept = std::get<2>(ps);
// The regularization parameter strongly affects the model performance in some cases,
// a larger-than-default value of C seems to always yield better scores on this generated
// dataset.
mp.C = 100.0;
mp.epsilon = std::get<5>(ps);
mp.verbose = 2;
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 100,
/* .nCols */ std::get<3>(ps),
/* .nClasses */ 1,
/* .errStd */ 0.02,
/* .bias */ std::get<4>(ps),
/* .tolerance */ 0.05,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
TEST_SVM(errorRate, TestClasTargets, float);
TEST_SVM(errorRate, TestClasTargets, double);
TEST_SVM(errorRate, TestClasBias, float);
TEST_SVM(errorRate, TestClasManyClasses, float);
TEST_SVM(errorRate, TestClasManyClasses, double);
TEST_SVM(errorRate, TestRegTargets, float);
TEST_SVM(errorRate, TestRegTargets, double);
TEST_SVM(probabilitySumsToOne, TestClasProbsSum, float);
TEST_SVM(probabilitySumsToOne, TestClasProbsSum, double);
TEST_SVM(probabilityErrorRate, TestClasProbs, float);
TEST_SVM(probabilityErrorRate, TestClasProbs, double);
} // namespace SVM
} // namespace ML
| 287497e9b2b46f13e84af1da038a3fb5987446ab.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/datasets/make_regression.hpp>
#include <cuml/svm/linear.hpp>
#include <raft/core/handle.hpp>
#include <gtest/gtest.h>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <test_utils.h>
namespace ML {
namespace SVM {
/** Parameter bundle shared by all LinearSVM test fixtures. */
struct LinearSVMTestParams {
int nRowsTrain;  // number of training samples to generate
int nRowsTest;   // number of held-out samples to evaluate on
int nCols;       // number of features
/** nClasses == 1 implies regression. */
int nClasses;
/** Standard deviation of clusters or noise. */
double errStd;
double bias;        // additive bias injected into the generated data
double tolerance;   // max acceptable error for the test to pass
uint64_t seed;      // RNG seed for reproducible data generation
LinearSVMParams modelParams;  // model hyper-parameters under test
};
/**
 * Parameterized googletest fixture for LinearSVMModel.
 *
 * T is the element type (float/double); ParamsReader translates the gtest
 * parameter tuple into a LinearSVMTestParams. All data lives on device; the
 * handle owns a stream pool of 8 streams.
 */
template <typename T, typename ParamsReader>
struct LinearSVMTest : public ::testing::TestWithParam<typename ParamsReader::Params> {
const LinearSVMTestParams params;
const raft::handle_t handle;
cudaStream_t stream;
LinearSVMTest()
: testing::TestWithParam<typename ParamsReader::Params>(),
params(
ParamsReader::read(::testing::TestWithParam<typename ParamsReader::Params>::GetParam())),
handle(rmm::cuda_stream_per_thread, std::make_shared<rmm::cuda_stream_pool>(8)),
stream(handle.get_stream())
{
}
/** Whether the current parameter combination is solvable; tests skip otherwise. */
bool isInputValid() const
{
/* A regression fit cannot model a nonzero bias without an intercept. */
if (params.nClasses == 1 && params.bias != 0 && !params.modelParams.fit_intercept) return false;
/* This means we don't have enough dimensions to linearly separate every cluster
from the rest.
In such case, the error is always huge (fitting is impossible).
*/
if (params.nClasses > 1 && params.nClasses > (1 << min(30, params.nCols))) return false;
return true;
}
/**
 * Fit on the train split, predict on the test split, and compare the error
 * against params.tolerance: misclassification rate for classification, a
 * normalized squared deviation for regression.
 */
testing::AssertionResult errorRate()
{
auto [XBuf, yBuf] = genData(params.nRowsTrain + params.nRowsTest);
auto [XTrain, XTest] = splitData(XBuf, params.nRowsTrain, params.nCols);
auto [yTrain, yTest] = splitData(yBuf, params.nRowsTrain, 1);
auto model = LinearSVMModel<T>::fit(handle,
params.modelParams,
XTrain.data(),
params.nRowsTrain,
params.nCols,
yTrain.data(),
(const T*)nullptr);
rmm::device_uvector<T> yOut(yTest.size(), stream);
LinearSVMModel<T>::predict(
handle, params.modelParams, model, XTest.data(), params.nRowsTest, params.nCols, yOut.data());
rmm::device_scalar<T> errorBuf(stream);
if (params.nClasses == 1) // regression
// Per-sample error: (yRef - yOut)^2 / (yRef^2 + yOut^2), a scale-free
// deviation measure summed over the test set.
raft::linalg::mapThenSumReduce(
errorBuf.data(),
params.nRowsTest,
[] __device__(const T yRef, const T yOut) {
T s = yRef * yRef + yOut * yOut;
T d = yRef - yOut;
return d * d / s;
},
stream,
yTest.data(),
yOut.data());
else // classification
raft::linalg::mapThenSumReduce(
errorBuf.data(),
params.nRowsTest,
[] __device__(const T yRef, const T yOut) { return T(yRef != yOut); },
stream,
yTest.data(),
yOut.data());
// getting the error value forces the stream synchronization
T error = errorBuf.value(stream) / T(params.nRowsTest);
LinearSVMModel<T>::free(handle, model);
if (error <= params.tolerance)
return testing::AssertionSuccess();
else
return testing::AssertionFailure()
<< "Error rate = " << error << " > tolerance = " << params.tolerance;
}
/**
 * Check that predictProba output rows are valid distributions: the maximum
 * over rows of |1 - sum(probabilities)| must not exceed params.tolerance.
 */
testing::AssertionResult probabilitySumsToOne()
{
if (!params.modelParams.probability)
return testing::AssertionFailure() << "Non-probabolistic model does not support this test.";
if (params.nClasses < 2)
return testing::AssertionFailure() << "Regression model does not support this test.";
auto [XBuf, yBuf] = genData(params.nRowsTrain + params.nRowsTest);
auto [XTrain, XTest] = splitData(XBuf, params.nRowsTrain, params.nCols);
auto [yTrain, yTest] = splitData(yBuf, params.nRowsTrain, 1);
auto model = LinearSVMModel<T>::fit(handle,
params.modelParams,
XTrain.data(),
params.nRowsTrain,
params.nCols,
yTrain.data(),
(const T*)nullptr);
rmm::device_scalar<T> errorBuf(stream);
rmm::device_uvector<T> yProbs(yTest.size() * params.nClasses, stream);
LinearSVMModel<T>::predictProba(handle,
params.modelParams,
model,
XTest.data(),
params.nRowsTest,
params.nCols,
false,
yProbs.data());
rmm::device_uvector<T> yOut(yTest.size(), stream);
// Sum probabilities across the class dimension for every test row.
raft::linalg::reduce<T, T, int>(
yOut.data(), yProbs.data(), params.nClasses, params.nRowsTest, 0, true, true, stream);
// Max over rows of |1 - rowSum|.
raft::linalg::mapThenReduce(
errorBuf.data(),
params.nRowsTest,
T(0),
[] __device__(const T yOut) { return raft::myAbs<T>(1.0 - yOut); },
cub::Max(),
stream,
yOut.data());
T error = errorBuf.value(stream);
LinearSVMModel<T>::free(handle, model);
if (error <= params.tolerance)
return testing::AssertionSuccess();
else
return testing::AssertionFailure()
<< "Sum of probabilities deviated from zero (error = " << error << ")";
}
/**
 * Accuracy check through the probabilistic interface: recover the argmax
 * class from predictProba output and compare the misclassification rate
 * against params.tolerance.
 */
testing::AssertionResult probabilityErrorRate()
{
if (!params.modelParams.probability)
return testing::AssertionFailure() << "Non-probabolistic model does not support this test.";
if (params.nClasses < 2)
return testing::AssertionFailure() << "Regression model does not support this test.";
auto [XBuf, yBuf] = genData(params.nRowsTrain + params.nRowsTest);
auto [XTrain, XTest] = splitData(XBuf, params.nRowsTrain, params.nCols);
auto [yTrain, yTest] = splitData(yBuf, params.nRowsTrain, 1);
auto model = LinearSVMModel<T>::fit(handle,
params.modelParams,
XTrain.data(),
params.nRowsTrain,
params.nCols,
yTrain.data(),
(const T*)nullptr);
rmm::device_scalar<T> errorBuf(stream);
rmm::device_uvector<T> yProbs(yTest.size() * params.nClasses, stream);
rmm::device_uvector<T> yOut(yTest.size(), stream);
LinearSVMModel<T>::predictProba(handle,
params.modelParams,
model,
XTest.data(),
params.nRowsTest,
params.nCols,
false,
yProbs.data());
// Argmax over classes via a single reduce: the map lambda encodes
// (class i, probability p) as 2*i + p + 0.5; the even integer part carries
// the class, fmod(., 2) carries p + 0.5, and the reduce lambda keeps the
// encoding with the larger fractional part (the most probable class).
raft::linalg::reduce<T, T, int>(
yOut.data(),
yProbs.data(),
params.nClasses,
params.nRowsTest,
0,
true,
true,
stream,
false,
[] __device__(const T p, const int i) { return T(i * 2) + p + 0.5; },
[] __device__(const T a, const T b) { return fmod(a, 2) >= fmod(b, 2) ? a : b; });
// When the decoded class equals yRef, yOut - 2*yRef lies strictly inside
// (0, 2); anything outside is counted as a misprediction.
raft::linalg::mapThenSumReduce(
errorBuf.data(),
params.nRowsTest,
[] __device__(const T yRef, const T yOut) {
T p = yOut - 2 * yRef;
return T(p <= 0 || p >= 2);
},
stream,
yTest.data(),
yOut.data());
// getting the error value forces the stream synchronization
T error = errorBuf.value(stream) / T(params.nRowsTest);
LinearSVMModel<T>::free(handle, model);
if (error <= params.tolerance)
return testing::AssertionSuccess();
else
return testing::AssertionFailure()
<< "Error rate = " << error << " > tolerance = " << params.tolerance;
}
/** Generate a required amount of (X, y) data at once (column-major X). */
std::tuple<rmm::device_uvector<T>, rmm::device_uvector<T>> genData(const int nRows)
{
rmm::device_uvector<T> X(nRows * params.nCols, stream);
rmm::device_uvector<T> y(nRows * params.nClasses, stream);
if (params.nClasses == 1) // regression
{
int nInformative = max(params.nCols / 3, min(params.nCols, 5));
rmm::device_uvector<T> Xt(nRows * params.nCols, stream);
ML::Datasets::make_regression(handle,
Xt.data(),
y.data(),
nRows,
params.nCols,
nInformative,
nullptr,
1,
params.bias,
-1,
T(0),
T(params.errStd),
true,
params.seed);
// make_regression emits row-major data; transpose into the column-major X.
raft::linalg::transpose(handle, Xt.data(), X.data(), params.nCols, nRows, stream);
} else // classification
{
rmm::device_uvector<int> labels(nRows * params.nClasses, stream);
raft::random::Rng r(params.seed);
rmm::device_uvector<T> centers(params.nCols * params.nClasses, stream);
r.uniform(centers.data(), params.nCols * params.nClasses, T(0), T(1), stream);
// override manually some of the cluster coordinates to ensure
// the distance between any of them is large enough.
// (a base-d code of the class index spread across modCols coordinates)
int d = max(2, int(std::ceil(std::pow(double(params.nClasses), 1.0 / double(params.nCols)))));
int modCols = int(std::ceil(std::log2(double(params.nClasses)) / std::log2(double(d))));
for (int i = 0; i < params.nClasses; i++) {
int r = i;
for (int j = 0; j < modCols; j++) {
T value = T((r % d) * params.nClasses) + T(params.bias);
centers.set_element_async(j * params.nClasses + i, value, stream);
r /= d;
}
}
ML::Datasets::make_blobs(handle,
X.data(),
labels.data(),
nRows,
params.nCols,
params.nClasses,
false,
centers.data(),
nullptr,
T(params.errStd),
true,
0,
0,
params.seed);
// Convert integer labels to the floating-point target type.
raft::linalg::unaryOp(
y.data(), labels.data(), labels.size(), [] __device__(int x) { return T(x); }, stream);
}
return std::make_tuple(std::move(X), std::move(y));
}
/** Split a column-major matrix in two along the rows.
 * Each 2D copy treats a column as one transfer row: source pitch is one full
 * column (sizeof(T) * nRows); x1 gets the first takeNRows rows per column,
 * x2 the rest. */
std::tuple<rmm::device_uvector<T>, rmm::device_uvector<T>> splitData(rmm::device_uvector<T>& x,
const int takeNRows,
const int nCols)
{
const int nRows = x.size() / nCols;
const int dropNRows = nRows - takeNRows;
rmm::device_uvector<T> x1(takeNRows * nCols, stream);
rmm::device_uvector<T> x2(dropNRows * nCols, stream);
RAFT_CUDA_TRY(cudaMemcpy2DAsync(x1.data(),
sizeof(T) * takeNRows,
x.data(),
sizeof(T) * nRows,
sizeof(T) * takeNRows,
nCols,
cudaMemcpyDeviceToDevice,
stream));
RAFT_CUDA_TRY(cudaMemcpy2DAsync(x2.data(),
sizeof(T) * dropNRows,
x.data() + takeNRows,
sizeof(T) * nRows,
sizeof(T) * dropNRows,
nCols,
cudaMemcpyDeviceToDevice,
stream));
return std::make_tuple(std::move(x1), std::move(x2));
}
};
// Registers a parameterized test suite: defines the fixture
// TestClass_ElemType from LinearSVMTest<ElemType, TestClass>, runs the member
// test `fun`, and skips parameter combinations rejected by isInputValid().
#define TEST_SVM(fun, TestClass, ElemType)                             \
  typedef LinearSVMTest<ElemType, TestClass> TestClass##_##ElemType;   \
  TEST_P(TestClass##_##ElemType, fun)                                  \
  {                                                                    \
    if (!isInputValid()) GTEST_SKIP();                                 \
    ASSERT_TRUE(fun());                                                \
  }                                                                    \
  INSTANTIATE_TEST_SUITE_P(LinearSVM, TestClass##_##ElemType, TestClass##Params)
// Classification over (loss, penalty, nClasses, nCols) combinations.
auto TestClasTargetsParams =
::testing::Combine(::testing::Values(LinearSVMParams::HINGE, LinearSVMParams::SQUARED_HINGE),
::testing::Values(LinearSVMParams::L1, LinearSVMParams::L2),
::testing::Values(2, 3, 8),
::testing::Values(1, 50));
/** Reads the tuple above into the common test-parameter struct. */
struct TestClasTargets {
typedef std::tuple<LinearSVMParams::Loss, LinearSVMParams::Penalty, int, int> Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.penalty = std::get<1>(ps);
mp.loss = std::get<0>(ps);
return {/* .nRowsTrain */ 100,
/* .nRowsTest */ 100,
/* .nCols */ std::get<3>(ps),
/* .nClasses */ std::get<2>(ps),
/* .errStd */ 0.4,
/* .bias */ 0.0,
/* .tolerance */ 0.05,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
// Classification focusing on intercept handling:
// (fit_intercept, penalized_intercept, nClasses, nCols, data bias).
auto TestClasBiasParams = ::testing::Combine(::testing::Bool(),
::testing::Bool(),
::testing::Values(2, 3),
::testing::Values(10, 50),
::testing::Values(0.0, -10.0));
/** Reads the tuple above into the common test-parameter struct. */
struct TestClasBias {
typedef std::tuple<bool, bool, int, int, double> Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.fit_intercept = std::get<0>(ps);
mp.penalized_intercept = std::get<1>(ps);
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 100,
/* .nCols */ std::get<3>(ps),
/* .nClasses */ std::get<2>(ps),
/* .errStd */ 0.2,
/* .bias */ std::get<4>(ps),
/* .tolerance */ 0.05,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
// Classification with default hyper-parameters across many class counts
// (values straddle the warp size, 32, on purpose).
auto TestClasManyClassesParams = ::testing::Values(2, 3, 16, 31, 32, 33, 67);
/** Single-int parameter: the number of classes. */
struct TestClasManyClasses {
typedef int Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 1000,
/* .nCols */ 200,
/* .nClasses */ ps,
/* .errStd */ 1.0,
/* .bias */ 0,
/* .tolerance */ 0.01,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
// Probabilistic models checked for normalized probability outputs
// (used with probabilitySumsToOne; tight tolerance, small dataset).
auto TestClasProbsSumParams = ::testing::Values(2, 3, 16, 31, 32, 33, 67);
/** Single-int parameter: the number of classes. */
struct TestClasProbsSum {
typedef int Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.probability = true;
mp.max_iter = 100;  // keep fitting cheap; only normalization is tested
return {/* .nRowsTrain */ 100,
/* .nRowsTest */ 100,
/* .nCols */ 80,
/* .nClasses */ ps,
/* .errStd */ 1.0,
/* .bias */ 0,
/* .tolerance */ 1e-5,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
// Probabilistic models checked for predictive accuracy
// (used with probabilityErrorRate).
auto TestClasProbsParams = ::testing::Values(2, 3, 16, 31, 32, 33, 67);
/** Single-int parameter: the number of classes. */
struct TestClasProbs {
typedef int Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.probability = true;
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 1000,
/* .nCols */ 200,
/* .nClasses */ ps,
/* .errStd */ 0.9,
/* .bias */ 0,
/* .tolerance */ 0.01,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
// Regression over (loss, penalty, fit_intercept, nCols, bias, epsilon).
auto TestRegTargetsParams =
::testing::Combine(::testing::Values(LinearSVMParams::EPSILON_INSENSITIVE,
LinearSVMParams::SQUARED_EPSILON_INSENSITIVE),
::testing::Values(LinearSVMParams::L1, LinearSVMParams::L2),
::testing::Bool(),
::testing::Values(1, 50),
::testing::Values(0.0, -10.0),
::testing::Values(0.0, 0.01));
/** Reads the tuple above into the common test-parameter struct. */
struct TestRegTargets {
typedef std::tuple<LinearSVMParams::Loss, LinearSVMParams::Penalty, bool, int, double, double>
Params;
static LinearSVMTestParams read(Params ps)
{
LinearSVMParams mp;
mp.loss = std::get<0>(ps);
mp.penalty = std::get<1>(ps);
mp.fit_intercept = std::get<2>(ps);
// The regularization parameter strongly affects the model performance in some cases,
// a larger-than-default value of C seems to always yield better scores on this generated
// dataset.
mp.C = 100.0;
mp.epsilon = std::get<5>(ps);
// NOTE(review): verbose = 2 enables solver logging in tests — looks like a
// debugging leftover; confirm it is intended for CI output.
mp.verbose = 2;
return {/* .nRowsTrain */ 1000,
/* .nRowsTest */ 100,
/* .nCols */ std::get<3>(ps),
/* .nClasses */ 1,
/* .errStd */ 0.02,
/* .bias */ std::get<4>(ps),
/* .tolerance */ 0.05,
/* .seed */ 42ULL,
/* .modelParams */ mp};
}
};
// Test matrix: each line instantiates one (test, parameter-set, dtype) suite.
TEST_SVM(errorRate, TestClasTargets, float);
TEST_SVM(errorRate, TestClasTargets, double);
TEST_SVM(errorRate, TestClasBias, float);
TEST_SVM(errorRate, TestClasManyClasses, float);
TEST_SVM(errorRate, TestClasManyClasses, double);
TEST_SVM(errorRate, TestRegTargets, float);
TEST_SVM(errorRate, TestRegTargets, double);
TEST_SVM(probabilitySumsToOne, TestClasProbsSum, float);
TEST_SVM(probabilitySumsToOne, TestClasProbsSum, double);
TEST_SVM(probabilityErrorRate, TestClasProbs, float);
TEST_SVM(probabilityErrorRate, TestClasProbs, double);
} // namespace SVM
} // namespace ML
|
68e57b87469478abd7a33521e587bb9e7fc945a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// Node colors of the red-black tree. RED = 0, BLACK = 1.
enum nodeColor {
RED,
BLACK
};
// Outcome of an insertion attempt: Failure means the caller should retry
// (flag contention) or the key already exists; FirstInsert marks the very
// first node placed into an empty tree.
enum result {
Failure,
Success,
FirstInsert
};
// Which rebalancing case Update_Rotation performed; tells Insert_Rebalance
// how to advance and which flags to release.
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
// One tree node. `flag` is used as a per-node lock word (0 = free, 1 = held)
// manipulated with atomicCAS; `color` holds a nodeColor value.
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
/* Function prototypes (all device-side; Left_Rotate/Right_Rotate defined later). */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ struct par_rbNode *nodes;      // preallocated node pool; slot 0 is NIL
__device__ struct par_rbNode *root;       // current tree root (NIL when empty)
__device__ struct par_rbNode *NIL;        // shared sentinel leaf
__device__ struct par_rbNode *rtParent;   // pseudo-parent above the root
__device__ struct par_rbNode *rtSibling;  // pseudo-sibling of the root; stands in for the root's "uncle" during rebalancing
__device__ int nodeIndex = 0;             // next free slot in `nodes` (guarded by createFlag)
__device__ int tmpIndex = 2;              // next free slot in `tmp` (guarded by createFlag)
__device__ struct par_rbNode *tmp[M];// per-creation return slots; capacity M
__device__ int createFlag = false;        // global spin-lock word for createNode (0 = free)
__device__ void createNIL(){
// Initialise the shared sentinel leaf: pool slot 0, black, key -1, and
// fully self-referential so walks that step off the tree stay on NIL.
NIL = &nodes[0];
NIL->key = -1;
NIL->color = BLACK;
NIL->parent = NIL;
NIL->left = NIL;
NIL->right = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
// Pop one node from the preallocated `nodes` pool under the global
// spin-lock `createFlag`, initialise it (children/parent = NIL, flag held
// by the calling thread), record it in tmp[], and return it.
//
// FIX: atomicCAS returns the *old* value of the lock word, so acquisition
// succeeds exactly when it returns false. The previous loop
// `do { ok = atomicCAS(&createFlag,false,true); } while(!ok);` exited when
// the CAS *failed* (old value == true), letting a contending thread enter
// the critical section without owning the lock.
while (atomicCAS(&createFlag, false, true) != false) { }
// atomicAdd returns the previous counter; +1 is this thread's slot.
// Pool slot 0 is reserved for NIL (see createNIL).
int node = atomicAdd(&nodeIndex, 1) + 1;
int slot = atomicAdd(&tmpIndex, 1) + 1;
// NOTE(review): tmp[] holds only M (=20) entries and tmpIndex is never
// reset, so more than M-3 creations overflow tmp[] — confirm M is sized
// for the intended workload.
nodes[node].key = key;
nodes[node].flag = true;
nodes[node].left = nodes[node].right = nodes[node].parent = NIL;
tmp[slot] = &nodes[node];
// Capture the result before releasing the lock so the returned pointer is
// stable even if this thread is preempted afterwards.
struct par_rbNode *created = tmp[slot];
__threadfence(); // publish the initialised fields before the unlock is visible
createFlag = false; // release the lock
printf("Created %d\n",key);
return created;
}
// Build the empty tree skeleton: a pseudo-parent (rtParent) above the root
// and a pseudo-sibling (rtSibling), both black with released flags, so that
// rebalancing near the root never needs special-casing a missing parent or
// uncle. Must run after createNIL().
__device__ void createTree(){
rtParent = createNode(-1);
rtSibling = createNode(-1);
// NIL = createNode(-1);
root = NIL;
rtParent->parent = NIL;
rtSibling->parent = rtParent;
rtSibling->right = NIL;
rtSibling->left = NIL;
rtParent->left = root;
// Only the left child of rtParent holds the root: the insertion/rebalance
// code tests the "parent is a left child" case first, so the root must hang
// off rtParent->left while rtParent->right is the pseudo-sibling.
rtParent->right = rtSibling;
rtParent->flag = false;
rtSibling->flag = false;
rtParent->color = BLACK;
rtSibling->color = BLACK;
// NIL->left = NIL;
// NIL->right = NIL;
NIL->parent = rtParent;
NIL->flag = false;
// NIL->color = BLACK;
printf("Tree Created \n");
printf("\n");
}
// Walk down from the root looking for the insertion point of `key`, using
// hand-over-hand locking on the per-node `flag` words.
// Returns: the would-be parent (with its flag held and newNode->parent set)
// when a NIL slot is reached; NIL when the tree is empty (NIL's flag held);
// NULL when the key already exists or a flag could not be taken (caller
// should back off and retry).
//
// NOTE(review): atomicCAS returns the OLD value of the flag, so acquisition
// succeeds when it returns false. Every check below (`while(!success)`,
// `if(!ok)`) treats a false return as failure — i.e. the conditions appear
// inverted, letting a thread proceed when the flag was already held by
// someone else. Verify against the intended lock protocol.
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(!success);
//success => captured the root flag
//savert != root => root has changed
//!success => root is under lock
//thread will come out of the loop only after "success" and "savert==root"
x = root;
if(x != NIL){
while(x != NIL){
// NOTE(review): y is read from x->left->parent, which for leaf-adjacent
// nodes is NIL->parent — a field mutated by every traversal below.
// The intent is unclear; confirm against the original algorithm.
struct par_rbNode *y = x->left->parent;
printf("Inside while\n");
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
printf("New Key\n");
if(key < x->key){
// Descend left, taking the child's flag before releasing x's.
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(!ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->left->parent = y->left;
x->flag = false;
x = x->left;
}else{
// Left slot is NIL: this x is the insertion point.
x->left->parent = y->left;
x = x->left;
if(x == NIL){
newNode->parent = x->parent;
return x->parent;
}
}//end if
}else{
// Mirror image: descend right.
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(!ok){
x->flag = false;
return NULL;
}//end if
x->right->parent = y->right;
x->flag = false;
x = x->right;
}else{
x->right->parent = y->right;
x = x->right;
if(x == NIL){
newNode->parent = x->parent;
return x->parent;
}
}//end if
}//end if
}//end while
// return x->parent;
}else{
// Empty tree: caller inserts at the root (NIL's flag is still held).
return NIL;
}
}
// Link a freshly created node (flag held) under the insertion point found by
// Traverse (also flag held, stored in newNode->parent). Before linking, the
// grandparent's and uncle's flags are acquired so Insert_Rebalance can run;
// on contention the function backs off and returns Failure.
// Returns FirstInsert for an empty tree, Success on a normal link, Failure
// when the extra flags could not be captured.
//
// NOTE(review): as in Traverse, atomicCAS returns the OLD flag value, so the
// `if(ok)`/`if(!ok)` branches below treat a failed acquisition (old == true)
// as success — the author's own comment at the first CAS hints at this.
// Verify the lock protocol before trusting concurrent runs.
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release NIL node, that u caught during Traverse
newNode->flag = false;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
struct par_rbNode *insPoint = newNode->parent;
printf("Insert Key %d\n",insPoint->key);
if(insPoint == insPoint->parent->left){ //uncle is right child
// savep = insPoint->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when insPoint is root
// NOTE(review): atomicCAS returns the old value — `ok` here is true when
// the flag was ALREADY held, not when we acquired it.
ok = atomicCAS(&insPoint->parent->flag,false,true);
printf("OK -- %d\n",ok);
if(ok){
ok = atomicCAS(&insPoint->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&insPoint->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(!ok){ //back off
insPoint->parent->flag = false;
insPoint->parent->right->flag = false;
}else{
insPoint->parent->flag = false;
}//end if
}
}else{// uncle is left child
// savep = insPoint->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&insPoint->parent->flag,false,true);
if(ok){
ok = atomicCAS(&insPoint->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&insPoint->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(!ok){ //back off
insPoint->parent->flag = false;
insPoint->parent->left->flag = false;
}else{
insPoint->parent->flag = false;
}//end if
}
}//end if
if(!ok){
// This "!ok" is when u fail to capture the grandparent flag,
// u haven't caught any extra flags so just get rid of the flag of insPoint
insPoint->flag = false; // release flag
insPoint = NIL;
return Failure; //avoid deadlock
}
// When u have successfully captured all the required flags.
// i.e. parent, grandparent, uncle
if(newNode->key < insPoint->key){
//insert as left child
insPoint->left = newNode;
return Success;
}else{//insert as right child
insPoint->right = newNode;
return Success;
}
}
}
// Restore the red-black invariants after an insertion. On entry the caller
// holds flags on x, p(x), p(p(x)) and uncle(x); the loop repeatedly calls
// Update_Rotation (which does the recolor/rotate cases) and, after a CASE 1
// recolor, climbs to the grandparent, re-acquiring the flag window there.
// On exit all held flags are released and the root is forced black.
//
// NOTE(review): the atomicCAS success checks below share the inverted
// old-value interpretation flagged in Traverse/PlaceNode — `if(ok)` proceeds
// when the flag was already held. Verify before trusting concurrent runs.
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //did Update_Rotation succeed?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotation as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags on p(x), p(p(x)), uncle(x)
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(!ok){
// failed to take the uncle: drop everything and retry
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// mirror image: uncle is the left child
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(!ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(!ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 the loop will exit: the parent is black after the rotation
}
// Release whatever window of flags is still held, depending on the last case.
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
root->color = BLACK;
}
// Perform one red-black fixup step for x (flags on x, p(x), p(p(x)) and
// uncle(x) are held by the caller):
//   CASE 1 - red uncle: recolor parent/uncle/grandparent, report DID_CASE1.
//   CASE 2 - x is an "inner" child: rotate at the parent to reduce to CASE 3.
//   CASE 3 - rotate at the grandparent (needs the great-grandparent's flag)
//            and recolor, report DID_CASE3.
// Returns true when the step completed; false when a rotation failed, in
// which case Insert_Rebalance calls this function again.
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
//we hold flags on x, p(x), p(p(x)) and uncle(x)
struct par_rbNode *xUncle;
struct par_rbNode *oldx; //*ggp; // ggp -> great-grandparent
bool ok;
if(x->parent == x->parent->parent->left){
//the parent is a left child
xUncle = x->parent->parent->right;
if(xUncle->color == RED){
//CASE 1 - recoloring
// All needed flags are already held, so this mirrors the serial code.
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true; // This true is for "updateSucceeds"
}else{ // rotation(s) will be needed
if(x == x->parent->right){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Left_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false; //This false is for "updateSucceeds"
}
}
//In CASE 3, if the right-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE 3
do{ // get great-grandparent's flag
// ggp = x->parent->parent->parent;
// NOTE(review): same inverted atomicCAS old-value check as elsewhere.
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(!ok); //KEEPS TRYING, DOESN'T BACK OFF
ok = Right_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false; //This false is for "updateSucceeds"
}else{
x->parent->color = BLACK;
x->parent->right->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
return true;
}
}
//symmetric to above code
}else{
//the parent is a right child
xUncle = x->parent->parent->left;
if(xUncle->color == RED){
//CASE 1 - recoloring
// All needed flags are already held, so this mirrors the serial code.
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true;
}else{ // rotation(s) will be needed
if(x == x->parent->left){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Right_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false;
}
}
//In CASE 3, if the left-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE 3
do{ // get great-grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(!ok);
ok = Left_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false;
}else{
x->parent->color = BLACK;
x->parent->left->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false;
return true;
}
}
}
}
//A rotation will always be successful(true), as u can reach the rotate command
//only after u have cptured all the requried flags
__device__ bool Left_Rotate(struct par_rbNode *z){
    // Left-rotates the subtree rooted at z: z's right child (zr) takes z's
    // place, z becomes zr's left child, and zr's old left subtree (zrl)
    // becomes z's right subtree.
    // Caller holds the flags of z, z->parent and z->right.
    struct par_rbNode *zrl,*zr;
    if(z->parent == rtParent){
        // rotating at the root
        zrl = z->right->left;
        zr = z->right;
        // if a process has set the flag of a node q,
        // no other process can move one of the children of q away from q
        zrl->parent = z;
        z->right = zrl;
        // update the remaining links (zr becomes the new root)
        root = zr;
        rtParent->left = root;
        root->parent = rtParent;
        z->parent = root;
        root->left = z;
    }else{
        // rotating under the root (parent exists)
        // BUGFIX: the original branches wrote through z->right AFTER it had
        // been redirected to zrl (e.g. `z->right->parent = z->parent`),
        // which overwrote zrl's links instead of splicing zr into z's old
        // position.  All post-redirect links now go through zr, matching
        // the (correct) root branch above.
        if(z == z->parent->left){
            // z is a left child
            zrl = z->right->left;
            zr = z->right;
            zrl->parent = z;
            z->right = zrl;
            // splice zr into z's old slot, then hang z under zr
            z->parent->left = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->left = z;
        }else{
            // z is a right child
            zrl = z->right->left;
            zr = z->right;
            zrl->parent = z;
            z->right = zrl;
            z->parent->right = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->left = z;
        }
    }
    return true;
}
//symmetric to Left_rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
    // Symmetric to Left_Rotate: z's left child (zr) takes z's place,
    // z becomes zr's right child, and zr's old right subtree (zrl)
    // becomes z's left subtree.
    // Caller holds the flags of z, z->parent and z->left.
    struct par_rbNode *zrl,*zr;
    if(z->parent == rtParent){
        // rotating at the root
        zrl = z->left->right;
        zr = z->left;
        // if a process has set the flag of a node q,
        // no other process can move one of the children of q away from q
        zrl->parent = z;
        z->left = zrl;
        // update the remaining links (zr becomes the new root)
        root = zr;
        rtParent->right = root;
        root->parent = rtParent;
        z->parent = root;
        root->right = z;
    }else{
        // rotating under the root (parent exists)
        // BUGFIX: as in Left_Rotate, the original wrote through z->left
        // AFTER it had been redirected to zrl, corrupting zrl's links.
        // All post-redirect links now go through zr.
        if(z == z->parent->right){
            // z is a right child
            zrl = z->left->right;
            zr = z->left;
            zrl->parent = z;
            z->left = zrl;
            // splice zr into z's old slot, then hang z under zr
            z->parent->right = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->right = z;
        }else{
            // z is a left child
            zrl = z->left->right;
            zr = z->left;
            zrl->parent = z;
            z->left = zrl;
            z->parent->left = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->right = z;
        }
    }
    return true;
}
__device__ void Insert(int key){
    // Insert `key` into the concurrent red-black tree, retrying until the
    // node is placed or the key turns out to be present already.
    struct par_rbNode *fresh = createNode(key); // flag of the new node is held
    for(;;){
        // Find the attachment point for this attempt (NULL => key exists
        // or a flag could not be captured — Traverse reports both as NULL
        // only for duplicates; flag conflicts also yield NULL and retry).
        struct par_rbNode *attach = Traverse(fresh,key);
        if(attach == NULL){
            printf("Key Exists\n");
            break;
        }
        printf("Placing Node\n");
        // outcome is short for result (avoids confusion with the global enum)
        enum result outcome = PlaceNode(fresh);
        printf("res = %d\n",outcome);
        if(outcome == Success){
            printf("rebalance\n");
            // The node went in; restore the red-black invariants with the
            // necessary recolorings and rotations.
            Insert_Rebalance(fresh);
        }
        // Failure means PlaceNode backed off — try the whole thing again.
        if(outcome != Failure){
            break;
        }
    }
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
    // Pre-order walk: emit "key-color " for this node, then recurse
    // left, then right.  NIL is the recursion sentinel.
    if (node != NIL) {
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
        printPreorder(node->left);
        printPreorder(node->right);
    }
}
__device__ void printInorder(struct par_rbNode* node)
{
    // In-order walk: left subtree, then "key-color " for this node,
    // then right subtree.  NIL is the recursion sentinel.
    if (node != NIL) {
        printInorder(node->left);
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
        printInorder(node->right);
    }
}
__device__ void printPostorder(struct par_rbNode* node)
{
    // Post-order walk: left subtree, right subtree, then "key-color "
    // for this node.  NIL is the recursion sentinel.
    if (node != NIL) {
        printPostorder(node->left);
        printPostorder(node->right);
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
    }
}
// Number of threads that have completed their inserts (used as a crude barrier).
__device__ int threadsFinished = 0;
// Bumped once thread 0 finishes building the tree; only read by commented-out code.
__device__ int passCreate = 0;
// Driver kernel: thread 0 builds the tree scaffolding, then every thread
// performs the same two inserts.  The final thread-count check gates the
// tree printout.
// NOTE(review): there is no barrier between createTree() and the Insert()
// calls of other threads — this is only safe for the current <<<1,1>>>
// launch; confirm before scaling up.
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
Insert(2);
Insert(1);
// while(1){
// if(passCreate){
// printf("Root Flag %d\n",root->flag);
// Insert(1);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
// NOTE(review): plain (non-atomic) read of threadsFinished — with more than
// one thread, thread 0 may read a stale count and the printout may be
// skipped; consider using the return value of the atomicAdd above.
if(threadsFinished == threadCount){
if(id == 0){
printf("PreOrder: ");
printPreorder(root);
printf("\n");
printf("\n");
printf("InOrder: ");
printInorder(root);
printf("\n");
printf("\n");
printf("PostOrder: ");
printPostorder(root);
printf("\n");
printf("\n");
printf("Done\n");
}
}
//return to main
}
int main() {
    struct par_rbNode h_nodes[M];
    struct par_rbNode *d_nodes;
    float time = 0.0f;
    // 1. Allocate the device node pool.
    hipMalloc(&d_nodes, M * sizeof(struct par_rbNode));
    // Nodes start unlocked and RED; createNode() hands them out on demand.
    for(int i=0;i<M;i++){
        h_nodes[i].flag = false;
        h_nodes[i].color = RED;
    }
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // 2. Copy the initialized pool to the device.
    hipMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), hipMemcpyHostToDevice);
    printf("Kernel Launched\n");
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( RBT), dim3(1),dim3(1), 0, 0, d_nodes);
    // BUGFIX: record the stop event before the D2H copy so the reported
    // number really is kernel time (the blocking copy below synchronizes
    // with the kernel anyway).
    hipEventRecord(stop, 0);
    hipMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), hipMemcpyDeviceToHost);
    hipEventSynchronize(stop);
    printf("Came back\n");
    hipEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    // Release device resources (missing in the original: events and
    // d_nodes were leaked).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_nodes);
    return 0;
} | 68e57b87469478abd7a33521e587bb9e7fc945a5.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20   // capacity of the preallocated device node pool
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
// Outcome of PlaceNode(): Failure => back off and retry the insert;
// FirstInsert => the tree was empty and the node became the root.
enum result {
Failure,
Success,
FirstInsert
};
// Which rebalancing case Update_Rotation performed last (drives the
// flag-release bookkeeping in Insert_Rebalance).
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
// Tree node. `flag` is a per-node lock manipulated with atomicCAS
// (0 = free, 1 = held).
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
// /*Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
// Global tree state, shared by all device threads.
__device__ struct par_rbNode *nodes;      // device node pool (set from d_nodes by the kernel)
__device__ struct par_rbNode *root;       // current tree root
__device__ struct par_rbNode *NIL;        // shared sentinel leaf (nodes[0])
__device__ struct par_rbNode *rtParent;   // pseudo-parent sitting above the real root
__device__ struct par_rbNode *rtSibling; // U might feel this is unncessary, but it will be used
__device__ int nodeIndex = 0;             // next free slot in the pool (slot 0 is NIL)
__device__ int tmpIndex = 2;              // next free slot in tmp[]
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;        // spin lock serializing createNode()
// Build the shared NIL sentinel in slot 0 of the node pool.
__device__ void createNIL(){
NIL = &nodes[0];
NIL->color = BLACK;
NIL->key = -1;
// NIL is its own parent and child, so traversals never fall off the tree.
NIL->left = NIL->right = NIL->parent = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
    // Allocate the next node from the preallocated pool, serialized by the
    // createFlag spin lock, and return it with its own flag already held
    // so the caller owns it exclusively.
    bool ok;
    do{
        // BUGFIX: atomicCAS returns the OLD value, so the lock is acquired
        // only when that old value was false.  The original
        // `ok = atomicCAS(...)` kept spinning after a successful acquisition
        // and proceeded while another thread held the lock.
        ok = (atomicCAS(&createFlag,false,true) == false);
    }while(!ok);
    // Claim a pool slot and a tmp slot.  The post-increment values are
    // captured locally so the returned pointer does not depend on globals
    // that later callers will advance.
    int slot = atomicAdd(&nodeIndex,1) + 1;
    int t = atomicAdd(&tmpIndex,1) + 1;
    nodes[slot].key = key;
    nodes[slot].flag = true; // hand the node to the caller already locked
    nodes[slot].left = nodes[slot].right = nodes[slot].parent = NIL;
    tmp[t] = &nodes[slot];
    createFlag = false; // release the allocator lock
    printf("Created %d\n",key);
    return tmp[t];
}
__device__ void createTree(){
    // Set up the pseudo-root scaffolding: rtParent sits above the real
    // root (which hangs on its LEFT, because the insertion code checks the
    // left-parent case first — try inserting into a one-node tree to see
    // why), with rtSibling as a black dummy right child.
    rtParent = createNode(-1);
    rtSibling = createNode(-1);
    root = NIL; // the tree starts empty
    // Wire rtParent.
    rtParent->parent = NIL;
    rtParent->left = root;
    rtParent->right = rtSibling;
    rtParent->color = BLACK;
    // Wire the dummy sibling.
    rtSibling->parent = rtParent;
    rtSibling->left = NIL;
    rtSibling->right = NIL;
    rtSibling->color = BLACK;
    // Release every flag so the first real insert can proceed.
    rtParent->flag = false;
    rtSibling->flag = false;
    NIL->parent = rtParent;
    NIL->flag = false;
    printf("Tree Created \n");
    printf("\n");
}
// Walk from the root to the attachment point for `key`, hand-over-hand:
// capture the child's flag before releasing the current node's flag.
// Returns the future parent of newNode (with its flag held), NIL for an
// empty tree, or NULL when the key already exists OR a flag could not be
// captured (caller retries).
// NOTE(review): `success/ok = atomicCAS(&...->flag,false,true)` uses the
// RETURNED OLD VALUE as a success indicator — atomicCAS returns the old
// value, so these tests appear inverted; verify against the intended
// protocol before relying on this code under contention.
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(!success);
//success => captured the root flag
//savert != root => root has changed
//!success => root is under lock
//thread will come out of the loop only after "success" and "savert==root"
x = root;
if(x != NIL){
while(x != NIL){
// NOTE(review): y aliases x->left->parent and is then written back via
// `x->left->parent = y->left` below — presumably a remnant of the DCAS
// emulation above; confirm the intent before touching this.
struct par_rbNode *y = x->left->parent;
printf("Inside while\n");
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
printf("New Key\n");
if(key < x->key){
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(!ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->left->parent = y->left;
x->flag = false;
x = x->left;
}else{
x->left->parent = y->left;
x = x->left;
if(x == NIL){
newNode->parent = x->parent;
return x->parent;
}
}//end if
}else{
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(!ok){
x->flag = false;
return NULL;
}//end if
x->right->parent = y->right;
x->flag = false;
x = x->right;
}else{
x->right->parent = y->right;
x = x->right;
if(x == NIL){
newNode->parent = x->parent;
return x->parent;
}
}//end if
}//end if
}//end while
// NOTE(review): control can only reach here if the while-condition ever
// fails, and then the function falls off the end of a non-void function
// (undefined behavior).  The inner branches appear to return before x can
// become NIL at the loop test, but the commented-out return below should
// probably be restored as a safety net.
// return x->parent;
}else{
return NIL;
}
}
// Link newNode under its (already chosen) parent.  On entry the flags of
// newNode and its insertion point are held.  Before linking, the
// grandparent's and uncle's flags must also be captured; on any conflict
// the function backs off and returns Failure so Insert() retries.
// NOTE(review): the author's own comment below ("GIVING FALSE EVEN THOUGH
// ITS TRUE") points at the recurring atomicCAS old-value confusion —
// atomicCAS returns the OLD value, so `ok` here is true when the flag was
// ALREADY held, not when it was acquired.  Also note the back-off branches
// write `flag = false` on nodes whose flags were never acquired, and the
// "else" branches release the parent flag even on success; the whole
// flag protocol of this function needs verification.
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release NIL node, that u caught during Traverse
newNode->flag = false;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
struct par_rbNode *insPoint = newNode->parent;
printf("Insert Key %d\n",insPoint->key);
if(insPoint == insPoint->parent->left){ //uncle is right child
// savep = insPoint->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when insPoint is root
ok = atomicCAS(&insPoint->parent->flag,false,true);//GIVING FALSE EVEN THOUGH ITS TRUE
printf("OK -- %d\n",ok);
if(ok){
ok = atomicCAS(&insPoint->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&insPoint->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(!ok){ //back off
insPoint->parent->flag = false;
insPoint->parent->right->flag = false;
}else{
insPoint->parent->flag = false;
}//end if
}
}else{// uncle is left child
// savep = insPoint->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&insPoint->parent->flag,false,true);
if(ok){
ok = atomicCAS(&insPoint->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&insPoint->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(!ok){ //back off
insPoint->parent->flag = false;
insPoint->parent->left->flag = false;
}else{
insPoint->parent->flag = false;
}//end if
}
}//end if
if(!ok){
// This "!ok" is when u fail to capture the grandparent flag,
// u haven't caught any extra flags so just get rid of the flag of insPoint
insPoint->flag = false; // release flag
insPoint = NIL;
return Failure; //avoid deadlock
}
// When u have successfully captured all the required flags.
// i.e. parent, grandparent, uncle
if(newNode->key < insPoint->key){
//insert as left child
insPoint->left = newNode;
return Success;
}else{//insertas right child
insPoint->right = newNode;
return Success;
}
}
}
// Restore the red-black invariants after an insert.  On entry the flags of
// x, p(x), p(p(x)) and uncle(x) are held.  Repeatedly calls Update_Rotation;
// after a CASE 1 recoloring, x moves up to its grandparent and a fresh set
// of flags (parent, grandparent, new uncle) is captured before looping.
// The final switch releases whichever flag set is still held.
// NOTE(review): every `ok = atomicCAS(&...->flag,false,true)` below treats
// the returned OLD value as "acquired" — atomicCAS returns the old value,
// so these tests look inverted (see the author's own remark in PlaceNode).
// The !ok branches also write `flag = false` on a node whose flag was not
// acquired, and the else branches release flags that the following
// iteration appears to still need; verify the protocol before reuse.
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //Update-Rotation successded?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotaion as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle;
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(!ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(!ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(!ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 loop will exit: parent will be BLACK
}
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
root->color = BLACK;
}
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
    // One rebalancing step of the concurrent red-black insert fix-up.
    // Precondition: caller holds the flags (per-node locks) of x, p(x),
    // p(p(x)) and uncle(x).  CASE 1 only recolors; CASE 2/3 rotate, which
    // additionally requires the great-grandparent's flag.
    // Returns true when the update succeeded ("updateSucceeds" in the caller).
    struct par_rbNode *xUncle;
    struct par_rbNode *oldx; // saved x so a failed CASE 2 rotation can be undone
    bool ok;
    if(x->parent == x->parent->parent->left){
        // the parent is a left child
        xUncle = x->parent->parent->right;
        if(xUncle->color == RED){
            // CASE 1 - recoloring only; all needed flags are already held,
            // so this mirrors the serial algorithm.
            x->parent->color = BLACK;
            xUncle->color = BLACK;
            x->parent->parent->color = RED;
            *caseF = DID_CASE1;
            return true; // this true is for "updateSucceeds"
        }else{ // rotation(s) will be needed
            if(x == x->parent->right){ // CASE 2
                oldx = x; // save old x in case the rotation fails
                x = x->parent;
                ok = Left_Rotate(x);
                if(!ok){
                    x = oldx; // undo change to x
                    return false; // this false is for "updateSucceeds"
                }
            }
            // In CASE 3, if the right-rotation fails the algorithm still
            // works: we return false and Insert_Rebalance calls
            // Update_Rotation again to complete CASE 3.
            do{ // get the great-grandparent's flag; keeps trying, no back-off
                // BUGFIX: atomicCAS returns the OLD value, so the flag is
                // acquired only when that old value was false.  The original
                // `ok = atomicCAS(...)` had the test inverted.
                ok = (atomicCAS(&x->parent->parent->parent->flag,false,true) == false);
            }while(!ok);
            ok = Right_Rotate(x->parent->parent);
            if(!ok){
                x->parent->parent->parent->flag = false;
                return false; // this false is for "updateSucceeds"
            }else{
                x->parent->color = BLACK;
                x->parent->right->color = RED;
                *caseF = DID_CASE3;
                x->parent->parent->parent->flag = false; // rotation done; drop the ggp flag
                return true;
            }
        }
    }else{
        // symmetric to the code above: the parent is a right child
        xUncle = x->parent->parent->left;
        if(xUncle->color == RED){
            // CASE 1 - recoloring only
            x->parent->color = BLACK;
            xUncle->color = BLACK;
            x->parent->parent->color = RED;
            *caseF = DID_CASE1;
            return true;
        }else{ // rotation(s) will be needed
            if(x == x->parent->left){ // CASE 2
                oldx = x; // save old x in case the rotation fails
                x = x->parent;
                ok = Right_Rotate(x);
                if(!ok){
                    x = oldx; // undo change to x
                    return false;
                }
            }
            do{ // get the great-grandparent's flag; keeps trying, no back-off
                // BUGFIX: compare atomicCAS's returned old value with false
                // (see the symmetric branch above).
                ok = (atomicCAS(&x->parent->parent->parent->flag,false,true) == false);
            }while(!ok);
            ok = Left_Rotate(x->parent->parent);
            if(!ok){
                x->parent->parent->parent->flag = false;
                return false;
            }else{
                x->parent->color = BLACK;
                x->parent->left->color = RED;
                *caseF = DID_CASE3;
                x->parent->parent->parent->flag = false;
                return true;
            }
        }
    }
}
//A rotation will always be successful(true), as u can reach the rotate command
//only after u have cptured all the requried flags
__device__ bool Left_Rotate(struct par_rbNode *z){
    // Left-rotates the subtree rooted at z: z's right child (zr) takes z's
    // place, z becomes zr's left child, and zr's old left subtree (zrl)
    // becomes z's right subtree.
    // Caller holds the flags of z, z->parent and z->right.
    struct par_rbNode *zrl,*zr;
    if(z->parent == rtParent){
        // rotating at the root
        zrl = z->right->left;
        zr = z->right;
        // if a process has set the flag of a node q,
        // no other process can move one of the children of q away from q
        zrl->parent = z;
        z->right = zrl;
        // update the remaining links (zr becomes the new root)
        root = zr;
        rtParent->left = root;
        root->parent = rtParent;
        z->parent = root;
        root->left = z;
    }else{
        // rotating under the root (parent exists)
        // BUGFIX: the original branches wrote through z->right AFTER it had
        // been redirected to zrl (e.g. `z->right->parent = z->parent`),
        // which overwrote zrl's links instead of splicing zr into z's old
        // position.  All post-redirect links now go through zr, matching
        // the (correct) root branch above.
        if(z == z->parent->left){
            // z is a left child
            zrl = z->right->left;
            zr = z->right;
            zrl->parent = z;
            z->right = zrl;
            // splice zr into z's old slot, then hang z under zr
            z->parent->left = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->left = z;
        }else{
            // z is a right child
            zrl = z->right->left;
            zr = z->right;
            zrl->parent = z;
            z->right = zrl;
            z->parent->right = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->left = z;
        }
    }
    return true;
}
//symmetric to Left_rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
    // Symmetric to Left_Rotate: z's left child (zr) takes z's place,
    // z becomes zr's right child, and zr's old right subtree (zrl)
    // becomes z's left subtree.
    // Caller holds the flags of z, z->parent and z->left.
    struct par_rbNode *zrl,*zr;
    if(z->parent == rtParent){
        // rotating at the root
        zrl = z->left->right;
        zr = z->left;
        // if a process has set the flag of a node q,
        // no other process can move one of the children of q away from q
        zrl->parent = z;
        z->left = zrl;
        // update the remaining links (zr becomes the new root)
        root = zr;
        rtParent->right = root;
        root->parent = rtParent;
        z->parent = root;
        root->right = z;
    }else{
        // rotating under the root (parent exists)
        // BUGFIX: as in Left_Rotate, the original wrote through z->left
        // AFTER it had been redirected to zrl, corrupting zrl's links.
        // All post-redirect links now go through zr.
        if(z == z->parent->right){
            // z is a right child
            zrl = z->left->right;
            zr = z->left;
            zrl->parent = z;
            z->left = zrl;
            // splice zr into z's old slot, then hang z under zr
            z->parent->right = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->right = z;
        }else{
            // z is a left child
            zrl = z->left->right;
            zr = z->left;
            zrl->parent = z;
            z->left = zrl;
            z->parent->left = zr;
            zr->parent = z->parent;
            z->parent = zr;
            zr->right = z;
        }
    }
    return true;
}
__device__ void Insert(int key){
    // Insert `key` into the concurrent red-black tree, retrying until the
    // node is placed or the key turns out to be present already.
    struct par_rbNode *fresh = createNode(key); // flag of the new node is held
    for(;;){
        // Find the attachment point for this attempt (NULL => duplicate
        // key, or a flag conflict that warrants a retry via PlaceNode).
        struct par_rbNode *attach = Traverse(fresh,key);
        if(attach == NULL){
            printf("Key Exists\n");
            break;
        }
        printf("Placing Node\n");
        // outcome is short for result (avoids confusion with the global enum)
        enum result outcome = PlaceNode(fresh);
        printf("res = %d\n",outcome);
        if(outcome == Success){
            printf("rebalance\n");
            // The node went in; restore the red-black invariants with the
            // necessary recolorings and rotations.
            Insert_Rebalance(fresh);
        }
        // Failure means PlaceNode backed off — try the whole thing again.
        if(outcome != Failure){
            break;
        }
    }
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
    // Pre-order walk: emit "key-color " for this node, then recurse
    // left, then right.  NIL is the recursion sentinel.
    if (node != NIL) {
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
        printPreorder(node->left);
        printPreorder(node->right);
    }
}
__device__ void printInorder(struct par_rbNode* node)
{
    // In-order walk: left subtree, then "key-color " for this node,
    // then right subtree.  NIL is the recursion sentinel.
    if (node != NIL) {
        printInorder(node->left);
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
        printInorder(node->right);
    }
}
__device__ void printPostorder(struct par_rbNode* node)
{
    // Post-order walk: left subtree, right subtree, then "key-color "
    // for this node.  NIL is the recursion sentinel.
    if (node != NIL) {
        printPostorder(node->left);
        printPostorder(node->right);
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
    }
}
// Number of threads that have completed their inserts (used as a crude barrier).
__device__ int threadsFinished = 0;
// Bumped once thread 0 finishes building the tree; only read by commented-out code.
__device__ int passCreate = 0;
// Driver kernel: thread 0 builds the tree scaffolding, then every thread
// performs the same two inserts.  The final thread-count check gates the
// tree printout.
// NOTE(review): there is no barrier between createTree() and the Insert()
// calls of other threads — this is only safe for the current <<<1,1>>>
// launch; confirm before scaling up.
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
Insert(2);
Insert(1);
// while(1){
// if(passCreate){
// printf("Root Flag %d\n",root->flag);
// Insert(1);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
// NOTE(review): plain (non-atomic) read of threadsFinished — with more than
// one thread, thread 0 may read a stale count and the printout may be
// skipped; consider using the return value of the atomicAdd above.
if(threadsFinished == threadCount){
if(id == 0){
printf("PreOrder: ");
printPreorder(root);
printf("\n");
printf("\n");
printf("InOrder: ");
printInorder(root);
printf("\n");
printf("\n");
printf("PostOrder: ");
printPostorder(root);
printf("\n");
printf("\n");
printf("Done\n");
}
}
//return to main
}
int main() {
    struct par_rbNode h_nodes[M];
    struct par_rbNode *d_nodes;
    float time = 0.0f;
    // 1. Allocate the device node pool.
    cudaMalloc(&d_nodes, M * sizeof(struct par_rbNode));
    // Nodes start unlocked and RED; createNode() hands them out on demand.
    for(int i=0;i<M;i++){
        h_nodes[i].flag = false;
        h_nodes[i].color = RED;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // 2. Copy the initialized pool to the device.
    cudaMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), cudaMemcpyHostToDevice);
    printf("Kernel Launched\n");
    cudaEventRecord(start, 0);
    RBT<<<1,1>>>(d_nodes);
    // BUGFIX: record the stop event before the D2H copy so the reported
    // number really is kernel time (the blocking copy below synchronizes
    // with the kernel anyway).
    cudaEventRecord(stop, 0);
    cudaMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    printf("Came back\n");
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    // Release device resources (missing in the original: events and
    // d_nodes were leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_nodes);
    return 0;
}
ce04eb84b381dc406ac112187a7a0861d9f81389.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
#include <thrust/window_for_each.h>
using namespace cv;
// Scatter step of pyrUp (image upsampling): each output window whose
// top-left coordinate has odd x AND odd y receives the source pixel at
// (x/2, y/2); all other output pixels are left at their prior value to be
// filled in by the smoothing convolution that follows in main().
class pyrupTransformFunctor : public thrust::shared_window_for_each_functor<uchar>
{
public:
// Device-side view of the (half-resolution) input image.
thrust::block_2d<uchar> *inBlock;
// NOTE(review): stores block_2d::device_pointer so operator() can
// dereference it on the device — confirm against the thrust-window API.
pyrupTransformFunctor(thrust::block_2d<uchar> * inBlock)
{
this->inBlock = inBlock->device_pointer;
}
__device__ void operator() (const thrust::window_2d<uchar> &outputWindow) const
{
int x_in, y_in;
// Only windows at odd (x, y) positions copy a source pixel.
if(outputWindow.start_x%2 && outputWindow.start_y%2)
{
x_in = outputWindow.start_x/2;
y_in = outputWindow.start_y/2;
outputWindow[0][0]=(*inBlock)[y_in][x_in];
}
}
};
// pyrUp demo: load a grayscale image, resize it to dim x dim (dim from
// argv[1], default 512), scatter it into a 2x-sized block, then smooth
// with a separable 5-tap Gaussian (doubled weights, as in OpenCV pyrUp).
int main(int argc, char const *argv[])
{
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
// Input block plus a double-sized intermediate (scatter target) and a
// zero-initialized output for the convolution result.
// NOTE(review): intermediate_image_block is never cleared, so its even
// rows/columns hold indeterminate data before the convolution — verify
// this is intended.
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> intermediate_image_block (image.cols*2,image.rows*2);
thrust::block_2d<uchar> outBlock (image.cols*2,image.rows*2,0.0f);
thrust::window_vector<uchar> output_wv(&outBlock,1,1,1,1);
// Host staging buffers (NOTE(review): both mallocs are never freed).
uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin()));
uchar * img_out = (uchar * )malloc(sizeof(uchar)*(outBlock.end()-outBlock.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
// 1x1 windows over the double-sized intermediate: one functor call per pixel.
thrust::window_vector<uchar> inputVector(&intermediate_image_block,1,1,1,1);
pyrupTransformFunctor ptf(&uchar_image_block);
thrust::for_each(inputVector.begin(),inputVector.end(),ptf);
hipDeviceSynchronize();
// 5-tap Gaussian kernel with weights doubled to compensate for the
// inserted zero rows/columns.
float kernel[5] = {0.0625*2,0.25*2,0.375*2,0.25*2,0.0625*2};
thrust::convolve(thrust::hip::shared,&intermediate_image_block,kernel,kernel,5,&outBlock);
outBlock.download(&img_out);
// Wrap the downloaded buffer in a Mat header (no copy).
Mat output (Size(image.cols*2,image.rows*2),CV_8UC1,img_out);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("pyrup.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("pyrup.png",output);
waitKey(0);
#endif
return 0;
}
| ce04eb84b381dc406ac112187a7a0861d9f81389.cu | #include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
#include <thrust/window_for_each.h>
using namespace cv;
// Scatter step of pyrUp (image upsampling): each output window whose
// top-left coordinate has odd x AND odd y receives the source pixel at
// (x/2, y/2); all other output pixels are left at their prior value to be
// filled in by the smoothing convolution that follows in main().
class pyrupTransformFunctor : public thrust::shared_window_for_each_functor<uchar>
{
public:
// Device-side view of the (half-resolution) input image.
thrust::block_2d<uchar> *inBlock;
// NOTE(review): stores block_2d::device_pointer so operator() can
// dereference it on the device — confirm against the thrust-window API.
pyrupTransformFunctor(thrust::block_2d<uchar> * inBlock)
{
this->inBlock = inBlock->device_pointer;
}
__device__ void operator() (const thrust::window_2d<uchar> &outputWindow) const
{
int x_in, y_in;
// Only windows at odd (x, y) positions copy a source pixel.
if(outputWindow.start_x%2 && outputWindow.start_y%2)
{
x_in = outputWindow.start_x/2;
y_in = outputWindow.start_y/2;
outputWindow[0][0]=(*inBlock)[y_in][x_in];
}
}
};
// pyrUp demo: load a grayscale image, resize it to dim x dim (dim from
// argv[1], default 512), scatter it into a 2x-sized block, then smooth
// with a separable 5-tap Gaussian (doubled weights, as in OpenCV pyrUp).
int main(int argc, char const *argv[])
{
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
// Input block plus a double-sized intermediate (scatter target) and a
// zero-initialized output for the convolution result.
// NOTE(review): intermediate_image_block is never cleared, so its even
// rows/columns hold indeterminate data before the convolution — verify
// this is intended.
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> intermediate_image_block (image.cols*2,image.rows*2);
thrust::block_2d<uchar> outBlock (image.cols*2,image.rows*2,0.0f);
thrust::window_vector<uchar> output_wv(&outBlock,1,1,1,1);
// Host staging buffers (NOTE(review): both mallocs are never freed).
uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin()));
uchar * img_out = (uchar * )malloc(sizeof(uchar)*(outBlock.end()-outBlock.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
// 1x1 windows over the double-sized intermediate: one functor call per pixel.
thrust::window_vector<uchar> inputVector(&intermediate_image_block,1,1,1,1);
pyrupTransformFunctor ptf(&uchar_image_block);
thrust::for_each(inputVector.begin(),inputVector.end(),ptf);
cudaDeviceSynchronize();
// 5-tap Gaussian kernel with weights doubled to compensate for the
// inserted zero rows/columns.
float kernel[5] = {0.0625*2,0.25*2,0.375*2,0.25*2,0.0625*2};
thrust::convolve(thrust::cuda::shared,&intermediate_image_block,kernel,kernel,5,&outBlock);
outBlock.download(&img_out);
// Wrap the downloaded buffer in a Mat header (no copy).
Mat output (Size(image.cols*2,image.rows*2),CV_8UC1,img_out);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("pyrup.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("pyrup.png",output);
waitKey(0);
#endif
return 0;
}
|
6a4441ec677aa941407b128a0438adc66ef22395.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
//kernel for GPU
// Element-wise matrix addition: one thread per element of a single
// blockDim.x x blockDim.y block (main launches exactly one block).
__global__ void addKernel(int* c, const int* a, const int* b) {
    // Flatten the 2-D thread coordinate into a row-major index.
    const int idx = threadIdx.y * blockDim.x + threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
//CPU
// CPU reference for addKernel: adds the elements at 2D position (x, y) of two
// WIDTH-wide row-major matrices and stores the sum in c at the same position.
// 2D arrays are accessed through a flattened 1D index, mirroring the kernel.
void add(const int x, const int y, const int WIDTH, int* c, const int* a, const int* b) {
const int offset = y * WIDTH + x; // row-major flattening
c[offset] = a[offset] + b[offset];
}
// Host driver: builds two WIDTH x WIDTH integer matrices, adds them on the
// GPU with a single WIDTH x WIDTH thread block, and prints the result grid.
// NOTE(review): no return-code checking on the hipMalloc/hipMemcpy calls or
// the kernel launch; a failure would surface only as garbage output.
int main() {
//host-side
const int WIDTH = 5;
int a[WIDTH][WIDTH];
int b[WIDTH][WIDTH];
int c[WIDTH][WIDTH] = { 0 };
//fill the inputs: a[x][y] = 10x+y, b[x][y] = 100*(10x+y)
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
a[x][y] = x * 10 + y;
b[x][y] = (x * 10 + y) * 100;
}
}
//device-side
int* dev_a = 0;
int* dev_b = 0;
int* dev_c = 0;
//allocate device memory
hipMalloc((void**)&dev_a, WIDTH * WIDTH*sizeof(int));
hipMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int));
hipMalloc((void**)&dev_c, WIDTH * WIDTH * sizeof(int));
// copy from host to device
hipMemcpy(dev_a, a, WIDTH * WIDTH * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, WIDTH * WIDTH * sizeof(int), hipMemcpyHostToDevice);
//launch a kernel: one 2D block, one thread per matrix element
dim3 dimBlock(WIDTH, WIDTH, 1);
dim3 dimGrid(1, 1, 1);
addKernel << <dimGrid, dimBlock >> > (dev_c, dev_a, dev_b);
//copy from device to host (blocking memcpy doubles as synchronization)
hipMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(int), hipMemcpyDeviceToHost);
//Free device buffers
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
//CPU reference path, kept for comparison but disabled:
//for (int x = 0; x < WIDTH; x++) {
//	for (int y = 0; y < WIDTH; y++) {
//		add(x, y, WIDTH, (int*)(c), (int*)(a), (int*)(b));
//	}
//}
//print the result matrix
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
printf("%5d", c[x][y]);
}
printf("\n");
}
return 0;
}
} | 6a4441ec677aa941407b128a0438adc66ef22395.cu | #include <cstdio>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
//kernel for GPU: element-wise matrix addition, one thread per element.
//A single 2D thread block is launched, so the (threadIdx.y, threadIdx.x)
//pair is flattened into a linear row-major index with row length blockDim.x.
__global__ void addKernel(int* c, const int* a, const int* b) {
int x = threadIdx.x;
int y = threadIdx.y;
int i = y * (blockDim.x) + x;
c[i] = a[i] + b[i];
}
//CPU reference implementation of the element-wise matrix addition.
void add(const int x, const int y, const int WIDTH, int* c, const int* a, const int* b) {
int i = y * (WIDTH)+x;//2D host arrays cannot be indexed directly from CUDA, so (x, y) is flattened to a 1D row-major offset like this
c[i] = a[i] + b[i];
}
// Host driver: builds two WIDTH x WIDTH integer matrices, adds them on the
// GPU with a single WIDTH x WIDTH thread block, and prints the result grid.
// NOTE(review): no return-code checking on the cudaMalloc/cudaMemcpy calls
// or the kernel launch; a failure would surface only as garbage output.
int main() {
//host-side
const int WIDTH = 5;
int a[WIDTH][WIDTH];
int b[WIDTH][WIDTH];
int c[WIDTH][WIDTH] = { 0 };
//fill the inputs: a[x][y] = 10x+y, b[x][y] = 100*(10x+y)
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
a[x][y] = x * 10 + y;
b[x][y] = (x * 10 + y) * 100;
}
}
//device-side
int* dev_a = 0;
int* dev_b = 0;
int* dev_c = 0;
//allocate device memory
cudaMalloc((void**)&dev_a, WIDTH * WIDTH*sizeof(int));
cudaMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int));
cudaMalloc((void**)&dev_c, WIDTH * WIDTH * sizeof(int));
// copy from host to device
cudaMemcpy(dev_a, a, WIDTH * WIDTH * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, WIDTH * WIDTH * sizeof(int), cudaMemcpyHostToDevice);
//launch a kernel: one 2D block, one thread per matrix element
dim3 dimBlock(WIDTH, WIDTH, 1);
dim3 dimGrid(1, 1, 1);
addKernel << <dimGrid, dimBlock >> > (dev_c, dev_a, dev_b);
//copy from device to host (blocking memcpy doubles as synchronization)
cudaMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(int), cudaMemcpyDeviceToHost);
//Free device buffers
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
//CPU reference path, kept for comparison but disabled:
//for (int x = 0; x < WIDTH; x++) {
//	for (int y = 0; y < WIDTH; y++) {
//		add(x, y, WIDTH, (int*)(c), (int*)(a), (int*)(b));
//	}
//}
//print the result matrix
for (int x = 0; x < WIDTH; x++) {
for (int y = 0; y < WIDTH; y++) {
printf("%5d", c[x][y]);
}
printf("\n");
}
return 0;
}
e47df9856c92e38540aa267b6234cf28fc2e4ae8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include<sys/time.h>
// Increments every element of `vector` in place: one thread per element,
// with a bounds guard to cover the partial last block of a ceil-div grid.
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<vectorsize)
vector[id]++;
}
#define BLOCKSIZE 1024
/*
 * Timing sweep: for j = 1, 11, 21, ... prints "j<TAB>seconds" where the time
 * covers host-buffer initialization, H2D copy, kernel launch, and D2H copy.
 * NOTE(review): the stack VLA `vec` holds N*j elements but only the first N
 * are ever copied to / processed on the device - presumably intentional so
 * that only the host-side initialization cost grows with j; confirm.
 */
int main(int nn, char *str[]) {
unsigned long long N=1024;
for(int j=1;j<1024;j+=10){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
unsigned *vector, *hvector;
unsigned vec[N*j];
for (int i = 0; i < N*j; i++) {
vec[i] = i;
}
hipMalloc(&vector, N * sizeof(unsigned));
hipMemcpy(vector, vec, N * sizeof(unsigned), hipMemcpyHostToDevice);
hvector = (unsigned *)malloc(N * sizeof(unsigned));
unsigned nblocks = ceil((float)N / BLOCKSIZE);
// printf("nblocks = %d\n", nblocks);
hipLaunchKernelGGL(( dkernel), dim3(nblocks), dim3(BLOCKSIZE), 0, 0, vector, N);
hipMemcpy(hvector, vector, N * sizeof(unsigned), hipMemcpyDeviceToHost);
gettimeofday(&tv2, NULL);
printf ("%d\t%f\n",j,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
// Fix: release the per-iteration allocations. The original leaked both the
// device buffer and the host result buffer on every loop iteration
// (~103 iterations x 4 KB device + 4 KB host). Freeing after gettimeofday
// keeps the timed region unchanged.
hipFree(vector);
free(hvector);
}
return 0;
}
| e47df9856c92e38540aa267b6234cf28fc2e4ae8.cu | #include <stdio.h>
#include <cuda.h>
#include<sys/time.h>
// Increments every element of `vector` in place: one thread per element,
// with a bounds guard to cover the partial last block of a ceil-div grid.
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<vectorsize)
vector[id]++;
}
#define BLOCKSIZE 1024
/*
 * Timing sweep: for j = 1, 11, 21, ... prints "j<TAB>seconds" where the time
 * covers host-buffer initialization, H2D copy, kernel launch, and D2H copy.
 * NOTE(review): the stack VLA `vec` holds N*j elements but only the first N
 * are ever copied to / processed on the device - presumably intentional so
 * that only the host-side initialization cost grows with j; confirm.
 */
int main(int nn, char *str[]) {
unsigned long long N=1024;
for(int j=1;j<1024;j+=10){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
unsigned *vector, *hvector;
unsigned vec[N*j];
for (int i = 0; i < N*j; i++) {
vec[i] = i;
}
cudaMalloc(&vector, N * sizeof(unsigned));
cudaMemcpy(vector, vec, N * sizeof(unsigned), cudaMemcpyHostToDevice);
hvector = (unsigned *)malloc(N * sizeof(unsigned));
unsigned nblocks = ceil((float)N / BLOCKSIZE);
// printf("nblocks = %d\n", nblocks);
dkernel<<<nblocks, BLOCKSIZE>>>(vector, N);
cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
gettimeofday(&tv2, NULL);
printf ("%d\t%f\n",j,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
// Fix: release the per-iteration allocations. The original leaked both the
// device buffer and the host result buffer on every loop iteration.
// Freeing after gettimeofday keeps the timed region unchanged.
cudaFree(vector);
free(hvector);
}
return 0;
}
|
db8324eadef8a8c4dec3ef309d8a109ad1ba1027.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/**
 * Host-side half of the hello-world demo: emits one greeting line on
 * standard output from the CPU.
 */
void cpu_helloworld()
{
fputs("Hello from the CPU!\n", stdout);
}
/**
 * GPU version of our CUDA Hello World!
 * Each thread prints its own threadIdx.x via device-side printf; the output
 * only appears once the host synchronizes with the device.
 */
__global__ void gpu_helloworld()
{
int threadId = threadIdx.x;
printf("Hello from the GPU! My threadId is %d\n", threadId);
}
// Runs the CPU greeting, then launches one block of 32 GPU threads and
// synchronizes so the device printf buffer is flushed before exit.
int main(int argc, char **argv)
{
dim3 grid(1); // 1 block in the grid
dim3 block(32); // 32 threads per block
// Call the CPU version
cpu_helloworld();
// Call the GPU version (the launch itself is asynchronous)
hipLaunchKernelGGL(( gpu_helloworld), dim3(grid), dim3(block), 0, 0, );
////////////////
// TO-DO #1.2 ////////////////////
// Introduce your changes here! //
// synchronize the threads - without this the process can exit before the
// device-side printf output is delivered
hipDeviceSynchronize();
//////////////////////////////////
return 0;
}
| db8324eadef8a8c4dec3ef309d8a109ad1ba1027.cu |
#include <stdio.h>
/**
* CPU version of our CUDA Hello World!
*/
void cpu_helloworld()
{
printf("Hello from the CPU!\n");
}
/**
* GPU version of our CUDA Hello World!
*/
__global__ void gpu_helloworld()
{
int threadId = threadIdx.x;
printf("Hello from the GPU! My threadId is %d\n", threadId);
}
int main(int argc, char **argv)
{
dim3 grid(1); // 1 block in the grid
dim3 block(32); // 32 threads per block
// Call the CPU version
cpu_helloworld();
// Call the GPU version
gpu_helloworld<<<grid, block>>>();
////////////////
// TO-DO #1.2 ////////////////////
// Introduce your changes here! //
// synchronize the threads
cudaDeviceSynchronize();
//////////////////////////////////
return 0;
}
|
05f6c8998f280db51e11be0c622fac7fd621e55e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <glew.h>
#include <freeglut.h>
#include <cudaDefs.h>
#include <imageManager.h>
// includes, cuda
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_gl.h> // helper functions for CUDA/GL interop
#include "imageKernels.cuh"
#define BLOCK_DIM 8
//hipError_t error = hipSuccess;
//hipDeviceProp_t deviceProp = hipDeviceProp_t();
//CUDA variables
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int imagePitch;
cudaGraphicsResource_t cuda_pbo_resource;
cudaGraphicsResource_t cuda_tex_resource;
texture<uchar4, 2, hipReadModeElementType> cuda_tex_ref;
hipChannelFormatDesc cuda_tex_channel_desc;
KernelSetting ks;
unsigned char some_value = 0;
//OpenGL
unsigned int pbo_id;
unsigned int texture_id;
unsigned int viewport_width = 1024;
unsigned int viewport_height = 1024;
#pragma region CUDA Routines
// Replaces the first channel of every RGBA texel with `some_value` (TODO 9
// calls this channel red). Source texels are read through the globally bound
// 2D texture reference `cuda_tex_ref`; the modified pixels are written into
// the PBO backing store. Expected launch: 2D grid of BLOCK_DIM x BLOCK_DIM
// blocks covering a pbo_width x pbo_height image.
__global__ void apply_filter(const unsigned char some_value, const unsigned int pbo_width, const unsigned int pbo_height, unsigned char *pbo)
{
const auto col = threadIdx.x + blockIdx.x * blockDim.x;
const auto row = threadIdx.y + blockIdx.y * blockDim.y;
// Fix: guard the partial edge blocks produced by the ceil-div grid in
// cuda_worker; without this, out-of-range threads write past the PBO and
// read outside the texture when the image size is not a multiple of
// BLOCK_DIM. (Also puts the previously unused pbo_height to work.)
if (col >= pbo_width || row >= pbo_height)
return;
const auto offset = col + row * pbo_width;
uchar4 texel = tex2D(cuda_tex_ref, col, row);
texel.x = some_value; // overwrite first channel, per TODO 9
const auto uchar4_pbo = reinterpret_cast<uchar4*>(pbo);
uchar4_pbo[offset] = texel;
}
// Per-frame worker: maps the GL texture and PBO for CUDA access, runs the
// red-channel filter kernel, then unmaps everything and copies the PBO back
// into the GL texture. The map -> getArray -> bind ordering is required by
// the graphics-interop API.
void cuda_worker()
{
hipArray* array;
//Map the GL texture resource for CUDA access
hipGraphicsMapResources(1, &cuda_tex_resource, nullptr);
//Get the CUDA array backing the mapped texture
hipGraphicsSubResourceGetMappedArray(&array, cuda_tex_resource, 0, 0);
//Query the channel layout of that array
hipGetChannelDesc(&cuda_tex_channel_desc, array);
//Bind the texture reference to the array so tex2D() works in the kernel
hipBindTextureToArray(&cuda_tex_ref, array, &cuda_tex_channel_desc);
checkError();
unsigned char *pbo_data;
size_t pboSize;
//Map the PBO resource and fetch its device pointer
hipGraphicsMapResources(1, &cuda_pbo_resource, nullptr);
hipGraphicsResourceGetMappedPointer(reinterpret_cast<void**>(&pbo_data), &pboSize, cuda_pbo_resource);
checkError();
//Launch config: BLOCK_DIM x BLOCK_DIM threads, ceil-div grid over the image
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
ks.dimGrid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1);
//Advance the animated red value each frame.
//NOTE(review): some_value is an unsigned char, so the >255 test below can
//never be true - the wrap happens via overflow on the ++ instead.
some_value++;
if (some_value>255) some_value = 0;
hipLaunchKernelGGL(( apply_filter), dim3(ks.dimGrid), dim3(ks.dimBlock), 0, 0, some_value, imageWidth, imageHeight, pbo_data);
//Following code release mapped resources, unbinds texture and ensures that PBO data will be coppied into OpenGL texture. Do not modify following code!
hipUnbindTexture(&cuda_tex_ref);
hipGraphicsUnmapResources(1, &cuda_pbo_resource, nullptr);
hipGraphicsUnmapResources(1, &cuda_tex_resource, nullptr);
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo_id);
glBindTexture( GL_TEXTURE_2D, texture_id);
glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
}
// One-time interop setup: selects GL device 0 for CUDA, registers the GL
// texture (read-only source) and the PBO (write-discard target), and
// configures the texture reference for unnormalized, point-sampled,
// clamped access.
void init_cud_atex()
{
hipGLSetGLDevice(0);
checkError();
//TODO 1: Register OpenGL texture to CUDA resource
hipGraphicsGLRegisterImage(&cuda_tex_resource, texture_id, GL_TEXTURE_2D, hipGraphicsRegisterFlagsReadOnly);
//CUDA Texture settings
cuda_tex_ref.normalized = false; //Otherwise TRUE to access with normalized texture coordinates
cuda_tex_ref.filterMode = hipFilterModePoint; //Otherwise texRef.filterMode = hipFilterModeLinear; for Linear interpolation of texels
cuda_tex_ref.addressMode[0] = hipAddressModeClamp; //No repeat texture pattern
cuda_tex_ref.addressMode[1] = hipAddressModeClamp; //No repeat texture pattern
checkError();
//TODO 2: Register PBO to CUDA resource
hipGraphicsGLRegisterBuffer(&cuda_pbo_resource, pbo_id, hipGraphicsRegisterFlagsWriteDiscard);
checkError();
}
// Unregisters both graphics-interop resources; must run before the GL
// objects they wrap are deleted.
void release_cuda()
{
hipGraphicsUnregisterResource(cuda_pbo_resource);
hipGraphicsUnregisterResource(cuda_tex_resource);
}
#pragma endregion
#pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!!
void load_texture(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = ImageManager::GenericLoader(image_file_name, 0);
imageWidth = FreeImage_GetWidth(tmp);
imageHeight = FreeImage_GetHeight(tmp);
imageBPP = FreeImage_GetBPP(tmp);
imagePitch = FreeImage_GetPitch(tmp);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1,&texture_id);
glBindTexture( GL_TEXTURE_2D, texture_id);
//WARNING: Just some of inner format are supported by CUDA!!!
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
FreeImage_Unload(tmp);
}
void prepare_pbo()
{
glGenBuffers(1, &pbo_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo_id); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, nullptr, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
}
void my_display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture_id);
//I know this is a very old OpenGL, but we want to practice CUDA :-)
//Now it will be a wasted time to learn you current features of OpenGL. Sorry for that however, you can visit my second seminar dealing with Computer Graphics (CG2).
glBegin(GL_QUADS);
glTexCoord2d(0,0); glVertex2d(0,0);
glTexCoord2d(1,0); glVertex2d(viewport_width, 0);
glTexCoord2d(1,1); glVertex2d(viewport_width, viewport_height);
glTexCoord2d(0,1); glVertex2d(0, viewport_height);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h)
{
viewport_width=w;
viewport_height=h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0,0,viewport_width,viewport_height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0,viewport_width, 0,viewport_height);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle()
{
cuda_worker();
glutPostRedisplay();
}
void init_gl(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewport_width,viewport_height);
glutInitWindowPosition(0,0);
glutCreateWindow(":-)");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0,0,viewport_width,viewport_height);
glFlush();
}
void release_open_gl()
{
if (texture_id > 0)
glDeleteTextures(1, &texture_id);
if (pbo_id > 0)
glDeleteBuffers(1, &pbo_id);
}
#pragma endregion
// Combined teardown: CUDA interop registrations first, then the GL texture
// and PBO they referenced.
void release_resources()
{
release_cuda();
release_open_gl();
}
// Demo entry point: sets up GLUT/GLEW, loads "lena.png" into a GL texture,
// creates the PBO, registers both with CUDA, and enters the render loop.
// NOTE(review): atexit() is only reached after glutMainLoop() returns,
// which classic GLUT never does - the cleanup handler is likely never
// installed; confirm whether freeglut's loop-return behaviour is relied on.
void exercise7(int argc, char *argv[])
{
//initializeCUDA(deviceProp);
init_gl(argc, argv);
load_texture("lena.png");
prepare_pbo();
init_cud_atex();
//start rendering mainloop
glutMainLoop();
atexit(release_resources);
}
| 05f6c8998f280db51e11be0c622fac7fd621e55e.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <glew.h>
#include <freeglut.h>
#include <cudaDefs.h>
#include <imageManager.h>
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_gl.h> // helper functions for CUDA/GL interop
#include "imageKernels.cuh"
#define BLOCK_DIM 8
//cudaError_t error = cudaSuccess;
//cudaDeviceProp deviceProp = cudaDeviceProp();
//CUDA variables
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int imagePitch;
cudaGraphicsResource_t cuda_pbo_resource;
cudaGraphicsResource_t cuda_tex_resource;
texture<uchar4, 2, cudaReadModeElementType> cuda_tex_ref;
cudaChannelFormatDesc cuda_tex_channel_desc;
KernelSetting ks;
unsigned char some_value = 0;
//OpenGL
unsigned int pbo_id;
unsigned int texture_id;
unsigned int viewport_width = 1024;
unsigned int viewport_height = 1024;
#pragma region CUDA Routines
// Replaces the first channel of every RGBA texel with `some_value` (TODO 9
// calls this channel red). Source texels are read through the globally bound
// 2D texture reference `cuda_tex_ref`; the modified pixels are written into
// the PBO backing store. Expected launch: 2D grid of BLOCK_DIM x BLOCK_DIM
// blocks covering a pbo_width x pbo_height image.
__global__ void apply_filter(const unsigned char some_value, const unsigned int pbo_width, const unsigned int pbo_height, unsigned char *pbo)
{
const auto col = threadIdx.x + blockIdx.x * blockDim.x;
const auto row = threadIdx.y + blockIdx.y * blockDim.y;
// Fix: guard the partial edge blocks produced by the ceil-div grid in
// cuda_worker; without this, out-of-range threads write past the PBO and
// read outside the texture when the image size is not a multiple of
// BLOCK_DIM. (Also puts the previously unused pbo_height to work.)
if (col >= pbo_width || row >= pbo_height)
return;
const auto offset = col + row * pbo_width;
uchar4 texel = tex2D(cuda_tex_ref, col, row);
texel.x = some_value; // overwrite first channel, per TODO 9
const auto uchar4_pbo = reinterpret_cast<uchar4*>(pbo);
uchar4_pbo[offset] = texel;
}
void cuda_worker()
{
cudaArray* array;
//T ODO 3: Map cudaTexResource
cudaGraphicsMapResources(1, &cuda_tex_resource, nullptr);
//T ODO 4: Get Mapped Array of cudaTexResource
cudaGraphicsSubResourceGetMappedArray(&array, cuda_tex_resource, 0, 0);
//T ODO 5: Get cudaTexChannelDesc from previously obtained array
cudaGetChannelDesc(&cuda_tex_channel_desc, array);
//T ODO 6: Binf cudaTexRef to array
cudaBindTextureToArray(&cuda_tex_ref, array, &cuda_tex_channel_desc);
checkError();
unsigned char *pbo_data;
size_t pboSize;
//T ODO 7: Map cudaPBOResource
cudaGraphicsMapResources(1, &cuda_pbo_resource, nullptr);
//T ODO 7: Map Mapped pointer to cudaPBOResource data
cudaGraphicsResourceGetMappedPointer(reinterpret_cast<void**>(&pbo_data), &pboSize, cuda_pbo_resource);
checkError();
//T ODO 8: Set KernelSetting variable ks (dimBlock, dimGrid, etc.) such that block will have BLOCK_DIM x BLOCK_DIM threads
//...
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
ks.dimGrid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1);
//Calling applyFileter kernel
some_value++;
if (some_value>255) some_value = 0;
apply_filter<<<ks.dimGrid, ks.dimBlock>>>(some_value, imageWidth, imageHeight, pbo_data);
//Following code release mapped resources, unbinds texture and ensures that PBO data will be coppied into OpenGL texture. Do not modify following code!
cudaUnbindTexture(&cuda_tex_ref);
cudaGraphicsUnmapResources(1, &cuda_pbo_resource, nullptr);
cudaGraphicsUnmapResources(1, &cuda_tex_resource, nullptr);
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo_id);
glBindTexture( GL_TEXTURE_2D, texture_id);
glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
}
void init_cud_atex()
{
cudaGLSetGLDevice(0);
checkError();
//T ODO 1: Register OpenGL texture to CUDA resource
cudaGraphicsGLRegisterImage(&cuda_tex_resource, texture_id, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsReadOnly);
//CUDA Texture settings
cuda_tex_ref.normalized = false; //Otherwise TRUE to access with normalized texture coordinates
cuda_tex_ref.filterMode = cudaFilterModePoint; //Otherwise texRef.filterMode = cudaFilterModeLinear; for Linear interpolation of texels
cuda_tex_ref.addressMode[0] = cudaAddressModeClamp; //No repeat texture pattern
cuda_tex_ref.addressMode[1] = cudaAddressModeClamp; //No repeat texture pattern
checkError();
//T ODO 2: Register PBO to CUDA resource
cudaGraphicsGLRegisterBuffer(&cuda_pbo_resource, pbo_id, cudaGraphicsRegisterFlagsWriteDiscard);
checkError();
}
void release_cuda()
{
cudaGraphicsUnregisterResource(cuda_pbo_resource);
cudaGraphicsUnregisterResource(cuda_tex_resource);
}
#pragma endregion
#pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!!
void load_texture(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = ImageManager::GenericLoader(image_file_name, 0);
imageWidth = FreeImage_GetWidth(tmp);
imageHeight = FreeImage_GetHeight(tmp);
imageBPP = FreeImage_GetBPP(tmp);
imagePitch = FreeImage_GetPitch(tmp);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1,&texture_id);
glBindTexture( GL_TEXTURE_2D, texture_id);
//WARNING: Just some of inner format are supported by CUDA!!!
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
FreeImage_Unload(tmp);
}
void prepare_pbo()
{
glGenBuffers(1, &pbo_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo_id); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, nullptr, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
}
void my_display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture_id);
//I know this is a very old OpenGL, but we want to practice CUDA :-)
//Now it will be a wasted time to learn you current features of OpenGL. Sorry for that however, you can visit my second seminar dealing with Computer Graphics (CG2).
glBegin(GL_QUADS);
glTexCoord2d(0,0); glVertex2d(0,0);
glTexCoord2d(1,0); glVertex2d(viewport_width, 0);
glTexCoord2d(1,1); glVertex2d(viewport_width, viewport_height);
glTexCoord2d(0,1); glVertex2d(0, viewport_height);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h)
{
viewport_width=w;
viewport_height=h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0,0,viewport_width,viewport_height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0,viewport_width, 0,viewport_height);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle()
{
cuda_worker();
glutPostRedisplay();
}
void init_gl(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewport_width,viewport_height);
glutInitWindowPosition(0,0);
glutCreateWindow(":-)");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0,0,viewport_width,viewport_height);
glFlush();
}
void release_open_gl()
{
if (texture_id > 0)
glDeleteTextures(1, &texture_id);
if (pbo_id > 0)
glDeleteBuffers(1, &pbo_id);
}
#pragma endregion
void release_resources()
{
release_cuda();
release_open_gl();
}
void exercise7(int argc, char *argv[])
{
//initializeCUDA(deviceProp);
init_gl(argc, argv);
load_texture("lena.png");
prepare_pbo();
init_cud_atex();
//start rendering mainloop
glutMainLoop();
atexit(release_resources);
}
|
1fbebbba76b9d91c1bf55fc828a88bc616293f5c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include <iostream>
#include <string>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <util.cuh>
using namespace std;
#define cutGetMaxGflopsDeviceId() gpuGetMaxGflopsDeviceId()
/*
* =======================
* ConvNet
* =======================
*/
/*
 * Builds the layer graph from a Python list of per-layer parameter dicts:
 * instantiates each layer via initLayer, wires backward ("inputs") links,
 * then forward links, runs each layer's postInit, and creates the
 * DataProvider. Any string thrown during construction aborts the process
 * with a diagnostic.
 */
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false), _deviceID(deviceID), _data(NULL) {
try {
int numLayers = PyList_GET_SIZE(layerParams);
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
string layerType = pyDictGetString(paramsDict, "type");
Layer* l = initLayer(layerType, paramsDict);
// Connect backward links in graph for this layer
intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
if (inputLayers != NULL) {
// (inner i intentionally shadows the outer loop index; harmless)
for (int i = 0; i < inputLayers->size(); i++) {
l->addPrev(&getLayer(inputLayers->at(i)));
}
}
delete inputLayers;
}
// Connect the forward links in the graph (inverse of the "inputs" edges)
for (int i = 0; i < _layers.size(); i++) {
vector<Layer*>& prev = _layers[i]->getPrev();
for (int j = 0; j < prev.size(); j++) {
prev[j]->addNext(_layers[i]);
}
}
// Execute post-initialization stuff, now that all links are in place
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->postInit();
}
_dp = new DataProvider(minibatchSize);
} catch (string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
/*
* Override this in derived classes
*/
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) {
if (layerType == "fc") {
_layers.push_back(new FCLayer(this, paramsDict));
} else if (layerType == "conv") {
_layers.push_back(new ConvLayer(this, paramsDict));
} else if (layerType == "local") {
_layers.push_back(new LocalUnsharedLayer(this, paramsDict));
} else if (layerType == "pool") {
_layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict));
} else if (layerType == "rnorm") {
_layers.push_back(new ResponseNormLayer(this, paramsDict));
} else if (layerType == "cmrnorm") {
_layers.push_back(new CrossMapResponseNormLayer(this, paramsDict));
} else if (layerType == "cnorm") {
_layers.push_back(new ContrastNormLayer(this, paramsDict));
} else if (layerType == "softmax") {
_layers.push_back(new SoftmaxLayer(this, paramsDict));
} else if (layerType == "eltsum") {
_layers.push_back(new EltwiseSumLayer(this, paramsDict));
} else if (layerType == "eltmax") {
_layers.push_back(new EltwiseMaxLayer(this, paramsDict));
} else if (layerType == "neuron") {
_layers.push_back(new NeuronLayer(this, paramsDict));
} else if (layerType == "nailbed") {
_layers.push_back(new NailbedLayer(this, paramsDict));
} else if (layerType == "blur") {
_layers.push_back(new GaussianBlurLayer(this, paramsDict));
} else if (layerType == "resize") {
_layers.push_back(new ResizeLayer(this, paramsDict));
} else if (layerType == "rgb2yuv") {
_layers.push_back(new RGBToYUVLayer(this, paramsDict));
} else if (layerType == "rgb2lab") {
_layers.push_back(new RGBToLABLayer(this, paramsDict));
} else if (layerType == "data") {
DataLayer *d = new DataLayer(this, paramsDict);
_layers.push_back(d);
_dataLayers.push_back(d);
} else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict);
_layers.push_back(c);
_costs.push_back(c);
} else {
throw string("Unknown layer type ") + layerType;
}
return _layers.back();
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
/*
 * This executes in a new CPU thread so it's OK to initialize CUDA stuff here:
 * selects the device (explicit _deviceID, or the fastest GPU when negative),
 * prefers shared memory over L1 cache, initializes BLAS, seeds the NVMatrix
 * RNG (time-based, overridable via the CONVNET_RANDOM_SEED environment
 * variable for reproducibility), and pushes all layer weights to the GPU.
 */
void ConvNet::initCuda() {
int randomSeed = time(0);
char* randomSeedEnv;
hipSetDevice(_deviceID < 0 ? gpuGetMaxGflopsDeviceId() : _deviceID);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipblasInit();
randomSeedEnv = getenv("CONVNET_RANDOM_SEED");
if (randomSeedEnv != NULL) {
randomSeed = atoi(randomSeedEnv);
}
NVMatrix::initRandom(randomSeed);
copyToGPU();
}
/*
 * Worker-thread entry point: initializes per-thread CUDA state, then
 * dequeues and executes Worker jobs forever.
 * NOTE(review): the loop has no termination condition - the thread only
 * ends when the process exits, so the trailing return is unreachable.
 */
void* ConvNet::run() {
initCuda();
while (true) {
Worker* worker = _workerQueue.dequeue();
worker->run();
delete worker;
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::operator[](int idx) {
return *_layers[idx];
}
Layer& ConvNet::getLayer(int idx) {
return *_layers[idx];
}
void ConvNet::copyToCPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToCPU();
}
}
void ConvNet::copyToGPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToGPU();
}
}
void ConvNet::updateWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->updateWeights();
}
}
void ConvNet::reset() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->reset();
}
}
int ConvNet::getNumLayers() {
return _layers.size();
}
void ConvNet::bprop(PASS_TYPE passType) {
for (int i = 0; i < _costs.size(); i++) {
_costs[i]->bprop(passType);
}
reset();
}
/*
 * Forward pass over the currently attached minibatch (_data must already be
 * set by one of the other fprop overloads): clears cached layer state, then
 * feeds the minibatch into every data layer, which propagates onward.
 */
void ConvNet::fprop(PASS_TYPE passType) {
assert(_data != NULL);
reset();
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(_data->getData(), passType);
}
}
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
if (&data != _data) {
delete _data;
}
_data = &data;
fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
delete _data;
_data = &_dp->getMinibatch(miniIdx);
fprop(passType);
}
Cost& ConvNet::getCost() {
return *new Cost(_data->getNumCases(), _costs);
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost& newCost = getCost();
cost += newCost;
delete &newCost;
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
/*
* Gradient checking stuff
*/
/*
 * Gradient checking driver: runs one fprop/bprop on minibatch 0 to record
 * the base cost (_baseErr) and analytic gradients, then asks each layer to
 * compare its analytic gradients against finite-difference estimates, and
 * prints a pass/fail summary.
 */
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
fprop(0, PASS_GC);
_baseErr = getCostValue();
bprop(PASS_GC);
for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
(*it)->checkGradients();
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
/*
* name: weight matrix name
* eps: finite difference step
*/
/*
 * Finite-difference check of one weight matrix.
 *   name:    weight matrix name (for reporting only)
 *   eps:     forward-difference step added to each weight in turn
 *   weights: the weights to check (read/perturbed/restored via the GPU copy)
 * For every entry, perturbs the weight by +eps, re-runs fprop, and estimates
 * the gradient as (err - _baseErr) / (numCases * eps). The analytic gradient
 * is scaled by -1/numCases to match. Returns true on FAILURE, i.e. when the
 * relative error ||numeric - analytic|| / ||analytic|| reaches
 * GC_REL_ERR_THRESH; also updates _numTests/_numFailures.
 */
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
// Perturb a single weight, push it to the GPU, then restore the CPU copy
// so the next upload (after fprop) reverts the perturbation.
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
weights.getW().copyFromHost(weightsCPU);
weightsCPU(i,j) = v;
fprop(PASS_GC);
double err = getCostValue();
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
weights.getW().copyFromHost(weightsCPU);
}
}
Matrix gradCPU;
weights.getGrad().copyToHost(gradCPU, true);
// Stored gradient is an unnormalized sum over cases; bring it onto the same
// scale (and sign convention) as the numeric estimate.
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(6,4);
cout << "Numeric:" << endl;
numGrad.print(6,4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
| 1fbebbba76b9d91c1bf55fc828a88bc616293f5c.cu | /*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <string>
#include <vector>

#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <util.cuh>

using namespace std;

#define cutGetMaxGflopsDeviceId() gpuGetMaxGflopsDeviceId()
/*
* =======================
* ConvNet
* =======================
*/
/*
 * Builds the layer graph from the Python-side layer parameter list.
 * Layers are created in list order; backward ("prev") links are wired from
 * each layer's "inputs" indices, forward ("next") links are then derived
 * from them, and finally each layer runs its post-initialization hook.
 */
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false),  _deviceID(deviceID), _data(NULL) {
    try {
        // Pass 1: instantiate each layer and connect its backward links.
        int numLayers = PyList_GET_SIZE(layerParams);
        for (int i = 0; i < numLayers; i++) {
            PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
            string layerType = pyDictGetString(paramsDict, "type");

            Layer* l = initLayer(layerType, paramsDict);
            intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
            if (inputLayers != NULL) {
                // Fix: index renamed from 'i' to 'j' -- the original
                // shadowed the outer layer index.
                for (int j = 0; j < inputLayers->size(); j++) {
                    l->addPrev(&getLayer(inputLayers->at(j)));
                }
            }
            delete inputLayers; // delete NULL is a no-op
        }

        // Pass 2: derive the forward links from the backward links.
        for (int i = 0; i < _layers.size(); i++) {
            vector<Layer*>& prev = _layers[i]->getPrev();
            for (int j = 0; j < prev.size(); j++) {
                prev[j]->addNext(_layers[i]);
            }
        }

        // Pass 3: per-layer post-initialization.
        for (int i = 0; i < _layers.size(); i++) {
            _layers[i]->postInit();
        }

        _dp = new DataProvider(minibatchSize);
    } catch (string& s) {
        cout << "Error creating ConvNet: " << s << endl;
        exit(1);
    }
}
/*
* Override this in derived classes
*/
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) {
    // Dispatch on the "type" string from the Python layer definition and
    // append the new layer to _layers.
    if (layerType == "fc") {
        _layers.push_back(new FCLayer(this, paramsDict));
    } else if (layerType == "conv") {
        _layers.push_back(new ConvLayer(this, paramsDict));
    } else if (layerType == "local") {
        _layers.push_back(new LocalUnsharedLayer(this, paramsDict));
    } else if (layerType == "pool") {
        _layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict));
    } else if (layerType == "rnorm") {
        _layers.push_back(new ResponseNormLayer(this, paramsDict));
    } else if (layerType == "cmrnorm") {
        _layers.push_back(new CrossMapResponseNormLayer(this, paramsDict));
    } else if (layerType == "cnorm") {
        _layers.push_back(new ContrastNormLayer(this, paramsDict));
    } else if (layerType == "softmax") {
        _layers.push_back(new SoftmaxLayer(this, paramsDict));
    } else if (layerType == "eltsum") {
        _layers.push_back(new EltwiseSumLayer(this, paramsDict));
    } else if (layerType == "eltmax") {
        _layers.push_back(new EltwiseMaxLayer(this, paramsDict));
    } else if (layerType == "neuron") {
        _layers.push_back(new NeuronLayer(this, paramsDict));
    } else if (layerType == "nailbed") {
        _layers.push_back(new NailbedLayer(this, paramsDict));
    } else if (layerType == "blur") {
        _layers.push_back(new GaussianBlurLayer(this, paramsDict));
    } else if (layerType == "resize") {
        _layers.push_back(new ResizeLayer(this, paramsDict));
    } else if (layerType == "rgb2yuv") {
        _layers.push_back(new RGBToYUVLayer(this, paramsDict));
    } else if (layerType == "rgb2lab") {
        _layers.push_back(new RGBToLABLayer(this, paramsDict));
    } else if (layerType == "data") {
        // Data layers are also tracked in _dataLayers so fprop() can feed
        // minibatch data directly to them.
        DataLayer *d = new DataLayer(this, paramsDict);
        _layers.push_back(d);
        _dataLayers.push_back(d);
    } else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
        // Any "cost.*" type is a cost layer; tracked in _costs so bprop()
        // and getCost() can locate the objectives.
        CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict);
        _layers.push_back(c);
        _costs.push_back(c);
    } else {
        throw string("Unknown layer type ") + layerType;
    }
    // The newly created layer is always the last one appended.
    return _layers.back();
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
/*
 * Per-thread CUDA/cuBLAS initialization; executed on the ConvNet worker
 * thread (see run()). Selects the requested device (or the fastest one
 * when _deviceID < 0), prefers shared memory over L1, initializes cuBLAS,
 * seeds the device RNG, and pushes the model weights to the GPU.
 * Aborts the process if the device cannot be selected or configured --
 * previously these calls were unchecked and failures surfaced only as
 * confusing errors much later.
 */
void ConvNet::initCuda() {
    cudaError_t err = cudaSetDevice(_deviceID < 0 ? gpuGetMaxGflopsDeviceId() : _deviceID);
    if (err != cudaSuccess) {
        fprintf(stderr, "initCuda: cudaSetDevice failed: %s\n", cudaGetErrorString(err));
        exit(1);
    }
    err = cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
    if (err != cudaSuccess) {
        fprintf(stderr, "initCuda: cudaDeviceSetCacheConfig failed: %s\n", cudaGetErrorString(err));
        exit(1);
    }
    cublasInit();
    // Default seed is the wall clock; CONVNET_RANDOM_SEED overrides it
    // for reproducible runs.
    int randomSeed = time(0);
    char* randomSeedEnv = getenv("CONVNET_RANDOM_SEED");
    if (randomSeedEnv != NULL) {
        randomSeed = atoi(randomSeedEnv);
    }
    NVMatrix::initRandom(randomSeed);
    copyToGPU();
}
void* ConvNet::run() {
    // Bind the CUDA context to this thread before touching the GPU.
    initCuda();
    // Worker-dispatch loop: block on the queue, execute, free. Never
    // returns under normal operation.
    for (;;) {
        Worker* worker = _workerQueue.dequeue();
        worker->run();
        delete worker;
    }
    return NULL;
}
// Queue over which work items are handed to this network's thread.
Queue<Worker*>& ConvNet::getWorkerQueue() {
    return _workerQueue;
}
// Queue over which finished work results are returned to the caller.
Queue<WorkResult*>& ConvNet::getResultQueue() {
    return _resultQueue;
}
// Accessor for the minibatch data provider created in the constructor.
DataProvider& ConvNet::getDataProvider() {
    return *_dp;
}
// Unchecked positional access to the layer list (same as getLayer()).
Layer& ConvNet::operator[](int idx) {
    Layer* l = _layers[idx];
    return *l;
}
// Unchecked positional access to the layer list.
Layer& ConvNet::getLayer(int idx) {
    Layer* l = _layers[idx];
    return *l;
}
void ConvNet::copyToCPU() {
    // Pull every layer's parameters back to host memory.
    for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
        (*it)->copyToCPU();
    }
}
void ConvNet::copyToGPU() {
    // Push every layer's parameters to device memory.
    for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
        (*it)->copyToGPU();
    }
}
void ConvNet::updateWeights() {
    // Apply each layer's accumulated weight update.
    for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
        (*it)->updateWeights();
    }
}
void ConvNet::reset() {
    // Clear each layer's per-pass state before a new fprop/bprop.
    for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
        (*it)->reset();
    }
}
// Number of layers in the network graph.
int ConvNet::getNumLayers() {
    return (int)_layers.size();
}
void ConvNet::bprop(PASS_TYPE passType) {
    // Backpropagation starts from every cost (objective) layer; the rest
    // of the graph is reached through their backward links.
    for (int c = 0; c < _costs.size(); c++) {
        _costs[c]->bprop(passType);
    }
    // Clear per-pass layer state once gradients have been computed.
    reset();
}
void ConvNet::fprop(PASS_TYPE passType) {
    // A minibatch must already be attached (see the other fprop overloads).
    assert(_data != NULL);
    reset();
    // Forward propagation starts from the data layers; the rest of the
    // graph is reached through their forward links.
    for (int d = 0; d < _dataLayers.size(); d++) {
        _dataLayers[d]->fprop(_data->getData(), passType);
    }
}
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
    // Take ownership of the new minibatch, releasing the previous one --
    // unless the caller passed the batch we already hold.
    if (_data != &data) {
        delete _data;
        _data = &data;
    }
    fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
    // Swap in minibatch miniIdx from the data provider, releasing the
    // previously held batch.
    GPUData& batch = _dp->getMinibatch(miniIdx);
    delete _data;
    _data = &batch;
    fprop(passType);
}
// Builds a Cost snapshot for the current minibatch. The returned object
// is heap-allocated; the caller is responsible for deleting it.
Cost& ConvNet::getCost() {
    Cost* c = new Cost(_data->getNumCases(), _costs);
    return *c;
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
    // Fold the cost of the current minibatch into the accumulator supplied
    // by the caller; the temporary returned by getCost() is freed here.
    Cost& batch = getCost();
    cost += batch;
    delete &batch;
    return cost;
}
double ConvNet::getCostValue() {
    // Reduce the current cost object to its scalar value, releasing the
    // heap-allocated temporary produced by getCost().
    Cost* c = &getCost();
    double v = c->getValue();
    delete c;
    return v;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
    // Baseline pass: one fprop/bprop in gradient-check mode on minibatch 0,
    // recording the base error used by the finite-difference estimates.
    _numTests = 0;
    _numFailures = 0;
    fprop(0, PASS_GC);
    _baseErr = getCostValue();
    bprop(PASS_GC);
    // Each layer verifies its own weight matrices via checkGradient().
    for (int i = 0; i < _layers.size(); i++) {
        _layers[i]->checkGradients();
    }
    cout << "------------------------" << endl;
    if (_numFailures > 0) {
        cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
    } else {
        cout << "ALL " << _numTests << " TESTS PASSED" << endl;
    }
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
    // Numerical gradient estimate, one entry per weight.
    Matrix numGrad(weights.getNumRows(), weights.getNumCols());
    Matrix diff(numGrad);
    numGrad.apply(Matrix::ZERO);

    // Host-side copy of the weight matrix; perturbed one entry at a time.
    Matrix weightsCPU;
    weights.getW().copyToHost(weightsCPU, true);

    for(int i = 0; i < weights.getNumRows(); i++) {
        for (int j = 0; j < weights.getNumCols(); j++) {
            // Perturb w(i,j) by +eps on the device, re-run the forward pass,
            // and measure the change in the cost.
            float v = weightsCPU(i,j);
            weightsCPU(i,j) += eps;
            weights.getW().copyFromHost(weightsCPU);
            // Host copy is restored immediately; the device copy is restored
            // after the fprop below.
            weightsCPU(i,j) = v;
            fprop(PASS_GC);
            double err = getCostValue();
            // One-sided difference: d(err)/d(w) ~ (err(w+eps) - _baseErr)/eps,
            // divided by the number of cases so it is comparable to the
            // per-case analytic gradient below.
            numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
            if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) {
                cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
                cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
                cout << "Exiting." << endl;
                exit(1);
            }
            // Push the unperturbed weights back to the device.
            weights.getW().copyFromHost(weightsCPU);
        }
    }

    // Analytic gradient, scaled to match the numerical convention above
    // (per-case, sign-flipped).
    Matrix gradCPU;
    weights.getGrad().copyToHost(gradCPU, true);
    gradCPU.scale(-1.0 / _data->getNumCases());
    float analNorm = gradCPU.norm();
    float numNorm = numGrad.norm();
    numGrad.subtract(gradCPU, diff);
    // NOTE(review): relErr is inf/nan when analNorm == 0 (e.g. all-zero
    // gradients) -- presumably acceptable for this diagnostic; confirm.
    float relErr = diff.norm() / analNorm;
    bool fail = relErr >= GC_REL_ERR_THRESH;
    if (fail || !GC_SUPPRESS_PASSES) {
        cout << "========================" << endl;
        printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
        cout << "========================" << endl;
        cout << "Analytic:" << endl;
        gradCPU.print(6,4);
        cout << "Numeric:" << endl;
        numGrad.print(6,4);
        printf("Analytic norm: %e\n", analNorm);
        printf("Numeric norm: %e\n", numNorm);
        printf("Relative error: %e\n", relErr);
    }
    _numTests++;
    _numFailures += fail;
    return fail;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.