hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
50731b333c9a4989596fd1ea024e2c1137e068eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <builtin_types.h>
extern "C" {
// Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, n).
// Expects a 1-D grid of 1-D blocks providing at least n threads.
__global__ void vectorAdditionCUDA(const float* a, const float* b, float* c, int n)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return; // guard the grid tail when n is not a multiple of the block size
    c[idx] = a[idx] + b[idx];
}
// Host wrapper: computes c = a + b on the device.
// a, b: host input arrays of n floats; c: host output array of n floats.
// Fixes: use size_t for the byte count (the original `unsigned int nBytes`
// overflows for n >= 2^30 floats) and return early for non-positive n
// (avoids a 0-block kernel launch, which is a launch error).
void vectorAddition(const float* a, const float* b, float* c, int n)
{
    if (n <= 0)
        return;
    float *a_cuda, *b_cuda, *c_cuda;
    size_t nBytes = sizeof(float) * (size_t)n;
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock; // ceil-div
    // allocate and copy memory into the device
    hipMalloc((void **)& a_cuda, nBytes);
    hipMalloc((void **)& b_cuda, nBytes);
    hipMalloc((void **)& c_cuda, nBytes);
    hipMemcpy(a_cuda, a, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(b_cuda, b, nBytes, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( vectorAdditionCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a_cuda, b_cuda, c_cuda, n);
    // copy the result back; the blocking D2H memcpy also synchronizes with the kernel
    hipMemcpy(c, c_cuda, nBytes, hipMemcpyDeviceToHost);
    hipFree(a_cuda);
    hipFree(b_cuda);
    hipFree(c_cuda);
}
}
| 50731b333c9a4989596fd1ea024e2c1137e068eb.cu | #include <cuda.h>
#include <builtin_types.h>
extern "C" {
// Element-wise vector addition kernel: c[ii] = a[ii] + b[ii] for ii in [0, n).
// Launched with a 1-D grid of 1-D blocks; the bounds check guards the grid
// tail when n is not a multiple of the block size.
__global__ void vectorAdditionCUDA(const float* a, const float* b, float* c, int n)
{
int ii = blockDim.x * blockIdx.x + threadIdx.x;
if (ii < n)
c[ii] = a[ii] + b[ii];
}
// Host wrapper: computes c = a + b on the device.
// a, b: host input arrays of n floats; c: host output array of n floats.
// Fixes: use size_t for the byte count (the original `unsigned int nBytes`
// overflows for n >= 2^30 floats) and return early for non-positive n
// (avoids a 0-block kernel launch, which is a launch error).
void vectorAddition(const float* a, const float* b, float* c, int n)
{
    if (n <= 0)
        return;
    float *a_cuda, *b_cuda, *c_cuda;
    size_t nBytes = sizeof(float) * (size_t)n;
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock; // ceil-div
    // allocate and copy memory into the device
    cudaMalloc((void **)& a_cuda, nBytes);
    cudaMalloc((void **)& b_cuda, nBytes);
    cudaMalloc((void **)& c_cuda, nBytes);
    cudaMemcpy(a_cuda, a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_cuda, b, nBytes, cudaMemcpyHostToDevice);
    vectorAdditionCUDA<<<blocksPerGrid, threadsPerBlock>>>(a_cuda, b_cuda, c_cuda, n);
    // copy the result back; the blocking D2H memcpy also synchronizes with the kernel
    cudaMemcpy(c, c_cuda, nBytes, cudaMemcpyDeviceToHost);
    cudaFree(a_cuda);
    cudaFree(b_cuda);
    cudaFree(c_cuda);
}
}
|
836657df7e9de813090a63ada8629c5ff2ab6bfd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************
**
** cvr_spmv_gpu.cu: GPU version of CVR spmv
**
** run:
** $ make
** $ ./cvr_spmv_gpu data.txt [#blocks #threads] [#n_iterations]
** data.txt: matrix market format input file
** default parameters: # of blocks and threads per block: autoselect, 1000 iteration
** default compute capability 5.2 (Maxwell)
**
** Default Matrix Market Format store base: 1.
** If your file is 0-based, please change "#define COO_BASE 1" into 0.
** Default float type: double-precision(double).
** If you need single-precision(float) type, please comment "#define DOUBLE".
**
****************************************************/
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<omp.h>
#include<sys/time.h>
#include<cuda_runtime.h>
#include<math.h>
// return codes and tunables
#define OK 0
#define ERROR -1
#define CMP_EQUAL 0
#define FIELD_LENGTH 128
// input files are 1-based Matrix Market by default; set to 0 for 0-based files
#define COO_BASE 1
#define OMP_THREADS 12
#define THREADS_PER_WARP 32
// comment this out to build the single-precision (float) version
#define DOUBLE
#ifdef DOUBLE
#define floatType double
#else
#define floatType float
#endif
// abort with file/line context when a HIP runtime call fails
#define CHECK(call){\
const hipError_t error = call;\
if(error != hipSuccess){\
printf("Error: %s:%d\n", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\
exit(ERROR);\
}\
}
// abort when a host allocation returned NULL
#define HOST_CHECK(ptr){\
if(ptr == NULL){\
printf("Error: %s:%d\n", __FILE__, __LINE__);\
printf("Memory overflow!\n");\
exit(ERROR);\
}\
}
// one matrix entry: row x, column y, value val
typedef struct triple{
int x;
int y;
floatType val;
}triple_t;
typedef struct coo{
triple_t *triple;
int ncol;
int nrow;
int nnz;
}coo_t; // coordinate format
typedef struct csr{
int ncol;
int nrow;
int nnz;
floatType *val;
int *col_idx;
int *row_ptr;
}csr_t; // compressed sparse row format
// one write-back record emitted by preprocess_kernel, consumed by spmv_kernel
typedef struct record{
int pos;
unsigned mask;
int wb[THREADS_PER_WARP];
}record_t;
typedef struct cvr{
int ncol; //number of columns
int nrow; //number of rows
int nnz; //number of non-zeros
floatType *val; //values stored in cvr-special order
int *colidx; //column numbers corresponding to values
//values in cvr are re-ordered for performance, following elements are used to record how to write to vector y(in spmv: y=Wx)
record_t *rec; //records of write-back information
int *rec_threshold; //i don't know how to describe this, if you've read the paper, this is lr_rec in the paper
int *threshold_detail; //this is new, because threshold is a bit more complicated in GPU version
int *tail; //the last line number(e.g. write-back position, think about it) of each lane(or thread as you like)
int *warp_nnz;
}cvr_t; // compressed vectorization-oriented sparse row format
// auxiliary function used in qsort
// qsort comparator: orders triples by row (x) first, then by column (y).
inline int func_cmp(const void *a, const void *b){
    const triple_t *lhs = (const triple_t *)a;
    const triple_t *rhs = (const triple_t *)b;
    int by_row = lhs->x - rhs->x;
    return (by_row != 0) ? by_row : (lhs->y - rhs->y);
}
// auxiliary function to get row number, binary search
// Binary search over csr.row_ptr to find the row that contains non-zero
// number valID (i.e. the row r with row_ptr[r] <= valID < row_ptr[r+1]).
// Returns ERROR (and prints) only if the invariant is broken.
__forceinline__ __device__ int func_get_row(int const valID, csr_t const csr){
int start = 0, end = csr.nrow;
int mid = (start + end) / 2;
while(start <= end){
if(csr.row_ptr[mid] > valID){
end = mid - 1;
}else if(mid < csr.nrow && csr.row_ptr[mid+1] <= valID){
start = mid + 1;
}else{
// empty rows share the same row_ptr offset; advance to the last
// duplicate so the returned row actually owns valID
while(mid < csr.nrow && csr.row_ptr[mid] == csr.row_ptr[mid+1]){
mid++;
}
return mid;
}
mid = (start + end) / 2;
}
printf("*** ERROR: a bug occured in func_get_row ***\n");
return ERROR;
}
// auxiliary function to implement atomic add, GPUs whose compute capability is lower than 6.0 do not support atomic add for double variables
// Atomic *address += val for floatType, returning the value observed before
// the add (same contract as atomicAdd). Native double atomicAdd needs SM60+;
// older devices emulate it with an atomicCAS loop on the 64-bit bit pattern.
// Fix: atomicAdd() takes a non-const pointer, so `address` must not be
// declared `const` — the SM60+ and single-precision branches would not
// compile, and the CAS branch silently cast the const away.
__forceinline__ __device__ floatType func_floatTypeAtomicAdd(floatType *address, floatType const val){
#if __CUDA_ARCH__ >= 600
return atomicAdd(address, val);
#else
#ifdef DOUBLE
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
// CAS loop: recompute the sum against the latest observed value until no
// other thread raced in between our read and our swap
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
}while(assumed != old);
return __longlong_as_double(old);
#else
return atomicAdd(address, val);
#endif
#endif
}
// auxiliary function for reducing AND
// Warp-wide logical AND of `count` across lanes; every lane receives the
// reduced value (broadcast from lane 0 at the end).
// The 5 shuffle steps hard-code a 32-lane warp (THREADS_PER_WARP).
// NOTE(review): legacy mask-less __shfl/__shfl_down — fine for HIP, but on
// AMD wavefront-64 hardware this reduces only half a wavefront; confirm the
// intended target is 32-lane execution.
__forceinline__ __device__ bool func_reduceAnd(bool count){
bool count_res = count;
int step = THREADS_PER_WARP / 2;
#pragma unroll
for(int i = 0; i < 5; i++){
count_res &= __shfl_down(count_res, step);
step /= 2;
}
// broadcast lane 0's result to the whole warp
count_res = __shfl(count_res, 0);
return count_res;
}
// auxiliary function for reducing OR
// Builds a warp-wide bitmap: bit k is set iff lane k passed src == true.
// All lanes receive the same bitmap (broadcast from lane 0).
__forceinline__ __device__ unsigned func_reduceBitOr(bool src){
unsigned bitmap = src << (threadIdx.x % THREADS_PER_WARP);
int step = THREADS_PER_WARP / 2;
#pragma unroll
for(int i = 0; i < 5; i++){
bitmap |= __shfl_down(bitmap, step);
step /= 2;
}
bitmap = __shfl(bitmap, 0);
return bitmap;
}
// auxiliary function for reducing average
// Warp-wide sum of `num`, divided by the warp size rounded up; every lane
// receives the same average (broadcast from lane 0).
__forceinline__ __device__ int func_reduceAvg(int num){
int avg_res = num;
int step = THREADS_PER_WARP / 2;
#pragma unroll
for(int i = 0; i < 5; i++){
avg_res += __shfl_down(avg_res, step);
step /= 2;
}
// ceil-divide the warp total by the warp size
avg_res = (__shfl(avg_res, 0) + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
return avg_res;
}
// auxiliary function for reducing selection
// Returns the lowest lane id whose `sel` flag is set, or -1 if no lane set
// it. Every lane receives the same answer because func_reduceBitOr
// broadcasts the bitmap to the whole warp.
__forceinline__ __device__ int func_reduceSel(bool sel){
    unsigned bitmap = func_reduceBitOr(sel);
#pragma unroll
    for(int lane = 0; lane < THREADS_PER_WARP; lane++, bitmap >>= 1){
        if(bitmap & 1u){
            return lane;
        }
    }
    return -1;
}
// auxiliary function to compare result
// Compares a computed value against the reference with absolute tolerance
// 1e-5. Returns 1 when they differ, 0 (CMP_EQUAL) when they match.
// Fix: the original called abs(), which can resolve to the *integer*
// overload from <stdlib.h> depending on headers/toolchain, truncating the
// difference toward zero (e.g. 0.5 vs 1.4 compared equal). fabs() is the
// unambiguous floating-point absolute value.
inline int func_compare(floatType y, floatType y_verify){
    if(fabs(y - y_verify) > 0.00001){
        return 1;
    }else{
        return 0;
    }
}
// auxiliary function to initialize vector x(in spmv y=Wx)
// Fill vector x with test data. The random initializers are left commented
// out; every element is currently set to 1 so results are easy to verify.
inline void func_initialData(floatType *ip, int size){
time_t t;
// seed the generator (only relevant if the rand()-based line is re-enabled)
srand((unsigned)time(&t));
for(int i = 0; i < size; i++) {
//ip[i] = (floatType)(rand() & 0xff) / 10.0f;
//ip[i] = i % 10;
ip[i] = 1;
}
}
// COO_BASE-based Matrix Market format -> CSR format
// COO_BASE-based Matrix Market format -> CSR format
int read_matrix(csr_t *csr, char *filename);
// CSR format -> CVR format
int preprocess();
// CVR format SpMV, y = y + M * x
int spmv(floatType *d_y, floatType *d_x);
__global__ void preprocess_kernel();
__global__ void spmv_kernel(floatType * const __restrict__ y, floatType * const __restrict__ x);
// In this implementation, only one dimension is used for intuition
// defaults are placeholders; main() autoselects from the matrix size unless
// they are given on the command line
int grid_dim = 32;
int block_dim = 64;
int n_iterations = 1000;
// device-constant copies of the CSR/CVR descriptors (members hold device
// pointers); filled from main() with hipMemcpyToSymbol
__constant__ csr_t const_csr;
__constant__ cvr_t const_cvr;
// Driver: read matrix -> upload CSR -> CVR preprocess on GPU -> timed SpMV
// iterations -> verify against an OpenMP CSR SpMV on the host.
int main(int argc, char **argv){
/**** runtime configuration ****/
if(argc < 2){
printf("ERROR: *** wrong parameter format ***\n");
return ERROR;
}
char *filename = argv[1];
/**** \runtime configuration ****/
/**** prepare host_csr ****/
//allocate memory
csr_t *h_csr = (csr_t *)malloc(sizeof(csr_t));
HOST_CHECK(h_csr);
//read matrix to initialize
if(read_matrix(h_csr, filename)){
printf("ERROR occured in function read_matrix()\n");
return ERROR;
}
printf("Matrix: (%d, %d), %d non-zeros.\n", h_csr->nrow, h_csr->ncol, h_csr->nnz);
/**** \prepare host_csr ****/
// autoselect launch config from matrix size: empirical tuning formulas
// (presumably fitted offline for the default compute capability 5.2 noted
// in the header — TODO confirm); block_dim is clamped to [64,1024] and made
// a multiple of 32, overridden below if given on the command line
int total_threads = floor(0.0591 * h_csr->nnz + 116038);
int floor1 = floor(24.094 * pow(h_csr->nrow, 0.2423));
block_dim = min(1024, max(64, floor1 - floor1 % 32));
grid_dim = min(1024, max(32, total_threads / block_dim + total_threads / block_dim % 2));
if(argc == 3){
n_iterations = atoi(argv[2]);
}else if(argc == 4){
grid_dim = atoi(argv[2]);
block_dim = atoi(argv[3]);
}else if(argc == 5){
grid_dim = atoi(argv[2]);
block_dim = atoi(argv[3]);
n_iterations = atoi(argv[4]);
}
printf("Matrix file:%s. Execution config:<<<%d,%d>>>. Iterations:%d.\n", filename, grid_dim, block_dim, n_iterations);
/**** prepare device_csr ****/
csr_t temp_csr;
//allocate device global memory
temp_csr.ncol = h_csr->ncol;
temp_csr.nrow = h_csr->nrow;
temp_csr.nnz = h_csr->nnz;
CHECK(hipMalloc(&temp_csr.val, h_csr->nnz * sizeof(floatType)));
CHECK(hipMalloc(&temp_csr.col_idx, h_csr->nnz * sizeof(int)));
CHECK(hipMalloc(&temp_csr.row_ptr, (h_csr->nrow + 1) * sizeof(int)));
//initialize, device addresses like d_csr->val can't be accessed directly
CHECK(hipMemcpy(temp_csr.val, h_csr->val, h_csr->nnz * sizeof(floatType), hipMemcpyHostToDevice));
CHECK(hipMemcpy(temp_csr.col_idx, h_csr->col_idx, h_csr->nnz * sizeof(int), hipMemcpyHostToDevice));
CHECK(hipMemcpy(temp_csr.row_ptr, h_csr->row_ptr, (h_csr->nrow + 1) * sizeof(int), hipMemcpyHostToDevice));
CHECK(hipMemcpyToSymbol(const_csr, &temp_csr, sizeof(csr_t)));
/**** \prepare device_csr ****/
/**** prepare host_x, device_x, host_y, device_y and verify_y ****/
//allocate memory
floatType *h_x, *h_y, *y_verify, *d_x, *d_y;
h_x = (floatType *)malloc(h_csr->ncol * sizeof(floatType));
h_y = (floatType *)malloc(h_csr->nrow * sizeof(floatType));
y_verify = (floatType *)malloc(h_csr->nrow * sizeof(floatType));
HOST_CHECK(h_x);
HOST_CHECK(h_y);
HOST_CHECK(y_verify);
CHECK(hipMalloc(&d_x, h_csr->ncol * sizeof(floatType)));
CHECK(hipMalloc(&d_y, h_csr->nrow * sizeof(floatType)));
//initialize
func_initialData(h_x, h_csr->ncol);
memset(h_y, 0, h_csr->nrow * sizeof(floatType));
memset(y_verify, 0, h_csr->nrow * sizeof(floatType));
CHECK(hipMemcpy(d_x, h_x, h_csr->ncol * sizeof(floatType), hipMemcpyHostToDevice));
CHECK(hipMemset(d_y, 0, h_csr->nrow * sizeof(floatType)));
/**** \prepare host_x, device_x, host_y, device_y and verify_y ****/
cvr_t temp_cvr;
//cvr structure is dependent on matrix and runtime configuration
int n_threads = grid_dim * block_dim;
int n_warps = n_threads / THREADS_PER_WARP;
// average number of non-zeros per warp
int n_warp_nnz = h_csr->nnz / n_warps;
// upperbound of needed loop iterations to finish preprocess/multiplication
int ub_steps = (n_warp_nnz + 1 + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
int ub_warp_vals = ub_steps * THREADS_PER_WARP;
int ub_warp_recs = ub_steps + 1;
//allocate device global memory
temp_cvr.ncol = h_csr->ncol;
temp_cvr.nrow = h_csr->nrow;
temp_cvr.nnz = h_csr->nnz;
CHECK(hipMalloc(&temp_cvr.val, n_warps * ub_warp_vals * sizeof(floatType)));
CHECK(hipMalloc(&temp_cvr.colidx, n_warps * ub_warp_vals * sizeof(int)));
CHECK(hipMalloc(&temp_cvr.rec, n_warps * ub_warp_recs * sizeof(record_t)));
CHECK(hipMalloc(&temp_cvr.rec_threshold, n_warps * sizeof(int)));
CHECK(hipMalloc(&temp_cvr.threshold_detail, n_warps * sizeof(int)));
CHECK(hipMalloc(&temp_cvr.tail, n_threads * sizeof(int)));
CHECK(hipMalloc(&temp_cvr.warp_nnz, n_warps * sizeof(int)));
//initialize
CHECK(hipMemset(temp_cvr.tail, -1, n_threads * sizeof(int)));
CHECK(hipMemcpyToSymbol(const_cvr, &temp_cvr, sizeof(cvr_t)));
// warming up
if(preprocess()){
printf("ERROR occured while warming up\n");
return ERROR;
}
/**** preprocess time ****/
struct timeval tv1, tv2;
double tv_diff1, tv_diff2;
gettimeofday(&tv1, NULL);
// PREPROCESS KERNEL
if(preprocess()){
printf("ERROR occured in function preprocess()\n");
return ERROR;
}
gettimeofday(&tv2, NULL);
tv_diff1 = (tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec;
printf("preprocess time: %lfms\n", tv_diff1/1000.0);
/**** \preprocess time ****/
/**** spmv time ****/
// spmv() ends with hipDeviceSynchronize, so this wall-clock span covers all
// n_iterations kernel launches
gettimeofday(&tv1, NULL);
// SPMV KERNEL, deciding which branch to take here for performance considering
if(spmv(d_y, d_x)){
printf("ERROR occured in function spmv()\n");
return ERROR;
}
gettimeofday(&tv2, NULL);
tv_diff2 = (tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec;
printf("spmv time: %lfms\n", tv_diff2/1000.0/n_iterations);
/**** \spmv time ****/
/**** copy back ****/
CHECK(hipMemcpy(h_y, d_y, h_csr->nrow * sizeof(floatType), hipMemcpyDeviceToHost));
/**** \copy back ****/
/**** free device memory ****/
CHECK(hipFree(d_x));
CHECK(hipFree(d_y));
CHECK(hipFree(temp_cvr.val));
CHECK(hipFree(temp_cvr.colidx));
CHECK(hipFree(temp_cvr.rec));
CHECK(hipFree(temp_cvr.rec_threshold));
CHECK(hipFree(temp_cvr.threshold_detail));
CHECK(hipFree(temp_cvr.tail));
CHECK(hipFree(temp_cvr.warp_nnz));
CHECK(hipFree(temp_csr.val));
CHECK(hipFree(temp_csr.col_idx));
CHECK(hipFree(temp_csr.row_ptr));
/**** \free device memory ****/
/**** compute y_verify using csr spmv ****/
// accumulate n_iterations times to mirror the GPU loop (both y's start at 0)
gettimeofday(&tv1, NULL);
for(int iteration = 0; iteration < n_iterations; iteration++){
#pragma omp parallel for num_threads(OMP_THREADS)
for(int i = 0; i < h_csr->nrow; i++){
floatType sum = 0;
for(int j = h_csr->row_ptr[i]; j < h_csr->row_ptr[i+1]; j++){
sum += h_csr->val[j] * h_x[h_csr->col_idx[j]];
}
y_verify[i] += sum;
}
}
gettimeofday(&tv2, NULL);
tv_diff2 = (tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec;
printf("cpu_spmv time: %lfms\n", tv_diff2/1000.0);
/**** \compute y_verify using csr spmv ****/
/**** check the result ****/
int count = 0;
floatType y1 = 0, y2 = 0;
for(int i = 0; i < h_csr->nrow; i++){
if(func_compare(h_y[i], y_verify[i]) != CMP_EQUAL){
y1 += h_y[i];
y2 += y_verify[i];
count++;
// only print the first 10 mismatches
if(count <= 10){
#ifdef DOUBLE
printf("y[%d] should be %lf, but the result is %lf\n", i, y_verify[i], h_y[i]);
#else
printf("y[%d] should be %f, but the result is %f\n", i, y_verify[i], h_y[i]);
#endif
}
}
}
if(0 == count){
printf("Correct\n\n");
}else{
#ifdef DOUBLE
printf("count=%d, y_sum=%lf, y_v_sum=%lf\n", count, y1, y2);
#else
printf("count=%d, y_sum=%f, y_v_sum=%f\n", count, y1, y2);
#endif
}
/**** \check the result ****/
/**** free host memory ****/
free(h_x);
free(h_y);
free(y_verify);
free(h_csr->val);
free(h_csr->col_idx);
free(h_csr->row_ptr);
free(h_csr);
/**** \free host memory ****/
return 0;
}
/*
** function: read_matrix()
** programmer: Lukasz Wesolowski
** creation: July 2, 2010
** read matrix from MMF file and convert it to CSR format
** parameters:
** csr_t *csr allocated csr_t pointer
** char *filename Matrix Market Format file
*/
// Parses a Matrix Market coordinate file into CSR.
// Handles pattern (no values), complex (imaginary part dropped) and real
// fields; symmetric matrices are expanded by mirroring off-diagonal entries.
// NOTE(review): fgets() return values in the body loops are unchecked — a
// truncated file would re-parse a stale buffer; confirm inputs are trusted.
int read_matrix(csr_t *csr, char *filename){
FILE *fp = fopen(filename, "r");
if(!fp){
printf("ERROR: *** cannot open file: %s ***\n", filename);
return ERROR;
}
char buffer[1024];
char id[FIELD_LENGTH], object[FIELD_LENGTH], format[FIELD_LENGTH], field[FIELD_LENGTH], symmetry[FIELD_LENGTH];
int field_pattern = 0, field_complex = 0, symmetry_symmetric = 0;
//read the header of Matrix Market Format
if(fgets(buffer, sizeof(buffer), fp)){
sscanf(buffer, "%s %s %s %s %s", id, object, format, field, symmetry);
}else{
printf("ERROR: *** empty file: %s ***\n", filename);
return ERROR;
}
//check stored object and format
if(strcmp(object, "matrix")){
printf("ERROR: *** file %s does not store a matrix ***\n", filename);
return ERROR;
}
if(strcmp(format, "coordinate")){
printf("ERROR: *** matrix representation is dense ***\n");
return ERROR;
}
//specific matrix
if(0 == strcmp(field, "pattern")){
field_pattern = 1;
}
if(0 == strcmp(field, "complex")){
field_complex = 1;
}
if(0 == strcmp(symmetry, "symmetric")){
symmetry_symmetric = 1;
}
//omit comments
while(!feof(fp)){
fgets(buffer, sizeof(buffer), fp);
if('%' != buffer[0]){
break;
}
}
//number of rows, columns and non-zeros in matrix
coo_t coo;
sscanf(buffer, "%d %d %d", &coo.nrow, &coo.ncol, &coo.nnz);
// upper bound: every off-diagonal entry is mirrored; the real count is
// trimmed below with coo.nnz = i
if(symmetry_symmetric){
coo.nnz *= 2;
}
coo.triple = (triple_t *)malloc(coo.nnz * sizeof(triple_t)); //this pointer is useless out of this function. remember to free it.
HOST_CHECK(coo.triple);
//MMF -> coordinate format
int i = 0;
if(symmetry_symmetric){
if(field_pattern){
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
sscanf(buffer, "%d %d", &coo.triple[i].x, &coo.triple[i].y);
coo.triple[i].val = 1;
// mirror off-diagonal entries (diagonal ones are stored once)
if(coo.triple[i].x != coo.triple[i].y){
coo.triple[i+1].x = coo.triple[i].y;
coo.triple[i+1].y = coo.triple[i].x;
coo.triple[i+1].val = 1;
i++;
}
}
}else if(field_complex){
floatType im;
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#else
sscanf(buffer, "%d %d %f %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#endif
if(coo.triple[i].x != coo.triple[i].y){
coo.triple[i+1].x = coo.triple[i].y;
coo.triple[i+1].y = coo.triple[i].x;
coo.triple[i+1].val = coo.triple[i].val;
i++;
}
}
}else{
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#else
sscanf(buffer, "%d %d %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#endif
if(coo.triple[i].x != coo.triple[i].y){
coo.triple[i+1].x = coo.triple[i].y;
coo.triple[i+1].y = coo.triple[i].x;
coo.triple[i+1].val = coo.triple[i].val;
i++;
}
}
}
}else{ // if it is not a symmetric matrix
if(field_pattern){
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
sscanf(buffer, "%d %d", &coo.triple[i].x, &coo.triple[i].y);
coo.triple[i].val = 1;
}
}else if(field_complex){
floatType im;
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#else
sscanf(buffer, "%d %d %f %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#endif
}
}else{
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#else
sscanf(buffer, "%d %d %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#endif
}
}
}
fclose(fp);
// NOTE(review): message below has a typo ("occered"); left unchanged here
if(i > coo.nnz){
printf("ERROR: *** too many matrix elements occered ***\n");
return ERROR;
}
coo.nnz = i;
//COO -> CSR
csr->ncol = coo.ncol;
csr->nrow = coo.nrow;
csr->nnz = coo.nnz;
csr->val = (floatType *)malloc(csr->nnz * sizeof(floatType));
HOST_CHECK(csr->val);
csr->col_idx = (int *)malloc(csr->nnz * sizeof(int));
HOST_CHECK(csr->col_idx);
csr->row_ptr = (int *)malloc((csr->nrow + 1) * sizeof(int));
HOST_CHECK(csr->row_ptr);
// sort by (row, column) so each row's entries are contiguous
qsort(coo.triple, coo.nnz, sizeof(triple_t), func_cmp);
csr->row_ptr[0] = 0;
int r = 0;
for(i = 0; i < csr->nnz; i++){
// the inner while also fills row_ptr for any empty rows in between
while(coo.triple[i].x - COO_BASE != r){
csr->row_ptr[++r] = i;
}
csr->val[i] = coo.triple[i].val;
csr->col_idx[i] = coo.triple[i].y - COO_BASE;
}
// close off trailing empty rows and the final row_ptr[nrow] sentinel
while(r < csr->nrow){
csr->row_ptr[++r] = i;
}
free(coo.triple);
return OK;
}
/*
** function: preprocess()
** convert csr format to cvr format
*/
// Launches the CSR -> CVR conversion kernel once and waits for it.
// Dynamic shared memory: 2 ints per warp in a block (the kernel splits it
// into cur_row[] and rec_flag[]).
int preprocess(){
int warps_per_block = block_dim / THREADS_PER_WARP;
//shared memory allocate for cur_row[] and reg_flag[]
hipLaunchKernelGGL(( preprocess_kernel), dim3(grid_dim), dim3(block_dim), 2 * warps_per_block * sizeof(int), 0, );
CHECK(hipGetLastError());
CHECK(hipDeviceSynchronize());
return OK;
}
/*
** function: spmv()
** sparse matrix-vector multiplication using cvr format
** parameters:
** floatType *d_y allocated pointer(device) to store result y
** floatType *d_x initialized pointer(device) to store vector x
*/
// Runs n_iterations CVR SpMV kernels back to back (launches are
// asynchronous; a single device sync at the end makes the caller's timing
// cover all iterations). Each launch needs blockDim floatTypes of dynamic
// shared memory for per-thread partial sums.
int spmv(floatType *d_y, floatType *d_x){
int iteration;
for(iteration = 0; iteration < n_iterations; iteration++){
//shared memory allocate for temp result (reducing global write)
hipLaunchKernelGGL(( spmv_kernel), dim3(grid_dim), dim3(block_dim), block_dim * sizeof(floatType), 0, d_y, d_x);
CHECK(hipGetLastError());
}
CHECK(hipDeviceSynchronize());
return OK;
}
/*
** preprocess_kernel: CSR -> CVR conversion; each warp converts its slice of
** the non-zeros. Dynamic shared memory: 2 ints per warp in the block —
** cur_row[] (next unclaimed row for the warp's lane trackers) and
** rec_flag[] (set when any lane recorded a write-back this step).
** NOTE(review): relies on legacy mask-less __shfl* (via the func_reduce*
** helpers) and lock-step 32-lane warp behavior throughout.
*/
__global__ void preprocess_kernel(){
extern __shared__ int var_ptr[];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
// current warp id in global vision
int warpID = threadID / THREADS_PER_WARP;
int laneID = threadID % THREADS_PER_WARP;
// current warp id in this block
int warp_offset = threadIdx.x / THREADS_PER_WARP;
int n_warps = (gridDim.x * blockDim.x + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
// use register to store csr members for pointer reuse
csr_t reg_csr;
reg_csr.ncol = const_csr.ncol;
reg_csr.nrow = const_csr.nrow;
reg_csr.nnz = const_csr.nnz;
reg_csr.val = const_csr.val;
reg_csr.col_idx = const_csr.col_idx;
reg_csr.row_ptr = const_csr.row_ptr;
// use register to store cvr members
cvr_t reg_cvr;
reg_cvr.ncol = const_cvr.ncol;
reg_cvr.nrow = const_cvr.nrow;
reg_cvr.nnz = const_cvr.nnz;
reg_cvr.val = const_cvr.val;
reg_cvr.colidx = const_cvr.colidx;
reg_cvr.rec = const_cvr.rec;
reg_cvr.rec_threshold = const_cvr.rec_threshold;
reg_cvr.threshold_detail = const_cvr.threshold_detail;
reg_cvr.tail = const_cvr.tail;
reg_cvr.warp_nnz = const_cvr.warp_nnz;
// non-zero id
int warp_start, warp_end;
int warp_nnz;
int warp_start_row, warp_end_row;
// average number of non-zeros in a warp
int n_warp_nnz = reg_csr.nnz / n_warps;
// a few warps have one more non-zero to deal with
int change_warp_nnz = reg_csr.nnz % n_warps;
// information about row range and non-zeros in this warp
if(warpID < change_warp_nnz){
warp_start = warpID * n_warp_nnz + warpID * 1;
warp_end = (warpID + 1) * n_warp_nnz + (warpID + 1) * 1 - 1;
}else{
warp_start = warpID * n_warp_nnz + change_warp_nnz * 1;
warp_end = (warpID + 1) * n_warp_nnz + change_warp_nnz * 1 - 1;
}
warp_nnz = warp_end - warp_start + 1;
warp_start_row = func_get_row(warp_start, reg_csr);
warp_end_row = func_get_row(warp_end, reg_csr);
// upperbound of needed loop iterations to finish preprocess/multiplication
int ub_steps = (n_warp_nnz + 1 + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
int ub_warp_vals = ub_steps * THREADS_PER_WARP;
int ub_warp_recs = ub_steps + 1;
// actual number of iterations
int n_steps = (warp_nnz + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
// track non-zero/row id in csr
int valID, rowID, count, candi_valID;
// record write-back information
int recID = warpID * ub_warp_recs;
// reduction and/add/select in a warp
int count_res, average_res, candidate_res, stealer_res;
// base address to write in cvr
int warp_gather_base = warpID * ub_warp_vals;
__shared__ int *cur_row, *rec_flag;
bool rec_bit = 0;
// initialize registers and shared arrays
if(0 == threadIdx.x){
// split the dynamic shared allocation: first half cur_row, second rec_flag
cur_row = var_ptr;
rec_flag = &var_ptr[blockDim.x / THREADS_PER_WARP];
}
__syncthreads();
if(0 == laneID){
cur_row[warp_offset] = warp_start_row;
rec_flag[warp_offset] = 0;
reg_cvr.rec_threshold[warpID] = -1;
reg_cvr.threshold_detail[warpID] = 0xffffffff;// initially, no threads can write directly to rec.wb in threshold loop
reg_cvr.warp_nnz[warpID] = warp_nnz;
}
// initialize valID, rowID, count for preprocessing
// each lane claims the next unprocessed row via an atomic on shared cur_row
rowID = atomicAdd(&cur_row[warp_offset], 1);
// empty rows
while(rowID < warp_end_row && reg_csr.row_ptr[rowID+1] == reg_csr.row_ptr[rowID]){
rowID = atomicAdd(&cur_row[warp_offset], 1);
}
if(rowID > warp_end_row){
rowID = -1;
valID = -1;
count = 0;
}else{
valID = reg_csr.row_ptr[rowID];
count = reg_csr.row_ptr[rowID+1] - valID;
// clip the first/last row to this warp's [warp_start, warp_end] nnz range
if(rowID == warp_start_row){
count = count + valID - warp_start;
valID = warp_start;
}
if(rowID == warp_end_row){
count = warp_end + 1 - valID;
}
}
// IF1: if the number of rows is less than THREADS_PER_WARP, initialize tail_ptr
if(cur_row[warp_offset] > warp_end_row){
if(rowID <= warp_end_row){
reg_cvr.tail[threadID] = rowID;
}
if(rowID != -1){
rowID = threadID;
}
if(0 == laneID){
cur_row[warp_offset] += THREADS_PER_WARP; // ensure IF4 and IF5(ELSE1) will never be executed
reg_cvr.rec_threshold[warpID] = 0;
}
} // END IF1
// FOR1: preprocessing loop
for(int i = 0; i <= n_steps; i++){
// reduce AND
count_res = func_reduceAnd(count > 0);
// IF2: if count in some lane(s) = 0, recording and feeding/stealing is needed
if(0 == count_res){
if(0 == count){
// IF3: recording
if(-1 != valID){
reg_cvr.rec[recID].pos = i - 1;
rec_bit = 1;
reg_cvr.rec[recID].wb[laneID] = rowID;
rec_flag[warp_offset] = 1;
}// END IF3
// omit empty rows and get a new row
rowID = atomicAdd(&cur_row[warp_offset], 1);
while(rowID < warp_end_row && reg_csr.row_ptr[rowID+1] == reg_csr.row_ptr[rowID]){
rowID = atomicAdd(&cur_row[warp_offset], 1);
}
}
// WHILE1: feeding/stealing one by one
while(0 == count_res){
// IF4: tracker feeding
if(cur_row[warp_offset] <= warp_end_row+THREADS_PER_WARP){
if(0 == count && rowID <= warp_end_row){
valID = reg_csr.row_ptr[rowID];
count = reg_csr.row_ptr[rowID+1] - valID;
if(warp_end_row == rowID){
count = warp_end - valID + 1;
}
}
// IF5 & ELSE1
if(cur_row[warp_offset] > warp_end_row){
bool detail_bit = 0;
if(rowID <= warp_end_row){
reg_cvr.tail[threadID] = rowID;
}
if(count == 0 && rowID <= warp_end_row){
detail_bit = 1; // these threads can write to rec.wb directly in threshold loop
}
reg_cvr.threshold_detail[warpID] ^= func_reduceBitOr(detail_bit);
// from here on wb entries hold thread ids (flushed via tail later)
rowID = threadID;
}
}// END IF4
// IF6: set rec_threshold, only executed once
if(-1 == reg_cvr.rec_threshold[warpID] && cur_row[warp_offset] > warp_end_row){
if(0 == laneID){
// make sure once IF6 is executed, IF4 will never be executed
cur_row[warp_offset] += THREADS_PER_WARP;
reg_cvr.rec_threshold[warpID] = i;
}
}// END IF6
// re-calculate count_and after possible tracker feeding
count_res = func_reduceAnd(count > 0);
// IF7: tracker stealing
if(0 == count_res && cur_row[warp_offset] > warp_end_row){
// calculate average count
average_res = func_reduceAvg(count);
// find candidate to steal
candidate_res = func_reduceSel(count > average_res);
// select one lane that need to steal
stealer_res = func_reduceSel(count == 0);
// IF8: if no candidate, padding
if(-1 == candidate_res){
if(stealer_res == laneID){
// pad the idle lane with a dummy (valID == -1 writes a 0 entry)
valID = -1;
count = 1;
}
}else{ // ELSE9, stealing
candi_valID = __shfl(valID, candidate_res);
if(stealer_res == laneID){
rowID = candidate_res + warpID * THREADS_PER_WARP;
valID = candi_valID;
count = average_res;
stealer_res = -1;
}
if(candidate_res == laneID){
rowID = candidate_res + warpID * THREADS_PER_WARP;
valID = valID + average_res;
count = count - average_res;
candidate_res = -1;
}
} // END IF8
} // END IF7
// re-calculate count_and, if = 1, jump out of while loop
count_res = func_reduceAnd(count > 0);
} // END WHILE1
// finalize this step's record: store which lanes wrote, advance recID
if(1 == rec_flag[warp_offset]){
reg_cvr.rec[recID].mask = func_reduceBitOr(rec_bit);
recID++;
rec_flag[warp_offset] = 0;
rec_bit = 0;
}
} // END IF2
// in the last round of for loop, the only thing need to do is recording
if(i == n_steps){
continue;
}
// emit one value per lane in CVR order (padding lanes write zeros)
int addr = warp_gather_base + laneID;
if(-1 == valID){
reg_cvr.val[addr] = 0;
reg_cvr.colidx[addr] = 0;
}else{
reg_cvr.val[addr] = reg_csr.val[valID];
reg_cvr.colidx[addr] = reg_csr.col_idx[valID];
valID++;
}
count--;
warp_gather_base += THREADS_PER_WARP;
} // END FOR1
}
/*
** spmv_kernel: one CVR SpMV pass, y += M * x.
** Launch: 1-D grid/blocks; dynamic shared memory must be
** blockDim.x * sizeof(floatType) (per-thread partial sums, see spmv()).
** Write-backs at steps before the warp's rec_threshold go straight to y;
** later ones are staged in shared_y and flushed through cvr.tail at the end.
** FIX: the statement "rec = ®_cvr.rec[recID];" contained a mangled
** "&reg_cvr" (an HTML "&reg;" encoding artifact) and did not compile;
** restored the address-of expression.
*/
__global__ void spmv_kernel(floatType * const __restrict__ y, floatType * const __restrict__ x){
    extern __shared__ floatType shared_y[];
    int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    int warpID = threadID / THREADS_PER_WARP;
    int n_warps = (gridDim.x * blockDim.x + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
    int laneID = threadID % THREADS_PER_WARP;
    unsigned lane_mask = 1 << laneID;
    shared_y[threadIdx.x] = 0;
    // use registers to cache the cvr descriptor from constant memory
    cvr_t reg_cvr;
    reg_cvr.ncol = const_cvr.ncol;
    reg_cvr.nrow = const_cvr.nrow;
    reg_cvr.nnz = const_cvr.nnz;
    reg_cvr.val = const_cvr.val;
    reg_cvr.colidx = const_cvr.colidx;
    reg_cvr.rec = const_cvr.rec;
    reg_cvr.rec_threshold = const_cvr.rec_threshold;
    reg_cvr.threshold_detail = const_cvr.threshold_detail;
    reg_cvr.tail = const_cvr.tail;
    reg_cvr.warp_nnz = const_cvr.warp_nnz;
    // upperbound of loop iterations; must match preprocess_kernel's layout
    int ub_steps = (reg_cvr.nnz / n_warps + 1 + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
    int ub_warp_vals = ub_steps * THREADS_PER_WARP;
    int ub_warp_recs = ub_steps + 1;
    // actual number of iteration loops for this warp
    int n_steps = (reg_cvr.warp_nnz[warpID] + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
    floatType temp_result = 0;
    int valID = warpID * ub_warp_vals + laneID;
    int recID = warpID * ub_warp_recs;
    int threshold = reg_cvr.rec_threshold[warpID];
    int x_addr, writeback, writeback2 = -1;
    record_t *rec;
    // FOR0: one strip of THREADS_PER_WARP values per step
    for(int i = 0; i < n_steps; i++){
        x_addr = reg_cvr.colidx[valID];
        // ******** this is the core multiplication!!!!!!!!! ********
        temp_result += reg_cvr.val[valID] * x[x_addr];
        rec = &reg_cvr.rec[recID]; // FIX: was the mangled "®_cvr.rec[recID]"
        if(rec->pos == i){
            if(0 != (rec->mask & lane_mask)){
                writeback = rec->wb[laneID];
                if((i < threshold) || (i == threshold && ((reg_cvr.threshold_detail[warpID] & lane_mask) == 0))){
                    // row finished before the threshold: accumulate directly into global y
                    func_floatTypeAtomicAdd(&y[writeback], temp_result);
                }else{
                    // past the threshold wb holds a thread id: stage in shared memory,
                    // flushed via tail[] below
                    func_floatTypeAtomicAdd(&shared_y[writeback%blockDim.x], temp_result);
                }
                temp_result = 0;
            }
            recID++;
        }
        valID += THREADS_PER_WARP;
    } // END FOR0
    // flush this thread's staged partial sum to its tail row, if any
    writeback2 = reg_cvr.tail[threadID];
    if(writeback2 != -1){
        func_floatTypeAtomicAdd(&y[writeback2], shared_y[threadIdx.x]);
    }
}
| 836657df7e9de813090a63ada8629c5ff2ab6bfd.cu | /***************************************************
**
** cvr_spmv_gpu.cu: GPU version of CVR spmv
**
** run:
** $ make
** $ ./cvr_spmv_gpu data.txt [#blocks #threads] [#n_iterations]
** data.txt: matrix market format input file
** default parameters: # of blocks and threads per block: autoselect, 1000 iteration
** default compute capability 5.2 (Maxwell)
**
** Default Matrix Market Format store base: 1.
** If your file is 0-based, please change "#define COO_BASE 1" into 0.
** Default float type: double-precision(double).
** If you need single-precision(float) type, please comment "#define DOUBLE".
**
****************************************************/
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<omp.h>
#include<sys/time.h>
#include<cuda_runtime.h>
#include<math.h>
#define OK 0
#define ERROR -1
#define CMP_EQUAL 0
#define FIELD_LENGTH 128
#define COO_BASE 1
#define OMP_THREADS 12
#define THREADS_PER_WARP 32
#define DOUBLE
#ifdef DOUBLE
#define floatType double
#else
#define floatType float
#endif
#define CHECK(call){\
const cudaError_t error = call;\
if(error != cudaSuccess){\
printf("Error: %s:%d\n", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(ERROR);\
}\
}
#define HOST_CHECK(ptr){\
if(ptr == NULL){\
printf("Error: %s:%d\n", __FILE__, __LINE__);\
printf("Memory overflow!\n");\
exit(ERROR);\
}\
}
typedef struct triple{
int x;
int y;
floatType val;
}triple_t;
typedef struct coo{
triple_t *triple;
int ncol;
int nrow;
int nnz;
}coo_t; // coordinate format
typedef struct csr{
int ncol;
int nrow;
int nnz;
floatType *val;
int *col_idx;
int *row_ptr;
}csr_t; // compressed sparse row format
typedef struct record{
int pos;
unsigned mask;
int wb[THREADS_PER_WARP];
}record_t;
typedef struct cvr{
int ncol; //number of columns
int nrow; //number of rows
int nnz; //number of non-zeros
floatType *val; //values stored in cvr-special order
int *colidx; //column numbers corresponding to values
//values in cvr are re-ordered for performance, following elements are used to record how to write to vector y(in spmv: y=Wx)
record_t *rec; //records of write-back information
int *rec_threshold; //i don't know how to describe this, if you've read the paper, this is lr_rec in the paper
int *threshold_detail; //this is new, because threshold is a bit more complicated in GPU version
int *tail; //the last line number(e.g. write-back position, think about it) of each lane(or thread as you like)
int *warp_nnz;
}cvr_t; // compressed vactorization-oriented sparse row format
// auxiliary function used in qsort
// qsort comparator ordering COO triples row-major: primary key is the row
// index x, ties broken by the column index y.
inline int func_cmp(const void *a, const void *b){
	const triple_t *lhs = (const triple_t *)a;
	const triple_t *rhs = (const triple_t *)b;
	if(lhs->x == rhs->x){
		return lhs->y - rhs->y;
	}
	return lhs->x - rhs->x;
}
// auxiliary function to get row number, binary search
// Binary search over csr.row_ptr for the row that owns non-zero index valID,
// i.e. the row r with row_ptr[r] <= valID < row_ptr[r+1]. When several
// consecutive rows share the same row_ptr value (empty rows), the inner
// while advances to the last of them so the caller lands past the empties.
// NOTE(review): 'end' starts at csr.nrow (row_ptr has nrow+1 entries), so
// mid can reach csr.nrow; the 'mid < csr.nrow' guards keep row_ptr[mid+1]
// accesses in bounds.
__forceinline__ __device__ int func_get_row(int const valID, csr_t const csr){
int start = 0, end = csr.nrow;
int mid = (start + end) / 2;
while(start <= end){
if(csr.row_ptr[mid] > valID){
end = mid - 1;
}else if(mid < csr.nrow && csr.row_ptr[mid+1] <= valID){
start = mid + 1;
}else{
// found the row; skip forward over empty rows
while(mid < csr.nrow && csr.row_ptr[mid] == csr.row_ptr[mid+1]){
mid++;
}
return mid;
}
mid = (start + end) / 2;
}
// unreachable for a well-formed row_ptr; flagged loudly if it ever happens
printf("*** ERROR: a bug occured in func_get_row ***\n");
return ERROR;
}
// auxiliary function to implement atomic add, GPUs whose compute capability is lower than 6.0 do not support atomic add for double variables
// Atomic add for floatType that also works on GPUs older than SM60.
// Native double-precision atomicAdd exists only for compute capability
// >= 6.0; older devices emulate it with an atomicCAS loop on the 64-bit
// bit pattern. Returns the value stored at *address before the addition.
// Fix: the pointer parameter is no longer const -- passing a const pointer
// to the native atomicAdd(double*, double) overload is ill-formed, so the
// __CUDA_ARCH__ >= 600 branch did not compile. All callers already pass
// addresses of mutable arrays, so the signature change is transparent.
__forceinline__ __device__ floatType func_floatTypeAtomicAdd(floatType *address, floatType const val){
#if __CUDA_ARCH__ >= 600
	return atomicAdd(address, val);
#else
#ifdef DOUBLE
	// emulate atomicAdd(double) via compare-and-swap on the raw bits
	unsigned long long int *address_as_ull = (unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
	do{
		assumed = old;
		old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
	}while(assumed != old);
	return __longlong_as_double(old);
#else
	// single-precision atomicAdd is native on every supported architecture
	return atomicAdd(address, val);
#endif
#endif
}
// auxiliary function for reducing AND
// Warp-wide logical AND: returns true on every lane iff 'count' is true on
// all 32 lanes. The whole warp must call this (full-warp mask).
// Fix: legacy mask-less __shfl/__shfl_down were removed for Volta+ and are
// unsafe under independent thread scheduling; use the *_sync variants with
// an explicit full-warp mask (CUDA 9+).
__forceinline__ __device__ bool func_reduceAnd(bool count){
	int count_res = count;
	#pragma unroll
	for(int step = THREADS_PER_WARP / 2; step > 0; step >>= 1){
		count_res &= __shfl_down_sync(0xffffffffu, count_res, step);
	}
	// lane 0 holds the reduction; broadcast it to the whole warp
	return __shfl_sync(0xffffffffu, count_res, 0);
}
// auxiliary function for reducing OR
// Warp-wide ballot: builds a 32-bit mask whose bit i is lane i's 'src'
// predicate and returns it on every lane. The whole warp must participate.
// Fix: the hand-rolled shift/__shfl_down reduction used legacy mask-less
// shuffles (removed for Volta+); __ballot_sync computes exactly the same
// mask in one hardware instruction with an explicit participant mask.
__forceinline__ __device__ unsigned func_reduceBitOr(bool src){
	return __ballot_sync(0xffffffffu, src);
}
// auxiliary function for reducing average
// Warp-wide average, rounded up: every lane gets ceil(sum(num) / 32).
// The whole warp must call this (full-warp mask).
// Fix: legacy mask-less __shfl/__shfl_down replaced with the *_sync
// variants required for correctness on Volta+ (CUDA 9+).
__forceinline__ __device__ int func_reduceAvg(int num){
	int avg_res = num;
	#pragma unroll
	for(int step = THREADS_PER_WARP / 2; step > 0; step >>= 1){
		avg_res += __shfl_down_sync(0xffffffffu, avg_res, step);
	}
	// lane 0 holds the warp sum; broadcast, then divide rounding up
	return (__shfl_sync(0xffffffffu, avg_res, 0) + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
}
// auxiliary function for reducing selection
// Warp-wide select: returns the lowest lane id whose 'sel' flag is set,
// or -1 when no lane sets it. Every lane receives the same answer.
__forceinline__ __device__ int func_reduceSel(bool sel){
	unsigned sel_bitmap = func_reduceBitOr(sel);
	// __ffs yields the 1-based position of the least significant set bit,
	// and 0 for an empty mask -- exactly the scan the original loop did
	return sel_bitmap ? (__ffs(sel_bitmap) - 1) : -1;
}
// auxiliary function to compare result
// Compare a computed entry of y against the CPU reference; returns 1 when
// they differ by more than the fixed tolerance, 0 when considered equal.
// Fix: use fabs explicitly -- a bare abs() on double arguments can resolve
// to the integer overload on some toolchains and silently truncate the
// difference to 0, hiding real mismatches.
inline int func_compare(floatType y, floatType y_verify){
	if(fabs(y - y_verify) > 0.00001){
		return 1;
	}
	return 0;
}
// auxiliary function to initialize vector x(in spmv y=Wx)
// Populate the input vector x with test data. Random generation is kept
// around (commented out) for experiments; currently every entry is 1 so the
// product is easy to eyeball. The RNG is still seeded as before.
inline void func_initialData(floatType *ip, int size){
	time_t t;
	srand((unsigned)time(&t));
	//ip[i] = (floatType)(rand() & 0xff) / 10.0f;
	//ip[i] = i % 10;
	for(int idx = 0; idx < size; idx++){
		ip[idx] = 1;
	}
}
// COO_BASE-based Matrix Market format -> CSR format
int read_matrix(csr_t *csr, char *filename);
// CSR format -> CVR format
int preprocess();
// CVR format SpMV, y = y + M * x
int spmv(floatType *d_y, floatType *d_x);
__global__ void preprocess_kernel();
__global__ void spmv_kernel(floatType * const __restrict__ y, floatType * const __restrict__ x);
// In this implementation, only one dimension is used for intuition
int grid_dim = 32;
int block_dim = 64;
int n_iterations = 1000;
__constant__ csr_t const_csr;
__constant__ cvr_t const_cvr;
/*
** function: main()
** end-to-end driver: read matrix -> CSR (host + device constant struct) ->
** CVR (built on the GPU by preprocess()) -> run the CVR SpMV kernel
** n_iterations times -> verify against an OpenMP CSR SpMV on the CPU.
** usage: ./cvr_spmv_gpu data.txt [#blocks #threads] [#n_iterations]
*/
int main(int argc, char **argv){
/**** runtime configuration ****/
if(argc < 2){
printf("ERROR: *** wrong parameter format ***\n");
return ERROR;
}
char *filename = argv[1];
/**** \runtime configuration ****/
/**** prepare host_csr ****/
//allocate memory
csr_t *h_csr = (csr_t *)malloc(sizeof(csr_t));
HOST_CHECK(h_csr);
//read matrix to initialize
if(read_matrix(h_csr, filename)){
printf("ERROR occured in function read_matrix()\n");
return ERROR;
}
printf("Matrix: (%d, %d), %d non-zeros.\n", h_csr->nrow, h_csr->ncol, h_csr->nnz);
/**** \prepare host_csr ****/
// empirical model for the default launch configuration, a function of the
// matrix dimensions; overridable from the command line below.
// NOTE(review): the constants (0.0591, 116038, 24.094, 0.2423) look tuned
// for one particular GPU -- confirm before reusing on other hardware.
int total_threads = floor(0.0591 * h_csr->nnz + 116038);
int floor1 = floor(24.094 * pow(h_csr->nrow, 0.2423));
block_dim = min(1024, max(64, floor1 - floor1 % 32));
grid_dim = min(1024, max(32, total_threads / block_dim + total_threads / block_dim % 2));
if(argc == 3){
n_iterations = atoi(argv[2]);
}else if(argc == 4){
grid_dim = atoi(argv[2]);
block_dim = atoi(argv[3]);
}else if(argc == 5){
grid_dim = atoi(argv[2]);
block_dim = atoi(argv[3]);
n_iterations = atoi(argv[4]);
}
printf("Matrix file:%s. Execution config:<<<%d,%d>>>. Iterations:%d.\n", filename, grid_dim, block_dim, n_iterations);
/**** prepare device_csr ****/
csr_t temp_csr;
//allocate device global memory
temp_csr.ncol = h_csr->ncol;
temp_csr.nrow = h_csr->nrow;
temp_csr.nnz = h_csr->nnz;
CHECK(cudaMalloc(&temp_csr.val, h_csr->nnz * sizeof(floatType)));
CHECK(cudaMalloc(&temp_csr.col_idx, h_csr->nnz * sizeof(int)));
CHECK(cudaMalloc(&temp_csr.row_ptr, (h_csr->nrow + 1) * sizeof(int)));
//initialize, device addresses like d_csr->val can't be accessed directly
CHECK(cudaMemcpy(temp_csr.val, h_csr->val, h_csr->nnz * sizeof(floatType), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(temp_csr.col_idx, h_csr->col_idx, h_csr->nnz * sizeof(int), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(temp_csr.row_ptr, h_csr->row_ptr, (h_csr->nrow + 1) * sizeof(int), cudaMemcpyHostToDevice));
// the struct (holding device pointers) is published through constant
// memory so the kernels read it via const_csr
CHECK(cudaMemcpyToSymbol(const_csr, &temp_csr, sizeof(csr_t)));
/**** \prepare device_csr ****/
/**** prepare host_x, device_x, host_y, device_y and verify_y ****/
//allocate memory
floatType *h_x, *h_y, *y_verify, *d_x, *d_y;
h_x = (floatType *)malloc(h_csr->ncol * sizeof(floatType));
h_y = (floatType *)malloc(h_csr->nrow * sizeof(floatType));
y_verify = (floatType *)malloc(h_csr->nrow * sizeof(floatType));
HOST_CHECK(h_x);
HOST_CHECK(h_y);
HOST_CHECK(y_verify);
CHECK(cudaMalloc(&d_x, h_csr->ncol * sizeof(floatType)));
CHECK(cudaMalloc(&d_y, h_csr->nrow * sizeof(floatType)));
//initialize
func_initialData(h_x, h_csr->ncol);
memset(h_y, 0, h_csr->nrow * sizeof(floatType));
memset(y_verify, 0, h_csr->nrow * sizeof(floatType));
CHECK(cudaMemcpy(d_x, h_x, h_csr->ncol * sizeof(floatType), cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_y, 0, h_csr->nrow * sizeof(floatType)));
/**** \prepare host_x, device_x, host_y, device_y and verify_y ****/
cvr_t temp_cvr;
//cvr structure is dependent on matrix and runtime configuration
int n_threads = grid_dim * block_dim;
int n_warps = n_threads / THREADS_PER_WARP;
// average number of non-zeros per warp
int n_warp_nnz = h_csr->nnz / n_warps;
// upperbound of needed loop iterations to finish preprocess/multiplication
int ub_steps = (n_warp_nnz + 1 + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
int ub_warp_vals = ub_steps * THREADS_PER_WARP;
int ub_warp_recs = ub_steps + 1;
//allocate device global memory
temp_cvr.ncol = h_csr->ncol;
temp_cvr.nrow = h_csr->nrow;
temp_cvr.nnz = h_csr->nnz;
CHECK(cudaMalloc(&temp_cvr.val, n_warps * ub_warp_vals * sizeof(floatType)));
CHECK(cudaMalloc(&temp_cvr.colidx, n_warps * ub_warp_vals * sizeof(int)));
CHECK(cudaMalloc(&temp_cvr.rec, n_warps * ub_warp_recs * sizeof(record_t)));
CHECK(cudaMalloc(&temp_cvr.rec_threshold, n_warps * sizeof(int)));
CHECK(cudaMalloc(&temp_cvr.threshold_detail, n_warps * sizeof(int)));
CHECK(cudaMalloc(&temp_cvr.tail, n_threads * sizeof(int)));
CHECK(cudaMalloc(&temp_cvr.warp_nnz, n_warps * sizeof(int)));
//initialize
// tail = -1 marks "this thread has no trailing row to flush"
CHECK(cudaMemset(temp_cvr.tail, -1, n_threads * sizeof(int)));
CHECK(cudaMemcpyToSymbol(const_cvr, &temp_cvr, sizeof(cvr_t)));
// warming up
if(preprocess()){
printf("ERROR occured while warming up\n");
return ERROR;
}
/**** preprocess time ****/
struct timeval tv1, tv2;
double tv_diff1, tv_diff2;
gettimeofday(&tv1, NULL);
// PREPROCESS KERNEL
if(preprocess()){
printf("ERROR occured in function preprocess()\n");
return ERROR;
}
gettimeofday(&tv2, NULL);
tv_diff1 = (tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec;
printf("preprocess time: %lfms\n", tv_diff1/1000.0);
/**** \preprocess time ****/
/**** spmv time ****/
gettimeofday(&tv1, NULL);
// SPMV KERNEL, deciding which branch to take here for performance considering
if(spmv(d_y, d_x)){
printf("ERROR occured in function spmv()\n");
return ERROR;
}
gettimeofday(&tv2, NULL);
tv_diff2 = (tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec;
printf("spmv time: %lfms\n", tv_diff2/1000.0/n_iterations);
/**** \spmv time ****/
/**** copy back ****/
CHECK(cudaMemcpy(h_y, d_y, h_csr->nrow * sizeof(floatType), cudaMemcpyDeviceToHost));
/**** \copy back ****/
/**** free device memory ****/
CHECK(cudaFree(d_x));
CHECK(cudaFree(d_y));
CHECK(cudaFree(temp_cvr.val));
CHECK(cudaFree(temp_cvr.colidx));
CHECK(cudaFree(temp_cvr.rec));
CHECK(cudaFree(temp_cvr.rec_threshold));
CHECK(cudaFree(temp_cvr.threshold_detail));
CHECK(cudaFree(temp_cvr.tail));
CHECK(cudaFree(temp_cvr.warp_nnz));
CHECK(cudaFree(temp_csr.val));
CHECK(cudaFree(temp_csr.col_idx));
CHECK(cudaFree(temp_csr.row_ptr));
/**** \free device memory ****/
/**** compute y_verify using csr spmv ****/
// CPU reference: the same y += M*x accumulation, repeated n_iterations
// times so it matches what the GPU produced above
gettimeofday(&tv1, NULL);
for(int iteration = 0; iteration < n_iterations; iteration++){
#pragma omp parallel for num_threads(OMP_THREADS)
for(int i = 0; i < h_csr->nrow; i++){
floatType sum = 0;
for(int j = h_csr->row_ptr[i]; j < h_csr->row_ptr[i+1]; j++){
sum += h_csr->val[j] * h_x[h_csr->col_idx[j]];
}
y_verify[i] += sum;
}
}
gettimeofday(&tv2, NULL);
tv_diff2 = (tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec;
printf("cpu_spmv time: %lfms\n", tv_diff2/1000.0);
/**** \compute y_verify using csr spmv ****/
/**** check the result ****/
int count = 0;
floatType y1 = 0, y2 = 0;
for(int i = 0; i < h_csr->nrow; i++){
if(func_compare(h_y[i], y_verify[i]) != CMP_EQUAL){
y1 += h_y[i];
y2 += y_verify[i];
count++;
// print at most the first 10 mismatches
if(count <= 10){
#ifdef DOUBLE
printf("y[%d] should be %lf, but the result is %lf\n", i, y_verify[i], h_y[i]);
#else
printf("y[%d] should be %f, but the result is %f\n", i, y_verify[i], h_y[i]);
#endif
}
}
}
if(0 == count){
printf("Correct\n\n");
}else{
#ifdef DOUBLE
printf("count=%d, y_sum=%lf, y_v_sum=%lf\n", count, y1, y2);
#else
printf("count=%d, y_sum=%f, y_v_sum=%f\n", count, y1, y2);
#endif
}
/**** \check the result ****/
/**** free host memory ****/
free(h_x);
free(h_y);
free(y_verify);
free(h_csr->val);
free(h_csr->col_idx);
free(h_csr->row_ptr);
free(h_csr);
/**** \free host memory ****/
return 0;
}
/*
** function: read_matrix()
** programmer: Lukasz Wesolowski
** creation: July 2, 2010
** read matrix from MMF file and covert it to csr format
** parameters:
** csr_t *csr allocated csr_t pointer
** char *filename Matrix Market Format file
*/
int read_matrix(csr_t *csr, char *filename){
FILE *fp = fopen(filename, "r");
if(!fp){
printf("ERROR: *** cannot open file: %s ***\n", filename);
return ERROR;
}
char buffer[1024];
char id[FIELD_LENGTH], object[FIELD_LENGTH], format[FIELD_LENGTH], field[FIELD_LENGTH], symmetry[FIELD_LENGTH];
int field_pattern = 0, field_complex = 0, symmetry_symmetric = 0;
//read the header of Matrix Market Format
// header line: "%%MatrixMarket matrix coordinate <field> <symmetry>"
if(fgets(buffer, sizeof(buffer), fp)){
sscanf(buffer, "%s %s %s %s %s", id, object, format, field, symmetry);
}else{
printf("ERROR: *** empty file: %s ***\n", filename);
return ERROR;
}
//check stored object and format
if(strcmp(object, "matrix")){
printf("ERROR: *** file %s does not store a matrix ***\n", filename);
return ERROR;
}
if(strcmp(format, "coordinate")){
printf("ERROR: *** matrix representation is dense ***\n");
return ERROR;
}
//specific matrix
// pattern = entries have no value (implicitly 1); complex = value plus an
// imaginary part (imaginary part is discarded below)
if(0 == strcmp(field, "pattern")){
field_pattern = 1;
}
if(0 == strcmp(field, "complex")){
field_complex = 1;
}
if(0 == strcmp(symmetry, "symmetric")){
symmetry_symmetric = 1;
}
//omit comments
while(!feof(fp)){
fgets(buffer, sizeof(buffer), fp);
if('%' != buffer[0]){
break;
}
}
//number of rows, columns and non-zeros in matrix
coo_t coo;
sscanf(buffer, "%d %d %d", &coo.nrow, &coo.ncol, &coo.nnz);
// symmetric files store only one triangle; reserve room for the mirror
// entries that are synthesized while reading
if(symmetry_symmetric){
coo.nnz *= 2;
}
coo.triple = (triple_t *)malloc(coo.nnz * sizeof(triple_t)); //this pointer is useless out of this function. remember to free it.
HOST_CHECK(coo.triple);
//MMF -> coordinate format
// NOTE(review): fgets/sscanf return values are unchecked here; a truncated
// file would silently produce garbage triples.
int i = 0;
if(symmetry_symmetric){
if(field_pattern){
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
sscanf(buffer, "%d %d", &coo.triple[i].x, &coo.triple[i].y);
coo.triple[i].val = 1;
// off-diagonal entries get an explicit mirrored twin (i advances twice)
if(coo.triple[i].x != coo.triple[i].y){
coo.triple[i+1].x = coo.triple[i].y;
coo.triple[i+1].y = coo.triple[i].x;
coo.triple[i+1].val = 1;
i++;
}
}
}else if(field_complex){
floatType im;
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#else
sscanf(buffer, "%d %d %f %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#endif
if(coo.triple[i].x != coo.triple[i].y){
coo.triple[i+1].x = coo.triple[i].y;
coo.triple[i+1].y = coo.triple[i].x;
coo.triple[i+1].val = coo.triple[i].val;
i++;
}
}
}else{
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#else
sscanf(buffer, "%d %d %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#endif
if(coo.triple[i].x != coo.triple[i].y){
coo.triple[i+1].x = coo.triple[i].y;
coo.triple[i+1].y = coo.triple[i].x;
coo.triple[i+1].val = coo.triple[i].val;
i++;
}
}
}
}else{ // if it is not a symmetric matrix
if(field_pattern){
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
sscanf(buffer, "%d %d", &coo.triple[i].x, &coo.triple[i].y);
coo.triple[i].val = 1;
}
}else if(field_complex){
floatType im;
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#else
sscanf(buffer, "%d %d %f %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val, &im);
#endif
}
}else{
for(i = 0; i < coo.nnz; i++){
fgets(buffer, sizeof(buffer), fp);
#ifdef DOUBLE
sscanf(buffer, "%d %d %lf", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#else
sscanf(buffer, "%d %d %f", &coo.triple[i].x, &coo.triple[i].y, &coo.triple[i].val);
#endif
}
}
}
fclose(fp);
if(i > coo.nnz){
printf("ERROR: *** too many matrix elements occered ***\n");
return ERROR;
}
// a symmetric matrix whose diagonal is non-empty mirrors fewer entries
// than the reserved 2*nnz, so shrink to what was actually read
coo.nnz = i;
//COO -> CSR
csr->ncol = coo.ncol;
csr->nrow = coo.nrow;
csr->nnz = coo.nnz;
csr->val = (floatType *)malloc(csr->nnz * sizeof(floatType));
HOST_CHECK(csr->val);
csr->col_idx = (int *)malloc(csr->nnz * sizeof(int));
HOST_CHECK(csr->col_idx);
csr->row_ptr = (int *)malloc((csr->nrow + 1) * sizeof(int));
HOST_CHECK(csr->row_ptr);
// sort triples row-major so each row's entries are contiguous
qsort(coo.triple, coo.nnz, sizeof(triple_t), func_cmp);
csr->row_ptr[0] = 0;
int r = 0;
for(i = 0; i < csr->nnz; i++){
// advance row_ptr over any empty rows before this entry's row
while(coo.triple[i].x - COO_BASE != r){
csr->row_ptr[++r] = i;
}
csr->val[i] = coo.triple[i].val;
csr->col_idx[i] = coo.triple[i].y - COO_BASE;
}
// close out trailing empty rows and the sentinel row_ptr[nrow]
while(r < csr->nrow){
csr->row_ptr[++r] = i;
}
free(coo.triple);
return OK;
}
/*
** function: preprocess()
** Launch the CSR -> CVR conversion kernel. The dynamic shared memory holds
** two ints per warp in the block: cur_row[] (next row to hand out) and
** rec_flag[] (a record is pending). Synchronizes so conversion errors and
** timing are observable before SpMV runs.
*/
int preprocess(){
	const int warps_per_block = block_dim / THREADS_PER_WARP;
	const size_t shmem_bytes = 2 * warps_per_block * sizeof(int);
	preprocess_kernel<<<grid_dim, block_dim, shmem_bytes>>>();
	CHECK(cudaGetLastError());
	CHECK(cudaDeviceSynchronize());
	return OK;
}
/*
** function: spmv()
** Run the CVR SpMV kernel n_iterations times; y accumulates across
** iterations (y = y + M * x). A single synchronization after the whole
** batch keeps the launches pipelined and surfaces any async kernel error.
** parameters:
**   floatType *d_y  device pointer to the result vector y
**   floatType *d_x  device pointer holding the input vector x
*/
int spmv(floatType *d_y, floatType *d_x){
	// per-thread partial results live in shared memory to cut global atomics
	const size_t shmem_bytes = block_dim * sizeof(floatType);
	for(int it = 0; it < n_iterations; it++){
		spmv_kernel<<<grid_dim, block_dim, shmem_bytes>>>(d_y, d_x);
		CHECK(cudaGetLastError());
	}
	CHECK(cudaDeviceSynchronize());
	return OK;
}
/*
** kernel: preprocess_kernel
** Converts the CSR matrix (read from const_csr) into CVR layout (written
** through const_cvr). Each warp owns a contiguous slice of the non-zeros;
** the 32 lanes act as trackers that stream rows in parallel, recording in
** cvr.rec how partial sums must be written back, and in cvr.tail /
** cvr.rec_threshold / cvr.threshold_detail how the tail phase behaves.
** Launch contract: dynamic shared memory = 2 ints per warp in the block
** (cur_row[] and rec_flag[]), as set up by preprocess().
*/
__global__ void preprocess_kernel(){
extern __shared__ int var_ptr[];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
// current warp id in global vision
int warpID = threadID / THREADS_PER_WARP;
int laneID = threadID % THREADS_PER_WARP;
// current warp id in this block
int warp_offset = threadIdx.x / THREADS_PER_WARP;
int n_warps = (gridDim.x * blockDim.x + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
// use register to store csr members for pointer reuse
csr_t reg_csr;
reg_csr.ncol = const_csr.ncol;
reg_csr.nrow = const_csr.nrow;
reg_csr.nnz = const_csr.nnz;
reg_csr.val = const_csr.val;
reg_csr.col_idx = const_csr.col_idx;
reg_csr.row_ptr = const_csr.row_ptr;
// use register to store cvr members
cvr_t reg_cvr;
reg_cvr.ncol = const_cvr.ncol;
reg_cvr.nrow = const_cvr.nrow;
reg_cvr.nnz = const_cvr.nnz;
reg_cvr.val = const_cvr.val;
reg_cvr.colidx = const_cvr.colidx;
reg_cvr.rec = const_cvr.rec;
reg_cvr.rec_threshold = const_cvr.rec_threshold;
reg_cvr.threshold_detail = const_cvr.threshold_detail;
reg_cvr.tail = const_cvr.tail;
reg_cvr.warp_nnz = const_cvr.warp_nnz;
// non-zero id
int warp_start, warp_end;
int warp_nnz;
int warp_start_row, warp_end_row;
// average number of non-zeros in a warp
int n_warp_nnz = reg_csr.nnz / n_warps;
// a few warps have one more non-zero to deal with
int change_warp_nnz = reg_csr.nnz % n_warps;
// information about row range and non-zeros in this warp
// warps [0, change_warp_nnz) take n_warp_nnz+1 non-zeros, the rest n_warp_nnz
if(warpID < change_warp_nnz){
warp_start = warpID * n_warp_nnz + warpID * 1;
warp_end = (warpID + 1) * n_warp_nnz + (warpID + 1) * 1 - 1;
}else{
warp_start = warpID * n_warp_nnz + change_warp_nnz * 1;
warp_end = (warpID + 1) * n_warp_nnz + change_warp_nnz * 1 - 1;
}
warp_nnz = warp_end - warp_start + 1;
warp_start_row = func_get_row(warp_start, reg_csr);
warp_end_row = func_get_row(warp_end, reg_csr);
// upperbound of needed loop iterations to finish preprocess/multiplication
int ub_steps = (n_warp_nnz + 1 + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
int ub_warp_vals = ub_steps * THREADS_PER_WARP;
int ub_warp_recs = ub_steps + 1;
// actual number of iterations
int n_steps = (warp_nnz + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
// track non-zero/row id in csr
int valID, rowID, count, candi_valID;
// record write-back information
int recID = warpID * ub_warp_recs;
// reduction and/add/select in a warp
int count_res, average_res, candidate_res, stealer_res;
// base address to write in cvr
int warp_gather_base = warpID * ub_warp_vals;
// pointers into the dynamic shared array; one slot per warp of the block
__shared__ int *cur_row, *rec_flag;
bool rec_bit = 0;
// initialize registers and shared arrays
if(0 == threadIdx.x){
cur_row = var_ptr;
rec_flag = &var_ptr[blockDim.x / THREADS_PER_WARP];
}
__syncthreads();
if(0 == laneID){
cur_row[warp_offset] = warp_start_row;
rec_flag[warp_offset] = 0;
reg_cvr.rec_threshold[warpID] = -1;
reg_cvr.threshold_detail[warpID] = 0xffffffff;// initially, no threads can write directly to rec.wb in threshold loop
reg_cvr.warp_nnz[warpID] = warp_nnz;
}
// initialize valID, rowID, count for preprocessing
// each lane claims the next unprocessed row of the warp's range
rowID = atomicAdd(&cur_row[warp_offset], 1);
// empty rows
while(rowID < warp_end_row && reg_csr.row_ptr[rowID+1] == reg_csr.row_ptr[rowID]){
rowID = atomicAdd(&cur_row[warp_offset], 1);
}
if(rowID > warp_end_row){
rowID = -1;
valID = -1;
count = 0;
}else{
valID = reg_csr.row_ptr[rowID];
count = reg_csr.row_ptr[rowID+1] - valID;
// clip the first/last row to the warp's non-zero range boundaries
if(rowID == warp_start_row){
count = count + valID - warp_start;
valID = warp_start;
}
if(rowID == warp_end_row){
count = warp_end + 1 - valID;
}
}
// IF1: if the number of rows is less than THREADS_PER_WARP, initialize tail_ptr
if(cur_row[warp_offset] > warp_end_row){
if(rowID <= warp_end_row){
reg_cvr.tail[threadID] = rowID;
}
if(rowID != -1){
rowID = threadID;
}
if(0 == laneID){
cur_row[warp_offset] += THREADS_PER_WARP; // ensure IF4 and IF5(ELSE1) will never be executed
reg_cvr.rec_threshold[warpID] = 0;
}
} // END IF1
// FOR1: preprocessing loop
for(int i = 0; i <= n_steps; i++){
// reduce AND
count_res = func_reduceAnd(count > 0);
// IF2: if count in some lane(s) = 0, recording and feeding/stealing is needed
if(0 == count_res){
if(0 == count){
// IF3: recording
if(-1 != valID){
reg_cvr.rec[recID].pos = i - 1;
rec_bit = 1;
reg_cvr.rec[recID].wb[laneID] = rowID;
rec_flag[warp_offset] = 1;
}// END IF3
// omit empty rows and get a new row
rowID = atomicAdd(&cur_row[warp_offset], 1);
while(rowID < warp_end_row && reg_csr.row_ptr[rowID+1] == reg_csr.row_ptr[rowID]){
rowID = atomicAdd(&cur_row[warp_offset], 1);
}
}
// WHILE1: feeding/stealing one by one
while(0 == count_res){
// IF4: tracker feeding
if(cur_row[warp_offset] <= warp_end_row+THREADS_PER_WARP){
if(0 == count && rowID <= warp_end_row){
valID = reg_csr.row_ptr[rowID];
count = reg_csr.row_ptr[rowID+1] - valID;
if(warp_end_row == rowID){
count = warp_end - valID + 1;
}
}
// IF5 & ELSE1
// the warp ran out of fresh rows: switch to tail mode, where each
// lane's pseudo-row is its own threadID
if(cur_row[warp_offset] > warp_end_row){
bool detail_bit = 0;
if(rowID <= warp_end_row){
reg_cvr.tail[threadID] = rowID;
}
if(count == 0 && rowID <= warp_end_row){
detail_bit = 1; // these threads can write to rec.wb directly in threshold loop
}
reg_cvr.threshold_detail[warpID] ^= func_reduceBitOr(detail_bit);
rowID = threadID;
}
}// END IF4
// IF6: set rec_threshold, only executed once
if(-1 == reg_cvr.rec_threshold[warpID] && cur_row[warp_offset] > warp_end_row){
if(0 == laneID){
// make sure once IF6 is executed, IF4 will never be executed
cur_row[warp_offset] += THREADS_PER_WARP;
reg_cvr.rec_threshold[warpID] = i;
}
}// END IF6
// re-calculate count_and after possible tracker feeding
count_res = func_reduceAnd(count > 0);
// IF7: tracker stealing
if(0 == count_res && cur_row[warp_offset] > warp_end_row){
// calculate average count
average_res = func_reduceAvg(count);
// find candidate to steal
candidate_res = func_reduceSel(count > average_res);
// select one lane that need to steal
stealer_res = func_reduceSel(count == 0);
// IF8: if no candidate, padding
// padding entry: valID = -1 writes an explicit zero into cvr below
if(-1 == candidate_res){
if(stealer_res == laneID){
valID = -1;
count = 1;
}
}else{ // ELSE9, stealing
candi_valID = __shfl(valID, candidate_res);
if(stealer_res == laneID){
rowID = candidate_res + warpID * THREADS_PER_WARP;
valID = candi_valID;
count = average_res;
stealer_res = -1;
}
if(candidate_res == laneID){
rowID = candidate_res + warpID * THREADS_PER_WARP;
valID = valID + average_res;
count = count - average_res;
candidate_res = -1;
}
} // END IF8
} // END IF7
// re-calculate count_and, if = 1, jump out of while loop
count_res = func_reduceAnd(count > 0);
} // END WHILE1
// lane-collective: close the pending record with the mask of lanes that
// contributed a write-back entry this step
if(1 == rec_flag[warp_offset]){
reg_cvr.rec[recID].mask = func_reduceBitOr(rec_bit);
recID++;
rec_flag[warp_offset] = 0;
rec_bit = 0;
}
} // END IF2
// in the last round of for loop, the only thing need to do is recording
if(i == n_steps){
continue;
}
// gather this step's 32 values into CVR order (coalesced writes)
int addr = warp_gather_base + laneID;
if(-1 == valID){
reg_cvr.val[addr] = 0;
reg_cvr.colidx[addr] = 0;
}else{
reg_cvr.val[addr] = reg_csr.val[valID];
reg_cvr.colidx[addr] = reg_csr.col_idx[valID];
valID++;
}
count--;
warp_gather_base += THREADS_PER_WARP;
} // END FOR1
}
/*
** kernel: spmv_kernel
** y += M * x using the CVR layout prepared by preprocess_kernel. Each warp
** streams its slice of non-zeros in CVR order; when a record fires, lanes
** flagged in rec.mask flush their partial sum either straight to y (before
** the threshold step) or into per-thread shared-memory slots, which are
** added to y once at the end via cvr.tail.
** Launch contract: dynamic shared memory = blockDim.x * sizeof(floatType).
** Fix: 'rec = &reg_cvr.rec[recID]' had been corrupted into the single
** glyph '®_cvr' (an HTML-entity round trip of "&reg_"), which does not
** compile; also added __syncwarp() before the tail flush, since lanes
** atomically add into each other's shared slots just above (required on
** Volta+ where implicit warp synchrony no longer holds).
*/
__global__ void spmv_kernel(floatType * const __restrict__ y, floatType * const __restrict__ x){
	extern __shared__ floatType shared_y[];
	int threadID = blockIdx.x * blockDim.x + threadIdx.x;
	int warpID = threadID / THREADS_PER_WARP;
	int n_warps = (gridDim.x * blockDim.x + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
	int laneID = threadID % THREADS_PER_WARP;
	unsigned lane_mask = 1 << laneID;
	shared_y[threadIdx.x] = 0;
	// use register to store cvr members
	cvr_t reg_cvr;
	reg_cvr.ncol = const_cvr.ncol;
	reg_cvr.nrow = const_cvr.nrow;
	reg_cvr.nnz = const_cvr.nnz;
	reg_cvr.val = const_cvr.val;
	reg_cvr.colidx = const_cvr.colidx;
	reg_cvr.rec = const_cvr.rec;
	reg_cvr.rec_threshold = const_cvr.rec_threshold;
	reg_cvr.threshold_detail = const_cvr.threshold_detail;
	reg_cvr.tail = const_cvr.tail;
	reg_cvr.warp_nnz = const_cvr.warp_nnz;
	// upperbound of needed loop iterations to finish preprocess/multiplication
	int ub_steps = (reg_cvr.nnz / n_warps + 1 + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
	int ub_warp_vals = ub_steps * THREADS_PER_WARP;
	int ub_warp_recs = ub_steps + 1;
	// actual number of iteration loops
	int n_steps = (reg_cvr.warp_nnz[warpID] + THREADS_PER_WARP - 1) / THREADS_PER_WARP;
	floatType temp_result = 0;
	int valID = warpID * ub_warp_vals + laneID;
	int recID = warpID * ub_warp_recs;
	int threshold = reg_cvr.rec_threshold[warpID];
	int x_addr, writeback, writeback2 = -1;
	record_t *rec;
	// FOR0
	for(int i = 0; i < n_steps; i++){
		x_addr = reg_cvr.colidx[valID];
		// ******** this is the core multiplication!!!!!!!!! ********
		temp_result += reg_cvr.val[valID] * x[x_addr];
		rec = &reg_cvr.rec[recID]; // fixed: was the mojibake '®_cvr.rec[recID]'
		if(rec->pos == i){
			if(0 != (rec->mask & lane_mask)){
				writeback = rec->wb[laneID];
				if((i < threshold) || (i == threshold && ((reg_cvr.threshold_detail[warpID] & lane_mask) == 0))){
					func_floatTypeAtomicAdd(&y[writeback], temp_result);
				}else{
					func_floatTypeAtomicAdd(&shared_y[writeback%blockDim.x], temp_result);
				}
				temp_result = 0;
			}
			recID++;
		}
		valID += THREADS_PER_WARP;
	} // END FOR0
	// lanes may have accumulated into each other's shared_y slots above;
	// make those writes visible before each thread flushes its own slot
	__syncwarp();
	writeback2 = reg_cvr.tail[threadID];
	if(writeback2 != -1){
		func_floatTypeAtomicAdd(&y[writeback2], shared_y[threadIdx.x]);
	}
}
|
f59fd0d32fce50c277f96d5019ee465ed8ac058c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix.hu"
#include <stdio.h>
#include <stdlib.h>
#include "errorMacros.hu"
#include "cuMatrix.hu"
// Fill the matrix with pseudo-random floats built from /dev/urandom: each
// entry is a random unsigned numerator divided by a random signed divisor
// (divisor 0 yields inf/NaN -- acceptable for test data). The host buffer
// is then pushed to the device so both copies agree.
// Fix: '&divide' had been corrupted into the single glyph '÷' by an
// HTML-entity round trip, which does not compile.
void Matrix::fill()
{
	unsigned int i, num;
	int divide;
	FILE *f;
	f = fopen("/dev/urandom", "r");
	ASSERT(f);
	for(i = 0; i < w*w; i++)
	{
		fread(&num, sizeof(unsigned int), 1, f);
		fread(&divide, sizeof(int), 1, f); // fixed mojibake: was 'fread(÷, ...)'
		h[i] = ((float)num)/((float)divide);
	}
	CHECK(fclose(f));
	// sync matrix and host memories
	copyHtoD();
}
// Pretty-print the matrix to stdout. Large matrices (> MAX_SANE_WIDTH) ask
// for confirmation first. If the matrix holds an in-place LU factorization,
// the L factor (unit diagonal implied) and the U factor are printed
// separately; otherwise the raw w x w values are printed.
// Syncs device -> host first when the device copy has been modified.
// Fix: scanf(" %c") instead of scanf("%c") -- the leading space skips the
// newline left in stdin, which previously made every wrong answer trigger
// the retry message twice (once for the key, once for the newline).
void Matrix::print()
{
	unsigned int x, y;
	char answer;
	printf("\n");
	if(w>MAX_SANE_WIDTH)
	{
		printf("Matrix is very big, %dx%d. Are you sure you want to print it? [y/n] ", w, w);
		while(1)
		{
			scanf(" %c", &answer);
			if(answer=='n')
				return;
			if(answer=='y')
				break;
			printf("Could not understand input. Please type 'y' or 'n'. ");
		}
	}
	if(hasBeenTouched())
		copyDtoH();
	if(!isLU())
	{
		for(y=0; y < w; y++)
		{
			for(x=0; x < w; x++)
				printf("%f ", h[y*w+x]);
			printf("\n");
		}
		printf("\n");
		return;
	}
	//print the L and the U matrices
	printf("Lower part\n");
	for(y=0; y < w; y++)
	{
		for(x=0; x < w; x++)
		{
			if(x<y)
				printf("%f ", h[y*w+x]);
			else if(x==y)
				printf("%f ", 1.0);
			else
				printf("%f ", 0.0);
		}
		printf("\n");
	}
	printf("\nUpper part\n");
	for(y=0; y < w; y++)
	{
		for(x=0; x < w; x++)
		{
			if(x>=y)
				printf("%f ", h[y*w+x]);
			else
				printf("%f ", 0.0);
		}
		printf("\n");
	}
	printf("\n");
}
// Construct a width x width matrix: allocates the device buffer first, then
// the mirrored host buffer (released again if the host allocation fails).
// Both buffers start uninitialized; call fill() to populate them.
Matrix::Matrix(unsigned int width) :
	w(width), touched(false), _isLU(false)
{
	const size_t bytes = width * width * sizeof(float);
	if(hipMalloc((void **)&d, bytes) != hipSuccess)
		SPIT("Failed to allocate device array\n");
	h = (float *) malloc(bytes);
	if(h == NULL)
	{
		hipFree(d);
		SPIT("Failed to allocate host array\n");
	}
}
// Deep-copy this matrix: duplicates the host buffer, the device buffer and
// the LU flag. The caller owns the returned object.
Matrix *Matrix::copy()
{
	Matrix *clone = new Matrix(w);
	const size_t bytes = w * w * sizeof(float);
	memcpy((void *)clone->getH(), (const void *)h, bytes);
	CHECK_SUCCESS(hipMemcpy(clone->getD(), d, bytes, hipMemcpyDeviceToDevice));
	clone->setLU(isLU());
	return clone;
}
// Release the device allocation and the mirrored host buffer.
Matrix::~Matrix()
{
	hipFree(d);
	free(h);
}
/*
** In-place LU decomposition (Doolittle, no pivoting): the strict lower
** triangle ends up holding L (unit diagonal implied) and the upper triangle
** U. Hybrid scheme: each row's U segment is computed on the GPU
** (_makeLURow) while the L entries left of the diagonal are updated on the
** CPU, with partial hipMemcpy calls keeping only the touched row segments
** in sync between h and d.
** NOTE(review): without pivoting this breaks down when a diagonal pivot is
** zero (division below). Kernel bodies are defined elsewhere (cuMatrix.hu).
*/
void Matrix::decomposeLU()
{
// doolittle algorithm
unsigned int x, y, p;
dim3 dimGrid(1, 1);
dim3 dimBlock(1, w-1);
// presumably computes column 0 of L on the device -- kernel body not shown
hipLaunchKernelGGL(( _prepareLeftColLU), dim3(dimGrid), dim3(dimBlock), 0, 0, d, w);
copyDtoH();
dimBlock = dim3(w-1, 1);
// row 1 of U, starting at element (1,1) = index w+1
hipLaunchKernelGGL(( _makeLURow), dim3(dimGrid), dim3(dimBlock), 0, 0, d, 1, w+1, w);
//copy recent changes to host
CHECK_SUCCESS(hipMemcpy(&h[w+1], &d[w+1], (w-1)*sizeof(float), hipMemcpyDeviceToHost));
for(y=2; y<w; y++)
{
// CPU: L entries of row y (columns 1..y-1); column 0 was done on the GPU
for(x=1; x<y; x++)
{
for(p=0; p<x; p++)
h[y*w+x] -= h[y*w+p]*h[p*w+x];
h[y*w+x] /= h[x*w+x];
}
//copy recent changes to device
CHECK_SUCCESS(hipMemcpy(&d[y*w+1], &h[y*w+1], (y-1)*sizeof(float), hipMemcpyHostToDevice));
dimBlock = dim3(w-y, 1);
// GPU: U entries of row y, from the diagonal to the right edge
hipLaunchKernelGGL(( _makeLURow), dim3(dimGrid), dim3(dimBlock), 0, 0, d, y, y*w+y, w);
//copy recent changes to host
CHECK_SUCCESS(hipMemcpy(&h[y*w+y], &d[y*w+y], (w-y)*sizeof(float), hipMemcpyDeviceToHost));
}
setLU(true);
}
// this = a * b for square matrices of identical width. The product is
// computed on the device only (host copy becomes stale -> touch()), and the
// matrix is marked as a plain matrix, not an LU factorization.
void Matrix::multiply(Matrix *a, Matrix *b)
{
	if(a->getW() != w || b->getW() != w)
		SPIT("Matrices must be of the same size\n");
	const dim3 grid(1, 1);
	const dim3 block(w, w); // one thread per output element
	hipLaunchKernelGGL(_matMultiply, grid, block, 0, 0, d, a->getD(), b->getD(), w);
	setLU(false);
	touch();
}
void Matrix::multiplyLU()
{
float *dcopy;
size_t size = w*w*sizeof(float);
dim3 dimGrid(1, 1);
dim3 dimBlock(w, w);
CHECK_SUCCESS(hipMalloc((void **)&dcopy, size));
CHECK_SUCCESS(hipMemcpy(dcopy, d, size, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( _matMultiplyLU), dim3(dimGrid), dim3(dimBlock), 0, 0, d, dcopy, w);
hipFree(dcopy);
setLU(false);
touch();
}
void Matrix::multiplyUL()
{
float *dcopy;
size_t size = w*w*sizeof(float);
dim3 dimGrid(1, 1);
dim3 dimBlock(w, w);
CHECK_SUCCESS(hipMalloc((void **)&dcopy, size));
CHECK_SUCCESS(hipMemcpy(dcopy, d, size, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( _matMultiplyUL), dim3(dimGrid), dim3(dimBlock), 0, 0, d, dcopy, w);
hipFree(dcopy);
setLU(false);
touch();
}
// Element-wise comparison against m on the device: returns true when any
// pair of corresponding entries differs by more than the fixed tolerance
// (0.001), false when the matrices are equal within tolerance.
bool Matrix::isDifferent(Matrix *m)
{
	bool result = false;
	bool *dev_result;
	CHECK_SUCCESS(hipMalloc((void **)&dev_result, sizeof(bool)));
	// the kernel only sets the flag on mismatch, so it starts out false
	CHECK_SUCCESS(hipMemcpy(dev_result, &result, sizeof(bool), hipMemcpyHostToDevice));
	const dim3 grid(1, 1);
	const dim3 block(w, w);
	hipLaunchKernelGGL(_matDifferent, grid, block, 0, 0, d, m->getD(), w, 0.001, dev_result);
	CHECK_SUCCESS(hipMemcpy(&result, dev_result, sizeof(bool), hipMemcpyDeviceToHost));
	hipFree(dev_result);
	return result;
}
/*
** Invert the LU factors in place: afterwards the upper triangle holds
** U^-1 and the strict lower triangle holds L^-1 (unit diagonal implied).
** Currently HARDCODED for 3x3 matrices only -- the general GPU version is
** kept below as commented-out scaffolding. Operates on the host copy and
** pushes the result back to the device.
*/
void Matrix::invertLU()
{
unsigned int i;
/*float *dest, *m = (float *) malloc(w*w*sizeof(float));
memcpy((void *)m, (const void *)h, w*w*sizeof(float));
dest = h;
CHECK_SUCCESS(hipMalloc((void **)&m, w*w*sizeof(float)));
for(i=0; i<w; i++)
{
min = (w-i < i+1 ? w-i : i+1);
dim3 dimGrid(1, 1);
dim3 dimBlock(min, min);
_doInversionStepUpper<<<dimGrid, dimBlock>>>(m, d, w, i);
_doInversionStepLower<<<dimGrid, dimBlock>>>(m, d, w, i);
}
CHECK_SUCCESS(hipMemcpy(d, m, w*w*sizeof(float), hipMemcpyDeviceToDevice));
copyDtoH();
hipFree(m);*/
//hardcoded 3x3 matrix inversion
if(w != 3)
SPIT("You're supposed to use a 3x3 matrix only! Support for bigger matrices soon in https://github.com/rhaps0dy\n");
copyDtoH();
//upper
// closed-form entries of U^-1 for the 3x3 case: corner first (it reads the
// original values), then reciprocal diagonal, then the superdiagonal
h[2] = (((h[1]*h[5])/(h[4]*h[8]))-h[2]/h[8])/h[0];
for(i=0; i<3; i++)
h[i*w+i] = 1/h[i*w+i];
for(i=0; i<2; i++)
h[i*w+i+1] = -h[i*w+i+1]*h[i*w+i]*h[(i+1)*w+i+1];
//lower
// L has a unit diagonal, so L^-1 is just sign flips plus one product term
for(i=0; i<2; i++)
h[(i+1)*w+i] = -h[(i+1)*w+i];
h[2*w] = h[1*w]*h[2*w+1] - h[2*w];
copyHtoD();
// free(m);
}
| f59fd0d32fce50c277f96d5019ee465ed8ac058c.cu | #include "matrix.hu"
#include <stdio.h>
#include <stdlib.h>
#include "errorMacros.hu"
#include "cuMatrix.hu"
// Fill the matrix with pseudo-random floats built from /dev/urandom: each
// entry is a random unsigned numerator divided by a random signed divisor
// (divisor 0 yields inf/NaN -- acceptable for test data). The host buffer
// is then pushed to the device so both copies agree.
// Fix: '&divide' had been corrupted into the single glyph '÷' by an
// HTML-entity round trip, which does not compile.
void Matrix::fill()
{
	unsigned int i, num;
	int divide;
	FILE *f;
	f = fopen("/dev/urandom", "r");
	ASSERT(f);
	for(i = 0; i < w*w; i++)
	{
		fread(&num, sizeof(unsigned int), 1, f);
		fread(&divide, sizeof(int), 1, f); // fixed mojibake: was 'fread(÷, ...)'
		h[i] = ((float)num)/((float)divide);
	}
	CHECK(fclose(f));
	// sync matrix and host memories
	copyHtoD();
}
// Pretty-print the matrix to stdout. Large matrices (> MAX_SANE_WIDTH) ask
// for confirmation first. If the matrix holds an in-place LU factorization,
// the L factor (unit diagonal implied) and the U factor are printed
// separately; otherwise the raw w x w values are printed.
// Syncs device -> host first when the device copy has been modified.
// Fix: scanf(" %c") instead of scanf("%c") -- the leading space skips the
// newline left in stdin, which previously made every wrong answer trigger
// the retry message twice (once for the key, once for the newline).
void Matrix::print()
{
	unsigned int x, y;
	char answer;
	printf("\n");
	if(w>MAX_SANE_WIDTH)
	{
		printf("Matrix is very big, %dx%d. Are you sure you want to print it? [y/n] ", w, w);
		while(1)
		{
			scanf(" %c", &answer);
			if(answer=='n')
				return;
			if(answer=='y')
				break;
			printf("Could not understand input. Please type 'y' or 'n'. ");
		}
	}
	if(hasBeenTouched())
		copyDtoH();
	if(!isLU())
	{
		for(y=0; y < w; y++)
		{
			for(x=0; x < w; x++)
				printf("%f ", h[y*w+x]);
			printf("\n");
		}
		printf("\n");
		return;
	}
	//print the L and the U matrices
	printf("Lower part\n");
	for(y=0; y < w; y++)
	{
		for(x=0; x < w; x++)
		{
			if(x<y)
				printf("%f ", h[y*w+x]);
			else if(x==y)
				printf("%f ", 1.0);
			else
				printf("%f ", 0.0);
		}
		printf("\n");
	}
	printf("\nUpper part\n");
	for(y=0; y < w; y++)
	{
		for(x=0; x < w; x++)
		{
			if(x>=y)
				printf("%f ", h[y*w+x]);
			else
				printf("%f ", 0.0);
		}
		printf("\n");
	}
	printf("\n");
}
// Constructs a width x width float matrix with mirrored device (d) and
// host (h) buffers. Starts untouched (host == device) and not LU-factored.
// NOTE(review): assumes SPIT aborts or throws -- if it merely prints,
// construction would continue with invalid pointers; confirm in
// errorMacros.hu.
Matrix::Matrix(unsigned int width) :
w(width), touched(false), _isLU(false)
{
size_t size = width*width*sizeof(float);
if(cudaMalloc((void **)&d, size)!=cudaSuccess)
SPIT("Failed to allocate device array\n");
h = (float *) malloc(size);
if(!h)
{
// Host allocation failed: release the device buffer before bailing out.
cudaFree(d);
SPIT("Failed to allocate host array\n");
}
}
// Deep copy: duplicates both the host and device buffers and the LU flag.
// The caller owns (and must delete) the returned Matrix.
// NOTE(review): the host buffer is copied with memcpy even when the device
// side has newer data (hasBeenTouched()) -- confirm callers sync first.
Matrix *Matrix::copy()
{
size_t size = w*w*sizeof(float);
Matrix *m = new Matrix(w);
memcpy((void *)m->getH(), (const void *)h, size);
CHECK_SUCCESS(cudaMemcpy(m->getD(), d, size, cudaMemcpyDeviceToDevice));
m->setLU(isLU());
return m;
}
// Releases the host and device buffers allocated by the constructor.
// (cudaFree's return value is intentionally ignored here.)
Matrix::~Matrix()
{
free(h);
cudaFree(d);
}
// In-place LU decomposition (Doolittle): after this call the matrix storage
// holds L below the diagonal (unit diagonal implicit) and U on/above it.
// The first column and each row's U segment are computed on the GPU
// (_prepareLeftColLU / _makeLURow -- see cuMatrix.hu for their exact
// semantics); the interior L updates are computed on the host, with the
// affected row chunks shuttled between h and d as they are produced.
// NOTE: single-block kernel launches limit w to the device's max block size.
void Matrix::decomposeLU()
{
// doolittle algorithm
unsigned int x, y, p;
dim3 dimGrid(1, 1);
dim3 dimBlock(1, w-1);
_prepareLeftColLU<<<dimGrid, dimBlock>>>(d, w);
copyDtoH();
dimBlock = dim3(w-1, 1);
// Row 1 of U, starting at element (1,1) == linear index w+1.
_makeLURow<<<dimGrid, dimBlock>>>(d, 1, w+1, w);
//copy recent changes to host
CHECK_SUCCESS(cudaMemcpy(&h[w+1], &d[w+1], (w-1)*sizeof(float), cudaMemcpyDeviceToHost));
for(y=2; y<w; y++)
{
// Host-side update of L's row y (columns 1..y-1).
for(x=1; x<y; x++)
{
for(p=0; p<x; p++)
h[y*w+x] -= h[y*w+p]*h[p*w+x];
h[y*w+x] /= h[x*w+x];
}
//copy recent changes to device
CHECK_SUCCESS(cudaMemcpy(&d[y*w+1], &h[y*w+1], (y-1)*sizeof(float), cudaMemcpyHostToDevice));
dimBlock = dim3(w-y, 1);
// GPU computes U's row y from the diagonal onward.
_makeLURow<<<dimGrid, dimBlock>>>(d, y, y*w+y, w);
//copy recent changes to host
CHECK_SUCCESS(cudaMemcpy(&h[y*w+y], &d[y*w+y], (w-y)*sizeof(float), cudaMemcpyDeviceToHost));
}
setLU(true);
}
// this = a * b, computed on the GPU with a single w x w thread block.
// Marks the result as non-LU and as device-newer (touch()).
// NOTE(review): if SPIT returns instead of aborting, the kernel would still
// be launched with mismatched sizes -- confirm SPIT's semantics.
// The single-block launch limits w to the device's max threads per block.
void Matrix::multiply(Matrix *a, Matrix *b)
{
if(w != a->getW() || w != b->getW())
SPIT("Matrices must be of the same size\n");
dim3 dimGrid(1, 1);
dim3 dimBlock(w, w);
_matMultiply<<<dimGrid, dimBlock>>>(d, a->getD(), b->getD(), w);
setLU(false);
touch();
}
// Replaces the stored LU factors with their product L*U (i.e. reconstructs
// the original matrix in place). The device data is copied aside first
// because the kernel reads the factors from dcopy while writing the
// product into d.
void Matrix::multiplyLU()
{
float *dcopy;
size_t size = w*w*sizeof(float);
dim3 dimGrid(1, 1);
dim3 dimBlock(w, w);
CHECK_SUCCESS(cudaMalloc((void **)&dcopy, size));
CHECK_SUCCESS(cudaMemcpy(dcopy, d, size, cudaMemcpyDeviceToDevice));
_matMultiplyLU<<<dimGrid, dimBlock>>>(d, dcopy, w);
// Scratch copy no longer needed (return value intentionally ignored).
cudaFree(dcopy);
setLU(false);
touch();
}
// Like multiplyLU(), but computes the product in the opposite order: U*L.
// The device data is copied aside so the kernel can read the factors from
// dcopy while writing the product into d.
void Matrix::multiplyUL()
{
float *dcopy;
size_t size = w*w*sizeof(float);
dim3 dimGrid(1, 1);
dim3 dimBlock(w, w);
CHECK_SUCCESS(cudaMalloc((void **)&dcopy, size));
CHECK_SUCCESS(cudaMemcpy(dcopy, d, size, cudaMemcpyDeviceToDevice));
_matMultiplyUL<<<dimGrid, dimBlock>>>(d, dcopy, w);
// Scratch copy no longer needed (return value intentionally ignored).
cudaFree(dcopy);
setLU(false);
touch();
}
/**
 * Compares this matrix against m on the GPU.
 * Launches a single w x w block of _matDifferent, which presumably sets
 * *dev_result when a pair of elements differs by more than the 0.001
 * tolerance (confirm in cuMatrix.hu).
 * Returns true when the matrices differ.
 *
 * NOTE: the single-block launch limits w to the device's max threads per
 * block (typically w <= 32), the same constraint as the other methods here.
 */
bool Matrix::isDifferent(Matrix *m)
{
    // Matrices of different widths are trivially different; this also keeps
    // the kernel from reading past the end of the smaller device buffer.
    // (multiply() performs the analogous size check.)
    if (m == NULL || w != m->getW())
        return true;
    bool result, *dev_result;
    dim3 dimGrid(1, 1);
    dim3 dimBlock(w, w);
    CHECK_SUCCESS(cudaMalloc((void **)&dev_result, sizeof(bool)));
    result = false;
    CHECK_SUCCESS(cudaMemcpy(dev_result, &result, sizeof(bool), cudaMemcpyHostToDevice));
    _matDifferent<<<dimGrid, dimBlock>>>(d, m->getD(), w, 0.001, dev_result);
    CHECK_SUCCESS(cudaMemcpy(&result, dev_result, sizeof(bool), cudaMemcpyDeviceToHost));
    cudaFree(dev_result);
    return result;
}
// Inverts the matrix in place from its LU factorization.
// Currently hard-coded for 3x3 matrices only; the generic GPU path is kept
// commented out below for future reference. Works on the host copy and
// pushes the result back to the device.
// NOTE(review): closed-form 3x3 LU-inverse update -- verify against a
// reference implementation before reusing.
void Matrix::invertLU()
{
unsigned int i;
// Generic GPU inversion, disabled (kept verbatim for future work).
/*float *dest, *m = (float *) malloc(w*w*sizeof(float));
memcpy((void *)m, (const void *)h, w*w*sizeof(float));
dest = h;
CHECK_SUCCESS(cudaMalloc((void **)&m, w*w*sizeof(float)));
for(i=0; i<w; i++)
{
min = (w-i < i+1 ? w-i : i+1);
dim3 dimGrid(1, 1);
dim3 dimBlock(min, min);
_doInversionStepUpper<<<dimGrid, dimBlock>>>(m, d, w, i);
_doInversionStepLower<<<dimGrid, dimBlock>>>(m, d, w, i);
}
CHECK_SUCCESS(cudaMemcpy(d, m, w*w*sizeof(float), cudaMemcpyDeviceToDevice));
copyDtoH();
cudaFree(m);*/
//hardcoded 3x3 matrix inversion
// NOTE(review): if w != 3 SPIT is invoked, but execution appears to fall
// through into the 3x3 code unless SPIT aborts -- confirm SPIT's semantics
// in errorMacros.hu.
if(w != 3)
SPIT("You're supposed to use a 3x3 matrix only! Support for bigger matrices soon in https://github.com/rhaps0dy\n");
// Bring the freshest factors to the host before updating in place.
copyDtoH();
//upper
h[2] = (((h[1]*h[5])/(h[4]*h[8]))-h[2]/h[8])/h[0];
// Reciprocal of the diagonal (U's pivots).
for(i=0; i<3; i++)
h[i*w+i] = 1/h[i*w+i];
for(i=0; i<2; i++)
h[i*w+i+1] = -h[i*w+i+1]*h[i*w+i]*h[(i+1)*w+i+1];
//lower
for(i=0; i<2; i++)
h[(i+1)*w+i] = -h[(i+1)*w+i];
h[2*w] = h[1*w]*h[2*w+1] - h[2*w];
// Publish the inverted matrix back to the device.
copyHtoD();
// free(m);
}
|
e7a46fe276eef41da4ea5118cf663cecb5a11f6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Animated-mesh demo kernel: one thread per vertex of a width x height grid.
// Maps the thread's (x, y) cell to normalized coordinates (u, v) in [-1, 1],
// evaluates a time-varying sine/cosine height field, and writes the vertex
// as float4{x=u, y=height, z=v, w=packed color}.
// NOTE(review): there is no bounds check, so the launch configuration must
// tile width x height exactly -- confirm at the call site.
__global__ void cudaKernelMesh(float4* pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
// 0xff00ff00 is a color constant reinterpreted bitwise as a float.
pos[y*width+x] = make_float4(u, w, v, __int_as_float(0xff00ff00)); //Color : DirectX ARGB, OpenGL ABGR
} | e7a46fe276eef41da4ea5118cf663cecb5a11f6d.cu | #include "includes.h"
// Animated-mesh demo kernel: one thread per vertex of a width x height grid.
// Maps the thread's (x, y) cell to normalized coordinates (u, v) in [-1, 1],
// evaluates a time-varying sine/cosine height field, and writes the vertex
// as float4{x=u, y=height, z=v, w=packed color}.
// NOTE(review): there is no bounds check, so the launch configuration must
// tile width x height exactly -- confirm at the call site.
__global__ void cudaKernelMesh(float4* pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
// 0xff00ff00 is a color constant reinterpreted bitwise as a float.
pos[y*width+x] = make_float4(u, w, v, __int_as_float(0xff00ff00)); //Color : DirectX ARGB, OpenGL ABGR
} |
c53bb489b487e6075975b9513701f5f14dfd145c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************** MulticoreWare_Modified - Feature: Pruning / Splicing ************************************/
#include <vector>
#include <cmath>
#include<stdio.h>
#include "caffe/filler.hpp"
#include "caffe/layers/squeeze_conv_layer.hpp"
namespace caffe {
// The constant NUM_THREADS should be equal to the value in SqueezeCMomentCalc
// Per-block partial reduction over the masked weights wb*mask.
// Each block consumes a slice of 2*NUM_THREADS elements and produces three
// per-block partials: sum of |w| into mu[blockIdx.x], sum of w^2 into
// std[blockIdx.x], and the nonzero count into count[blockIdx.x]. The host
// (SqueezeCMomentCalc) finishes the reduction across blocks.
// Shared layout: param[0..2*NT) holds |w| terms, param[2*NT..4*NT) holds
// w^2 terms; tcount mirrors the nonzero flags.
template <typename Dtype>
__global__ void SqueezeCMomentCollect(const int n, const Dtype* wb, const Dtype* mask,
Dtype* mu, Dtype* std, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ Dtype param [4 * NUM_THREADS];
__shared__ unsigned int tcount [2 * NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
// First element of this thread's pair (s+t); zero-pad past the tail.
if (s + t < n){
param[t] = fabs(mask[s + t] * wb[s + t]);
param[t + 2 * NUM_THREADS] = mask[s + t] * wb[s + t] * wb[s + t];
if(mask[s + t] * wb[s + t] != 0) tcount[t] = 1;
else tcount[t] = 0;
}
else{
param[t] = 0;param[t + 2 * NUM_THREADS] = 0;tcount[t] = 0;
}
// Second element of the pair (s+t+NUM_THREADS), same treatment.
if (s + t + NUM_THREADS < n){
param[t + NUM_THREADS] = fabs(mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS]);
param[t + 3 * NUM_THREADS] = mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS];
if(mask[s + t +NUM_THREADS] * wb[s + t + NUM_THREADS] != 0) tcount[t + NUM_THREADS] = 1;
else tcount[ t + NUM_THREADS] = 0;
}
else{
param[t + NUM_THREADS] = 0; param[t + 3 * NUM_THREADS] = 0; tcount[t + NUM_THREADS] = 0;
}
__syncthreads();
// Standard shared-memory tree reduction; the barrier inside the loop is
// reached by every thread (the guard only skips the adds), which is
// required for correctness.
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
param[t] += param[t + stride];
param[t + 2 * NUM_THREADS] += param[t + 2 * NUM_THREADS + stride];
tcount[t] += tcount[t + stride];
}
__syncthreads();
}
// Thread 0 publishes the block's partials.
if (t == 0){
mu [blockIdx.x] = param[0];
std [blockIdx.x] = param[2 * NUM_THREADS];
count[blockIdx.x] = tcount[0];
}
}
// The constant NUM_THREADS should be equal to the value in SqueezeCMomentCalc
// Per-block count of nonzero mask entries.
// Each block scans a slice of 2*NUM_THREADS mask values and writes the
// number of nonzeros to count[blockIdx.x] via a shared-memory tree
// reduction; elements past n contribute 0.
template <typename Dtype>
__global__ void SqueezeCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ unsigned int tcount [2 * NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
tcount[t] = 0;
if (s + t < n && mask[s + t] != 0){
tcount[t] = 1;
}
tcount[t+NUM_THREADS] = 0;
if (s + t + NUM_THREADS < n && mask[s + t + NUM_THREADS] != 0){
tcount[t + NUM_THREADS] = 1;
}
__syncthreads();
// Tree reduction; the barrier is outside the guard so all threads reach it.
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
tcount[t] += tcount[t + stride];
}
__syncthreads();
}
// Thread 0 publishes the block's count.
if (t == 0){
count[blockIdx.x] = tcount[0];
}
}
//Check condition for pruning and splicing
// Per-element pruning/splicing decision over the weight array wb.
// A live weight (mask > 0) whose magnitude drops below the lower threshold
// is pruned; a pruned weight (mask == 0) whose magnitude rises above the
// upper threshold is spliced back (unless r == 0). The 0.9 / 1.1 factors
// form a hysteresis band so borderline weights are not toggled every pass.
template <typename Dtype>
__global__ void SqueezeCMaskCalc(const int n, const Dtype* wb,
    Dtype* mask, Dtype mu, Dtype std, Dtype r) {
  // Loop-invariant threshold base, hoisted out of the grid-stride loop.
  const Dtype base = max(mu + std, Dtype(0));
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype magnitude = fabs(wb[index]);
    if (mask[index] > 0 && magnitude <= 0.9 * r * base) {
      mask[index] = 0;  // prune: fell below the lower threshold
    }
    else if (mask[index] == 0 && magnitude > 1.1 * r * base && r != 0) {
      mask[index] = 1;  // splice: grew back above the upper threshold
    }
  }
}
// Elementwise masking: wb_t[i] = wb[i] * mask[i], leaving wb untouched.
// (Masks are validated to {0, 1} elsewhere, so this zeroes pruned weights.)
template <typename Dtype>
__global__ void SqueezeCMaskApply(const int n, const Dtype* wb,
    const Dtype* mask, Dtype* wb_t) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype gate = mask[index];
    wb_t[index] = gate * wb[index];
  }
}
// Repairs corrupted mask entries: any value that is not exactly 0 or 1 is
// snapped back into {0, 1} by rounding to the nearest integer and taking
// the absolute value.
template <typename Dtype>
__global__ void ValidateMask(const int n, Dtype* wb) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype value = wb[index];
    if (value != 0 && value != 1)
      wb[index] = fabs(rintf(value));
  }
}
//Calculate Mean and std deviation of weights
// Host-side driver for SqueezeCMomentCollect: accumulates (not overwrites)
// the sum of |w| into *mu, the sum of w^2 into *std, and the nonzero count
// into *ncount over the n masked weights. One block handles 2*NUM_THREADS
// elements; the per-block partials are copied back and summed on the host.
// NOTE(review): the hipMalloc/hipMemcpy return codes are unchecked here,
// unlike the kernel launch (CUDA_POST_KERNEL_CHECK) -- consider wrapping
// them in the project's error-check macro.
template <typename Dtype>
void SqueezeCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){
const unsigned int NUM_THREADS = 512;
// *_g buffers live on the device, *_c are their host mirrors.
Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g;
Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c;
// Ceiling division: number of blocks of 2*NUM_THREADS elements.
int num_p = (n + (NUM_THREADS << 1) - 1) / (NUM_THREADS << 1);
hipMalloc(&pmu_g, sizeof(Dtype) * num_p);
hipMalloc(&pstd_g, sizeof(Dtype) * num_p);
hipMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
hipLaunchKernelGGL(( SqueezeCMomentCollect<Dtype>), dim3(num_p),dim3(NUM_THREADS), 0, 0, n, wb, mask, pmu_g, pstd_g, pncount_g);
CUDA_POST_KERNEL_CHECK;
hipMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
hipMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, hipMemcpyDeviceToHost);
// Final reduction across block partials.
for (int i = 0; i < num_p; i++) {
*mu += pmu_c[i]; *std += pstd_c[i]; *ncount += pncount_c[i];
}
hipFree(pmu_g);hipFree(pstd_g);hipFree(pncount_g);
free(pmu_c);free(pstd_c);free(pncount_c);
}
// Forward pass with dynamic network surgery (pruning + splicing).
// During TRAIN it: (1) repairs the mask blob with ValidateMask;
// (2) at iterations 0/40/80/120/160 recomputes the mean/std of the masked
// parameters and caches them in a dedicated blob; (3) with probability
// (1 + gamma*iter)^-power (while iter < iter_stop_) updates the weight and
// bias masks via SqueezeCMaskCalc; (4) if dynamicsplicing is set, once at
// iteration 0 re-enables ("splices") a splicing_rate fraction of the pruned
// weights, chosen by sorted magnitude. In all phases it then applies the
// masks (weightTmp = weight * mask) and runs the standard convolution
// forward with the masked parameters.
// Blob layout: with bias, blobs_ = {weight, bias, weightMask, biasMask,
// thresholds}; without bias, blobs_ = {weight, weightMask, thresholds}.
template <typename Dtype>
void SqueezeConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = NULL;
Dtype* weightMask = NULL;
Dtype* weightTmp = NULL;
const Dtype* bias = NULL;
Dtype* biasMask = NULL;
Dtype* biasTmp = NULL;
Dtype* prune_threshold_params_gpu = NULL; // To store mu and std values
Dtype* prune_threshold_params_cpu = NULL;
// Host-side staging for the two cached threshold parameters {mu, std}.
prune_threshold_params_cpu = (Dtype*)malloc(sizeof(Dtype) * 2);
int maskcount = 0;
if (this->bias_term_) {
weight = this->blobs_[0]->mutable_gpu_data();
weightMask = this->blobs_[2]->mutable_gpu_data();
weightTmp = this->weight_tmp_.mutable_gpu_data();
bias = this->blobs_[1]->mutable_gpu_data();
biasMask = this->blobs_[3]->mutable_gpu_data();
prune_threshold_params_gpu = this->blobs_[4]->mutable_gpu_data();
biasTmp = this->bias_tmp_.mutable_gpu_data();
maskcount = this->blobs_[2]->count();
}
else {
weight = this->blobs_[0]->mutable_gpu_data();
weightMask = this->blobs_[1]->mutable_gpu_data();
prune_threshold_params_gpu = this->blobs_[2]->mutable_gpu_data();
weightTmp = this->weight_tmp_.mutable_gpu_data();
maskcount = this->blobs_[1]->count();
}
if (this->phase_ == TRAIN) {
// Validate mask value to avoid corrupted mask value
hipLaunchKernelGGL(( ValidateMask<Dtype>), dim3(CAFFE_GET_BLOCKS(maskcount)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, maskcount,weightMask);
CUDA_POST_KERNEL_CHECK;
// Calculate the mean and standard deviation of learnable parameters
// NOTE(review): SqueezeCMomentCalc accumulates raw sums into this->mu /
// this->std; the lines below convert them to mean and standard deviation
// (E[w^2] - mu^2). This relies on mu/std being zeroed beforehand --
// confirm their initialization in the layer setup.
if ((this->std == 0 && this->iter_ == 0) || this->iter_== 40 || this->iter_== 80 || this->iter_== 120 || this->iter_== 160) {
unsigned int ncount = 0;
SqueezeCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &this->mu, &this->std, &ncount);
if (this->bias_term_) {
SqueezeCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &this->mu, &this->std, &ncount);
}
this->mu /= ncount; this->std -= ncount * this->mu * this->mu;
this->std /= ncount; this->std = sqrt(this->std);
prune_threshold_params_cpu[0] = this->mu;
prune_threshold_params_cpu[1] = this->std;
LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n";
// Copy mu and std value from host to device
// (persisted in a blob so the thresholds survive snapshots/restarts)
hipMemcpy(prune_threshold_params_gpu, prune_threshold_params_cpu, sizeof(Dtype)*2, hipMemcpyHostToDevice);
}
// Copy mu and std value from Device to host
hipMemcpy(prune_threshold_params_cpu, prune_threshold_params_gpu, sizeof(Dtype)*2, hipMemcpyDeviceToHost);
// No pruning/splicing during Retraining
// Calculate the weight mask and bias mask with probability
// The gate fires with a probability that decays with the iteration count.
Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX);
if (pow(1 + (this->gamma) * (this->iter_), -(this->power)) > r && (this->iter_) < (this->iter_stop_)) {
hipLaunchKernelGGL(( SqueezeCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight,
weightMask, prune_threshold_params_cpu[0], prune_threshold_params_cpu[1], this->crate);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
hipLaunchKernelGGL(( SqueezeCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias,
biasMask, prune_threshold_params_cpu[0], prune_threshold_params_cpu[1], this->crate);
CUDA_POST_KERNEL_CHECK;
}
}
// Dynamic Splicing
// Unprune the pruned weights based on the splicing ratio
if(this->dynamicsplicing)
{
// One-shot: runs only at iteration 0, then disables itself.
if (this->iter_ == 0) {
Dtype* weight_cpu = (Dtype *)malloc(this->blobs_[0]->count() *(sizeof(Dtype)));
Dtype* weightMask_cpu = (Dtype *)malloc(this->blobs_[0]->count() *(sizeof(Dtype)));
// Initially copy weight, weightMask to weight_cpu, weightMask_cpu and do Dynamic Splicing
hipMemcpy(weight_cpu, weight, this->blobs_[0]->count() *(sizeof(Dtype)), hipMemcpyDeviceToHost);
hipMemcpy(weightMask_cpu, weightMask, this->blobs_[0]->count() *(sizeof(Dtype)), hipMemcpyDeviceToHost);
// Vector Pair holds weights and corresponding index for pruned nodes
std::vector<std::pair<float, int> > prune_node;
for (unsigned int k = 0; k < this->blobs_[0]->count(); ++k) {
if(weightMask_cpu[k] == 0) {
prune_node.push_back(make_pair(fabs(weight_cpu[k]), k));
}
}
// Sort the weights and unprune the nodes
std::sort(prune_node.begin(), prune_node.end());
int zero_count = prune_node.size();
int to_bespliced = zero_count * this->splicing_rate;
int start_index = 0;
int end_index = 0;
// Start splicing at the first pruned weight above 0.25*(mu+std),
// falling back to the to_bespliced largest-magnitude pruned weights.
for (unsigned int k = 0; k < zero_count; ++k) {
if (prune_node[k].first > (0.25 * (prune_threshold_params_cpu[0] + prune_threshold_params_cpu[1]))) {
start_index = k;
break;
}
}
if(start_index == 0)
start_index = zero_count - to_bespliced; //Update start index
end_index = start_index + to_bespliced;
// Clamp the window so it stays within the pruned set.
if (end_index > zero_count) {
start_index = start_index - (end_index - zero_count);
end_index = start_index + to_bespliced;
}
for (unsigned int k = start_index; k < end_index; ++k) {
weightMask_cpu[prune_node[k].second] = 1;
}
hipMemcpy(weightMask, weightMask_cpu, this->blobs_[0]->count() *(sizeof(Dtype)), hipMemcpyHostToDevice);
free(weightMask_cpu);
free(weight_cpu);
this->dynamicsplicing = false;
}
}
}
// Calculate the current (masked) weight and bias
hipLaunchKernelGGL(( SqueezeCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight, weightMask, weightTmp);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
hipLaunchKernelGGL(( SqueezeCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias, biasMask, biasTmp);
CUDA_POST_KERNEL_CHECK;
}
// Forward calculation with (masked) weight and bias
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp,
top_data + top[i]->offset(n));
if (this->bias_term_) {
this->forward_gpu_bias(top_data + top[i]->offset(n), biasTmp);
}
}
}
free(prune_threshold_params_cpu);
}
// Backward pass for the pruned convolution.
// The previously accumulated weight/bias diffs are masked in place with
// SqueezeCMaskApply before this iteration's gradients are accumulated, and
// the bottom gradient is computed against the masked weights (weightTmp)
// produced by Forward_gpu.
// NOTE(review): the gemm accumulation below runs after the masking, so the
// contributions added this iteration are not themselves masked -- confirm
// this matches the intended surgery scheme before changing it.
template <typename Dtype>
void SqueezeConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weightTmp = this->weight_tmp_.gpu_data();
const Dtype* weightMask = NULL;
// Mask blob index depends on whether a bias blob is present.
if(this->bias_term_)
weightMask = this->blobs_[2]->gpu_data();
else
weightMask = this->blobs_[1]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
const Dtype* biasMask = this->blobs_[3]->gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
// Zero the diff of pruned bias entries (in-place masking).
hipLaunchKernelGGL(( SqueezeCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[3]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n));
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
// Zero the diff of pruned weights (in-place masking).
hipLaunchKernelGGL(( SqueezeCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight_diff, weightMask, weight_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n),
top_diff + top[i]->offset(n), weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + top[i]->offset(n), weightTmp,
bottom_diff + bottom[i]->offset(n));
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SqueezeConvolutionLayer);
} // namespace caffe
/***********************************************************************************************************************/
| c53bb489b487e6075975b9513701f5f14dfd145c.cu | /***************************** MulticoreWare_Modified - Feature: Pruning / Splicing ************************************/
#include <vector>
#include <cmath>
#include<stdio.h>
#include "caffe/filler.hpp"
#include "caffe/layers/squeeze_conv_layer.hpp"
namespace caffe {
// The constant NUM_THREADS should be equal to the value in SqueezeCMomentCalc
// Per-block partial reduction over the masked weights wb*mask.
// Each block consumes a slice of 2*NUM_THREADS elements and produces three
// per-block partials: sum of |w| into mu[blockIdx.x], sum of w^2 into
// std[blockIdx.x], and the nonzero count into count[blockIdx.x]. The host
// (SqueezeCMomentCalc) finishes the reduction across blocks.
// Shared layout: param[0..2*NT) holds |w| terms, param[2*NT..4*NT) holds
// w^2 terms; tcount mirrors the nonzero flags.
template <typename Dtype>
__global__ void SqueezeCMomentCollect(const int n, const Dtype* wb, const Dtype* mask,
Dtype* mu, Dtype* std, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ Dtype param [4 * NUM_THREADS];
__shared__ unsigned int tcount [2 * NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
// First element of this thread's pair (s+t); zero-pad past the tail.
if (s + t < n){
param[t] = fabs(mask[s + t] * wb[s + t]);
param[t + 2 * NUM_THREADS] = mask[s + t] * wb[s + t] * wb[s + t];
if(mask[s + t] * wb[s + t] != 0) tcount[t] = 1;
else tcount[t] = 0;
}
else{
param[t] = 0;param[t + 2 * NUM_THREADS] = 0;tcount[t] = 0;
}
// Second element of the pair (s+t+NUM_THREADS), same treatment.
if (s + t + NUM_THREADS < n){
param[t + NUM_THREADS] = fabs(mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS]);
param[t + 3 * NUM_THREADS] = mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS];
if(mask[s + t +NUM_THREADS] * wb[s + t + NUM_THREADS] != 0) tcount[t + NUM_THREADS] = 1;
else tcount[ t + NUM_THREADS] = 0;
}
else{
param[t + NUM_THREADS] = 0; param[t + 3 * NUM_THREADS] = 0; tcount[t + NUM_THREADS] = 0;
}
__syncthreads();
// Standard shared-memory tree reduction; the barrier inside the loop is
// reached by every thread (the guard only skips the adds), which is
// required for correctness.
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
param[t] += param[t + stride];
param[t + 2 * NUM_THREADS] += param[t + 2 * NUM_THREADS + stride];
tcount[t] += tcount[t + stride];
}
__syncthreads();
}
// Thread 0 publishes the block's partials.
if (t == 0){
mu [blockIdx.x] = param[0];
std [blockIdx.x] = param[2 * NUM_THREADS];
count[blockIdx.x] = tcount[0];
}
}
// The constant NUM_THREADS should be equal to the value in SqueezeCMomentCalc
// Per-block count of nonzero mask entries.
// Each block scans a slice of 2*NUM_THREADS mask values and writes the
// number of nonzeros to count[blockIdx.x] via a shared-memory tree
// reduction; elements past n contribute 0.
template <typename Dtype>
__global__ void SqueezeCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ unsigned int tcount [2 * NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
tcount[t] = 0;
if (s + t < n && mask[s + t] != 0){
tcount[t] = 1;
}
tcount[t+NUM_THREADS] = 0;
if (s + t + NUM_THREADS < n && mask[s + t + NUM_THREADS] != 0){
tcount[t + NUM_THREADS] = 1;
}
__syncthreads();
// Tree reduction; the barrier is outside the guard so all threads reach it.
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
tcount[t] += tcount[t + stride];
}
__syncthreads();
}
// Thread 0 publishes the block's count.
if (t == 0){
count[blockIdx.x] = tcount[0];
}
}
//Check condition for pruning and splicing
// Per-element pruning/splicing decision over the weight array wb.
// A live weight (mask > 0) whose magnitude drops below the lower threshold
// is pruned; a pruned weight (mask == 0) whose magnitude rises above the
// upper threshold is spliced back (unless r == 0). The 0.9 / 1.1 factors
// form a hysteresis band so borderline weights are not toggled every pass.
template <typename Dtype>
__global__ void SqueezeCMaskCalc(const int n, const Dtype* wb,
    Dtype* mask, Dtype mu, Dtype std, Dtype r) {
  // Loop-invariant threshold base, hoisted out of the grid-stride loop.
  const Dtype base = max(mu + std, Dtype(0));
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype magnitude = fabs(wb[index]);
    if (mask[index] > 0 && magnitude <= 0.9 * r * base) {
      mask[index] = 0;  // prune: fell below the lower threshold
    }
    else if (mask[index] == 0 && magnitude > 1.1 * r * base && r != 0) {
      mask[index] = 1;  // splice: grew back above the upper threshold
    }
  }
}
// Elementwise masking: wb_t[i] = wb[i] * mask[i], leaving wb untouched.
// (Masks are validated to {0, 1} elsewhere, so this zeroes pruned weights.)
template <typename Dtype>
__global__ void SqueezeCMaskApply(const int n, const Dtype* wb,
    const Dtype* mask, Dtype* wb_t) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype gate = mask[index];
    wb_t[index] = gate * wb[index];
  }
}
// Repairs corrupted mask entries: any value that is not exactly 0 or 1 is
// snapped back into {0, 1} by rounding to the nearest integer and taking
// the absolute value.
template <typename Dtype>
__global__ void ValidateMask(const int n, Dtype* wb) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype value = wb[index];
    if (value != 0 && value != 1)
      wb[index] = fabs(rintf(value));
  }
}
//Calculate Mean and std deviation of weights
// Host-side driver for SqueezeCMomentCollect: accumulates (not overwrites)
// the sum of |w| into *mu, the sum of w^2 into *std, and the nonzero count
// into *ncount over the n masked weights. One block handles 2*NUM_THREADS
// elements; the per-block partials are copied back and summed on the host.
// NOTE(review): the cudaMalloc/cudaMemcpy return codes are unchecked here,
// unlike the kernel launch (CUDA_POST_KERNEL_CHECK) -- consider wrapping
// them in the project's error-check macro.
template <typename Dtype>
void SqueezeCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){
const unsigned int NUM_THREADS = 512;
// *_g buffers live on the device, *_c are their host mirrors.
Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g;
Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c;
// Ceiling division: number of blocks of 2*NUM_THREADS elements.
int num_p = (n + (NUM_THREADS << 1) - 1) / (NUM_THREADS << 1);
cudaMalloc(&pmu_g, sizeof(Dtype) * num_p);
cudaMalloc(&pstd_g, sizeof(Dtype) * num_p);
cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
SqueezeCMomentCollect<Dtype><<<num_p,NUM_THREADS>>>(n, wb, mask, pmu_g, pstd_g, pncount_g);
CUDA_POST_KERNEL_CHECK;
cudaMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
cudaMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost);
// Final reduction across block partials.
for (int i = 0; i < num_p; i++) {
*mu += pmu_c[i]; *std += pstd_c[i]; *ncount += pncount_c[i];
}
cudaFree(pmu_g);cudaFree(pstd_g);cudaFree(pncount_g);
free(pmu_c);free(pstd_c);free(pncount_c);
}
// Forward pass with dynamic network surgery (pruning + splicing).
// During TRAIN it: (1) repairs the mask blob with ValidateMask;
// (2) at iterations 0/40/80/120/160 recomputes the mean/std of the masked
// parameters and caches them in a dedicated blob; (3) with probability
// (1 + gamma*iter)^-power (while iter < iter_stop_) updates the weight and
// bias masks via SqueezeCMaskCalc; (4) if dynamicsplicing is set, once at
// iteration 0 re-enables ("splices") a splicing_rate fraction of the pruned
// weights, chosen by sorted magnitude. In all phases it then applies the
// masks (weightTmp = weight * mask) and runs the standard convolution
// forward with the masked parameters.
// Blob layout: with bias, blobs_ = {weight, bias, weightMask, biasMask,
// thresholds}; without bias, blobs_ = {weight, weightMask, thresholds}.
template <typename Dtype>
void SqueezeConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = NULL;
Dtype* weightMask = NULL;
Dtype* weightTmp = NULL;
const Dtype* bias = NULL;
Dtype* biasMask = NULL;
Dtype* biasTmp = NULL;
Dtype* prune_threshold_params_gpu = NULL; // To store mu and std values
Dtype* prune_threshold_params_cpu = NULL;
// Host-side staging for the two cached threshold parameters {mu, std}.
prune_threshold_params_cpu = (Dtype*)malloc(sizeof(Dtype) * 2);
int maskcount = 0;
if (this->bias_term_) {
weight = this->blobs_[0]->mutable_gpu_data();
weightMask = this->blobs_[2]->mutable_gpu_data();
weightTmp = this->weight_tmp_.mutable_gpu_data();
bias = this->blobs_[1]->mutable_gpu_data();
biasMask = this->blobs_[3]->mutable_gpu_data();
prune_threshold_params_gpu = this->blobs_[4]->mutable_gpu_data();
biasTmp = this->bias_tmp_.mutable_gpu_data();
maskcount = this->blobs_[2]->count();
}
else {
weight = this->blobs_[0]->mutable_gpu_data();
weightMask = this->blobs_[1]->mutable_gpu_data();
prune_threshold_params_gpu = this->blobs_[2]->mutable_gpu_data();
weightTmp = this->weight_tmp_.mutable_gpu_data();
maskcount = this->blobs_[1]->count();
}
if (this->phase_ == TRAIN) {
// Validate mask value to avoid corrupted mask value
ValidateMask<Dtype><<<CAFFE_GET_BLOCKS(maskcount),
CAFFE_CUDA_NUM_THREADS>>>(maskcount,weightMask);
CUDA_POST_KERNEL_CHECK;
// Calculate the mean and standard deviation of learnable parameters
// NOTE(review): SqueezeCMomentCalc accumulates raw sums into this->mu /
// this->std; the lines below convert them to mean and standard deviation
// (E[w^2] - mu^2). This relies on mu/std being zeroed beforehand --
// confirm their initialization in the layer setup.
if ((this->std == 0 && this->iter_ == 0) || this->iter_== 40 || this->iter_== 80 || this->iter_== 120 || this->iter_== 160) {
unsigned int ncount = 0;
SqueezeCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &this->mu, &this->std, &ncount);
if (this->bias_term_) {
SqueezeCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &this->mu, &this->std, &ncount);
}
this->mu /= ncount; this->std -= ncount * this->mu * this->mu;
this->std /= ncount; this->std = sqrt(this->std);
prune_threshold_params_cpu[0] = this->mu;
prune_threshold_params_cpu[1] = this->std;
LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n";
// Copy mu and std value from host to device
// (persisted in a blob so the thresholds survive snapshots/restarts)
cudaMemcpy(prune_threshold_params_gpu, prune_threshold_params_cpu, sizeof(Dtype)*2, cudaMemcpyHostToDevice);
}
// Copy mu and std value from Device to host
cudaMemcpy(prune_threshold_params_cpu, prune_threshold_params_gpu, sizeof(Dtype)*2, cudaMemcpyDeviceToHost);
// No pruning/splicing during Retraining
// Calculate the weight mask and bias mask with probability
// The gate fires with a probability that decays with the iteration count.
Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX);
if (pow(1 + (this->gamma) * (this->iter_), -(this->power)) > r && (this->iter_) < (this->iter_stop_)) {
SqueezeCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight,
weightMask, prune_threshold_params_cpu[0], prune_threshold_params_cpu[1], this->crate);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
SqueezeCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias,
biasMask, prune_threshold_params_cpu[0], prune_threshold_params_cpu[1], this->crate);
CUDA_POST_KERNEL_CHECK;
}
}
// Dynamic Splicing
// Unprune the pruned weights based on the splicing ratio
if(this->dynamicsplicing)
{
// One-shot: runs only at iteration 0, then disables itself.
if (this->iter_ == 0) {
Dtype* weight_cpu = (Dtype *)malloc(this->blobs_[0]->count() *(sizeof(Dtype)));
Dtype* weightMask_cpu = (Dtype *)malloc(this->blobs_[0]->count() *(sizeof(Dtype)));
// Initially copy weight, weightMask to weight_cpu, weightMask_cpu and do Dynamic Splicing
cudaMemcpy(weight_cpu, weight, this->blobs_[0]->count() *(sizeof(Dtype)), cudaMemcpyDeviceToHost);
cudaMemcpy(weightMask_cpu, weightMask, this->blobs_[0]->count() *(sizeof(Dtype)), cudaMemcpyDeviceToHost);
// Vector Pair holds weights and corresponding index for pruned nodes
std::vector<std::pair<float, int> > prune_node;
for (unsigned int k = 0; k < this->blobs_[0]->count(); ++k) {
if(weightMask_cpu[k] == 0) {
prune_node.push_back(make_pair(fabs(weight_cpu[k]), k));
}
}
// Sort the weights and unprune the nodes
std::sort(prune_node.begin(), prune_node.end());
int zero_count = prune_node.size();
int to_bespliced = zero_count * this->splicing_rate;
int start_index = 0;
int end_index = 0;
// Start splicing at the first pruned weight above 0.25*(mu+std),
// falling back to the to_bespliced largest-magnitude pruned weights.
for (unsigned int k = 0; k < zero_count; ++k) {
if (prune_node[k].first > (0.25 * (prune_threshold_params_cpu[0] + prune_threshold_params_cpu[1]))) {
start_index = k;
break;
}
}
if(start_index == 0)
start_index = zero_count - to_bespliced; //Update start index
end_index = start_index + to_bespliced;
// Clamp the window so it stays within the pruned set.
if (end_index > zero_count) {
start_index = start_index - (end_index - zero_count);
end_index = start_index + to_bespliced;
}
for (unsigned int k = start_index; k < end_index; ++k) {
weightMask_cpu[prune_node[k].second] = 1;
}
cudaMemcpy(weightMask, weightMask_cpu, this->blobs_[0]->count() *(sizeof(Dtype)), cudaMemcpyHostToDevice);
free(weightMask_cpu);
free(weight_cpu);
this->dynamicsplicing = false;
}
}
}
// Calculate the current (masked) weight and bias
SqueezeCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, weightTmp);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
SqueezeCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, biasTmp);
CUDA_POST_KERNEL_CHECK;
}
// Forward calculation with (masked) weight and bias
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp,
top_data + top[i]->offset(n));
if (this->bias_term_) {
this->forward_gpu_bias(top_data + top[i]->offset(n), biasTmp);
}
}
}
free(prune_threshold_params_cpu);
}
// Backward pass for the pruned convolution.
// The previously accumulated weight/bias diffs are masked in place with
// SqueezeCMaskApply before this iteration's gradients are accumulated, and
// the bottom gradient is computed against the masked weights (weightTmp)
// produced by Forward_gpu.
// NOTE(review): the gemm accumulation below runs after the masking, so the
// contributions added this iteration are not themselves masked -- confirm
// this matches the intended surgery scheme before changing it.
template <typename Dtype>
void SqueezeConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weightTmp = this->weight_tmp_.gpu_data();
const Dtype* weightMask = NULL;
// Mask blob index depends on whether a bias blob is present.
if(this->bias_term_)
weightMask = this->blobs_[2]->gpu_data();
else
weightMask = this->blobs_[1]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
const Dtype* biasMask = this->blobs_[3]->gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
// Zero the diff of pruned bias entries (in-place masking).
SqueezeCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n));
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
// Zero the diff of pruned weights (in-place masking).
SqueezeCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight_diff, weightMask, weight_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n),
top_diff + top[i]->offset(n), weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + top[i]->offset(n), weightTmp,
bottom_diff + bottom[i]->offset(n));
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SqueezeConvolutionLayer);
} // namespace caffe
/***********************************************************************************************************************/
|
8494b6bd7265e905f2800e91c9f98a8365e30911.hip | // !!! This is a file automatically generated by hipify!!!
#include <container_ops.h>
/*
 * Enumerates the CUDA-capable devices visible through the HIP runtime and
 * logs their key properties via cadLog. Returns -1 when no real device is
 * present (the 9999.9999 compute-capability sentinel), 0 otherwise.
 */
int main(int argc, char **argv) {
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; dev++) {
        hipDeviceProp_t deviceProp;
        hipGetDeviceProperties(&deviceProp, dev);
        if (dev == 0) {
            if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
                cadLog("No CUDA GPU has been detected");
                return -1;
            } else if (deviceCount == 1) {
                cadLog("There is 1 device supporting CUDA");
            } else {
                cadLog("There are " << deviceCount << " devices supporting CUDA");
            }
        }
        cadLog("Device " << dev << " name: " << deviceProp.name);
        cadLog(" Computational Capabilities: " << deviceProp.major << "." << deviceProp.minor);
        cadLog(" Maximum global memory size: " << deviceProp.totalGlobalMem);
        cadLog(" Maximum constant memory size: " << deviceProp.totalConstMem);
        cadLog(" Maximum shared memory size per block: " << deviceProp.sharedMemPerBlock);
        cadLog(" Maximum block dimensions: " << deviceProp.maxThreadsDim[0]
               << " x " << deviceProp.maxThreadsDim[1]
               << " x " << deviceProp.maxThreadsDim[2]);
        cadLog(" Maximum grid dimensions: " << deviceProp.maxGridSize[0]
               << " x " << deviceProp.maxGridSize[1]
               << " x " << deviceProp.maxGridSize[2]);
        cadLog(" Warp size: " << deviceProp.warpSize);
        // BUG FIX: a stray "return 0;" here made the function exit after
        // printing device 0, so the remaining devices announced by
        // "There are N devices supporting CUDA" were never listed.
    }
    return 0;
}
| 8494b6bd7265e905f2800e91c9f98a8365e30911.cu | #include <container_ops.h>
// Enumerates every CUDA-capable device and logs its key properties.
// Returns 0 on success, -1 when no real GPU is present.
int main(int argc, char **argv) {
  int deviceCount = 0;  // initialized in case the count query fails
  cudaGetDeviceCount(&deviceCount);
  for (int dev = 0; dev < deviceCount; dev++) {
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    // Emit the summary banner only once, before the first device's details.
    if (dev == 0) {
      if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
        // Compute capability 9999.9999 is the "emulation only / no real
        // device" sentinel.
        cadLog("No CUDA GPU has been detected");
        return -1;
      } else if (deviceCount == 1) {
        cadLog("There is 1 device supporting CUDA");
      } else {
        cadLog("There are " << deviceCount << " devices supporting CUDA");
      }
    }
    cadLog("Device " << dev << " name: " << deviceProp.name);
    cadLog(" Computational Capabilities: " << deviceProp.major << "." << deviceProp.minor);
    cadLog(" Maximum global memory size: " << deviceProp.totalGlobalMem);
    cadLog(" Maximum constant memory size: " << deviceProp.totalConstMem);
    cadLog(" Maximum shared memory size per block: " << deviceProp.sharedMemPerBlock);
    cadLog(" Maximum block dimensions: " << deviceProp.maxThreadsDim[0]
           << " x " << deviceProp.maxThreadsDim[1]
           << " x " << deviceProp.maxThreadsDim[2]);
    cadLog(" Maximum grid dimensions: " << deviceProp.maxGridSize[0]
           << " x " << deviceProp.maxGridSize[1]
           << " x " << deviceProp.maxGridSize[2]);
    cadLog(" Warp size: " << deviceProp.warpSize);
    // FIX: the original ended the loop body with an unconditional
    // `return 0;`, so only device 0 was ever reported on multi-GPU hosts.
  }
  return 0;
}
|
4ab1f067905dae859d4f9410f7be7c48bd57789d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Device code
// Elementwise update over N floats, one thread per element:
// scales A[i] by `scalar` and offsets B[i] by `scalar`.
extern "C" __global__ void op(float* A, float * B, float scalar, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;  // tail threads past the array end do nothing
    A[idx] *= scalar;
    B[idx] += scalar;
}
// Elementwise update over N floats, one thread per element.
// Order matters: B is offset first and its NEW value feeds into A.
extern "C" __global__ void op2(float* A, float * B, float scalar, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;  // tail threads past the array end do nothing
    B[idx] += scalar;
    A[idx] = A[idx] * scalar + B[idx];
}
| 4ab1f067905dae859d4f9410f7be7c48bd57789d.cu | // Device code
// Elementwise update over N floats, one thread per element:
// scales A[i] by `scalar` and offsets B[i] by `scalar`.
extern "C" __global__ void op(float* A, float * B, float scalar, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;  // tail threads past the array end do nothing
    A[idx] *= scalar;
    B[idx] += scalar;
}
// Elementwise update over N floats, one thread per element.
// Order matters: B is offset first and its NEW value feeds into A.
extern "C" __global__ void op2(float* A, float * B, float scalar, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;  // tail threads past the array end do nothing
    B[idx] += scalar;
    A[idx] = A[idx] * scalar + B[idx];
}
|
e8c32986f99e16db8ff520ef81678788315fdabc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/platform/assert.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
// Value/original-index pair used while selecting the top-k elements.
// On equal values the pair with the SMALLER index compares as larger,
// so ties are broken toward earlier positions.
template <typename T>
struct Pair {
  __device__ __forceinline__ Pair() {}
  __device__ __forceinline__ Pair(T value, int64_t id) : v(value), id(id) {}
  // Re-initializes this pair in place.
  __device__ __forceinline__ void set(T value, int64_t id) {
    v = value;
    // FIX: the original wrote `id = id;` — a self-assignment of the
    // parameter that left the member untouched, so indices written via
    // set() were never updated.
    this->id = id;
  }
  __device__ __forceinline__ void operator=(const Pair<T>& in) {
    v = in.v;
    id = in.id;
  }
  __device__ __forceinline__ bool operator<(const T value) const {
    return (v < value);
  }
  __device__ __forceinline__ bool operator<(const Pair<T>& in) const {
    return (v < in.v) || ((v == in.v) && (id > in.id));
  }
  __device__ __forceinline__ bool operator>(const Pair<T>& in) const {
    return (v > in.v) || ((v == in.v) && (id < in.id));
  }
  T v;         // element value
  int64_t id;  // original index of the element
};
// Inserts candidate `p` into `topk`, an array of `beam_size` pairs kept in
// descending order: entries smaller than `p` are shifted one slot toward
// the tail.  The caller is expected to have already checked that `p` beats
// topk[beam_size - 1]; the tail element is overwritten unconditionally.
template <typename T>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p,
                                      int beam_size) {
  for (int k = beam_size - 2; k >= 0; k--) {
    if (topk[k] < p) {
      topk[k + 1] = topk[k];
    } else {
      topk[k + 1] = p;
      return;
    }
  }
  // p beat every existing entry: it becomes the new head.
  topk[0] = p;
}
// Compile-time-sized variant of AddTo above: `beam_size` is a template
// parameter so the shift loop can be fully unrolled by the compiler.
// Same contract: `topk` is descending-ordered and the caller has already
// verified that `p` beats the current tail element.
template <typename T, int beam_size>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) {
  for (int k = beam_size - 2; k >= 0; k--) {
    if (topk[k] < p) {
      topk[k + 1] = topk[k];
    } else {
      topk[k + 1] = p;
      return;
    }
  }
  // p beat every existing entry: it becomes the new head.
  topk[0] = p;
}
// Per-thread scan over a dense row: visits src[idx], src[idx+BlockSize],
// ... up to `dim`, merging any element that beats the thread's current
// minimum (topk[beam_size - 1]) into its private top-k buffer.
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
                                        int dim, int beam_size) {
  while (idx < dim) {
    if (topk[beam_size - 1] < src[idx]) {
      Pair<T> tmp(src[idx], idx);
      AddTo<T>(topk, tmp, beam_size);
    }
    idx += BlockSize;
  }
}
// As the dense GetTopK above, but additionally rejects candidates that are
// not strictly below `max` — i.e. values already emitted by a previous
// selection pass are skipped.
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
                                        int dim, const Pair<T>& max,
                                        int beam_size) {
  while (idx < dim) {
    if (topk[beam_size - 1] < src[idx]) {
      Pair<T> tmp(src[idx], idx);
      if (tmp < max) {
        AddTo<T>(topk, tmp, beam_size);
      }
    }
    idx += BlockSize;
  }
}
// Sparse-row variant: values come from `val` with explicit column indices
// in `col` (rather than using the scan position as the index).
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
                                        int idx, int dim, int beam_size) {
  while (idx < dim) {
    if (topk[beam_size - 1] < val[idx]) {
      Pair<T> tmp(val[idx], col[idx]);
      AddTo<T>(topk, tmp, beam_size);
    }
    idx += BlockSize;
  }
}
// Sparse-row variant with an upper bound: candidates must both beat the
// thread's current minimum and be strictly below `max` (already-emitted
// values are skipped).
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
                                        int idx, int dim, const Pair<T>& max,
                                        int beam_size) {
  while (idx < dim) {
    if (topk[beam_size - 1] < val[idx]) {
      Pair<T> tmp(val[idx], col[idx]);
      if (tmp < max) {
        AddTo<T>(topk, tmp, beam_size);
      }
    }
    idx += BlockSize;
  }
}
// Refills a thread's private top-k buffer after `beam` of its entries were
// consumed by the block-wide reduction.  Surviving entries are shifted to
// the front, the freed tail slots are reset, and a fresh scan (bounded by
// `max`, the smallest value already emitted) refills them.
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
                                              int beam_size, const T* src,
                                              bool& firstStep, bool& is_empty,
                                              Pair<T>& max, int dim,
                                              const int tid) {
  if (beam > 0) {
    int length = beam < beam_size ? beam : beam_size;
    if (firstStep) {
      // First pass: fill the whole buffer with an unbounded scan.
      firstStep = false;
      GetTopK<T, BlockSize>(topk, src, tid, dim, length);
    } else {
      // Shift survivors forward; reset the `beam` freed tail slots.
      for (int k = 0; k < MaxLength; k++) {
        if (k < MaxLength - beam) {
          topk[k] = topk[k + beam];
        } else {
          topk[k].set(-INFINITY, -1);
        }
      }
      if (!is_empty) {
        GetTopK<T, BlockSize>(topk + MaxLength - beam, src, tid, dim, max,
                              length);
      }
    }
    max = topk[MaxLength - 1];
    // NOTE(review): emptiness is detected by comparing v against -1 even
    // though exhausted slots are seeded with -INFINITY above — confirm
    // against upstream PaddlePaddle top_k_op.
    if (max.v == -1) is_empty = true;
    beam = 0;
  }
}
// Sparse-row counterpart of ThreadGetTopK above: values come from `val`
// with explicit column indices in `col`.  Logic is otherwise identical.
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
                                              int beam_size, const T* val,
                                              int* col, bool& firstStep,
                                              bool& is_empty, Pair<T>& max,
                                              int dim, const int tid) {
  if (beam > 0) {
    int length = beam < beam_size ? beam : beam_size;
    if (firstStep) {
      // First pass: fill the whole buffer with an unbounded scan.
      firstStep = false;
      GetTopK<T, BlockSize>(topk, val, col, tid, dim, length);
    } else {
      // Shift survivors forward; reset the `beam` freed tail slots.
      for (int k = 0; k < MaxLength; k++) {
        if (k < MaxLength - beam) {
          topk[k] = topk[k + beam];
        } else {
          topk[k].set(-INFINITY, -1);
        }
      }
      if (!is_empty) {
        GetTopK<T, BlockSize>(topk + MaxLength - beam, val, col, tid, dim, max,
                              length);
      }
    }
    max = topk[MaxLength - 1];
    // NOTE(review): same v == -1 emptiness test as the dense variant —
    // confirm against upstream.
    if (max.v == -1) is_empty = true;
    beam = 0;
  }
}
// Block-wide selection loop: each iteration finds the maximum of the
// per-thread candidates in shared memory (tree reduction over `maxid`),
// thread 0 appends it to the output, and the winning thread advances its
// private buffer.  Exits when k results have been emitted or the winning
// thread's buffer is exhausted.
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid,
                                            Pair<T> topk[], T** topVal,
                                            int64_t** topIds, int& beam, int& k,
                                            const int tid, const int warp) {
  while (true) {
    __syncthreads();
    // First reduction step: halve the candidate set.
    if (tid < BlockSize / 2) {
      if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) {
        maxid[tid] = tid + BlockSize / 2;
      } else {
        maxid[tid] = tid;
      }
    }
    __syncthreads();
    // Remaining tree reduction over the index array.
    for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) {
      if (tid < stride) {
        if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) {
          maxid[tid] = maxid[tid + stride];
        }
      }
      __syncthreads();
    }
    __syncthreads();
    // Thread 0 writes the block-wide winner and advances the output cursors.
    if (tid == 0) {
      **topVal = sh_topk[maxid[0]].v;
      **topIds = sh_topk[maxid[0]].id;
      (*topVal)++;
      (*topIds)++;
    }
    // The winning thread has consumed one entry of its private buffer.
    if (tid == maxid[0]) beam++;
    if (--k == 0) break;
    __syncthreads();
    if (tid == maxid[0]) {
      if (beam < MaxLength) {
        sh_topk[tid] = topk[beam];
      }
    }
    // NOTE(review): legacy mask-less __shfl — valid on pre-Volta GPUs only;
    // Volta+ requires the __shfl_sync variants.
    if (maxid[0] / 32 == warp) {
      if (__shfl(beam, (maxid[0]) % 32, 32) == MaxLength) break;
    }
  }
}
/**
* Each block compute one sample.
* In a block:
* 1. every thread get top MaxLength value;
* 2. merge to sh_topk, block reduce and get max value;
* 3. go to the second setp, until one thread's topk value is null;
* 4. go to the first setp, until get the topk value.
*/
// Top-k kernel: one block per row (sample).  Each thread keeps up to
// MaxLength private candidates; BlockReduce repeatedly extracts the
// block-wide maximum until k values have been written.
template <typename T, int MaxLength, int BlockSize>
__global__ void KeMatrixTopK(T* output, int output_stride, int64_t* indices,
                             const T* src, int lds, int dim, int k) {
  __shared__ Pair<T> sh_topk[BlockSize];
  __shared__ int maxid[BlockSize / 2];
  const int tid = threadIdx.x;
  const int warp = threadIdx.x / 32;
  // Advance output pointers to this block's row.
  output += blockIdx.x * output_stride;
  indices += blockIdx.x * k;
  Pair<T> topk[MaxLength];
  int beam = MaxLength;
  Pair<T> max;
  bool is_empty = false;
  bool firststep = true;
  // NOTE(review): this loop variable shadows the parameter `k` (the
  // remaining result count used by the while-loop below).
  for (int k = 0; k < MaxLength; k++) {
    topk[k].set(-INFINITY, -1);
  }
  // BlockReduce decrements k by one per emitted result.
  while (k) {
    ThreadGetTopK<T, MaxLength, BlockSize>(topk, beam, k,
                                           src + blockIdx.x * lds, firststep,
                                           is_empty, max, dim, tid);
    sh_topk[tid] = topk[0];
    BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &output,
                                         &indices, beam, k, tid, warp);
  }
}
// Host-side op kernel: launches KeMatrixTopK with one 256-thread block per
// input row, writing the top-k values to "Out" and their column indices to
// "Indices".  k is clamped to the row width.
template <typename T>
class TopkOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                   "It must use GPUPlace.");
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    auto* indices = ctx.Output<Tensor>("Indices");
    size_t k = static_cast<int>(ctx.Attr<int>("k"));
    const T* input_data = input->data<T>();
    T* output_data = output->mutable_data<T>(ctx.GetPlace());
    // FIXME(typhoonzero): data is always converted to type T?
    int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
    size_t input_height = input->dims()[0];
    size_t input_width = input->dims()[1];
    // Clamp k so the kernel never reads past the row.
    if (k > input_width) k = input_width;
    // NOTE: pass lds and dim same to input width.
    // NOTE: old matrix implementation of stride is different to eigen.
    // TODO(typhoonzero): refine this kernel.
    dim3 threads(256, 1);
    dim3 grid(input_height, 1);
    // Launch on the context's stream; MaxLength=5 private candidates per
    // thread, 256 threads per block.
    hipLaunchKernelGGL(( KeMatrixTopK<T, 5, 256>),
        dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
                           ctx.device_context())
                           .stream(), output_data, output->dims()[1],
                                     indices_data, input_data,
                                     input_width, input_width, int(k));
  }
};
} // namespace operators
} // namespace paddle
REGISTER_OP_GPU_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>);
| e8c32986f99e16db8ff520ef81678788315fdabc.cu | /* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/platform/assert.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
// Value/original-index pair used while selecting the top-k elements.
// On equal values the pair with the SMALLER index compares as larger,
// so ties are broken toward earlier positions.
template <typename T>
struct Pair {
  __device__ __forceinline__ Pair() {}
  __device__ __forceinline__ Pair(T value, int64_t id) : v(value), id(id) {}
  // Re-initializes this pair in place.
  __device__ __forceinline__ void set(T value, int64_t id) {
    v = value;
    // FIX: the original wrote `id = id;` — a self-assignment of the
    // parameter that left the member untouched, so indices written via
    // set() were never updated.
    this->id = id;
  }
  __device__ __forceinline__ void operator=(const Pair<T>& in) {
    v = in.v;
    id = in.id;
  }
  __device__ __forceinline__ bool operator<(const T value) const {
    return (v < value);
  }
  __device__ __forceinline__ bool operator<(const Pair<T>& in) const {
    return (v < in.v) || ((v == in.v) && (id > in.id));
  }
  __device__ __forceinline__ bool operator>(const Pair<T>& in) const {
    return (v > in.v) || ((v == in.v) && (id < in.id));
  }
  T v;         // element value
  int64_t id;  // original index of the element
};
template <typename T>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p,
int beam_size) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int beam_size>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* src,
bool& firstStep, bool& is_empty,
Pair<T>& max, int dim,
const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, src, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, src, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* val,
int* col, bool& firstStep,
bool& is_empty, Pair<T>& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, val, col, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, val, col, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid,
Pair<T> topk[], T** topVal,
int64_t** topIds, int& beam, int& k,
const int tid, const int warp) {
while (true) {
__syncthreads();
if (tid < BlockSize / 2) {
if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) {
maxid[tid] = tid + BlockSize / 2;
} else {
maxid[tid] = tid;
}
}
__syncthreads();
for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) {
if (tid < stride) {
if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) {
maxid[tid] = maxid[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
**topVal = sh_topk[maxid[0]].v;
**topIds = sh_topk[maxid[0]].id;
(*topVal)++;
(*topIds)++;
}
if (tid == maxid[0]) beam++;
if (--k == 0) break;
__syncthreads();
if (tid == maxid[0]) {
if (beam < MaxLength) {
sh_topk[tid] = topk[beam];
}
}
if (maxid[0] / 32 == warp) {
if (__shfl(beam, (maxid[0]) % 32, 32) == MaxLength) break;
}
}
}
/**
* Each block compute one sample.
* In a block:
* 1. every thread get top MaxLength value;
* 2. merge to sh_topk, block reduce and get max value;
* 3. go to the second setp, until one thread's topk value is null;
* 4. go to the first setp, until get the topk value.
*/
template <typename T, int MaxLength, int BlockSize>
__global__ void KeMatrixTopK(T* output, int output_stride, int64_t* indices,
const T* src, int lds, int dim, int k) {
__shared__ Pair<T> sh_topk[BlockSize];
__shared__ int maxid[BlockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
output += blockIdx.x * output_stride;
indices += blockIdx.x * k;
Pair<T> topk[MaxLength];
int beam = MaxLength;
Pair<T> max;
bool is_empty = false;
bool firststep = true;
for (int k = 0; k < MaxLength; k++) {
topk[k].set(-INFINITY, -1);
}
while (k) {
ThreadGetTopK<T, MaxLength, BlockSize>(topk, beam, k,
src + blockIdx.x * lds, firststep,
is_empty, max, dim, tid);
sh_topk[tid] = topk[0];
BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &output,
&indices, beam, k, tid, warp);
}
}
template <typename T>
class TopkOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
size_t k = static_cast<int>(ctx.Attr<int>("k"));
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
// FIXME(typhoonzero): data is always converted to type T?
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
size_t input_height = input->dims()[0];
size_t input_width = input->dims()[1];
if (k > input_width) k = input_width;
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
// TODO(typhoonzero): refine this kernel.
dim3 threads(256, 1);
dim3 grid(input_height, 1);
KeMatrixTopK<T, 5, 256><<<
grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(output_data, output->dims()[1],
indices_data, input_data,
input_width, input_width, int(k));
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_GPU_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>);
|
9bac90e95b99efeec6d0e45e6ca3424a958dc3de.hip | // !!! This is a file automatically generated by hipify!!!
#define PRG_SEED 1
// Bundle of GPU library handles shared across the application.
struct handles{
  hipStream_t stream1;                      // private stream for cusparse_h1
  hipsparseHandle_t cusparse_h0, cusparse_h1; // h1 is bound to stream1 in init()
  hipblasHandle_t cublas_h;                 // BLAS handle
  cusolverSpHandle_t cusolver_h;            // sparse-solver handle
  hiprandGenerator_t uniformRNG;            // RNG seeded with PRG_SEED in init()
};
namespace Handles{
// Allocates and initializes every GPU library handle the app uses: two
// sparse handles (the second bound to a freshly created stream), a BLAS
// handle, a sparse-solver handle, and an RNG seeded with PRG_SEED.
handles* init(){
  handles *h = (handles*) malloc(sizeof(handles));
  CHECK_HOST(h);
  CHECK_CUSPARSE( hipsparseCreate(&(h->cusparse_h0)) );
  CHECK_CUSPARSE( hipsparseCreate(&(h->cusparse_h1)) );
  CHECK_CUBLAS( hipblasCreate(&(h->cublas_h)) );
  CHECK_DEVICE( hipStreamCreate(&(h->stream1)) );
  CHECK_CUSPARSE( hipsparseSetStream(h->cusparse_h1, h->stream1) );
  CHECK_CUSOLVER( cusolverSpCreate(&(h->cusolver_h)) );
  // NOTE(review): unlike every other handle above, the RNG calls are
  // unchecked — consider a CHECK_* wrapper.
  hiprandCreateGenerator(&h->uniformRNG, HIPRAND_RNG_PSEUDO_DEFAULT);
  hiprandSetPseudoRandomGeneratorSeed(h->uniformRNG, PRG_SEED);
  return h;
}
// Destroys the handles created by init() and frees the wrapper struct.
void free(handles *h){
  CHECK_CUSPARSE( hipsparseDestroy(h->cusparse_h0) );
  CHECK_CUSPARSE( hipsparseDestroy(h->cusparse_h1) );
  CHECK_CUBLAS( hipblasDestroy(h->cublas_h) );
  CHECK_DEVICE( hipStreamDestroy(h->stream1) );
  CHECK_CUSOLVER( cusolverSpDestroy(h->cusolver_h) );
  // NOTE(review): the generator destroy is commented out, so the RNG state
  // is never released — presumably disabled deliberately; confirm.
  //hiprandDestroyGenerator(h->uniformRNG);
  std::free(h);
}
}
| 9bac90e95b99efeec6d0e45e6ca3424a958dc3de.cu | #define PRG_SEED 1
// Bundle of GPU library handles shared across the application.
struct handles{
  cudaStream_t stream1;                     // private stream for cusparse_h1
  cusparseHandle_t cusparse_h0, cusparse_h1; // h1 is bound to stream1 in init()
  cublasHandle_t cublas_h;                  // BLAS handle
  cusolverSpHandle_t cusolver_h;            // sparse-solver handle
  curandGenerator_t uniformRNG;             // RNG seeded with PRG_SEED in init()
};
namespace Handles{
// Allocates and initializes every GPU library handle the app uses: two
// cuSPARSE handles (the second bound to a freshly created stream), a
// cuBLAS handle, a cuSOLVER-sparse handle, and a cuRAND generator seeded
// with PRG_SEED.
handles* init(){
  handles *h = (handles*) malloc(sizeof(handles));
  CHECK_HOST(h);
  CHECK_CUSPARSE( cusparseCreate(&(h->cusparse_h0)) );
  CHECK_CUSPARSE( cusparseCreate(&(h->cusparse_h1)) );
  CHECK_CUBLAS( cublasCreate(&(h->cublas_h)) );
  CHECK_DEVICE( cudaStreamCreate(&(h->stream1)) );
  CHECK_CUSPARSE( cusparseSetStream(h->cusparse_h1, h->stream1) );
  CHECK_CUSOLVER( cusolverSpCreate(&(h->cusolver_h)) );
  // NOTE(review): unlike every other handle above, the cuRAND calls are
  // unchecked — consider a CHECK_* wrapper.
  curandCreateGenerator(&h->uniformRNG, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetPseudoRandomGeneratorSeed(h->uniformRNG, PRG_SEED);
  return h;
}
// Destroys the handles created by init() and frees the wrapper struct.
void free(handles *h){
  CHECK_CUSPARSE( cusparseDestroy(h->cusparse_h0) );
  CHECK_CUSPARSE( cusparseDestroy(h->cusparse_h1) );
  CHECK_CUBLAS( cublasDestroy(h->cublas_h) );
  CHECK_DEVICE( cudaStreamDestroy(h->stream1) );
  CHECK_CUSOLVER( cusolverSpDestroy(h->cusolver_h) );
  // NOTE(review): the generator destroy is commented out, so the cuRAND
  // state is never released — presumably disabled deliberately; confirm.
  //curandDestroyGenerator(h->uniformRNG);
  std::free(h);
}
}
|
e845bb8667b35a8c360c62515316ded89afb55da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Clamps each element of the cr-by-cc matrix `a` into [min, max], writing
// the result to `c`; one thread per element.  Plain comparisons are used
// (not fmin/fmax), so NaN inputs pass through unchanged.
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
    int col = blockIdx.x * blockDim.x + threadIdx.x; // matrix column
    int row = blockIdx.y * blockDim.y + threadIdx.y; // matrix row
    if(col < cc && row < cr){
        const int idx = row * cc + col;
        double v = a[idx];
        if(v > max){
            v = max;
        }else if(v < min){
            v = min;
        }
        c[idx] = v;
    }
} | e845bb8667b35a8c360c62515316ded89afb55da.cu | #include "includes.h"
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
if(a[y * cc + x] > max){
c[y * cc + x] = max;
}else{
if(a[y * cc + x] < min){
c[y * cc + x] = min;
}else{
c[y * cc + x] = a[y * cc + x];
}
}
}
} |
59e5b436c65d73f6c5787d932da833add0cec218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_ideal_gas_kernel [4][2];
static int dims_ideal_gas_kernel_h [4][2] = {0};
//user function
// Ideal-gas equation of state for one cell: derives pressure and the local
// speed of sound from density and specific internal energy.
// NOTE(review): the constant 1.4 appears to be the hard-coded ratio of
// specific heats (gamma) baked in by the code generator — confirm upstream.
__device__
void ideal_gas_kernel_gpu(const ACC<double> &density,
  const ACC<double> &energy,
  ACC<double> &pressure,
  ACC<double> &soundspeed) {
  double sound_speed_squared, v, pressurebyenergy, pressurebyvolume;
  v = 1.0 / density(0,0,0);  // specific volume
  pressure(0,0,0) = (1.4 - 1.0) * density(0,0,0) * energy(0,0,0);
  pressurebyenergy = (1.4 - 1.0) * density(0,0,0);
  pressurebyvolume = -1.0*density(0,0,0) * pressure(0,0,0);
  sound_speed_squared = v*v*(pressure(0,0,0) * pressurebyenergy-pressurebyvolume);
  soundspeed(0,0,0) = sqrt(sound_speed_squared);
}
// OPS-generated wrapper kernel: one thread per (x, y, z) grid point.
// Each argument pointer is offset into its 3D dataset using the per-arg
// extents stored in the __constant__ array dims_ideal_gas_kernel, then the
// user function is applied through ACC accessors.
__global__ void ops_ideal_gas_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0,
int size1,
int size2 ){
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Offset each base pointer to this thread's cell (row-major x/y/z).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[0][0] + idx_z * 1*1 * dims_ideal_gas_kernel[0][0] * dims_ideal_gas_kernel[0][1];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[1][0] + idx_z * 1*1 * dims_ideal_gas_kernel[1][0] * dims_ideal_gas_kernel[1][1];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[2][0] + idx_z * 1*1 * dims_ideal_gas_kernel[2][0] * dims_ideal_gas_kernel[2][1];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[3][0] + idx_z * 1*1 * dims_ideal_gas_kernel[3][0] * dims_ideal_gas_kernel[3][1];
  // Bounds guard: the grid may overshoot the iteration range.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    const ACC<double> argp0(dims_ideal_gas_kernel[0][0], dims_ideal_gas_kernel[0][1], arg0);
    const ACC<double> argp1(dims_ideal_gas_kernel[1][0], dims_ideal_gas_kernel[1][1], arg1);
    ACC<double> argp2(dims_ideal_gas_kernel[2][0], dims_ideal_gas_kernel[2][1], arg2);
    ACC<double> argp3(dims_ideal_gas_kernel[3][0], dims_ideal_gas_kernel[3][1], arg3);
    ideal_gas_kernel_gpu(argp0, argp1, argp2, argp3);
  }
}
// host stub function
// Auto-generated (ops.py) host-side driver: computes the local iteration
// range, caches the per-arg extents in __constant__ memory, sets up base
// pointers, performs halo/host-device exchanges, launches the wrapper
// kernel, and records timing/transfer diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
// Lazy-execution entry point: unpacks the queued kernel descriptor.
void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[4] = { arg0, arg1, arg2, arg3};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,4,range,11)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(11,"ideal_gas_kernel");
    OPS_kernels[11].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI
  #ifdef OPS_MPI
  int arg_idx[3];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  // Re-upload the extents to __constant__ memory only when they changed
  // since the last launch (cached in dims_ideal_gas_kernel_h).
  if (xdim0 != dims_ideal_gas_kernel_h[0][0] || ydim0 != dims_ideal_gas_kernel_h[0][1] || xdim1 != dims_ideal_gas_kernel_h[1][0] || ydim1 != dims_ideal_gas_kernel_h[1][1] || xdim2 != dims_ideal_gas_kernel_h[2][0] || ydim2 != dims_ideal_gas_kernel_h[2][1] || xdim3 != dims_ideal_gas_kernel_h[3][0] || ydim3 != dims_ideal_gas_kernel_h[3][1]) {
    dims_ideal_gas_kernel_h[0][0] = xdim0;
    dims_ideal_gas_kernel_h[0][1] = ydim0;
    dims_ideal_gas_kernel_h[1][0] = xdim1;
    dims_ideal_gas_kernel_h[1][1] = ydim1;
    dims_ideal_gas_kernel_h[2][0] = xdim2;
    dims_ideal_gas_kernel_h[2][1] = ydim2;
    dims_ideal_gas_kernel_h[3][0] = xdim3;
    dims_ideal_gas_kernel_h[3][1] = ydim3;
    cutilSafeCall(hipMemcpyToSymbol( dims_ideal_gas_kernel, dims_ideal_gas_kernel_h, sizeof(dims_ideal_gas_kernel)));
  }
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  char *p_a[4];
  //set up initial pointers
  // Each base offset advances the device pointer to the start of the local
  // iteration range along x, y and z, honoring the stencil strides.
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    args[0].dat->size[1] *
    (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    args[1].dat->size[1] *
    (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    args[2].dat->size[1] *
    (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    args[3].dat->size[1] *
    (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 4);
  ops_halo_exchanges(args,4,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[11].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0 && z_size > 0)
    hipLaunchKernelGGL(( ops_ideal_gas_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
         (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
  cutilSafeCall(hipGetLastError());
  if (OPS_diags>1) {
    // Synchronize only when diagnostics are on, so timing is accurate.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[11].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 4);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[11].mpi_time += t2-t1;
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3);
  }
}
#ifdef OPS_LAZY
// Lazy-execution front end: instead of running the loop immediately, this
// packs the arguments into a kernel descriptor (with a hash identifying
// the loop instance, its range and datasets) and enqueues it; the runtime
// later calls ops_par_loop_ideal_gas_kernel_execute.
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 11;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 11;
  // Fold the 3D iteration range into the hash.
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 4;
  desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->function = ops_par_loop_ideal_gas_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(11,"ideal_gas_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
| 59e5b436c65d73f6c5787d932da833add0cec218.cu | //
// auto-generated by ops.py
//
__constant__ int dims_ideal_gas_kernel [4][2];
static int dims_ideal_gas_kernel_h [4][2] = {0};
// User function: per-cell ideal-gas equation of state with gamma = 1.4.
// Reads density and specific internal energy at the centre point, writes
// pressure p = (gamma-1) * rho * e and the local sound speed
// c = sqrt(v*v * (p * dp/de - dp/dv)) where v = 1/rho.
__device__
void ideal_gas_kernel_gpu(const ACC<double> &density,
  const ACC<double> &energy,
  ACC<double> &pressure,
  ACC<double> &soundspeed) {
  double sound_speed_squared, v, pressurebyenergy, pressurebyvolume;
  v = 1.0 / density(0,0,0);  // specific volume
  pressure(0,0,0) = (1.4 - 1.0) * density(0,0,0) * energy(0,0,0);
  pressurebyenergy = (1.4 - 1.0) * density(0,0,0);           // dp/de term
  pressurebyvolume = -1.0*density(0,0,0) * pressure(0,0,0);  // dp/dv term
  sound_speed_squared = v*v*(pressure(0,0,0) * pressurebyenergy-pressurebyvolume);
  soundspeed(0,0,0) = sqrt(sound_speed_squared);
}
// CUDA kernel wrapper: one thread per grid point of the size0 x size1 x size2
// sub-range. Each thread offsets the four raw data pointers to its own point
// using the per-dat x/y extents held in constant memory, wraps them in ACC
// accessors, and calls the user function. Out-of-range threads do nothing.
__global__ void ops_ideal_gas_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0,
int size1,
int size2 ){
  // 3-D global thread coordinates.
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Advance each pointer to this thread's point: row stride is the dat's
  // x-extent, plane stride is x-extent * y-extent (the 1*1 factors are the
  // generated multi-dim/SoA placeholders).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[0][0] + idx_z * 1*1 * dims_ideal_gas_kernel[0][0] * dims_ideal_gas_kernel[0][1];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[1][0] + idx_z * 1*1 * dims_ideal_gas_kernel[1][0] * dims_ideal_gas_kernel[1][1];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[2][0] + idx_z * 1*1 * dims_ideal_gas_kernel[2][0] * dims_ideal_gas_kernel[2][1];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_ideal_gas_kernel[3][0] + idx_z * 1*1 * dims_ideal_gas_kernel[3][0] * dims_ideal_gas_kernel[3][1];
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    const ACC<double> argp0(dims_ideal_gas_kernel[0][0], dims_ideal_gas_kernel[0][1], arg0);
    const ACC<double> argp1(dims_ideal_gas_kernel[1][0], dims_ideal_gas_kernel[1][1], arg1);
    ACC<double> argp2(dims_ideal_gas_kernel[2][0], dims_ideal_gas_kernel[2][1], arg2);
    ACC<double> argp3(dims_ideal_gas_kernel[3][0], dims_ideal_gas_kernel[3][1], arg3);
    ideal_gas_kernel_gpu(argp0, argp1, argp2, argp3);
  }
}
// Host stub for the ideal_gas_kernel parallel loop.
// Non-lazy builds expose ops_par_loop_ideal_gas_kernel directly; lazy builds
// compile this as ops_par_loop_ideal_gas_kernel_execute, unpacking the loop
// parameters from the previously queued kernel descriptor.
// Steps: resolve the local iteration range, refresh the constant-memory dat
// dimensions if they changed, compute per-arg base pointers, exchange halos,
// launch the CUDA kernel, and record timing/transfer diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) {
  // Lazy path: recover the loop parameters from the descriptor.
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[4] = { arg0, arg1, arg2, arg3};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,4,range,11)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(11,"ideal_gas_kernel");
    OPS_kernels[11].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI
  #ifdef OPS_MPI
  int arg_idx[3];
  #endif
  #ifdef OPS_MPI
  // Under MPI the owned sub-range may be empty on this rank.
  if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  // Current x/y extents of each dat; compared against the host shadow copy
  // so the constant-memory dims are only re-uploaded when they change.
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  if (xdim0 != dims_ideal_gas_kernel_h[0][0] || ydim0 != dims_ideal_gas_kernel_h[0][1] || xdim1 != dims_ideal_gas_kernel_h[1][0] || ydim1 != dims_ideal_gas_kernel_h[1][1] || xdim2 != dims_ideal_gas_kernel_h[2][0] || ydim2 != dims_ideal_gas_kernel_h[2][1] || xdim3 != dims_ideal_gas_kernel_h[3][0] || ydim3 != dims_ideal_gas_kernel_h[3][1]) {
    dims_ideal_gas_kernel_h[0][0] = xdim0;
    dims_ideal_gas_kernel_h[0][1] = ydim0;
    dims_ideal_gas_kernel_h[1][0] = xdim1;
    dims_ideal_gas_kernel_h[1][1] = ydim1;
    dims_ideal_gas_kernel_h[2][0] = xdim2;
    dims_ideal_gas_kernel_h[2][1] = ydim2;
    dims_ideal_gas_kernel_h[3][0] = xdim3;
    dims_ideal_gas_kernel_h[3][1] = ydim3;
    cutilSafeCall(cudaMemcpyToSymbol( dims_ideal_gas_kernel, dims_ideal_gas_kernel_h, sizeof(dims_ideal_gas_kernel)));
  }
  // Extent of the iteration range in each dimension (clamped at 0).
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);
  // Launch configuration: ceil-divide the range by the configured block size.
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  // Bytes per grid point for each dat (type size under SoA, else element size).
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  char *p_a[4];
  //set up initial pointers: byte offset of the range start in x, then y, then z
  int base0 = args[0].dat->base_offset +
  dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  args[0].dat->size[1] *
  (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
  dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  args[1].dat->size[1] *
  (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
  dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  (start[1] * args[2].stencil->stride[1]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  args[2].dat->size[1] *
  (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
  dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  (start[1] * args[3].stencil->stride[1]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  args[3].dat->size[1] *
  (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;
  #ifndef OPS_LAZY
  // Make device copies current and exchange halos before the launch.
  ops_H_D_exchanges_device(args, 4);
  ops_halo_exchanges(args,4,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[11].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0 && z_size > 0)
    ops_ideal_gas_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
      (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
  cutilSafeCall(cudaGetLastError());
  if (OPS_diags>1) {
    // Diagnostics only: synchronize so kernel time can be measured.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[11].time += t1-t2;
  }
  #ifndef OPS_LAZY
  // Mark written dats dirty on the device and flag the written halos.
  ops_set_dirtybit_device(args, 4);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[11].mpi_time += t2-t1;
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3);
  }
}
#ifdef OPS_LAZY
// Lazy-execution entry point (CUDA build): builds a kernel descriptor with
// the loop's name, block, 3-D range and args plus a djb2-style identification
// hash, then enqueues it; the real work happens later in
// ops_par_loop_ideal_gas_kernel_execute.
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;   // mark as a device (GPU) kernel
  desc->index = 11;   // global index assigned to this generated loop
  desc->hash = 5381;  // djb2 hash seed
  desc->hash = ((desc->hash << 5) + desc->hash) + 11;
  // 3-D loop: 6 range entries (start/end per dimension), folded into the hash.
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 4;
  desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->function = ops_par_loop_ideal_gas_kernel_execute;  // deferred body
  if (OPS_diags > 1) {
    ops_timing_realloc(11,"ideal_gas_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
2a3808286fbeb5d9a1efd6f77f9ab30a0a2ea3ba.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// Explicit instantiation of one int8 NCxHWx<4> implicit-GEMM convolution+bias
// kernel variant: SIMT class, SM61, 16x128x16 threadblock and warp tiles,
// ReLU-clamped bias-add epilogue. Each generated file instantiates exactly
// one tile configuration to keep per-TU compile time bounded.
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 128, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
  int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
  int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
  LayoutSrc, int32_t, LayoutSrc, int32_t,
  cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
  ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
  cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
  cutlass::convolution::ConvType::kConvolution>,
  1, 4, 8, true>;
// Instantiate the host-side launch wrapper for this Convolution configuration.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
  const int8_t* d_src,
  const int8_t* d_filter,
  const int32_t* d_bias,
  const int8_t* d_z,
  int8_t* d_dst,
  int* workspace,
  typename Convolution::ConvolutionParameter const& conv_param,
  typename Convolution::EpilogueOutputOp::Params const& epilogue,
  hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 2a3808286fbeb5d9a1efd6f77f9ab30a0a2ea3ba.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// Explicit instantiation of one int8 NCxHWx<4> implicit-GEMM convolution+bias
// kernel variant: SIMT class, SM61, 16x128x16 threadblock and warp tiles,
// ReLU-clamped bias-add epilogue. Each generated file instantiates exactly
// one tile configuration to keep per-TU compile time bounded.
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 128, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
  int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
  int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
  LayoutSrc, int32_t, LayoutSrc, int32_t,
  cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
  ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
  cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
  cutlass::convolution::ConvType::kConvolution>,
  1, 4, 8, true>;
// Instantiate the host-side launch wrapper for this Convolution configuration.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
  const int8_t* d_src,
  const int8_t* d_filter,
  const int32_t* d_bias,
  const int8_t* d_z,
  int8_t* d_dst,
  int* workspace,
  typename Convolution::ConvolutionParameter const& conv_param,
  typename Convolution::EpilogueOutputOp::Params const& epilogue,
  cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
6a3bf9ecac39c0f97ee8a96a9ab68bc333447cb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <string>
#include <math.h>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <cstdio>
using namespace std;
using namespace cv;
//Tamanho da área de cada pedaço a ser feita a filtragem na imagem.
#define SQUARE_AREA 25
//Número de threads a serem utilizadas.
#define NUMTHREAD 1024
__device__ unsigned char smooth(unsigned char* pixel, int i, int j, int w, int h);
__global__ void filtro(unsigned char *in, unsigned char *out, int w, int h);
/*
 * Entry point: applies the 5x5 "smooth" box filter on the GPU.
 *   argv[1] - input image path
 *   argv[2] - imread flag (0 = grayscale, otherwise color)
 * Writes the filtered result to "novaImg.jpg" and prints the elapsed time.
 *
 * Fixes over the original: validates argc before dereferencing argv[1]/argv[2],
 * checks that imread actually loaded an image, and drops the unused local `i`.
 */
int main( int argc, char** argv) {
    // Guard against missing CLI arguments (the original crashed on argv[2]).
    if (argc < 3) {
        printf("usage: %s <image> <imread-flag (0 = grayscale)>\n", argv[0]);
        return 1;
    }
    int tam, tamLargura, tamAltura, blocoX, blocoY;
    double tempo;
    Mat src, dst[3], finImg;
    clock_t itime, ftime;
    unsigned char *dados_in, *dados_out;
    int imgType = atoi(argv[2]);
    src = imread( argv[1], imgType );
    // imread returns an empty Mat on failure (bad path / unsupported format).
    if (src.empty()) {
        printf("could not read image: %s\n", argv[1]);
        return 1;
    }
    // Input image dimensions.
    tamAltura = src.rows;
    tamLargura = src.cols;
    // Bytes in one single-channel plane (color images are filtered per plane).
    tam = tamLargura * tamAltura * sizeof(unsigned char);
    hipMalloc((void**)&dados_in, tam);
    hipMalloc((void**)&dados_out, tam);
    // Grid layout: one block row per image row, ceil(width/NUMTHREAD) blocks across.
    blocoX = (int)ceil((double) tamLargura/(double)NUMTHREAD);
    blocoY = tamAltura;
    dim3 Blocos(blocoX, blocoY);
    dim3 threadBloco(NUMTHREAD);
    itime = clock();
    if( imgType == 0) {
        // Grayscale: filter the single channel in place.
        hipMemcpy(dados_in, (unsigned char*) src.data , tam, hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( filtro), dim3(Blocos), dim3(threadBloco), 0, 0, dados_in, dados_out, tamLargura, tamAltura);
        hipMemcpy((unsigned char*) src.data , dados_out, tam, hipMemcpyDeviceToHost);
        imwrite("novaImg.jpg", src);
    }else{
        // Color: split into B/G/R planes and filter each one independently.
        split(src,dst);
        for(int c=0;c<3;++c){
            hipMemcpy(dados_in, (unsigned char*) dst[c].data , tam, hipMemcpyHostToDevice);
            hipLaunchKernelGGL(( filtro), dim3(Blocos), dim3(threadBloco), 0, 0, dados_in, dados_out, tamLargura, tamAltura);
            hipMemcpy((unsigned char*) dst[c].data , dados_out, tam, hipMemcpyDeviceToHost);
        }
        // Reassemble the filtered planes into a single image.
        vector<Mat> rgb;
        rgb.push_back(dst[0]); //blue
        rgb.push_back(dst[1]); //green
        rgb.push_back(dst[2]); //red
        merge(rgb, finImg);
        imwrite("novaImg.jpg", finImg);
    }
    ftime = clock();
    tempo = (ftime-itime) / (CLOCKS_PER_SEC * 1.0);
    printf("\nTempo : %lf\n",tempo);
    hipFree(dados_in);
    hipFree(dados_out);
    return 0;
}
// 5x5 box ("smooth") filter at pixel (i, j) of a w x h single-channel image.
//   pixel - input image data, row-major, one byte per pixel
//   i, j  - row / column of the target pixel
//   w, h  - image width / height
// Returns the average of the window samples that actually fall inside the
// image. The original divided by the constant SQUARE_AREA (25) even at the
// borders where fewer than 25 pixels contribute, darkening the edges; we
// now divide by the true contribution count (always >= 1 for a valid (i,j)).
__device__ unsigned char smooth(unsigned char* pixel, int i, int j, int w, int h){
    int l, k;
    int sum = 0;
    int count = 0;      // number of in-bounds samples actually accumulated
    int raio = 5/2;     // window radius (5x5 kernel)
    for(l = i - raio; l <= i + raio; l++) {
        for(k = j - raio; k <= j + raio; k++) {
            if(l >= 0 && k >= 0 && l < h && k < w) {
                sum += pixel[l*w + k];
                count++;
            }
        }
    }
    return sum/count;
}
// One thread per pixel: the row comes from blockIdx.y, the column from the
// 1-D thread index within the row. The grid is built with
// ceil(width/NUMTHREAD) blocks in x, so the last block in each row contains
// threads past the row end; the original kernel had no guard and wrote out
// of bounds there — clamp both coordinates before touching memory.
__global__ void filtro(unsigned char *in, unsigned char *out, int w, int h) {
    int i = blockIdx.y;                              // image row
    int j = blockIdx.x*blockDim.x + threadIdx.x;     // image column
    if (i < h && j < w)
        out[i*w+j] = smooth(in, i, j, w, h);
}
| 6a3bf9ecac39c0f97ee8a96a9ab68bc333447cb1.cu | #include <iostream>
#include <vector>
#include <string>
#include <math.h>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <cstdio>
using namespace std;
using namespace cv;
//Tamanho da área de cada pedaço a ser feita a filtragem na imagem.
#define SQUARE_AREA 25
//Número de threads a serem utilizadas.
#define NUMTHREAD 1024
__device__ unsigned char smooth(unsigned char* pixel, int i, int j, int w, int h);
__global__ void filtro(unsigned char *in, unsigned char *out, int w, int h);
/*
 * Entry point: applies the 5x5 "smooth" box filter on the GPU.
 *   argv[1] - input image path
 *   argv[2] - imread flag (0 = grayscale, otherwise color)
 * Writes the filtered result to "novaImg.jpg" and prints the elapsed time.
 *
 * Fixes over the original: validates argc before dereferencing argv[1]/argv[2],
 * checks that imread actually loaded an image, and drops the unused local `i`.
 */
int main( int argc, char** argv) {
    // Guard against missing CLI arguments (the original crashed on argv[2]).
    if (argc < 3) {
        printf("usage: %s <image> <imread-flag (0 = grayscale)>\n", argv[0]);
        return 1;
    }
    int tam, tamLargura, tamAltura, blocoX, blocoY;
    double tempo;
    Mat src, dst[3], finImg;
    clock_t itime, ftime;
    unsigned char *dados_in, *dados_out;
    int imgType = atoi(argv[2]);
    src = imread( argv[1], imgType );
    // imread returns an empty Mat on failure (bad path / unsupported format).
    if (src.empty()) {
        printf("could not read image: %s\n", argv[1]);
        return 1;
    }
    // Input image dimensions.
    tamAltura = src.rows;
    tamLargura = src.cols;
    // Bytes in one single-channel plane (color images are filtered per plane).
    tam = tamLargura * tamAltura * sizeof(unsigned char);
    cudaMalloc((void**)&dados_in, tam);
    cudaMalloc((void**)&dados_out, tam);
    // Grid layout: one block row per image row, ceil(width/NUMTHREAD) blocks across.
    blocoX = (int)ceil((double) tamLargura/(double)NUMTHREAD);
    blocoY = tamAltura;
    dim3 Blocos(blocoX, blocoY);
    dim3 threadBloco(NUMTHREAD);
    itime = clock();
    if( imgType == 0) {
        // Grayscale: filter the single channel in place.
        cudaMemcpy(dados_in, (unsigned char*) src.data , tam, cudaMemcpyHostToDevice);
        filtro<<<Blocos, threadBloco>>>(dados_in, dados_out, tamLargura, tamAltura);
        cudaMemcpy((unsigned char*) src.data , dados_out, tam, cudaMemcpyDeviceToHost);
        imwrite("novaImg.jpg", src);
    }else{
        // Color: split into B/G/R planes and filter each one independently.
        split(src,dst);
        for(int c=0;c<3;++c){
            cudaMemcpy(dados_in, (unsigned char*) dst[c].data , tam, cudaMemcpyHostToDevice);
            filtro<<<Blocos, threadBloco>>>(dados_in, dados_out, tamLargura, tamAltura);
            cudaMemcpy((unsigned char*) dst[c].data , dados_out, tam, cudaMemcpyDeviceToHost);
        }
        // Reassemble the filtered planes into a single image.
        vector<Mat> rgb;
        rgb.push_back(dst[0]); //blue
        rgb.push_back(dst[1]); //green
        rgb.push_back(dst[2]); //red
        merge(rgb, finImg);
        imwrite("novaImg.jpg", finImg);
    }
    ftime = clock();
    tempo = (ftime-itime) / (CLOCKS_PER_SEC * 1.0);
    printf("\nTempo : %lf\n",tempo);
    cudaFree(dados_in);
    cudaFree(dados_out);
    return 0;
}
// 5x5 box ("smooth") filter at pixel (i, j) of a w x h single-channel image.
//   pixel - input image data, row-major, one byte per pixel
//   i, j  - row / column of the target pixel
//   w, h  - image width / height
// Returns the average of the window samples that actually fall inside the
// image. The original divided by the constant SQUARE_AREA (25) even at the
// borders where fewer than 25 pixels contribute, darkening the edges; we
// now divide by the true contribution count (always >= 1 for a valid (i,j)).
__device__ unsigned char smooth(unsigned char* pixel, int i, int j, int w, int h){
    int l, k;
    int sum = 0;
    int count = 0;      // number of in-bounds samples actually accumulated
    int raio = 5/2;     // window radius (5x5 kernel)
    for(l = i - raio; l <= i + raio; l++) {
        for(k = j - raio; k <= j + raio; k++) {
            if(l >= 0 && k >= 0 && l < h && k < w) {
                sum += pixel[l*w + k];
                count++;
            }
        }
    }
    return sum/count;
}
// One thread per pixel: the row comes from blockIdx.y, the column from the
// 1-D thread index within the row. The grid is built with
// ceil(width/NUMTHREAD) blocks in x, so the last block in each row contains
// threads past the row end; the original kernel had no guard and wrote out
// of bounds there — clamp both coordinates before touching memory.
__global__ void filtro(unsigned char *in, unsigned char *out, int w, int h) {
    int i = blockIdx.y;                              // image row
    int j = blockIdx.x*blockDim.x + threadIdx.x;     // image column
    if (i < h && j < w)
        out[i*w+j] = smooth(in, i, j, w, h);
}
|
3dcde6f66731a5c7907b660faa8e7b9124e1568a.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#include "check.h"
#include <hip/hip_runtime.h>
#define SOFTENING 1e-9f
#define BLOCK_SIZE 32
#define BLOCK_STRIDE 32
// One simulation body: position (x, y, z) and velocity (vx, vy, vz),
// stored as six consecutive floats so a Body* aliases a float buffer.
typedef struct
{
    float x, y, z, vx, vy, vz;
} Body;
// Fill `data` with `n` pseudo-random floats uniformly spread over [-1, 1],
// consuming exactly one rand() call per element.
void randomizeBodies(float *data, int n)
{
    for (int idx = 0; idx < n; ++idx)
    {
        float unit = rand() / (float)RAND_MAX;  // in [0, 1]
        data[idx] = 2.0f * unit - 1.0f;         // remap to [-1, 1]
    }
}
// All-pairs gravitational force accumulation.
// Launch layout: the grid is BLOCK_STRIDE times larger than one-thread-per-body;
// each body i is handled by BLOCK_STRIDE different blocks, each covering a
// disjoint subset of the n/BLOCK_SIZE source tiles, so the partial velocity
// updates from those blocks are combined with atomicAdd.
// Assumes n is a multiple of BLOCK_SIZE (callers size n as a power of two).
__global__ void bodyForce(Body *p, float dt, int n)
{
    // int i = threadIdx.x + blockIdx.x * blockDim.x;
    int cycle_times = n / BLOCK_SIZE;  // total number of source tiles
    // Index of the body this thread accumulates forces for.
    int i = threadIdx.x + (int)(blockIdx.x / BLOCK_STRIDE) * blockDim.x;
    // First source tile assigned to this block.
    int start_block = blockIdx.x % BLOCK_STRIDE;
    if (i < n)
    {
        Body ptemp = p[i];
        Body temp;
        float share_x,share_y,share_z;
        float dx, dy, dz, distSqr, invDist, invDist3;
        float Fx = 0.0f;
        float Fy = 0.0f;
        float Fz = 0.0f;
        // Walk this block's tiles with stride BLOCK_STRIDE. (Using a
        // compile-time constant instead of cycle_times is slightly faster
        // when the problem size is fixed.)
        for (int block_num = start_block; block_num < cycle_times; block_num += BLOCK_STRIDE)
        {
            // Each lane loads one body of the tile into registers; lane j's
            // coordinates are then broadcast to the whole warp via __shfl_sync.
            temp = p[block_num * BLOCK_SIZE + threadIdx.x];
            share_x = temp.x;
            share_y = temp.y;
            share_z = temp.z;
            // Unrolling only helps because BLOCK_SIZE is a compile-time constant.
            #pragma unroll
            for (int j = 0; j < BLOCK_SIZE; j++)
            {
                dx = __shfl_sync(0xFFFFFFFF,share_x,j) - ptemp.x;
                dy = __shfl_sync(0xFFFFFFFF,share_y,j) - ptemp.y;
                dz = __shfl_sync(0xFFFFFFFF,share_z,j) - ptemp.z;
                distSqr = dx * dx + dy * dy + dz * dz + SOFTENING;
                invDist = rsqrtf(distSqr);  // softened inverse distance
                invDist3 = invDist * invDist * invDist;
                Fx += dx * invDist3;
                Fy += dy * invDist3;
                Fz += dz * invDist3;
            }
            // NOTE(review): this barrier guarded a shared-memory buffer in an
            // earlier version ("spos"); with the register+shuffle scheme it
            // looks unnecessary, and it sits inside `if (i < n)` which would
            // be a divergent barrier if n were not a block multiple — confirm.
            __syncthreads();
        }
        // No inter-block synchronization: BLOCK_STRIDE blocks update the same
        // body, so correctness relies on atomic accumulation here.
        atomicAdd(&p[i].vx, dt * Fx);
        atomicAdd(&p[i].vy, dt * Fy);
        atomicAdd(&p[i].vz, dt * Fz);
        // p[i].vx += dt * Fx;
        // p[i].vy += dt * Fy;
        // p[i].vz += dt * Fz;
    }
}
// Advance each body's position by one explicit Euler step of size dt,
// using its current velocity. One thread per body; the tail of the grid
// (indices >= n) does nothing.
__global__ void integrate_position(Body *p, float dt, int n)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;  // grid-tail guard
    Body *body = &p[idx];
    body->x += body->vx * dt;
    body->y += body->vy * dt;
    body->z += body->vz * dt;
}
// N-body driver: initializes 2<<11 bodies (or 2<<argv[1]) with random
// positions/velocities, runs nIters force+integration steps on the GPU,
// then verifies accuracy/performance and reports interactions per second.
int main(const int argc, const char **argv)
{
    int nBodies = 2 << 11;  // default body count; overridden by argv[1]
    int salt = 0;
    if (argc > 1)
        nBodies = 2 << atoi(argv[1]);
    /*
     * This salt is for assessment reasons. Tampering with it will result in automatic failure.
     */
    if (argc > 2)
        salt = atoi(argv[2]);
    const float dt = 0.01f; // time step
    const int nIters = 10;  // simulation iterations
    int bytes = nBodies * sizeof(Body);
    float *buf;
    hipHostMalloc(&buf, bytes);  // pinned host buffer (fast transfers)
    randomizeBodies(buf, 6 * nBodies); // Init pos / vel data
    double totalTime = 0.0;
    int deviceId;
    hipGetDevice(&deviceId);
    size_t threadsPerBlock = BLOCK_SIZE;
    size_t numberOfBlocks = (nBodies + threadsPerBlock - 1) / threadsPerBlock;
    float *d_buf;
    hipMalloc(&d_buf, bytes);
    Body *d_p = (Body *)d_buf;  // reinterpret the raw float buffer as bodies
    /*
     * This simulation will run for 10 cycles of time, calculating gravitational
     * interaction amongst bodies, and adjusting their positions to reflect.
     */
    hipMemcpy(d_buf, buf, bytes, hipMemcpyHostToDevice);
    /*******************************************************************/
    // Do not modify these 2 lines of code.gg
    for (int iter = 0; iter < nIters; iter++)
    {
        StartTimer();
        /*******************************************************************/
        /*
         * You will likely wish to refactor the work being done in `bodyForce`,
         * as well as the work to integrate the positions.
         */
        // bodyForce uses BLOCK_STRIDE blocks per body tile (see the kernel).
        hipLaunchKernelGGL(( bodyForce), dim3(numberOfBlocks * BLOCK_STRIDE), dim3(threadsPerBlock), 0, 0, d_p, dt, nBodies); // compute interbody forces
        /*
         * This position integration cannot occur until this round of `bodyForce` has completed.
         * Also, the next round of `bodyForce` cannot begin until the integration is complete.
         */
        // Same-stream launch: ordering between the two kernels is implicit.
        hipLaunchKernelGGL(( integrate_position), dim3(nBodies / threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_p, dt, nBodies);
        if (iter == nIters - 1)
        {
            // Blocking copy: also synchronizes before the final timing/check.
            hipMemcpy(buf, d_buf, bytes, hipMemcpyDeviceToHost);
        }
        /*******************************************************************/
        // Do not modify the code in this section.
        const double tElapsed = GetTimer() / 1000.0;
        totalTime += tElapsed;
    }
    double avgTime = totalTime / (double)(nIters);
    float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime;
    #ifdef ASSESS
    checkPerformance(buf, billionsOfOpsPerSecond, salt);
    #else
    checkAccuracy(buf, nBodies);
    printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond);
    salt += 1;
    #endif
    /*******************************************************************/
    /*
     * Feel free to modify code below.
     */
    hipFree(d_buf);
    hipHostFree(buf);
}
| 3dcde6f66731a5c7907b660faa8e7b9124e1568a.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#include "check.h"
#include <cuda_runtime.h>
#define SOFTENING 1e-9f
#define BLOCK_SIZE 32
#define BLOCK_STRIDE 32
// One simulation body: position (x, y, z) and velocity (vx, vy, vz),
// stored as six consecutive floats so a Body* aliases a float buffer.
typedef struct
{
    float x, y, z, vx, vy, vz;
} Body;
// Fill `data` with `n` pseudo-random floats uniformly spread over [-1, 1],
// consuming exactly one rand() call per element.
void randomizeBodies(float *data, int n)
{
    for (int idx = 0; idx < n; ++idx)
    {
        float unit = rand() / (float)RAND_MAX;  // in [0, 1]
        data[idx] = 2.0f * unit - 1.0f;         // remap to [-1, 1]
    }
}
// All-pairs gravitational force accumulation.
// Launch layout: the grid is BLOCK_STRIDE times larger than one-thread-per-body;
// each body i is handled by BLOCK_STRIDE different blocks, each covering a
// disjoint subset of the n/BLOCK_SIZE source tiles, so the partial velocity
// updates from those blocks are combined with atomicAdd.
// Assumes n is a multiple of BLOCK_SIZE (callers size n as a power of two).
__global__ void bodyForce(Body *p, float dt, int n)
{
    // int i = threadIdx.x + blockIdx.x * blockDim.x;
    int cycle_times = n / BLOCK_SIZE;  // total number of source tiles
    // Index of the body this thread accumulates forces for.
    int i = threadIdx.x + (int)(blockIdx.x / BLOCK_STRIDE) * blockDim.x;
    // First source tile assigned to this block.
    int start_block = blockIdx.x % BLOCK_STRIDE;
    if (i < n)
    {
        Body ptemp = p[i];
        Body temp;
        float share_x,share_y,share_z;
        float dx, dy, dz, distSqr, invDist, invDist3;
        float Fx = 0.0f;
        float Fy = 0.0f;
        float Fz = 0.0f;
        // Walk this block's tiles with stride BLOCK_STRIDE. (Using a
        // compile-time constant instead of cycle_times is slightly faster
        // when the problem size is fixed.)
        for (int block_num = start_block; block_num < cycle_times; block_num += BLOCK_STRIDE)
        {
            // Each lane loads one body of the tile into registers; lane j's
            // coordinates are then broadcast to the whole warp via __shfl_sync.
            temp = p[block_num * BLOCK_SIZE + threadIdx.x];
            share_x = temp.x;
            share_y = temp.y;
            share_z = temp.z;
            // Unrolling only helps because BLOCK_SIZE is a compile-time constant.
            #pragma unroll
            for (int j = 0; j < BLOCK_SIZE; j++)
            {
                dx = __shfl_sync(0xFFFFFFFF,share_x,j) - ptemp.x;
                dy = __shfl_sync(0xFFFFFFFF,share_y,j) - ptemp.y;
                dz = __shfl_sync(0xFFFFFFFF,share_z,j) - ptemp.z;
                distSqr = dx * dx + dy * dy + dz * dz + SOFTENING;
                invDist = rsqrtf(distSqr);  // softened inverse distance
                invDist3 = invDist * invDist * invDist;
                Fx += dx * invDist3;
                Fy += dy * invDist3;
                Fz += dz * invDist3;
            }
            // NOTE(review): this barrier guarded a shared-memory buffer in an
            // earlier version ("spos"); with the register+shuffle scheme it
            // looks unnecessary, and it sits inside `if (i < n)` which would
            // be a divergent barrier if n were not a block multiple — confirm.
            __syncthreads();
        }
        // No inter-block synchronization: BLOCK_STRIDE blocks update the same
        // body, so correctness relies on atomic accumulation here.
        atomicAdd(&p[i].vx, dt * Fx);
        atomicAdd(&p[i].vy, dt * Fy);
        atomicAdd(&p[i].vz, dt * Fz);
        // p[i].vx += dt * Fx;
        // p[i].vy += dt * Fy;
        // p[i].vz += dt * Fz;
    }
}
// Advance each body's position by one explicit Euler step of size dt,
// using its current velocity. One thread per body; the tail of the grid
// (indices >= n) does nothing.
__global__ void integrate_position(Body *p, float dt, int n)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;  // grid-tail guard
    Body *body = &p[idx];
    body->x += body->vx * dt;
    body->y += body->vy * dt;
    body->z += body->vz * dt;
}
// N-body driver: initializes 2<<11 bodies (or 2<<argv[1]) with random
// positions/velocities, runs nIters force+integration steps on the GPU,
// then verifies accuracy/performance and reports interactions per second.
int main(const int argc, const char **argv)
{
    int nBodies = 2 << 11;  // default body count; overridden by argv[1]
    int salt = 0;
    if (argc > 1)
        nBodies = 2 << atoi(argv[1]);
    /*
     * This salt is for assessment reasons. Tampering with it will result in automatic failure.
     */
    if (argc > 2)
        salt = atoi(argv[2]);
    const float dt = 0.01f; // time step
    const int nIters = 10;  // simulation iterations
    int bytes = nBodies * sizeof(Body);
    float *buf;
    cudaMallocHost(&buf, bytes);  // pinned host buffer (fast transfers)
    randomizeBodies(buf, 6 * nBodies); // Init pos / vel data
    double totalTime = 0.0;
    int deviceId;
    cudaGetDevice(&deviceId);
    size_t threadsPerBlock = BLOCK_SIZE;
    size_t numberOfBlocks = (nBodies + threadsPerBlock - 1) / threadsPerBlock;
    float *d_buf;
    cudaMalloc(&d_buf, bytes);
    Body *d_p = (Body *)d_buf;  // reinterpret the raw float buffer as bodies
    /*
     * This simulation will run for 10 cycles of time, calculating gravitational
     * interaction amongst bodies, and adjusting their positions to reflect.
     */
    cudaMemcpy(d_buf, buf, bytes, cudaMemcpyHostToDevice);
    /*******************************************************************/
    // Do not modify these 2 lines of code.gg
    for (int iter = 0; iter < nIters; iter++)
    {
        StartTimer();
        /*******************************************************************/
        /*
         * You will likely wish to refactor the work being done in `bodyForce`,
         * as well as the work to integrate the positions.
         */
        // bodyForce uses BLOCK_STRIDE blocks per body tile (see the kernel).
        bodyForce<<<numberOfBlocks * BLOCK_STRIDE, threadsPerBlock>>>(d_p, dt, nBodies); // compute interbody forces
        /*
         * This position integration cannot occur until this round of `bodyForce` has completed.
         * Also, the next round of `bodyForce` cannot begin until the integration is complete.
         */
        // Same-stream launch: ordering between the two kernels is implicit.
        integrate_position<<<nBodies / threadsPerBlock, threadsPerBlock>>>(d_p, dt, nBodies);
        if (iter == nIters - 1)
        {
            // Blocking copy: also synchronizes before the final timing/check.
            cudaMemcpy(buf, d_buf, bytes, cudaMemcpyDeviceToHost);
        }
        /*******************************************************************/
        // Do not modify the code in this section.
        const double tElapsed = GetTimer() / 1000.0;
        totalTime += tElapsed;
    }
    double avgTime = totalTime / (double)(nIters);
    float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime;
    #ifdef ASSESS
    checkPerformance(buf, billionsOfOpsPerSecond, salt);
    #else
    checkAccuracy(buf, nBodies);
    printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond);
    salt += 1;
    #endif
    /*******************************************************************/
    /*
     * Feel free to modify code below.
     */
    cudaFree(d_buf);
    cudaFreeHost(buf);
}
|
5006be3ed4e0768a4aba043f7334709093f0bd2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// Empty placeholder kernel: presumably intended as a hand-written
// matrix-vector product (m * v -> o) — TODO confirm intent or remove.
__global__ void mv(
    float *m,
    float *v,
    float *o,
    int size0,int size1) {
};
// NOTE(review): this __global__ kernel calls hipblasSgemv with a host-side
// THCState blas handle. The BLAS gemv entry point is a host API and a host
// handle is not valid in device code, so this looks non-functional as
// written — verify before relying on it. The default argument `c` and the
// commented-out self-launch below suggest an abandoned experiment with
// device-side recursive launches (recursion depth 2).
__global__ void CudaQUICK_PRODUCT_kernel(
    THCState *state,
    float *input1,
    float *input2,
    float *output,
    int size0,int size1,int c = 2) {
    if (c==0) return;  // recursion guard for the (disabled) self-launch
    c--;
    const float alpha = 1.0;
    const float beta = 0.0;
    hipblasSgemv(state->currentBlasHandle, HIPBLAS_OP_N, size0, size1, & alpha,input1+threadIdx.x, 1,input2+blockIdx.x*blockDim.x+threadIdx.x, 1,& beta, output+blockIdx.x*blockDim.x+threadIdx.x,1);
    // CudaQUICK_PRODUCT_kernel<<<size0, size1>>>(state,input1,input2,output,size0,size1,c)
};
// Lua binding: torch.CudaTensor "quick product".
// Stack layout: (1..3) input1, input2, output CudaTensors; (4..6) launch
// sizes length1 (grid), length2 (block) and length3 (unused as written —
// TODO confirm it can be dropped). Launches CudaQUICK_PRODUCT_kernel with
// raw tensor data pointers and checks for a launch error. Returns 1
// (number of Lua results).
static int CudaQUICK_PRODUCT(lua_State *L)
{
    THCudaTensor *input1 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
    THCudaTensor *input2 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
    THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
    int length1 = luaL_checkint(L, 4);
    int length2 = luaL_checkint(L, 5);
    int length3 = luaL_checkint(L, 6);
    THCState *state = getCutorchState(L);
    // input1 is treated as a size0 x size1 matrix.
    int size0 = input1->size[0];
    int size1 = input1->size[1];hipLaunchKernelGGL((
    CudaQUICK_PRODUCT_kernel) , dim3(length1), dim3(length2), 0, 0,
    state,
    THCudaTensor_data(state,input1),
    THCudaTensor_data(state,input2),
    THCudaTensor_data(state,output),size0,size1
    );
    /*
    const float alpha = 1.0;
    const float beta = 0.0;
    hipblasSgemv(state->currentBlasHandle, HIPBLAS_OP_N, size0, size1, & alpha,THCudaTensor_data(state,input1), 1,THCudaTensor_data(state,input2), 1,& beta,THCudaTensor_data(state,output),1);
    */
    THCudaCheck(hipGetLastError());  // surface kernel launch failures
    return 1;
}
// Lua registration table: maps the exported method name to its C binding.
// Terminated by the {NULL, NULL} sentinel required by luaL_Reg.
static const struct luaL_Reg cunnx_CudaQuickProduct__ [] = {
    {"CudaQuickProduct_updateOutput", CudaQUICK_PRODUCT},
    {NULL, NULL}
};
// Register the binding table under the "nn" name on the torch.CudaTensor
// metatable, then pop the metatable to leave the Lua stack balanced.
static void cunnx_CudaQuickProduct_init(lua_State *L)
{
    luaT_pushmetatable(L, "torch.CudaTensor");
    luaT_registeratname(L, cunnx_CudaQuickProduct__, "nn");
    lua_pop(L,1);
}
| 5006be3ed4e0768a4aba043f7334709093f0bd2c.cu | #include "utils.h"
// Empty placeholder kernel: presumably intended as a hand-written
// matrix-vector product (m * v -> o) — TODO confirm intent or remove.
__global__ void mv(
    float *m,
    float *v,
    float *o,
    int size0,int size1) {
};
// NOTE(review): this __global__ kernel calls cublasSgemv with a host-side
// THCState blas handle. cublasSgemv is a host API and a host handle is not
// valid in device code, so this looks non-functional as written — verify
// before relying on it. The default argument `c` and the commented-out
// self-launch below suggest an abandoned experiment with device-side
// recursive launches (recursion depth 2).
__global__ void CudaQUICK_PRODUCT_kernel(
    THCState *state,
    float *input1,
    float *input2,
    float *output,
    int size0,int size1,int c = 2) {
    if (c==0) return;  // recursion guard for the (disabled) self-launch
    c--;
    const float alpha = 1.0;
    const float beta = 0.0;
    cublasSgemv(state->currentBlasHandle, CUBLAS_OP_N, size0, size1, & alpha,input1+threadIdx.x, 1,input2+blockIdx.x*blockDim.x+threadIdx.x, 1,& beta, output+blockIdx.x*blockDim.x+threadIdx.x,1);
    // CudaQUICK_PRODUCT_kernel<<<size0, size1>>>(state,input1,input2,output,size0,size1,c)
};
// Lua binding: torch.CudaTensor "quick product".
// Stack layout: (1..3) input1, input2, output CudaTensors; (4..6) launch
// sizes length1 (grid), length2 (block) and length3 (unused as written —
// TODO confirm it can be dropped). Launches CudaQUICK_PRODUCT_kernel with
// raw tensor data pointers and checks for a launch error. Returns 1
// (number of Lua results).
static int CudaQUICK_PRODUCT(lua_State *L)
{
    THCudaTensor *input1 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
    THCudaTensor *input2 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
    THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
    int length1 = luaL_checkint(L, 4);
    int length2 = luaL_checkint(L, 5);
    int length3 = luaL_checkint(L, 6);
    THCState *state = getCutorchState(L);
    // input1 is treated as a size0 x size1 matrix.
    int size0 = input1->size[0];
    int size1 = input1->size[1];
    CudaQUICK_PRODUCT_kernel <<<length1, length2>>>(
    state,
    THCudaTensor_data(state,input1),
    THCudaTensor_data(state,input2),
    THCudaTensor_data(state,output),size0,size1
    );
    /*
    const float alpha = 1.0;
    const float beta = 0.0;
    cublasSgemv(state->currentBlasHandle, CUBLAS_OP_N, size0, size1, & alpha,THCudaTensor_data(state,input1), 1,THCudaTensor_data(state,input2), 1,& beta,THCudaTensor_data(state,output),1);
    */
    THCudaCheck(cudaGetLastError());  // surface kernel launch failures
    return 1;
}
// Lua registration table: maps the exported method name to its C binding.
// Terminated by the {NULL, NULL} sentinel required by luaL_Reg.
static const struct luaL_Reg cunnx_CudaQuickProduct__ [] = {
    {"CudaQuickProduct_updateOutput", CudaQUICK_PRODUCT},
    {NULL, NULL}
};
// Register the binding table under the "nn" name on the torch.CudaTensor
// metatable, then pop the metatable to leave the Lua stack balanced.
static void cunnx_CudaQuickProduct_init(lua_State *L)
{
    luaT_pushmetatable(L, "torch.CudaTensor");
    luaT_registeratname(L, cunnx_CudaQuickProduct__, "nn");
    lua_pop(L,1);
}
|
e2d1a2a8f27e0ddc5a22e65960f7aafd3bc52e52.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#define MAX_BLOCKS_1D 16384
#define MAX_BLOCKS_2D 128
// Heap-allocate a dim3 with the given extents; y and z default to 1.
// Ownership passes to the caller, who must release it with free().
dim3 *dim3Ctr(int x, int y = 1, int z = 1)
{
    dim3 *result = (dim3 *)malloc(sizeof(dim3));
    result->x = x;
    result->y = y;
    result->z = z;
    return result;
}
// Heap-allocate the unit extent {1, 1, 1}; caller must free() the result.
dim3 *dim3Unit()
{
    dim3 *unit = (dim3 *)malloc(sizeof(dim3));
    unit->x = unit->y = unit->z = 1;
    return unit;
}
// Total element count spanned by the extents: x * y * z.
int dim3Vol(dim3 *a)
{
    unsigned int volume = a->x * a->y * a->z;  // same unsigned product as before
    return volume;
}
// Debug helper: print a dim3's extents to stdout (no trailing newline).
// NOTE(review): dim3 components are unsigned; %d is technically mismatched —
// consider %u if this ever prints values above INT_MAX.
void printDim3(dim3 *yow)
{
    printf("yow: {%d, %d, %d}", yow->x, yow->y, yow->z);
}
// Map a 1-D problem size to a threads-per-block count: larger problems get
// more threads, stepping down through fixed thresholds to a minimum of 1.
int ThreadChop1d(int size)
{
    static const int limits[]  = {65536, 16384, 4096, 1024, 255, 63, 15, 3};
    static const int threads[] = {  256,   128,   64,   32,  16,  8,  4, 2};
    for (int t = 0; t < 8; ++t)
    {
        if (size > limits[t])
            return threads[t];
    }
    return 1;
}
// Map a 2-D tile width to a threads-per-dimension count: wider problems get
// more threads per side, stepping down through fixed thresholds to 1.
int ThreadChop2d(int width)
{
    static const int limits[]  = {255, 63, 15, 3};
    static const int threads[] = { 16,  8,  4, 2};
    for (int t = 0; t < 4; ++t)
    {
        if (width > limits[t])
            return threads[t];
    }
    return 1;
}
// Choose a 1-D launch configuration for `size` elements: threads per block
// from ThreadChop1d, block count = ceil(size / threads) clamped to
// MAX_BLOCKS_1D. Results are returned through the grid/block out-params.
void GridAndBlocks1d(dim3 &grid, dim3 &block, int size)
{
    const int threadCt = ThreadChop1d(size);
    const int neededBlocks = (size + threadCt - 1) / threadCt;  // ceil-div
    const int blockCt = MinInt(neededBlocks, MAX_BLOCKS_1D);    // clamp grid
    grid = dim3(blockCt);
    block = dim3(threadCt);
}
// Choose a square 2-D launch configuration for a width x width domain:
// threads per side from ThreadChop2d, block count per side =
// ceil(width / threads) clamped to MAX_BLOCKS_2D. Results are returned
// through the grid/block out-params.
void GridAndBlocks2d(dim3 &grid, dim3 &block, int width)
{
    int threadCt = ThreadChop2d(width);
    int blockCt = MinInt((width + threadCt - 1) / threadCt, MAX_BLOCKS_2D);
    grid = dim3(blockCt, blockCt);
    block = dim3(threadCt, threadCt);
} | e2d1a2a8f27e0ddc5a22e65960f7aafd3bc52e52.cu | #pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#define MAX_BLOCKS_1D 16384
#define MAX_BLOCKS_2D 128
// Heap-allocate a dim3 with the given extents (y and z default to 1).
// Caller owns the returned pointer and must free() it.
dim3 *dim3Ctr(int x, int y = 1, int z = 1)
{
dim3 *a;
a = (dim3 *)malloc(sizeof(dim3));
a->x = x;
a->y = y;
a->z = z;
return a;
}
// Heap-allocate the unit extent {1, 1, 1}; caller must free() it.
dim3 *dim3Unit()
{
dim3 *a;
a = (dim3 *)malloc(sizeof(dim3));
a->x = 1;
a->y = 1;
a->z = 1;
return a;
}
// Total element count described by the dim3 extents (x * y * z).
int dim3Vol(dim3 *a)
{
    int volume = a->x;
    volume *= a->y;
    volume *= a->z;
    return volume;
}
// Debug helper: prints the three extents of a dim3 (no trailing newline).
void printDim3(dim3 *yow)
{
printf("yow: {%d, %d, %d}", yow->x, yow->y, yow->z);
}
// Pick a 1-D thread-block size for a problem of `size` elements.
// Walks the step thresholds in ascending order, keeping the largest
// block size whose threshold the problem exceeds (1 .. 256 threads).
int ThreadChop1d(int size)
{
    int threads = 1;
    if (size > 3)     threads = 2;
    if (size > 15)    threads = 4;
    if (size > 63)    threads = 8;
    if (size > 255)   threads = 16;
    if (size > 1024)  threads = 32;
    if (size > 4096)  threads = 64;
    if (size > 16384) threads = 128;
    if (size > 65536) threads = 256;
    return threads;
}
// Pick a per-axis thread count for a square 2-D block given the domain
// width: 1 for width <= 3, doubling at 4, 16, 64, 256 up to 16.
int ThreadChop2d(int width)
{
    int threads = 1;
    if (width > 3)   threads = 2;
    if (width > 15)  threads = 4;
    if (width > 63)  threads = 8;
    if (width > 255) threads = 16;
    return threads;
}
// Choose a 1-D launch configuration for `size` elements: block size from
// ThreadChop1d, grid size = ceil(size / threads) clamped to MAX_BLOCKS_1D.
// NOTE(review): when the clamp kicks in, grid*block < size; callers'
// kernels presumably grid-stride or re-launch -- confirm at call sites.
void GridAndBlocks1d(dim3 &grid, dim3 &block, int size)
{
int threadCt = ThreadChop1d(size);
int blockCt = MinInt((size + threadCt - 1) / threadCt, MAX_BLOCKS_1D);
grid = dim3(blockCt);
block = dim3(threadCt);
}
// Choose a square 2-D launch configuration for a width x width domain:
// block is threadCt x threadCt, grid is blockCt x blockCt with blockCt
// clamped to MAX_BLOCKS_2D (so the grid may undercover large widths).
void GridAndBlocks2d(dim3 &grid, dim3 &block, int width)
{
int threadCt = ThreadChop2d(width);
int blockCt = MinInt((width + threadCt - 1) / threadCt, MAX_BLOCKS_2D);
grid = dim3(blockCt, blockCt);
block = dim3(threadCt, threadCt);
}
094f51d762979f5364f6951de0304405bd33a4ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF32F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
float,
4,
float,
float>;
// Device-side driver for LinearCombinationPlanarComplex<float,4>: reads one
// planar-complex accumulator/source vector from global memory, applies the
// functor, and writes the result. Not launched in this file -- presumably
// exists to force device-side instantiation of the functor.
__global__ void epilogue_thread_functor_planar_complex_f32_f32(
float *output_ptr,
float const *accum_ptr,
float const *source_ptr,
typename FunctorPlanarComplexF32F32::Params params) {
FunctorPlanarComplexF32F32 linear_combination_op(params);
// View the raw float buffers as planar-complex arrays (real/imag planes).
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4> const *>(source_ptr);
*reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4>*>(output_ptr) = linear_combination_op(accum, source);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side unit test: verifies LinearCombinationPlanarComplex<float,4>
// computes alpha*accum + beta*source lane-by-lane against a scalar
// cutlass::complex reference.
TEST(Epilogue_thread_linear_combination_planar_complex, f32) {
using Element = float;
using ElementOutput = float;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
// Non-trivial complex scale factors so real/imag cross terms are exercised.
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
// Guard against the comparison trivially passing on an all-zero result.
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
float,
float>;
// Device-side driver for LinearCombinationPlanarComplex<half_t,4> with float
// accumulation: re-applies the functor N times (feedback through `source`).
// `#pragma unroll 1` keeps the loop sequential; not launched in this file.
__global__ void epilogue_thread_functor_planar_complex_f16_f32(
cutlass::half_t *output_ptr,
float const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F32::Params params,
int N) {
FunctorPlanarComplexF16F32 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side unit test: float compute / half_t output. The reference is
// computed in float and cast to half_t before the exact-equality compare,
// so the functor is expected to round identically.
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f32) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs (small integers, exactly representable in half).
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
// Guard against the comparison trivially passing on an all-zero result.
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F16 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
cutlass::half_t,
cutlass::half_t>;
// Device-side driver for the all-half_t LinearCombinationPlanarComplex<...,4>:
// re-applies the functor N times with feedback through `source`.
// `#pragma unroll 1` keeps the loop sequential; not launched in this file.
__global__ void epilogue_thread_functor_planar_complex_f16_f16(
cutlass::half_t *output_ptr,
cutlass::half_t const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F16::Params params,
int N) {
FunctorPlanarComplexF16F16 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side unit test: half_t compute and output, kCount = 8 (wider than the
// device driver above, which uses 4). Reference is computed in half_t.
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f16) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs (small integers, exactly representable in half).
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
// Guard against the comparison trivially passing on an all-zero result.
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 094f51d762979f5364f6951de0304405bd33a4ef.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF32F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
float,
4,
float,
float>;
// Device-side driver for LinearCombinationPlanarComplex<float,4>: reads one
// planar-complex accumulator/source vector from global memory, applies the
// functor, and writes the result. Not launched in this file -- presumably
// exists to force device-side instantiation of the functor.
__global__ void epilogue_thread_functor_planar_complex_f32_f32(
float *output_ptr,
float const *accum_ptr,
float const *source_ptr,
typename FunctorPlanarComplexF32F32::Params params) {
FunctorPlanarComplexF32F32 linear_combination_op(params);
// View the raw float buffers as planar-complex arrays (real/imag planes).
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4> const *>(source_ptr);
*reinterpret_cast<cutlass::ArrayPlanarComplex<float, 4>*>(output_ptr) = linear_combination_op(accum, source);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side unit test: verifies LinearCombinationPlanarComplex<float,4>
// computes alpha*accum + beta*source lane-by-lane against a scalar
// cutlass::complex reference.
TEST(Epilogue_thread_linear_combination_planar_complex, f32) {
using Element = float;
using ElementOutput = float;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
// Non-trivial complex scale factors so real/imag cross terms are exercised.
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
// Guard against the comparison trivially passing on an all-zero result.
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F32 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
float,
float>;
// Device-side driver for LinearCombinationPlanarComplex<half_t,4> with float
// accumulation: re-applies the functor N times (feedback through `source`).
// `#pragma unroll 1` keeps the loop sequential; not launched in this file.
__global__ void epilogue_thread_functor_planar_complex_f16_f32(
cutlass::half_t *output_ptr,
float const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F32::Params params,
int N) {
FunctorPlanarComplexF16F32 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<float , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side unit test: float compute / half_t output. The reference is
// computed in float and cast to half_t before the exact-equality compare,
// so the functor is expected to round identically.
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f32) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 4;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs (small integers, exactly representable in half).
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
// Guard against the comparison trivially passing on an all-zero result.
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace thread {
using FunctorPlanarComplexF16F16 = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
cutlass::half_t,
4,
cutlass::half_t,
cutlass::half_t>;
// Device-side driver for the all-half_t LinearCombinationPlanarComplex<...,4>:
// re-applies the functor N times with feedback through `source`.
// `#pragma unroll 1` keeps the loop sequential; not launched in this file.
__global__ void epilogue_thread_functor_planar_complex_f16_f16(
cutlass::half_t *output_ptr,
cutlass::half_t const *accum_ptr,
cutlass::half_t const *source_ptr,
typename FunctorPlanarComplexF16F16::Params params,
int N) {
FunctorPlanarComplexF16F16 linear_combination_op(params);
auto accum = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(accum_ptr);
auto source = *reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4> const *>(source_ptr);
#pragma unroll 1
for (int n = 0; n < N; ++n) {
source = linear_combination_op(accum, source);
}
*reinterpret_cast<cutlass::ArrayPlanarComplex<cutlass::half_t , 4>*>(output_ptr) = source;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side unit test: half_t compute and output, kCount = 8 (wider than the
// device driver above, which uses 4). Reference is computed in half_t.
TEST(Epilogue_thread_linear_combination_planar_complex, f16_f16) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using Functor = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
ElementOutput,
kCount,
Element,
Element>;
cutlass::complex<Element> alpha(Element(2), Element(1));
cutlass::complex<Element> beta(Element(1), Element(-1));
typename Functor::Params params(alpha, beta);
Functor linear_combination_op(params);
cutlass::ArrayPlanarComplex<ElementOutput, kCount> source;
cutlass::ArrayPlanarComplex<Element, kCount> accum;
// Define arbitrary inputs (small integers, exactly representable in half).
for (int i = 0; i < kCount; ++i) {
accum.real[i] = Element(i * 2);
accum.imag[i] = Element((i * 3 % 6) - 3);
source.real[i] = ElementOutput((i * 7 % 9) - 4);
source.imag[i] = ElementOutput(((i * 5 + 2) % 9) - 4);
}
cutlass::ArrayPlanarComplex<ElementOutput, kCount> destination = linear_combination_op(accum, source);
// Verify each result
for (int i = 0; i < kCount; ++i) {
cutlass::complex<Element> expected = alpha * cutlass::complex<Element>(accum.real[i], accum.imag[i]) +
beta * cutlass::complex<Element>(Element(source.real[i]), Element(source.imag[i]));
cutlass::complex<ElementOutput> got(destination.real[i], destination.imag[i]);
EXPECT_TRUE(ElementOutput(expected.real()) == got.real());
EXPECT_TRUE(ElementOutput(expected.imag()) == got.imag());
// Guard against the comparison trivially passing on an all-zero result.
EXPECT_TRUE(expected.real() != Element(0) || expected.imag() != Element(0));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
ac1039ecc10b4ad08fd47a5438887faacd57d550.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2013-2015 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013 Frank Ong, Martin Uecker, Pat Virtue, and Mark Murphy
* frankong@berkeley.edu
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "num/multind.h"
#include "num/gpuops.h"
#include "dfwavelet_kernels.h"
#include "dfwavelet_impl.h"
# define _hdev_ __host__ __device__
// _data_t is the interal representation of data_t in CUDA
// Must be float2/double2 for data_t=Complex float/double or float/double for data_t=float/double
typedef float2 _data_t;
// Float2 Operators
// Complex arithmetic on float2 (x = real part, y = imaginary part).
// Complex addition.
inline _hdev_ float2 operator+ (float2 z1, float2 z2) {
return make_float2 (z1.x + z2.x, z1.y + z2.y);
}
// Complex subtraction.
inline _hdev_ float2 operator- (float2 z1, float2 z2) {
return make_float2 (z1.x - z2.x, z1.y - z2.y);
}
// Complex multiplication: (a+bi)(c+di) = (ac-bd) + (ad+bc)i.
inline _hdev_ float2 operator* (float2 z1, float2 z2) {
return make_float2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x);
}
// Scaling by a real scalar (both operand orders).
inline _hdev_ float2 operator* (float2 z1, float alpha) {
return make_float2 (z1.x*alpha, z1.y*alpha);
}
inline _hdev_ float2 operator* (float alpha,float2 z1) {
return make_float2 (z1.x*alpha, z1.y*alpha);
}
// NOTE(review): despite the signature, `alpha` is ignored and this returns
// the COMPONENT-WISE reciprocal (1/x, 1/y), not the complex quotient
// alpha/z1. Confirm the intended semantics at call sites before changing.
inline _hdev_ float2 operator/ (float alpha,float2 z1) {
return make_float2 (1.f/z1.x, 1.f/z1.y);
}
// In-place complex addition.
inline _hdev_ void operator+= (float2 &z1, float2 z2) {
z1.x += z2.x;
z1.y += z2.y;
}
// Complex magnitude |z| = sqrt(x^2 + y^2). Note: calls double-precision
// sqrt() on a float argument; sqrtf would avoid the promotion.
inline _hdev_ float abs(float2 z1) {
return sqrt(z1.x*z1.x + z1.y*z1.y);
}
// Double2 Operators
// Complex arithmetic on double2 (x = real part, y = imaginary part);
// mirrors the float2 overloads above.
inline _hdev_ double2 operator+ (double2 z1, double2 z2) {
return make_double2 (z1.x + z2.x, z1.y + z2.y);
}
inline _hdev_ double2 operator- (double2 z1, double2 z2) {
return make_double2 (z1.x - z2.x, z1.y - z2.y);
}
// Complex multiplication: (a+bi)(c+di) = (ac-bd) + (ad+bc)i.
inline _hdev_ double2 operator* (double2 z1, double2 z2) {
return make_double2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x);
}
inline _hdev_ double2 operator* (double2 z1, double alpha) {
return make_double2 (z1.x*alpha, z1.y*alpha);
}
inline _hdev_ double2 operator* (double alpha,double2 z1) {
return make_double2 (z1.x*alpha, z1.y*alpha);
}
// NOTE(review): as in the float2 version, `alpha` is ignored -- this is the
// component-wise reciprocal, not complex division. Also uses the float
// literal 1.f in a double computation.
inline _hdev_ double2 operator/ (double alpha,double2 z1) {
return make_double2 (1.f/z1.x, 1.f/z1.y);
}
inline _hdev_ void operator+= (double2 &z1, double2 z2) {
z1.x += z2.x;
z1.y += z2.y;
}
// Complex magnitude |z| = sqrt(x^2 + y^2).
inline _hdev_ double abs(double2 z1) {
return sqrt(z1.x*z1.x + z1.y*z1.y);
}
/********** Macros ************/
// cuda(Call): token-pastes "cuda" onto Call, so cuda(Malloc(...)) expands to
// cudaMalloc(...); on failure prints the error string and executes a bare
// `throw`, which outside a catch block calls std::terminate.
// NOTE(review): in this hipified file the paste still produces cuda* names
// while the error type/checks use hip* -- presumably the HIP headers provide
// cuda-compat aliases; verify this actually compiles under HIP.
#define cuda(Call) do { \
hipError_t err = cuda ## Call ; \
if (err != hipSuccess){ \
fprintf(stderr, "%s\n", hipGetErrorString(err)); \
throw; \
} \
} while(0)
// Block until the device is idle and surface any pending async error.
#define cuda_sync() do{ \
cuda (DeviceSynchronize()); \
cuda (GetLastError()); \
} while(0)
/********** Macros ************/
// NOTE(review): the two macros below are byte-identical redefinitions of the
// pair above (legal when token-identical, but redundant).
#define cuda(Call) do { \
hipError_t err = cuda ## Call ; \
if (err != hipSuccess){ \
fprintf(stderr, "%s\n", hipGetErrorString(err)); \
throw; \
} \
} while(0)
#define cuda_sync() do{ \
cuda (DeviceSynchronize()); \
cuda (GetLastError()); \
} while(0)
// ############################################################################
// Headers
// ############################################################################
static __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t *Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_iwt3df_dep(_data_t *out,_data_t *Lz,_data_t *Hz,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_iwt3df_row(_data_t *out,_data_t *Ly,_data_t *Hy,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_iwt3df_col(_data_t *out,_data_t *Lx,_data_t *Hx,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_iwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz);
static __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd);
static __global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax);
static __global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3);
static __global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3);
/* Host-pointer wrapper around dffwt3_gpu() (forward 3-D df-wavelet
 * transform): allocates device buffers, uploads the three velocity
 * components, runs the transform on the GPU, and downloads the wavelet
 * coefficients. Requires plan->use_gpu == 2; any CUDA failure aborts via
 * the cuda() macro. */
extern "C" void dffwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
assert(plan->use_gpu==2);
data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz;
// Upload the three input components.
cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) ));
cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice ));
// Output coefficient buffers (numCoeff may exceed numPixel).
cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) ));
dffwt3_gpu(plan,dev_wcdf1,dev_wcdf2,dev_wcn,dev_vx,dev_vy,dev_vz);
// Download results and release all device buffers.
cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_wcn, dev_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Free( dev_wcdf1 ));
cuda(Free( dev_wcdf2 ));
cuda(Free( dev_wcn ));
cuda(Free( dev_vx ));
cuda(Free( dev_vy ));
cuda(Free( dev_vz ));
}
/* Host-pointer wrapper around dfiwt3_gpu() (inverse 3-D df-wavelet
 * transform): uploads wavelet coefficients, reconstructs the three velocity
 * components on the GPU, and downloads them. Requires plan->use_gpu == 2. */
extern "C" void dfiwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
assert(plan->use_gpu==2);
data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz;
// Upload the input coefficient planes.
cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) ));
cuda(Memcpy( dev_wcdf1, in_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_wcdf2, in_wcdf2, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_wcn, in_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice ));
// Output velocity buffers.
cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) ));
dfiwt3_gpu(plan,dev_vx,dev_vy,dev_vz,dev_wcdf1,dev_wcdf2,dev_wcn);
// Download results and release all device buffers.
cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Free( dev_wcdf1 ));
cuda(Free( dev_wcdf2 ));
cuda(Free( dev_wcn ));
cuda(Free( dev_vx ));
cuda(Free( dev_vy ));
cuda(Free( dev_vz ));
}
/* Host-pointer wrapper around dfsoftthresh_gpu(): soft-thresholds the
 * wavelet coefficients IN PLACE (the out_* buffers are both input and
 * output). dfthresh applies to the df planes, nthresh to the n plane.
 * Requires plan->use_gpu == 2. */
extern "C" void dfsoftthresh_gpuHost(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn)
{
assert(plan->use_gpu==2);
data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn;
cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) ));
// Upload current coefficients from the in/out host buffers.
cuda(Memcpy( dev_wcdf1, out_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_wcdf2, out_wcdf2, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_wcn, out_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice ));
dfsoftthresh_gpu(plan,dfthresh,nthresh,dev_wcdf1,dev_wcdf2,dev_wcn);
// Download thresholded coefficients back into the same host buffers.
cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_wcn, dev_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Free( dev_wcdf1 ));
cuda(Free( dev_wcdf2 ));
cuda(Free( dev_wcn ));
}
/* Host-pointer wrapper around dfwavthresh3_gpu() (forward transform,
 * soft-threshold, inverse transform in one call). Only one set of device
 * buffers is allocated: dfwavthresh3_gpu is called with the dev_v* pointers
 * as both input and output, so the operation is in place on the device.
 * Requires plan->use_gpu == 2. */
extern "C" void dfwavthresh3_gpuHost(struct dfwavelet_plan_s* plan, scalar_t dfthresh,scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
assert(plan->use_gpu==2);
data_t*dev_vx,*dev_vy,*dev_vz;
cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) ));
cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice ));
// Device in/out aliased deliberately (in-place thresholding pipeline).
dfwavthresh3_gpu(plan,dfthresh,nthresh,dev_vx,dev_vy,dev_vz,dev_vx,dev_vy,dev_vz);
cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost ));
cuda(Free( dev_vx ));
cuda(Free( dev_vy ));
cuda(Free( dev_vz ));
}
/*
 * dffwt3_gpu — forward 3-D divergence-free wavelet transform on device buffers.
 *
 * Inputs  in_vx/in_vy/in_vz: the three velocity components (device memory);
 * they are circularly shifted in place for the transform and un-shifted again
 * before returning.
 * Outputs out_wcdf1/out_wcdf2/out_wcn: wavelet coefficient buffers of
 * plan->numCoeff entries each.
 *
 * Per level (finest to coarsest) each component is filtered and downsampled
 * along columns (x), rows (y) and depths (z), producing 8 subbands; the seven
 * detail subbands are then rescaled by 1/res and linearly combined
 * (cu_fwt3df_LC*) to form the divergence-free / non-divergence-free split.
 */
extern "C" void dffwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
// Random cycle-spinning shift (undone via circunshift_gpu at the end).
circshift_gpu(plan,in_vx);
circshift_gpu(plan,in_vy);
circshift_gpu(plan,in_vz);
long numCoeff, filterLen,*waveSizes;
numCoeff = plan->numCoeff;
waveSizes = plan->waveSizes;
filterLen = plan->filterLen;
int numLevels = plan->numLevels;
// Cast from generic data_t to device compatible _data_t
_data_t* dev_wcdf1 = (_data_t*) out_wcdf1;
_data_t* dev_wcdf2 = (_data_t*) out_wcdf2;
_data_t* dev_wcn = (_data_t*) out_wcn;
_data_t* dev_in_vx = (_data_t*) in_vx;
_data_t* dev_in_vy = (_data_t*) in_vy;
_data_t* dev_in_vz = (_data_t*) in_vz;
_data_t* res = (_data_t*) plan->res;
_data_t* dev_temp1,*dev_temp2;
cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) ));
cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t) ));
// Get dimensions
int dx = plan->imSize[0];
int dy = plan->imSize[1];
int dz = plan->imSize[2];
int dxNext = waveSizes[0 + 3*numLevels];
int dyNext = waveSizes[1 + 3*numLevels];
int dzNext = waveSizes[2 + 3*numLevels];
int blockSize = dxNext*dyNext*dzNext;
// allocate device memory and copy filters to device
scalar_t *dev_filters;
cuda(Malloc( (void**)&dev_filters, 4*plan->filterLen*sizeof(scalar_t) ));
scalar_t *dev_lod0 = dev_filters + 0*plan->filterLen;
scalar_t *dev_hid0 = dev_filters + 1*plan->filterLen;
scalar_t *dev_lod1 = dev_filters + 2*plan->filterLen;
scalar_t *dev_hid1 = dev_filters + 3*plan->filterLen;
// NOTE(review): each copy moves 2*filterLen scalars, i.e. it assumes lod0/hid0
// (and lod1/hid1) are contiguous in host memory — confirm in the plan layout.
cuda(Memcpy( dev_lod0, plan->lod0, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_lod1, plan->lod1, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice ));
// Initialize variables and Pointers for FWT
int const SHMEM_SIZE = 16384;
int const T = 512;
int mem, K;
dim3 numBlocks, numThreads;
// Temp Pointers: temp1 holds the two x-filtered halves, temp2 the four
// x/y-filtered quarters of a level.
_data_t *dev_tempLx,*dev_tempHx;
dev_tempLx = dev_temp1;
dev_tempHx = dev_tempLx + numCoeff/2;
_data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy;
dev_tempLxLy = dev_temp2;
dev_tempHxLy = dev_tempLxLy + numCoeff/4;
dev_tempLxHy = dev_tempHxLy + numCoeff/4;
dev_tempHxHy = dev_tempLxHy + numCoeff/4;
// wcdf1 Pointers: coarse block first, then 7 detail subbands per level;
// start HxLyLz past the end so the loop below can walk back level by level.
_data_t *dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx;
dev_LxLyLz_df1 = dev_wcdf1;
dev_HxLyLz_df1 = dev_LxLyLz_df1 + waveSizes[0]*waveSizes[1]*waveSizes[2];
for (int l = 1; l <= numLevels; ++l){
dev_HxLyLz_df1 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l];
}
dev_current_vx = dev_in_vx;
// wcdf2 Pointers
_data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy;
dev_LxLyLz_df2 = dev_wcdf2;
dev_HxLyLz_df2 = dev_LxLyLz_df2 + waveSizes[0]*waveSizes[1]*waveSizes[2];
for (int l = 1; l <= numLevels; ++l){
dev_HxLyLz_df2 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l];
}
dev_current_vy = dev_in_vy;
// wcn Pointers
_data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz;
dev_LxLyLz_n = dev_wcn;
dev_HxLyLz_n = dev_LxLyLz_n + waveSizes[0]*waveSizes[1]*waveSizes[2];
for (int l = 1; l <= numLevels; ++l){
dev_HxLyLz_n += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l];
}
dev_current_vz = dev_in_vz;
//*****************Loop through levels****************
for (int l = numLevels; l >= 1; --l)
{
dxNext = waveSizes[0 + 3*l];
dyNext = waveSizes[1 + 3*l];
dzNext = waveSizes[2 + 3*l];
blockSize = dxNext*dyNext*dzNext;
// Update Pointers: step each detail-subband pointer back one level
// (7 subbands of blockSize each, laid out consecutively).
// df1
dev_HxLyLz_df1 = dev_HxLyLz_df1 - 7*blockSize;
dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize;
dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize;
dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize;
dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize;
dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize;
dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize;
// df2
dev_HxLyLz_df2 = dev_HxLyLz_df2 - 7*blockSize;
dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize;
dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize;
dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize;
dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize;
dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize;
dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize;
// n
dev_HxLyLz_n = dev_HxLyLz_n - 7*blockSize;
dev_LxHyLz_n = dev_HxLyLz_n + blockSize;
dev_HxHyLz_n = dev_LxHyLz_n + blockSize;
dev_LxLyHz_n = dev_HxHyLz_n + blockSize;
dev_HxLyHz_n = dev_LxLyHz_n + blockSize;
dev_LxHyHz_n = dev_HxLyHz_n + blockSize;
dev_HxHyHz_n = dev_LxHyHz_n + blockSize;
//************WCVX***********
// vx uses the second filter pair (lod1/hid1) along its own axis (x),
// and the first pair (lod0/hid0) along y and z.
// FWT Columns
K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t));
numBlocks = dim3(1,(dy+K-1)/K,dz);
numThreads = dim3(T/K,K,1);
mem = K*dx*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_col) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLx,dev_tempHx,dev_current_vx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cuda_sync();
// FWT Rows
K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,1,dz);
numThreads = dim3(K,T/K,1);
mem = K*dy*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Depths
K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1);
numThreads = dim3(K,1,T/K);
mem = K*dz*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_LxLyLz_df1,dev_LxLyHz_df1,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_LxHyLz_df1,dev_LxHyHz_df1,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_HxLyLz_df1,dev_HxLyHz_df1,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_HxHyLz_df1,dev_HxHyHz_df1,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
//************WCVY***********
// vy uses lod1/hid1 along y only.
// FWT Columns
K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t));
numBlocks = dim3(1,(dy+K-1)/K,dz);
numThreads = dim3(T/K,K,1);
mem = K*dx*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_col) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLx,dev_tempHx,dev_current_vy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Rows
K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,1,dz);
numThreads = dim3(K,T/K,1);
mem = K*dy*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cuda_sync();
// FWT Depths
K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1);
numThreads = dim3(K,1,T/K);
mem = K*dz*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_LxLyLz_df2,dev_LxLyHz_df2,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_LxHyLz_df2,dev_LxHyHz_df2,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_HxLyLz_df2,dev_HxLyHz_df2,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_HxHyLz_df2,dev_HxHyHz_df2,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
//************WCVZ***********
// vz uses lod1/hid1 along z only.
// FWT Columns
K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t));
numBlocks = dim3(1,(dy+K-1)/K,dz);
numThreads = dim3(T/K,K,1);
mem = K*dx*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_col) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLx,dev_tempHx,dev_current_vz,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Rows
K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,1,dz);
numThreads = dim3(K,T/K,1);
mem = K*dy*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Depths
K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1);
numThreads = dim3(K,1,T/K);
mem = K*dz*sizeof(_data_t);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_LxLyLz_n,dev_LxLyHz_n,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_LxHyLz_n,dev_LxHyHz_n,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_HxLyLz_n,dev_HxLyHz_n,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_HxHyLz_n,dev_HxHyHz_n,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cuda_sync();
//******* Multi ******
// Rescale all 7 contiguous detail subbands of this level by 1/res.
int maxInd = 7*blockSize;
numThreads = T;
numBlocks = (maxInd+numThreads.x-1)/numThreads.x;
hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df1,1.f/res[0],maxInd);
hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df2,1.f/res[1],maxInd);
hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_n,1.f/res[2],maxInd);
cuda_sync();
//*******Linear Combination******
// Recombine the per-component subbands into df1/df2/n channels.
int t1 = min(dxNext,T);
int t2 = T/t1;
numBlocks = dim3( (dxNext+t1-1)/t1, (dyNext+t2-1)/t2, dzNext);
numThreads = dim3(t1,t2,1);
hipLaunchKernelGGL(( cu_fwt3df_LC1) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext);
hipLaunchKernelGGL(( cu_fwt3df_LC2) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext);
hipLaunchKernelGGL(( cu_fwt3df_LC3) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dxNext,dyNext,dzNext);
cuda_sync();
hipLaunchKernelGGL(( cu_fwt3df_LC1_diff) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext);
hipLaunchKernelGGL(( cu_fwt3df_LC2_diff) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext);
cuda_sync();
// Next iteration transforms the low-pass residual written into the
// beginning of each coefficient buffer.
dev_current_vx = dev_wcdf1;
dev_current_vy = dev_wcdf2;
dev_current_vz = dev_wcn;
dx = dxNext;
dy = dyNext;
dz = dzNext;
}
cuda(Free( dev_filters ));
cuda(Free( dev_temp1 ));
cuda(Free( dev_temp2 ));
// Restore the caller's input buffers to their original (unshifted) state.
circunshift_gpu(plan,in_vx);
circunshift_gpu(plan,in_vy);
circunshift_gpu(plan,in_vz);
}
/*
 * dfiwt3_gpu — inverse 3-D divergence-free wavelet transform on device buffers.
 *
 * Inputs  in_wcdf1/in_wcdf2/in_wcn: coefficient buffers as produced by
 * dffwt3_gpu (coarse block followed by 7 detail subbands per level).
 * Outputs out_vx/out_vy/out_vz: reconstructed velocity components; the
 * plan's random circular shift is undone on the outputs at the end.
 *
 * Mirrors dffwt3_gpu: per level (coarsest to finest) it first inverts the
 * linear combinations, rescales the detail subbands by res, then upsamples
 * and filters along depths (z), rows (y) and columns (x).
 * NOTE(review): the subband mixing performed by cu_iwt3df_LC* appears to
 * modify the caller's input coefficient buffers in place — confirm callers
 * treat them as scratch after this call.
 */
extern "C" void dfiwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
long numCoeff, filterLen,*waveSizes;
numCoeff = plan->numCoeff;
waveSizes = plan->waveSizes;
filterLen = plan->filterLen;
int numLevels = plan->numLevels;
// Cast from generic data_t to device compatible _data_t
_data_t* dev_out_vx = (_data_t*)out_vx;
_data_t* dev_out_vy = (_data_t*)out_vy;
_data_t* dev_out_vz = (_data_t*)out_vz;
_data_t* dev_wcdf1 = (_data_t*)in_wcdf1;
_data_t* dev_wcdf2 = (_data_t*)in_wcdf2;
_data_t* dev_wcn = (_data_t*)in_wcn;
_data_t* res = (_data_t*) plan->res;
_data_t* dev_temp1, *dev_temp2;
cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) ));
cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t)) );
// allocate device memory
scalar_t *dev_filters;
cuda(Malloc( (void**)&dev_filters, 4*(plan->filterLen)*sizeof(scalar_t) ));
scalar_t *dev_lor0 = dev_filters + 0*plan->filterLen;
scalar_t *dev_hir0 = dev_filters + 1*plan->filterLen;
scalar_t *dev_lor1 = dev_filters + 2*plan->filterLen;
scalar_t *dev_hir1 = dev_filters + 3*plan->filterLen;
// NOTE(review): copies 2*filterLen scalars per call — assumes lor0/hir0 and
// lor1/hir1 are contiguous in host memory, as for the forward filters.
cuda(Memcpy( dev_lor0, plan->lor0, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice ));
cuda(Memcpy( dev_lor1, plan->lor1, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice ));
// Workspace dimensions
int dxWork = waveSizes[0 + 3*numLevels]*2-1 + filterLen-1;
int dyWork = waveSizes[1 + 3*numLevels]*2-1 + filterLen-1;
int dzWork = waveSizes[2 + 3*numLevels]*2-1 + filterLen-1;
// Initialize variables and pointers for IWT
int const SHMEM_SIZE = 16384;
int const T = 512;
int mem,K;
dim3 numBlocks, numThreads;
int dx = waveSizes[0];
int dy = waveSizes[1];
int dz = waveSizes[2];
// Temp Pointers
_data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy;
dev_tempLxLy = dev_temp1;
dev_tempHxLy = dev_tempLxLy + numCoeff/4;
dev_tempLxHy = dev_tempHxLy + numCoeff/4;
dev_tempHxHy = dev_tempLxHy + numCoeff/4;
_data_t *dev_tempLx,*dev_tempHx;
dev_tempLx = dev_temp2;
dev_tempHx = dev_tempLx + numCoeff/2;
// wcdf1 Pointers: start at the coarse block; HxLyLz advances 7 subbands
// per level at the bottom of the loop.
_data_t *dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx;
dev_LxLyLz_df1 = dev_wcdf1;
dev_HxLyLz_df1 = dev_LxLyLz_df1 + dx*dy*dz;
dev_current_vx = dev_LxLyLz_df1;
// wcdf2 Pointers
_data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy;
dev_LxLyLz_df2 = dev_wcdf2;
dev_HxLyLz_df2 = dev_LxLyLz_df2 + dx*dy*dz;
dev_current_vy = dev_LxLyLz_df2;
// wcn Pointers
_data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz;
dev_LxLyLz_n = dev_wcn;
dev_HxLyLz_n = dev_LxLyLz_n + dx*dy*dz;
dev_current_vz = dev_LxLyLz_n;
for (int level = 1; level < numLevels+1; ++level)
{
dx = waveSizes[0 + 3*level];
dy = waveSizes[1 + 3*level];
dz = waveSizes[2 + 3*level];
int blockSize = dx*dy*dz;
int dxNext = waveSizes[0+3*(level+1)];
int dyNext = waveSizes[1+3*(level+1)];
int dzNext = waveSizes[2+3*(level+1)];
// Calculate the centered crop offset of the next-level image inside the
// full (upsampled + filtered) convolution workspace.
dxWork = (2*dx-1 + filterLen-1);
dyWork = (2*dy-1 + filterLen-1);
dzWork = (2*dz-1 + filterLen-1);
int xOffset = (int) floor((dxWork - dxNext) / 2.0);
int yOffset = (int) floor((dyWork - dyNext) / 2.0);
int zOffset = (int) floor((dzWork - dzNext) / 2.0);
// Update Pointers: the 7 detail subbands of this level are consecutive.
// df1
dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize;
dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize;
dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize;
dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize;
dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize;
dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize;
// df2
dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize;
dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize;
dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize;
dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize;
dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize;
dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize;
// n
dev_LxHyLz_n = dev_HxLyLz_n + blockSize;
dev_HxHyLz_n = dev_LxHyLz_n + blockSize;
dev_LxLyHz_n = dev_HxHyLz_n + blockSize;
dev_HxLyHz_n = dev_LxLyHz_n + blockSize;
dev_LxHyHz_n = dev_HxLyHz_n + blockSize;
dev_HxHyHz_n = dev_LxHyHz_n + blockSize;
//*******Linear Combination******
// Invert the forward transform's subband mixing (cu_fwt3df_LC*).
int t1 = min(dxNext,T);
int t2 = T/t1;
numBlocks = dim3( (dx+t1-1)/t1, (dy+t2-1)/t2, dz);
numThreads = dim3(t1,t2,1);
hipLaunchKernelGGL(( cu_iwt3df_LC1) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz);
hipLaunchKernelGGL(( cu_iwt3df_LC2) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz);
hipLaunchKernelGGL(( cu_iwt3df_LC3) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dx,dy,dz);
cuda_sync();
hipLaunchKernelGGL(( cu_iwt3df_LC1_diff) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz);
hipLaunchKernelGGL(( cu_iwt3df_LC2_diff) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz);
cuda_sync();
//******* Multi ******
// Undo the forward transform's 1/res scaling on all 7 detail subbands.
int maxInd = 7*blockSize;
numThreads = T;
numBlocks = (maxInd+numThreads.x-1)/numThreads.x;
hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df1,res[0],maxInd);
hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_df2,res[1],maxInd);
hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads), 0, cuda_get_stream() , dev_HxLyLz_n,res[2],maxInd);
cuda_sync();
//************WCX************
// Update Pointers: at the final level write directly into the output buffer.
if (level==numLevels)
dev_current_vx = dev_out_vx;
// IWT Depths
K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,dy,1);
numThreads = dim3(K,1,(T/K));
mem = K*2*dz*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxLy,dev_LxLyLz_df1,dev_LxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxLy,dev_HxLyLz_df1,dev_HxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxHy,dev_LxHyLz_df1,dev_LxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxHy,dev_HxHyLz_df1,dev_HxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cuda_sync();
// IWT Rows
K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,1,dzNext);
numThreads = dim3(K,(T/K),1);
mem = K*2*dy*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
// IWT Columns — vx uses the second reconstruction pair (lor1/hir1) along x.
K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t));
numBlocks = dim3(1,(dyNext+K-1)/K,dzNext);
numThreads = dim3((T/K),K,1);
mem = K*2*dx*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_col) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_current_vx,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen);
cuda_sync();
//************WCY************
// Update Pointers
if (level==numLevels)
dev_current_vy = dev_out_vy;
// IWT Depths
K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,dy,1);
numThreads = dim3(K,1,(T/K));
mem = K*2*dz*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxLy,dev_LxLyLz_df2,dev_LxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxLy,dev_HxLyLz_df2,dev_HxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxHy,dev_LxHyLz_df2,dev_LxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxHy,dev_HxHyLz_df2,dev_HxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cuda_sync();
// IWT Rows — vy uses lor1/hir1 along y.
K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,1,dzNext);
numThreads = dim3(K,(T/K),1);
mem = K*2*dy*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen);
hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen);
cuda_sync();
// IWT Columns
K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t));
numBlocks = dim3(1,(dyNext+K-1)/K,dzNext);
numThreads = dim3((T/K),K,1);
mem = K*2*dx*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_col) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_current_vy,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
//************WCZ************
// Update Pointers
if (level==numLevels)
dev_current_vz = dev_out_vz;
// IWT Depths — vz uses lor1/hir1 along z.
K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,dy,1);
numThreads = dim3(K,1,(T/K));
mem = K*2*dz*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxLy,dev_LxLyLz_n,dev_LxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxLy,dev_HxLyLz_n,dev_HxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLxHy,dev_LxHyLz_n,dev_LxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHxHy,dev_HxHyLz_n,dev_HxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
cuda_sync();
// IWT Rows
K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,1,dzNext);
numThreads = dim3(K,(T/K),1);
mem = K*2*dy*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
// IWT Columns
K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t));
numBlocks = dim3(1,(dyNext+K-1)/K,dzNext);
numThreads = dim3((T/K),K,1);
mem = K*2*dx*sizeof(_data_t);
hipLaunchKernelGGL(( cu_iwt3df_col) , dim3(numBlocks),dim3(numThreads),mem, cuda_get_stream() , dev_current_vz,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
// Advance to the next (finer) level's detail subbands.
dev_HxLyLz_df1 += 7*blockSize;
dev_HxLyLz_df2 += 7*blockSize;
dev_HxLyLz_n += 7*blockSize;
}
cuda(Free( dev_filters ));
cuda(Free( dev_temp1 ));
cuda(Free( dev_temp2 ));
// Undo the plan's random circular shift on the reconstructed outputs.
circunshift_gpu(plan,out_vx);
circunshift_gpu(plan,out_vy);
circunshift_gpu(plan,out_vz);
}
int rand_lim(int limit) {
	/*
	 * Uniform random integer in [0, limit] drawn from rand().
	 * Rejection sampling: rand() is partitioned into equal-width buckets of
	 * size RAND_MAX/(limit+1); draws that land past the last full bucket are
	 * re-drawn so every value in [0, limit] is equally likely.
	 */
	const int bucket = RAND_MAX / (limit + 1);
	int sample;
	do {
		sample = rand() / bucket;
	} while (sample > limit);
	return sample;
}
void dfwavelet_new_randshift_gpu (struct dfwavelet_plan_s* plan) {
	/*
	 * Draw a fresh random circular shift for every dimension of the plan
	 * (cycle spinning). For each dimension the shift is uniform in
	 * [0, maxShift], where maxShift = dim/2^numLevels (dim rounded up to a
	 * power of two) clamped to at most 8.
	 *
	 * Fixes: removed a dead `i = rand();` whose result was discarded and
	 * immediately overwritten by the loop initializer; guarded the shift
	 * amount so numLevels >= log2(dim) no longer shifts by a negative
	 * count (undefined behavior) — the max shift bottoms out at 1.
	 */
	int i;
	for(i = 0; i < plan->numdims; i++) {
		// Smallest power-of-two exponent with 2^log2dim >= imSize[i].
		int log2dim = 1;
		while( (1<<log2dim) < plan->imSize[i]) {
			log2dim++;
		}
		// Maximum shift: dimension divided by 2^numLevels, at least 1.
		int shiftExp = log2dim - plan->numLevels;
		int maxShift = (shiftExp > 0) ? (1 << shiftExp) : 1;
		if (maxShift > 8) {
			maxShift = 8;
		}
		// Generate random shift value between 0 and maxShift
		plan->randShift[i] = rand_lim(maxShift);
	}
}
extern "C" void dfwavthresh3_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
	/*
	 * Wavelet-domain soft-thresholding of a velocity field, all on device
	 * buffers: forward transform into temporary coefficient buffers,
	 * threshold (dfthresh for the two divergence-free channels, nthresh for
	 * the remaining channel), then inverse transform into the outputs.
	 */
	data_t *wc_df1, *wc_df2, *wc_n;
	cuda(Malloc( (void**)&wc_df1, plan->numCoeff*sizeof(_data_t) ));
	cuda(Malloc( (void**)&wc_df2, plan->numCoeff*sizeof(_data_t) ));
	cuda(Malloc( (void**)&wc_n, plan->numCoeff*sizeof(_data_t) ));
	dffwt3_gpu(plan,wc_df1,wc_df2,wc_n,in_vx,in_vy,in_vz);
	dfsoftthresh_gpu(plan,dfthresh,nthresh,wc_df1,wc_df2,wc_n);
	dfiwt3_gpu(plan,out_vx,out_vy,out_vz,wc_df1,wc_df2,wc_n);
	cuda(Free( wc_df1 ));
	cuda(Free( wc_df2 ));
	cuda(Free( wc_n ));
}
extern "C" void dfsoftthresh_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn)
{
	/*
	 * Soft-threshold the wavelet detail coefficients in place on the device.
	 *
	 * The first plan->numCoarse entries of each buffer (the coarse-scale
	 * block) are left untouched; only the remaining numCoeff-numCoarse
	 * detail coefficients are thresholded. The two divergence-free channels
	 * (wcdf1/wcdf2) use dfthresh, the remaining channel (wcn) uses nthresh.
	 *
	 * Fix: the kernels were launched on the NULL stream while every other
	 * launch in this file targets cuda_get_stream(); launch on the same
	 * stream and synchronize before returning (matching the sibling
	 * functions) so stream-ordered callers observe completed results.
	 */
	assert(plan->use_gpu==1||plan->use_gpu==2);
	_data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn;
	dev_wcdf1 = (_data_t*) out_wcdf1;
	dev_wcdf2 = (_data_t*) out_wcdf2;
	dev_wcn = (_data_t*) out_wcn;
	int numMax;
	int const T = 512;
	dim3 numBlocks, numThreads;
	numMax = plan->numCoeff-plan->numCoarse;
	numBlocks = dim3((numMax+T-1)/T,1,1);
	numThreads = dim3(T,1,1);
	hipLaunchKernelGGL(( cu_soft_thresh) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_wcdf1+plan->numCoarse,dfthresh,numMax);
	hipLaunchKernelGGL(( cu_soft_thresh) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_wcdf2+plan->numCoarse,dfthresh,numMax);
	hipLaunchKernelGGL(( cu_soft_thresh) , dim3(numBlocks),dim3(numThreads), 0, cuda_get_stream() , dev_wcn+plan->numCoarse,nthresh,numMax);
	cuda_sync();
}
/********** Aux functions **********/
extern "C" void circshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) {
	/*
	 * Apply the plan's random circular shift to a device image, in place.
	 * No-op when every per-dimension shift is zero. Only 2-D and 3-D plans
	 * launch a kernel; other dimensionalities fall through unchanged.
	 */
	int allZero = 1;
	for (int d = 0; d < plan->numdims; d++)
	{
		allZero &= (plan->randShift[d]==0);
	}
	if (allZero) {
		return;
	}
	_data_t* data = (_data_t*) data_c;
	// The shift kernel reads from an unshifted snapshot of the input.
	_data_t* snapshot;
	cuda(Malloc((void**)&snapshot, plan->numPixel*sizeof(_data_t)));
	cuda(Memcpy(snapshot, data, plan->numPixel*sizeof(_data_t), hipMemcpyDeviceToDevice));
	int T = 512;
	if (plan->numdims==2)
	{
		hipLaunchKernelGGL(( cu_circshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,snapshot,plan->imSize[0],plan->imSize[1],1,plan->randShift[0],plan->randShift[1],0);
	} else if (plan->numdims==3)
	{
		hipLaunchKernelGGL(( cu_circshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,snapshot,plan->imSize[0],plan->imSize[1],plan->imSize[2],plan->randShift[0],plan->randShift[1],plan->randShift[2]);
	}
	cuda(Free(snapshot));
}
extern "C" void circunshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) {
	/*
	 * Undo the plan's random circular shift on a device image, in place.
	 * Exact inverse of circshift_gpu; no-op when every shift is zero, and
	 * only 2-D and 3-D plans launch a kernel.
	 */
	int allZero = 1;
	for (int d = 0; d < plan->numdims; d++)
	{
		allZero &= (plan->randShift[d]==0);
	}
	if (allZero) {
		return;
	}
	_data_t* data = (_data_t*) data_c;
	// The unshift kernel reads from a snapshot of the still-shifted input.
	_data_t* snapshot;
	cuda(Malloc((void**)&snapshot, plan->numPixel*sizeof(_data_t)));
	cuda(Memcpy(snapshot, data, plan->numPixel*sizeof(_data_t), hipMemcpyDeviceToDevice));
	int T = 512;
	if (plan->numdims==2)
	{
		hipLaunchKernelGGL(( cu_circunshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,snapshot,plan->imSize[0],plan->imSize[1],1,plan->randShift[0],plan->randShift[1],0);
	} else if (plan->numdims==3)
	{
		hipLaunchKernelGGL(( cu_circunshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,snapshot,plan->imSize[0],plan->imSize[1],plan->imSize[2],plan->randShift[0],plan->randShift[1],plan->randShift[2]);
	}
	cuda(Free(snapshot));
}
// ############################################################################
// CUDA function of fwt column convolution
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Output: Lx, Hx
// Input: in, dx, dy, dz, dxNext, lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen)
{
// Dynamic shared memory: blockDim.y columns of dx elements each.
extern __shared__ _data_t cols [];
int ti = threadIdx.x;
int tj = threadIdx.y;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
// NOTE(review): threads with j>=dy return before the __syncthreads() below;
// safe only if barrier participation rules hold for this launch config —
// confirm the host-side K/T choices guarantee it.
if (j>=dy) {
return;
}
// Load Input to Temp Array: stage one full x-column per tj into shared memory.
for (int i = ti; i < dx; i += blockDim.x){
cols[i + tj*dx] = in[i + j*dx + k*dx*dy];
}
__syncthreads();
// Low-Pass and High-Pass Downsample: convolve the column with lod/hid
// (filters applied reversed) and keep every second sample.
int ind, lessThan, greaThan;
for (int i = ti; i < dxNext; i += blockDim.x){
// Type-agnostic zero: x - x works whether _data_t is real or complex.
_data_t y = cols[0]-cols[0];
_data_t z = cols[0]-cols[0];
#pragma unroll
for (int f = 0; f < filterLen; f++){
ind = 2*i+1 - (filterLen-1)+f;
// Branchless symmetric boundary extension:
//   ind < 0   -> -ind-1
//   ind >= dx -> (2*dx-1)-ind
lessThan = (int) (ind<0);
greaThan = (int) (ind>=dx);
ind = -1*lessThan+ind*(-2*lessThan+1);
ind = (2*dx-1)*greaThan+ind*(-2*greaThan+1);
y += cols[ind + tj*dx] * lod[filterLen-1-f];
z += cols[ind + tj*dx] * hid[filterLen-1-f];
}
// Outputs are dxNext long in x; y/z extents are unchanged at this stage.
Lx[i + j*dxNext + k*dxNext*dy] = y;
Hx[i + j*dxNext + k*dxNext*dy] = z;
}
}
// ############################################################################
// CUDA function of fwt row convolution. Assumes fwt_col() has already been called
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Output: LxLy, LxHy / HxLy, HxHy
// Input: Lx/Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen)
{
// Dynamic shared memory: a K x dy tile (K = blockDim.x columns, row stride K).
extern __shared__ _data_t rows [];
int const K = blockDim.x;
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x*blockDim.x+threadIdx.x;
int k = blockIdx.z*blockDim.z+threadIdx.z;
// NOTE(review): threads with i>=dxNext return before the __syncthreads()
// below — confirm the host-side launch config keeps the barrier valid.
if (i>=dxNext)
{
return;
}
// Stage one full y-row (at fixed x=i) into shared memory.
for (int j = tj; j < dy; j += blockDim.y){
rows[ti + j*K] = in[i + j*dxNext + k*dxNext*dy];
}
__syncthreads();
// Low-Pass and High Pass Downsample: convolve along y with lod/hid
// (filters applied reversed) and keep every second sample.
int ind, lessThan, greaThan;
for (int j = tj; j < dyNext; j += blockDim.y){
// Type-agnostic zero: x - x works whether _data_t is real or complex.
_data_t y = rows[0]-rows[0];
_data_t z = rows[0]-rows[0];
#pragma unroll
for (int f = 0; f < filterLen; f++){
ind = 2*j+1 - (filterLen-1)+f;
// Branchless symmetric boundary extension (mirror at both ends of dy).
lessThan = (int) (ind<0);
greaThan = (int) (ind>=dy);
ind = -1*lessThan+ind*(-2*lessThan+1);
ind = (2*dy-1)*greaThan+ind*(-2*greaThan+1);
y += rows[ti + ind*K] * lod[filterLen-1-f];
z += rows[ti + ind*K] * hid[filterLen-1-f];
}
Ly[i + j*dxNext + k*dxNext*dyNext] = y;
Hy[i + j*dxNext + k*dxNext*dyNext] = z;
}
}
// ############################################################################
// CUDA function of fwt depth convolution. Assumes fwt_row() has already been called
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Output: LxLy, LxHy / HxLy, HxHy
// Input: Lx/Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t *Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen)
{
// Forward-wavelet pass along z (depth), same structure as the row pass:
// stage a blockDim.x-wide strip of all dz slices, then compute the
// 2-downsampled low/high-pass outputs with mirror boundary handling.
// Scratchpad layout: deps[ti + k*K]; size given at launch.
extern __shared__ _data_t deps [];
int const K = blockDim.x;
int ti = threadIdx.x;
int tk = threadIdx.z;
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
// NOTE(review): early return before __syncthreads() -- same barrier caveat
// as cu_fwt3df_row; confirm dxNext keeps whole blocks in range.
if (i>=dxNext)
{
return;
}
// Stage one z-column strip into shared memory.
for (int k = tk; k < dz; k += blockDim.z){
deps[ti + k*K] = in[i + j*dxNext + k*dxNext*dy];
}
__syncthreads();
// Low-Pass and High Pass Downsample
int ind, lessThan, greaThan;
for (int k = tk; k < dzNext; k += blockDim.z){
// Zero of _data_t built from existing data (type-agnostic idiom).
_data_t y = deps[0]-deps[0];
_data_t z = deps[0]-deps[0];
#pragma unroll
for (int f = 0; f < filterLen; f++){
ind = 2*k+1 - (filterLen-1)+f;
// Branchless mirror extension: ind<0 -> -ind-1; ind>=dz -> 2*dz-1-ind.
lessThan = (int) (ind<0);
greaThan = (int) (ind>=dz);
ind = -1*lessThan+ind*(-2*lessThan+1);
ind = (2*dz-1)*greaThan+ind*(-2*greaThan+1);
y += deps[ti + ind*K] * lod[filterLen-1-f];
z += deps[ti + ind*K] * hid[filterLen-1-f];
}
Lz[i + j*dxNext + k*dxNext*dy] = y;
Hz[i + j*dxNext + k*dxNext*dy] = z;
}
}
// Linear-combination step 1 of the divergence-free split: for each voxel of
// the three single-high-pass subbands (HLL/LHL/LLH) the df1/df2/n components
// are permuted in place and the new "n" channel receives the original value
// of the channel whose high-pass direction matches, plus 0.25 of each of the
// other two (gated off at the first voxel along the respective axis).
// One thread per voxel; the in-place reads happen before any write, so the
// exact statement order below is load-bearing.
extern "C" __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
// 0/1 gates (bool converted to scalar_t) that disable the neighbour terms
// on the first plane along the corresponding axis.
scalar_t xGreatZero,yGreatZero,zGreatZero;
if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
{
return;
}
//HLL: df1 <- df2, df2 <- n, n <- df1 + 0.25*df2 + 0.25*n (gated)
x = HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext];
y = HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext];
z = HxLyLz_n[i+j*dxNext+k*dxNext*dyNext];
HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext] = y;
HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z;
yGreatZero = j>0;
zGreatZero = k>0;
HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] = x + yGreatZero*0.25f*y + zGreatZero*0.25f*z;
//LHL: df1 is left unchanged here; df2 <- n, n <- df2 + 0.25*df1 + 0.25*n (gated)
x = LxHyLz_df1[i+j*dxNext+k*dxNext*dyNext];
y = LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext];
z = LxHyLz_n[i+j*dxNext+k*dxNext*dyNext];
LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z;
xGreatZero = i>0;
zGreatZero = k>0;
LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = y + xGreatZero*0.25f*x + zGreatZero*0.25f*z;
//LLH: df1 <-> df2 swapped, n <- n + 0.25*df2 + 0.25*df1 (gated)
// NOTE(review): this section uses double literals 0.25 where HLL/LHL use
// 0.25f -- numerically equivalent here, but stylistically inconsistent.
x = LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext];
y = LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext];
z = LxLyHz_n[i+j*dxNext+k*dxNext*dyNext];
LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = y;
LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x;
yGreatZero = j>0;
xGreatZero = i>0;
LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = z + yGreatZero*0.25*y + xGreatZero*0.25*x;
}
// Neighbour-difference correction for LC1: subtracts 0.25 of the df values
// stored at the adjacent voxel (j-1, k-1 or i-1 depending on subband) from
// the "n" channel. Because it reads values written into the same buffers,
// it presumably must run as a separate launch after cu_fwt3df_LC1 so the
// whole grid is consistent -- TODO confirm against the host-side call order.
extern "C" __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
// Explicit complex zero; ties _data_t to float2 (make_float2).
_data_t zero = make_float2(0.f,0.f);
if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
{
return;
}
//HLL: n -= 0.25*df1(j-1) + 0.25*df2(k-1), with zero at the boundary
if (j>0)
y = HxLyLz_df1[i+(j-1)*dxNext+k*dxNext*dyNext];
else
y = zero;
if (k>0)
z = HxLyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext];
else
z = zero;
HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*y - 0.25*z;
//LHL: n -= 0.25*df1(i-1) + 0.25*df2(k-1)
if (i>0)
x = LxHyLz_df1[(i-1)+j*dxNext+k*dxNext*dyNext];
else
x = zero;
if (k>0)
z = LxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext];
else
z = zero;
LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*x - 0.25*z;
//LLH: n -= 0.25*df1(j-1) + 0.25*df2(i-1)
if (j>0)
y = LxLyHz_df1[i+(j-1)*dxNext+k*dxNext*dyNext];
else
y = zero;
if (i>0)
x = LxLyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext];
else
x = zero;
LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*y - 0.25*x;
}
// Linear-combination step 2: in-place mixing of the double-high-pass
// subbands (HHL/HLH/LHH). In each subband two channels are combined as
// 0.5*(sum/difference), the third is rotated into the vacated slot, and the
// new "n" channel additionally receives 0.125 of the rotated channel (gated
// off on the first plane of the corresponding axis). Reads precede writes,
// so statement order within each section is load-bearing.
extern "C" __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
// 0/1 gates (bool -> scalar_t) for the boundary planes.
scalar_t xGreatZero,yGreatZero,zGreatZero;
if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
{
return;
}
//HHL: df1 <- (df1-df2)/2, df2 <- n, n <- (df1+df2)/2 + 0.125*n (gated on k)
x = HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext];
y = HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext];
z = HxHyLz_n[i+j*dxNext+k*dxNext*dyNext];
HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x-y);
HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z;
zGreatZero = k>0;
HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x+y) + zGreatZero*0.125*z;
//HLH: df1 <- (n-df1)/2, df2 stays, n <- (n+df1)/2 + 0.125*df2 (gated on j)
x = HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext];
y = HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext];
z = HxLyHz_n[i+j*dxNext+k*dxNext*dyNext];
HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z-x);
HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = y;
yGreatZero = j>0;
HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z+x) + yGreatZero*0.125*y;
//LHH: df1 <- df1 (rotated out), df2 <- (df2+n)/2 + 0.125*df1 ... (gated on i)
x = LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext];
y = LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext];
z = LxHyHz_n[i+j*dxNext+k*dxNext*dyNext];
LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y-z);
LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x;
xGreatZero = i>0;
LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y+z) + xGreatZero*0.125*x;
}
// Neighbour-difference correction for LC2: subtracts 0.125 of the value at
// the previous voxel along one axis from the "n" channel of each
// double-high-pass subband. As with LC1_diff, it reads from the same
// buffers LC2 writes, so it presumably runs as a separate launch after
// cu_fwt3df_LC2 -- TODO confirm against the host-side call order.
extern "C" __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
// Explicit complex zero used at the boundary planes.
_data_t zero = make_float2(0.f,0.f);
if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
{
return;
}
//HHL: n -= 0.125*df2(k-1)
if (k>0)
z = HxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext];
else
z = zero;
HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*z;
//HLH: n -= 0.125*df2(j-1)
if (j>0)
y = HxLyHz_df2[i+(j-1)*dxNext+k*dxNext*dyNext];
else
y = zero;
HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*y;
//LHH: n -= 0.125*df2(i-1)
if (i>0)
x = LxHyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext];
else
x = zero;
LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*x;
}
// Linear-combination step 3: in-place 3x3 mixing of the HHH subband's
// df1/df2/n channels. One thread per voxel; out-of-range threads do nothing.
extern "C" __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int j = blockIdx.y*blockDim.y+threadIdx.y;
    int k = blockIdx.z*blockDim.z+threadIdx.z;
    if ((i < dxNext) && (j < dyNext) && (k < dzNext)) {
        // Flattened voxel index, computed once for all six accesses.
        int idx = i + j*dxNext + k*dxNext*dyNext;
        //HHH
        _data_t a = HxHyHz_df1[idx];
        _data_t b = HxHyHz_df2[idx];
        _data_t c = HxHyHz_n[idx];
        // Apply the fixed mixing matrix (rows sum applied with factor 1/3).
        HxHyHz_df1[idx] = 1.0/3.0*(-2.0*a+b+c);
        HxHyHz_df2[idx] = 1.0/3.0*(2*b-a-c);
        HxHyHz_n[idx] = 1.0/3.0*(a+b+c);
    }
}
// ############################################################################
// CUDA function of iwt depth convolution.
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Scratchpad size: K x 2*dy
// Output: Lz/Hz
// Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_iwt3df_dep(_data_t *out, _data_t *Lz, _data_t *Hz, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset,int zOffset,scalar_t *lod, scalar_t *hid, int filterLen)
{
// Inverse-wavelet pass along z: upsampled (transposed) convolution of the
// low-pass (Lz) and high-pass (Hz) subbands, cropped by zOffset.
// Scratchpad holds both subbands for one (x,y) strip: deps[ti + k*K] is the
// low-pass column and deps[ti + (k+dz)*K] the high-pass column (2*dz*K
// elements, sized via the dynamic shared-memory launch argument).
extern __shared__ _data_t deps [];
int const K = blockDim.x;
int ti = threadIdx.x;
int tk = threadIdx.z;
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
// NOTE(review): early return before __syncthreads() -- only safe if dx
// keeps whole blocks in range; technically UB otherwise.
if (i>=dx){
return;
}
for (int k = tk; k < dz; k += blockDim.z){
deps[ti + k*K] = Lz[i + j*dx + k*dx*dy];
deps[ti + (k+dz)*K] = Hz[i + j*dx + k*dx*dy];
}
__syncthreads();
// Low-Pass and High Pass Downsample
int ind;
for (int k = tk+zOffset; k < dzNext+zOffset; k += blockDim.z){
// Zero accumulator of _data_t (type-agnostic idiom).
_data_t y = deps[0]-deps[0];
#pragma unroll
// After zero-upsampling only taps whose parity matches output index k
// contribute, hence f steps by 2 starting at the parity of k-(filterLen-1).
// NOTE(review): C '%' can yield -1 here for small k; any such tap gives
// ind < 0 and is rejected by the guard below, so lod/hid are never read
// at index filterLen (no out-of-bounds access).
for (int f = (k-(filterLen-1)) % 2; f < filterLen; f+=2){
ind = (k-(filterLen-1)+f)>>1;
if ((ind >= 0) && (ind < dz)) {
y += deps[ti + ind*K] * lod[filterLen-1-f];
y += deps[ti + (ind+dz)*K] * hid[filterLen-1-f];
}
}
// zOffset crops the reconstruction back to the expected output extent.
out[i + j*dx + (k-zOffset)*dx*dy] = y;
}
}
// ############################################################################
// CUDA function of iwt row convolution. Assumes fwt_col() has already been called.
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Scratchpad size: K x 2*dy
// Output: Lx/Hx
// Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_iwt3df_row(_data_t *out, _data_t *Ly, _data_t *Hy, int dx, int dy,int dz,int dxNext, int dyNext,int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen)
{
// Inverse-wavelet pass along y: upsampled convolution of Ly/Hy, cropped by
// yOffset. Scratchpad layout: rows[ti + j*K] low-pass, rows[ti + (j+dy)*K]
// high-pass (2*dy*K elements).
extern __shared__ _data_t rows [];
int const K = blockDim.x;
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x*blockDim.x+threadIdx.x;
int k = blockIdx.z*blockDim.z+threadIdx.z;
// NOTE(review): early return before __syncthreads(); see cu_iwt3df_dep.
if (i>=dx){
return;
}
for (int j = tj; j < dy; j += blockDim.y){
rows[ti + j*K] = Ly[i + j*dx + k*dx*dy];
rows[ti + (j+dy)*K] = Hy[i + j*dx + k*dx*dy];
}
__syncthreads();
// Low-Pass and High Pass Downsample
int ind;
for (int j = tj+yOffset; j < dyNext+yOffset; j += blockDim.y){
_data_t y = rows[0]-rows[0];
#pragma unroll
// Parity-matched taps only; a possible -1 start value is harmless because
// the ind >= 0 guard rejects those taps before any filter access.
for (int f = (j-(filterLen-1)) % 2; f < filterLen; f+=2){
ind = (j-(filterLen-1)+f)>>1;
if ((ind >= 0) && (ind < dy)) {
y += rows[ti + ind*K] * lod[filterLen-1-f];
y += rows[ti + (ind+dy)*K] * hid[filterLen-1-f];
}
}
out[i + (j-yOffset)*dx + k*dx*dyNext] = y;
}
}
// ############################################################################
// CUDA function of iwt column convolution
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Scratchpad size: 2*dx x K
// Output: out
// Input: Lx, Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_iwt3df_col(_data_t *out, _data_t *Lx, _data_t *Hx, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen)
{
// Inverse-wavelet pass along x: upsampled convolution of Lx/Hx, cropped by
// xOffset. Scratchpad layout per y-thread: cols[0..dx) low-pass and
// cols[dx..2*dx) high-pass, stride 2*dx between tj columns.
extern __shared__ _data_t cols [];
int ti = threadIdx.x;
int tj = threadIdx.y;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
// NOTE(review): early return before __syncthreads(); see cu_iwt3df_dep.
if (j>=dyNext){
return;
}
int dx2 = 2*dx;
// Load Input to Temp Array
for (int i = ti; i < dx; i += blockDim.x){
cols[i + tj*dx2] = Lx[i + j*dx + k*dx*dyNext];
cols[dx+i + tj*dx2] = Hx[i + j*dx + k*dx*dyNext];
}
__syncthreads();
// Low-Pass and High Pass Downsample
int ind;
for (int i = ti+xOffset; i < dxNext+xOffset; i += blockDim.x){
_data_t y = cols[0]-cols[0];
#pragma unroll
// Parity-matched taps only; a possible -1 start value is rejected by the
// ind >= 0 guard before any filter access.
for (int f = (i-(filterLen-1)) % 2; f < filterLen; f+=2){
ind = (i-(filterLen-1)+f)>>1;
if (ind >= 0 && ind < dx) {
y += cols[ind + tj*dx2] * lod[filterLen-1-f];
y += cols[dx+ind + tj*dx2] * hid[filterLen-1-f];
}
}
out[(i-xOffset) + j*dxNext + k*dxNext*dyNext] = y;
}
}
// Inverse of cu_fwt3df_LC1: undoes the channel permutation of the HLL/LHL/
// LLH subbands and subtracts the gated 0.25 neighbour terms that the forward
// step added. Reads precede writes; statement order is load-bearing.
extern "C" __global__ void cu_iwt3df_LC1 (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t df1,df2,n;
// 0/1 gates for the boundary planes (bool -> scalar_t).
scalar_t xGreatZero,yGreatZero,zGreatZero;
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HLL: df2 <- df1, n <- df2, df1 <- n - 0.25*df1 - 0.25*df2 (gated)
df1 = HxLyLz_df1[i+j*dx+k*dx*dy];
df2 = HxLyLz_df2[i+j*dx+k*dx*dy];
n = HxLyLz_n[i+j*dx+k*dx*dy];
HxLyLz_df2[i+j*dx+k*dx*dy] = df1;
HxLyLz_n[i+j*dx+k*dx*dy] = df2;
yGreatZero = j>0;
zGreatZero = k>0;
HxLyLz_df1[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - zGreatZero*0.25*df2;
//LHL: df1 unchanged, n <- df2, df2 <- n - 0.25*df1 - 0.25*df2 (gated)
df1 = LxHyLz_df1[i+j*dx+k*dx*dy];
df2 = LxHyLz_df2[i+j*dx+k*dx*dy];
n = LxHyLz_n[i+j*dx+k*dx*dy];
LxHyLz_n[i+j*dx+k*dx*dy] = df2;
xGreatZero = i>0;
zGreatZero = k>0;
LxHyLz_df2[i+j*dx+k*dx*dy] = n - xGreatZero*0.25*df1 - zGreatZero*0.25*df2;
//LLH: df1 <-> df2 swapped back, n <- n - 0.25*df1 - 0.25*df2 (gated)
df1 = LxLyHz_df1[i+j*dx+k*dx*dy];
df2 = LxLyHz_df2[i+j*dx+k*dx*dy];
n = LxLyHz_n[i+j*dx+k*dx*dy];
LxLyHz_df1[i+j*dx+k*dx*dy] = df2;
LxLyHz_df2[i+j*dx+k*dx*dy] = df1;
yGreatZero = j>0;
xGreatZero = i>0;
LxLyHz_n[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - xGreatZero*0.25*df2;
}
// Inverse counterpart of cu_fwt3df_LC1_diff: adds back the gated 0.25
// neighbour-difference terms. Reads values from adjacent voxels in the same
// buffers, so it presumably runs as a separate launch relative to
// cu_iwt3df_LC1 -- TODO confirm against the host-side call order.
extern "C" __global__ void cu_iwt3df_LC1_diff (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
// Explicit complex zero used at the boundary planes.
_data_t zero = make_float2(0.f,0.f);
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HLL: df1 += 0.25*df2(j-1) + 0.25*n(k-1)
if (j>0)
y = HxLyLz_df2[i+(j-1)*dx+k*dx*dy];
else
y = zero;
if (k>0)
z = HxLyLz_n[i+j*dx+(k-1)*dx*dy];
else
z = zero;
HxLyLz_df1[i+j*dx+k*dx*dy] += 0.25*y + 0.25*z;
//LHL: df2 += 0.25*df1(i-1) + 0.25*n(k-1)
if (i>0)
x = LxHyLz_df1[(i-1)+j*dx+k*dx*dy];
else
x = zero;
if (k>0)
z = LxHyLz_n[i+j*dx+(k-1)*dx*dy];
else
z = zero;
LxHyLz_df2[i+j*dx+k*dx*dy] += 0.25*x + 0.25*z;
//LLH: n += 0.25*df2(j-1) + 0.25*df1(i-1)
if (j>0)
y = LxLyHz_df2[i+(j-1)*dx+k*dx*dy];
else
y = zero;
if (i>0)
x = LxLyHz_df1[(i-1)+j*dx+k*dx*dy];
else
x = zero;
LxLyHz_n[i+j*dx+k*dx*dy] += 0.25*y + 0.25*x;
}
// Inverse of cu_fwt3df_LC2: reconstructs the original channels of the
// double-high-pass subbands (HHL/HLH/LHH) from the mixed df1/df2/n values,
// subtracting the gated 0.125 term. Reads precede writes; order matters.
extern "C" __global__ void cu_iwt3df_LC2 (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t df1,df2,n;
// 0/1 gates for the boundary planes (bool -> scalar_t).
scalar_t xGreatZero,yGreatZero,zGreatZero;
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HHL: n <- df2; df1 <- df1+n-0.125*df2, df2 <- n-df1-0.125*df2 (gated on k)
df1 = HxHyLz_df1[i+j*dx+k*dx*dy];
df2 = HxHyLz_df2[i+j*dx+k*dx*dy];
n = HxHyLz_n[i+j*dx+k*dx*dy];
HxHyLz_n[i+j*dx+k*dx*dy] = df2;
zGreatZero = k>0;
HxHyLz_df1[i+j*dx+k*dx*dy] = df1+n-zGreatZero*0.125*df2;
HxHyLz_df2[i+j*dx+k*dx*dy] = n-df1-zGreatZero*0.125*df2;
//HLH: df2 stays; n <- df1+n-0.125*df2, df1 <- n-df1-0.125*df2 (gated on j)
df1 = HxLyHz_df1[i+j*dx+k*dx*dy];
df2 = HxLyHz_df2[i+j*dx+k*dx*dy];
n = HxLyHz_n[i+j*dx+k*dx*dy];
HxLyHz_df2[i+j*dx+k*dx*dy] = df2;
yGreatZero = j>0;
HxLyHz_n[i+j*dx+k*dx*dy] = df1+n-yGreatZero*0.125*df2;
HxLyHz_df1[i+j*dx+k*dx*dy] = n-df1-yGreatZero*0.125*df2;
//LHH: df1 <- df2; df2 <- df1+n-0.125*df2, n <- n-df1-0.125*df2 (gated on i)
df1 = LxHyHz_df1[i+j*dx+k*dx*dy];
df2 = LxHyHz_df2[i+j*dx+k*dx*dy];
n = LxHyHz_n[i+j*dx+k*dx*dy];
LxHyHz_df1[i+j*dx+k*dx*dy] = df2;
xGreatZero = i>0;
LxHyHz_df2[i+j*dx+k*dx*dy] = df1+n-xGreatZero*0.125*df2;
LxHyHz_n[i+j*dx+k*dx*dy] = n-df1-xGreatZero*0.125*df2;
}
// Inverse counterpart of cu_fwt3df_LC2_diff: adds back 0.125 of the value
// at the previous voxel along one axis to two channels of each subband.
// Reads neighbouring voxels of the same buffers, so it presumably runs as a
// separate launch relative to cu_iwt3df_LC2 -- TODO confirm call order.
extern "C" __global__ void cu_iwt3df_LC2_diff (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
// Explicit complex zero used at the boundary planes.
_data_t zero = make_float2(0.f,0.f);
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HHL: df1 += 0.125*n(k-1); df2 += 0.125*n(k-1)
if (k>0)
z = HxHyLz_n[i+j*dx+(k-1)*dx*dy];
else
z = zero;
HxHyLz_df1[i+j*dx+k*dx*dy] += 0.125*z;
HxHyLz_df2[i+j*dx+k*dx*dy] += 0.125*z;
//HLH: df1 += 0.125*df2(j-1); n += 0.125*df2(j-1)
if (j>0)
y = HxLyHz_df2[i+(j-1)*dx+k*dx*dy];
else
y = zero;
HxLyHz_df1[i+j*dx+k*dx*dy] += 0.125*y;
HxLyHz_n[i+j*dx+k*dx*dy] += 0.125*y;
//LHH: df2 += 0.125*df1(i-1); n += 0.125*df1(i-1)
if (i>0)
x = LxHyHz_df1[(i-1)+j*dx+k*dx*dy];
else
x = zero;
LxHyHz_df2[i+j*dx+k*dx*dy] += 0.125*x;
LxHyHz_n[i+j*dx+k*dx*dy] += 0.125*x;
}
// Inverse of cu_fwt3df_LC3: in-place 3x3 un-mixing of the HHH subband's
// df1/df2/n channels. One thread per voxel; out-of-range threads do nothing.
extern "C" __global__ void cu_iwt3df_LC3 (_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int j = blockIdx.y*blockDim.y+threadIdx.y;
    int k = blockIdx.z*blockDim.z+threadIdx.z;
    if ((i < dx) && (j < dy) && (k < dz)) {
        // Flattened voxel index, computed once for all six accesses.
        int idx = i + j*dx + k*dx*dy;
        //HHH
        _data_t a = HxHyHz_df1[idx];
        _data_t b = HxHyHz_df2[idx];
        _data_t c = HxHyHz_n[idx];
        // Inverse mixing: values are read above before any slot is written.
        HxHyHz_df1[idx] = c-a;
        HxHyHz_df2[idx] = b+c;
        HxHyHz_n[idx] = a-b+c;
    }
}
// Scale every element of `in` by the (complex) factor `mult`, one thread
// per element.
// NOTE(review): indices up to and including maxInd are processed, so maxInd
// is treated as the last valid index -- confirm callers pass count-1.
extern "C" __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd)
{
    int ind = blockIdx.x*blockDim.x+threadIdx.x;
    if (ind <= maxInd)
        in[ind] = in[ind]*mult;
}
// Linearly interpolate `out` towards `in`: out[ind] += (in[ind]-out[ind])*mult.
// BUG FIX: the original computed `i + (out[ind]-i)*mult` with i = out[ind],
// which is algebraically a no-op and never read the `in` parameter at all;
// `in[ind]` was clearly intended.
// NOTE(review): as in cu_mult, maxInd is treated as the last valid index
// (inclusive) -- confirm callers pass count-1.
extern "C" __global__ void cu_add_mult(_data_t* out, _data_t* in, _data_t mult, int maxInd)
{
    int ind = blockIdx.x*blockDim.x+threadIdx.x;
    if (ind > maxInd)
    {
        return;
    }
    _data_t i = out[ind];
    out[ind] = i+(in[ind]-i)*mult;
}
// Complex soft-thresholding: shrink each coefficient's magnitude by
// `thresh`, zeroing coefficients whose magnitude does not exceed it.
// Division by zero cannot occur: when mag == 0 (and thresh >= 0) the
// shrunken magnitude is not positive, so the zero branch is taken.
__global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax)
{
    int const i = threadIdx.x + blockDim.x*blockIdx.x;
    // NOTE(review): inclusive bound -- numMax is the last valid index.
    if (i>numMax)
        return;
    scalar_t mag = abs(in[i]);      // custom abs(_data_t): complex magnitude
    scalar_t shrunk = mag - thresh;
    if (shrunk > 0.f)
        in[i] = (shrunk / mag) * (in[i]);   // keep phase, shrink magnitude
    else
        in[i] = in[i]-in[i];                // exact complex zero
}
// Scatter `dataCopy` into `data` with a circular offset of
// shift1 + shift2*dx + shift3*dx*dy taken modulo the total volume size.
// NOTE(review): the wrap is over the flat index, not per dimension;
// cu_circunshift applies the exact inverse, so the pair is self-consistent.
__global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3) {
    int total = dx*dy*dz;
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < total) {
        int shifted = (index+shift1+shift2*dx+shift3*dx*dy)%total;
        data[shifted] = dataCopy[index];
    }
}
// Gather version of cu_circshift: reads `dataCopy` at the circularly
// shifted flat index, exactly inverting cu_circshift for the same shifts.
__global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3) {
    int total = dx*dy*dz;
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < total) {
        int shifted = (index+shift1+shift2*dx+shift3*dx*dy)%total;
        data[index] = dataCopy[shifted];
    }
}
| ac1039ecc10b4ad08fd47a5438887faacd57d550.cu | /*
* Copyright 2013-2015 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013 Frank Ong, Martin Uecker, Pat Virtue, and Mark Murphy
* frankong@berkeley.edu
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <assert.h>
#include <cuda.h>
#include "num/multind.h"
#include "num/gpuops.h"
#include "dfwavelet_kernels.h"
#include "dfwavelet_impl.h"
# define _hdev_ __host__ __device__
// _data_t is the interal representation of data_t in CUDA
// Must be float2/double2 for data_t=Complex float/double or float/double for data_t=float/double
typedef float2 _data_t;
// Float2 Operators
inline _hdev_ float2 operator+ (float2 z1, float2 z2) {
return make_float2 (z1.x + z2.x, z1.y + z2.y);
}
inline _hdev_ float2 operator- (float2 z1, float2 z2) {
return make_float2 (z1.x - z2.x, z1.y - z2.y);
}
inline _hdev_ float2 operator* (float2 z1, float2 z2) {
return make_float2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x);
}
inline _hdev_ float2 operator* (float2 z1, float alpha) {
return make_float2 (z1.x*alpha, z1.y*alpha);
}
inline _hdev_ float2 operator* (float alpha,float2 z1) {
return make_float2 (z1.x*alpha, z1.y*alpha);
}
inline _hdev_ float2 operator/ (float alpha,float2 z1) {
return make_float2 (1.f/z1.x, 1.f/z1.y);
}
inline _hdev_ void operator+= (float2 &z1, float2 z2) {
z1.x += z2.x;
z1.y += z2.y;
}
inline _hdev_ float abs(float2 z1) {
return sqrt(z1.x*z1.x + z1.y*z1.y);
}
// Double2 Operators
inline _hdev_ double2 operator+ (double2 z1, double2 z2) {
return make_double2 (z1.x + z2.x, z1.y + z2.y);
}
inline _hdev_ double2 operator- (double2 z1, double2 z2) {
return make_double2 (z1.x - z2.x, z1.y - z2.y);
}
inline _hdev_ double2 operator* (double2 z1, double2 z2) {
return make_double2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x);
}
inline _hdev_ double2 operator* (double2 z1, double alpha) {
return make_double2 (z1.x*alpha, z1.y*alpha);
}
inline _hdev_ double2 operator* (double alpha,double2 z1) {
return make_double2 (z1.x*alpha, z1.y*alpha);
}
inline _hdev_ double2 operator/ (double alpha,double2 z1) {
return make_double2 (1.f/z1.x, 1.f/z1.y);
}
inline _hdev_ void operator+= (double2 &z1, double2 z2) {
z1.x += z2.x;
z1.y += z2.y;
}
inline _hdev_ double abs(double2 z1) {
return sqrt(z1.x*z1.x + z1.y*z1.y);
}
/********** Macros ************/
#define cuda(Call) do { \
cudaError_t err = cuda ## Call ; \
if (err != cudaSuccess){ \
fprintf(stderr, "%s\n", cudaGetErrorString(err)); \
throw; \
} \
} while(0)
#define cuda_sync() do{ \
cuda (DeviceSynchronize()); \
cuda (GetLastError()); \
} while(0)
/********** Macros ************/
#define cuda(Call) do { \
cudaError_t err = cuda ## Call ; \
if (err != cudaSuccess){ \
fprintf(stderr, "%s\n", cudaGetErrorString(err)); \
throw; \
} \
} while(0)
#define cuda_sync() do{ \
cuda (DeviceSynchronize()); \
cuda (GetLastError()); \
} while(0)
// ############################################################################
// Headers
// ############################################################################
static __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t *Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_iwt3df_dep(_data_t *out,_data_t *Lz,_data_t *Hz,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_iwt3df_row(_data_t *out,_data_t *Ly,_data_t *Hy,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_iwt3df_col(_data_t *out,_data_t *Lx,_data_t *Hx,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen);
static __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext);
static __global__ void cu_iwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz);
static __global__ void cu_iwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz);
static __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd);
static __global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax);
static __global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3);
static __global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3);
extern "C" void dffwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
assert(plan->use_gpu==2);
data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz;
cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) ));
cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) ));
dffwt3_gpu(plan,dev_wcdf1,dev_wcdf2,dev_wcn,dev_vx,dev_vy,dev_vz);
cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_wcn, dev_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Free( dev_wcdf1 ));
cuda(Free( dev_wcdf2 ));
cuda(Free( dev_wcn ));
cuda(Free( dev_vx ));
cuda(Free( dev_vy ));
cuda(Free( dev_vz ));
}
extern "C" void dfiwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
assert(plan->use_gpu==2);
data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz;
cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) ));
cuda(Memcpy( dev_wcdf1, in_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_wcdf2, in_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_wcn, in_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) ));
dfiwt3_gpu(plan,dev_vx,dev_vy,dev_vz,dev_wcdf1,dev_wcdf2,dev_wcn);
cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Free( dev_wcdf1 ));
cuda(Free( dev_wcdf2 ));
cuda(Free( dev_wcn ));
cuda(Free( dev_vx ));
cuda(Free( dev_vy ));
cuda(Free( dev_vz ));
}
extern "C" void dfsoftthresh_gpuHost(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn)
{
assert(plan->use_gpu==2);
data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn;
cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) ));
cuda(Memcpy( dev_wcdf1, out_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_wcdf2, out_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_wcn, out_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice ));
dfsoftthresh_gpu(plan,dfthresh,nthresh,dev_wcdf1,dev_wcdf2,dev_wcn);
cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_wcn, dev_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Free( dev_wcdf1 ));
cuda(Free( dev_wcdf2 ));
cuda(Free( dev_wcn ));
}
extern "C" void dfwavthresh3_gpuHost(struct dfwavelet_plan_s* plan, scalar_t dfthresh,scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
assert(plan->use_gpu==2);
data_t*dev_vx,*dev_vy,*dev_vz;
cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) ));
cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) ));
cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice ));
dfwavthresh3_gpu(plan,dfthresh,nthresh,dev_vx,dev_vy,dev_vz,dev_vx,dev_vy,dev_vz);
cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost ));
cuda(Free( dev_vx ));
cuda(Free( dev_vy ));
cuda(Free( dev_vz ));
}
extern "C" void dffwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
circshift_gpu(plan,in_vx);
circshift_gpu(plan,in_vy);
circshift_gpu(plan,in_vz);
long numCoeff, filterLen,*waveSizes;
numCoeff = plan->numCoeff;
waveSizes = plan->waveSizes;
filterLen = plan->filterLen;
int numLevels = plan->numLevels;
// Cast from generic data_t to device compatible _data_t
_data_t* dev_wcdf1 = (_data_t*) out_wcdf1;
_data_t* dev_wcdf2 = (_data_t*) out_wcdf2;
_data_t* dev_wcn = (_data_t*) out_wcn;
_data_t* dev_in_vx = (_data_t*) in_vx;
_data_t* dev_in_vy = (_data_t*) in_vy;
_data_t* dev_in_vz = (_data_t*) in_vz;
_data_t* res = (_data_t*) plan->res;
_data_t* dev_temp1,*dev_temp2;
cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) ));
cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t) ));
// Get dimensions
int dx = plan->imSize[0];
int dy = plan->imSize[1];
int dz = plan->imSize[2];
int dxNext = waveSizes[0 + 3*numLevels];
int dyNext = waveSizes[1 + 3*numLevels];
int dzNext = waveSizes[2 + 3*numLevels];
int blockSize = dxNext*dyNext*dzNext;
// allocate device memory and copy filters to device
scalar_t *dev_filters;
cuda(Malloc( (void**)&dev_filters, 4*plan->filterLen*sizeof(scalar_t) ));
scalar_t *dev_lod0 = dev_filters + 0*plan->filterLen;
scalar_t *dev_hid0 = dev_filters + 1*plan->filterLen;
scalar_t *dev_lod1 = dev_filters + 2*plan->filterLen;
scalar_t *dev_hid1 = dev_filters + 3*plan->filterLen;
cuda(Memcpy( dev_lod0, plan->lod0, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_lod1, plan->lod1, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice ));
// Initialize variables and Pointers for FWT
int const SHMEM_SIZE = 16384;
int const T = 512;
int mem, K;
dim3 numBlocks, numThreads;
// Temp Pointers
_data_t *dev_tempLx,*dev_tempHx;
dev_tempLx = dev_temp1;
dev_tempHx = dev_tempLx + numCoeff/2;
_data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy;
dev_tempLxLy = dev_temp2;
dev_tempHxLy = dev_tempLxLy + numCoeff/4;
dev_tempLxHy = dev_tempHxLy + numCoeff/4;
dev_tempHxHy = dev_tempLxHy + numCoeff/4;
// wcdf1 Pointers
_data_t *dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx;
dev_LxLyLz_df1 = dev_wcdf1;
dev_HxLyLz_df1 = dev_LxLyLz_df1 + waveSizes[0]*waveSizes[1]*waveSizes[2];
for (int l = 1; l <= numLevels; ++l){
dev_HxLyLz_df1 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l];
}
dev_current_vx = dev_in_vx;
// wcdf2 Pointers
_data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy;
dev_LxLyLz_df2 = dev_wcdf2;
dev_HxLyLz_df2 = dev_LxLyLz_df2 + waveSizes[0]*waveSizes[1]*waveSizes[2];
for (int l = 1; l <= numLevels; ++l){
dev_HxLyLz_df2 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l];
}
dev_current_vy = dev_in_vy;
// wcn Pointers
_data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz;
dev_LxLyLz_n = dev_wcn;
dev_HxLyLz_n = dev_LxLyLz_n + waveSizes[0]*waveSizes[1]*waveSizes[2];
for (int l = 1; l <= numLevels; ++l){
dev_HxLyLz_n += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l];
}
dev_current_vz = dev_in_vz;
//*****************Loop through levels****************
for (int l = numLevels; l >= 1; --l)
{
dxNext = waveSizes[0 + 3*l];
dyNext = waveSizes[1 + 3*l];
dzNext = waveSizes[2 + 3*l];
blockSize = dxNext*dyNext*dzNext;
// Update Pointers
// df1
dev_HxLyLz_df1 = dev_HxLyLz_df1 - 7*blockSize;
dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize;
dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize;
dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize;
dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize;
dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize;
dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize;
// df2
dev_HxLyLz_df2 = dev_HxLyLz_df2 - 7*blockSize;
dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize;
dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize;
dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize;
dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize;
dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize;
dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize;
// n
dev_HxLyLz_n = dev_HxLyLz_n - 7*blockSize;
dev_LxHyLz_n = dev_HxLyLz_n + blockSize;
dev_HxHyLz_n = dev_LxHyLz_n + blockSize;
dev_LxLyHz_n = dev_HxHyLz_n + blockSize;
dev_HxLyHz_n = dev_LxLyHz_n + blockSize;
dev_LxHyHz_n = dev_HxLyHz_n + blockSize;
dev_HxHyHz_n = dev_LxHyHz_n + blockSize;
//************WCVX***********
// FWT Columns
K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t));
numBlocks = dim3(1,(dy+K-1)/K,dz);
numThreads = dim3(T/K,K,1);
mem = K*dx*sizeof(_data_t);
cu_fwt3df_col <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLx,dev_tempHx,dev_current_vx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cuda_sync();
// FWT Rows
K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,1,dz);
numThreads = dim3(K,T/K,1);
mem = K*dy*sizeof(_data_t);
cu_fwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Depths
K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1);
numThreads = dim3(K,1,T/K);
mem = K*dz*sizeof(_data_t);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_LxLyLz_df1,dev_LxLyHz_df1,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_LxHyLz_df1,dev_LxHyHz_df1,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_HxLyLz_df1,dev_HxLyHz_df1,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_HxHyLz_df1,dev_HxHyHz_df1,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
//************WCVY***********
// FWT Columns
K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t));
numBlocks = dim3(1,(dy+K-1)/K,dz);
numThreads = dim3(T/K,K,1);
mem = K*dx*sizeof(_data_t);
cu_fwt3df_col <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLx,dev_tempHx,dev_current_vy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Rows
K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,1,dz);
numThreads = dim3(K,T/K,1);
mem = K*dy*sizeof(_data_t);
cu_fwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cu_fwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cuda_sync();
// FWT Depths
K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1);
numThreads = dim3(K,1,T/K);
mem = K*dz*sizeof(_data_t);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_LxLyLz_df2,dev_LxLyHz_df2,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_LxHyLz_df2,dev_LxHyHz_df2,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_HxLyLz_df2,dev_HxLyHz_df2,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_HxHyLz_df2,dev_HxHyHz_df2,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
//************WCVZ***********
// FWT Columns
K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t));
numBlocks = dim3(1,(dy+K-1)/K,dz);
numThreads = dim3(T/K,K,1);
mem = K*dx*sizeof(_data_t);
cu_fwt3df_col <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLx,dev_tempHx,dev_current_vz,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Rows
K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,1,dz);
numThreads = dim3(K,T/K,1);
mem = K*dy*sizeof(_data_t);
cu_fwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cu_fwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen);
cuda_sync();
// FWT Depths
K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t));
numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1);
numThreads = dim3(K,1,T/K);
mem = K*dz*sizeof(_data_t);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_LxLyLz_n,dev_LxLyHz_n,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_LxHyLz_n,dev_LxHyHz_n,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_HxLyLz_n,dev_HxLyHz_n,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cu_fwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_HxHyLz_n,dev_HxHyHz_n,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen);
cuda_sync();
//******* Multi ******
int maxInd = 7*blockSize;
numThreads = T;
numBlocks = (maxInd+numThreads.x-1)/numThreads.x;
cu_mult <<< numBlocks, numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df1,1.f/res[0],maxInd);
cu_mult <<< numBlocks, numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df2,1.f/res[1],maxInd);
cu_mult <<< numBlocks, numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_n,1.f/res[2],maxInd);
cuda_sync();
//*******Linear Combination******
int t1 = min(dxNext,T);
int t2 = T/t1;
numBlocks = dim3( (dxNext+t1-1)/t1, (dyNext+t2-1)/t2, dzNext);
numThreads = dim3(t1,t2,1);
cu_fwt3df_LC1 <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext);
cu_fwt3df_LC2 <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext);
cu_fwt3df_LC3 <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dxNext,dyNext,dzNext);
cuda_sync();
cu_fwt3df_LC1_diff <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext);
cu_fwt3df_LC2_diff <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext);
cuda_sync();
dev_current_vx = dev_wcdf1;
dev_current_vy = dev_wcdf2;
dev_current_vz = dev_wcn;
dx = dxNext;
dy = dyNext;
dz = dzNext;
}
cuda(Free( dev_filters ));
cuda(Free( dev_temp1 ));
cuda(Free( dev_temp2 ));
circunshift_gpu(plan,in_vx);
circunshift_gpu(plan,in_vy);
circunshift_gpu(plan,in_vz);
}
/*
 * dfiwt3_gpu - 3-D inverse divergence-free wavelet transform (GPU).
 *
 * Reconstructs the three velocity components out_vx/out_vy/out_vz from the
 * divergence-free coefficient arrays in_wcdf1/in_wcdf2 and the
 * non-divergence-free array in_wcn, walking levels from coarsest (level 1)
 * to finest (numLevels). All data_t pointers are device pointers; two
 * temporary device buffers and the reconstruction filters are allocated and
 * freed per call. Finishes by undoing the random circular shift that the
 * forward transform applied.
 */
extern "C" void dfiwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
long numCoeff, filterLen,*waveSizes;
numCoeff = plan->numCoeff;
waveSizes = plan->waveSizes;
filterLen = plan->filterLen;
int numLevels = plan->numLevels;
// Cast from generic data_t to device compatible _data_t
_data_t* dev_out_vx = (_data_t*)out_vx;
_data_t* dev_out_vy = (_data_t*)out_vy;
_data_t* dev_out_vz = (_data_t*)out_vz;
_data_t* dev_wcdf1 = (_data_t*)in_wcdf1;
_data_t* dev_wcdf2 = (_data_t*)in_wcdf2;
_data_t* dev_wcn = (_data_t*)in_wcn;
// Per-axis voxel resolution used to rescale the detail bands below.
_data_t* res = (_data_t*) plan->res;
_data_t* dev_temp1, *dev_temp2;
cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) ));
cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t)) );
// allocate device memory
scalar_t *dev_filters;
cuda(Malloc( (void**)&dev_filters, 4*(plan->filterLen)*sizeof(scalar_t) ));
scalar_t *dev_lor0 = dev_filters + 0*plan->filterLen;
scalar_t *dev_hir0 = dev_filters + 1*plan->filterLen;
scalar_t *dev_lor1 = dev_filters + 2*plan->filterLen;
scalar_t *dev_hir1 = dev_filters + 3*plan->filterLen;
// Each Memcpy copies two filters at once (lo+hi are contiguous on the host).
cuda(Memcpy( dev_lor0, plan->lor0, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice ));
cuda(Memcpy( dev_lor1, plan->lor1, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice ));
// Workspace dimensions
// NOTE(review): these three values are recomputed inside the level loop
// before first use, so the initializers here are dead stores.
int dxWork = waveSizes[0 + 3*numLevels]*2-1 + filterLen-1;
int dyWork = waveSizes[1 + 3*numLevels]*2-1 + filterLen-1;
int dzWork = waveSizes[2 + 3*numLevels]*2-1 + filterLen-1;
// Initialize variables and pointers for IWT
int const SHMEM_SIZE = 16384;
int const T = 512;
int mem,K;
dim3 numBlocks, numThreads;
int dx = waveSizes[0];
int dy = waveSizes[1];
int dz = waveSizes[2];
// Temp Pointers: temp1 holds the four depth-reconstructed subbands,
// temp2 holds the two row-reconstructed halves.
_data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy;
dev_tempLxLy = dev_temp1;
dev_tempHxLy = dev_tempLxLy + numCoeff/4;
dev_tempLxHy = dev_tempHxLy + numCoeff/4;
dev_tempHxHy = dev_tempLxHy + numCoeff/4;
_data_t *dev_tempLx,*dev_tempHx;
dev_tempLx = dev_temp2;
dev_tempHx = dev_tempLx + numCoeff/2;
// wcdf1 Pointers: coarse block first, then 7 detail subbands per level.
_data_t *dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx;
dev_LxLyLz_df1 = dev_wcdf1;
dev_HxLyLz_df1 = dev_LxLyLz_df1 + dx*dy*dz;
dev_current_vx = dev_LxLyLz_df1;
// wcdf2 Pointers
_data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy;
dev_LxLyLz_df2 = dev_wcdf2;
dev_HxLyLz_df2 = dev_LxLyLz_df2 + dx*dy*dz;
dev_current_vy = dev_LxLyLz_df2;
// wcn Pointers
_data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz;
dev_LxLyLz_n = dev_wcn;
dev_HxLyLz_n = dev_LxLyLz_n + dx*dy*dz;
dev_current_vz = dev_LxLyLz_n;
// Reconstruct level by level, coarsest to finest.
for (int level = 1; level < numLevels+1; ++level)
{
dx = waveSizes[0 + 3*level];
dy = waveSizes[1 + 3*level];
dz = waveSizes[2 + 3*level];
int blockSize = dx*dy*dz;
// NOTE(review): at level == numLevels this reads waveSizes[3*(numLevels+1)];
// assumes waveSizes holds numLevels+2 entries per dimension (the last being
// the full image size) — TODO confirm against the plan construction.
int dxNext = waveSizes[0+3*(level+1)];
int dyNext = waveSizes[1+3*(level+1)];
int dzNext = waveSizes[2+3*(level+1)];
// Calculate offset: the full convolution result is larger than the target
// size, so reconstruction reads a centered crop of the workspace.
dxWork = (2*dx-1 + filterLen-1);
dyWork = (2*dy-1 + filterLen-1);
dzWork = (2*dz-1 + filterLen-1);
int xOffset = (int) floor((dxWork - dxNext) / 2.0);
int yOffset = (int) floor((dyWork - dyNext) / 2.0);
int zOffset = (int) floor((dzWork - dzNext) / 2.0);
// Update Pointers
// df1
dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize;
dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize;
dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize;
dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize;
dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize;
dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize;
// df2
dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize;
dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize;
dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize;
dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize;
dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize;
dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize;
// n
dev_LxHyLz_n = dev_HxLyLz_n + blockSize;
dev_HxHyLz_n = dev_LxHyLz_n + blockSize;
dev_LxLyHz_n = dev_HxHyLz_n + blockSize;
dev_HxLyHz_n = dev_LxLyHz_n + blockSize;
dev_LxHyHz_n = dev_HxLyHz_n + blockSize;
dev_HxHyHz_n = dev_LxHyHz_n + blockSize;
//*******Linear Combination******
// Undo the subband mixing applied by the forward transform's cu_fwt3df_LC*.
int t1 = min(dxNext,T);
int t2 = T/t1;
numBlocks = dim3( (dx+t1-1)/t1, (dy+t2-1)/t2, dz);
numThreads = dim3(t1,t2,1);
cu_iwt3df_LC1 <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz);
cu_iwt3df_LC2 <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz);
cu_iwt3df_LC3 <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dx,dy,dz);
cuda_sync();
cu_iwt3df_LC1_diff <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz);
cu_iwt3df_LC2_diff <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz);
cuda_sync();
//******* Multi ******
// Rescale the 7 detail subbands by res[] — inverse of the 1/res scaling
// applied in the forward transform.
int maxInd = 7*blockSize;
numThreads = T;
numBlocks = (maxInd+numThreads.x-1)/numThreads.x;
cu_mult <<< numBlocks, numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df1,res[0],maxInd);
cu_mult <<< numBlocks, numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_df2,res[1],maxInd);
cu_mult <<< numBlocks, numThreads, 0, cuda_get_stream() >>> (dev_HxLyLz_n,res[2],maxInd);
cuda_sync();
//************WCX************
// Inverse transform for the vx component: depth, then rows, then columns.
// vx uses the second filter pair (lor1/hir1) along its own axis (x/columns),
// mirroring the forward transform's filter assignment.
// Update Pointers
if (level==numLevels)
dev_current_vx = dev_out_vx;
// IWT Depths
K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,dy,1);
numThreads = dim3(K,1,(T/K));
mem = K*2*dz*sizeof(_data_t);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxLy,dev_LxLyLz_df1,dev_LxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxLy,dev_HxLyLz_df1,dev_HxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxHy,dev_LxHyLz_df1,dev_LxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxHy,dev_HxHyLz_df1,dev_HxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cuda_sync();
// IWT Rows
K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,1,dzNext);
numThreads = dim3(K,(T/K),1);
mem = K*2*dy*sizeof(_data_t);
cu_iwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cu_iwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
// IWT Columns
K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t));
numBlocks = dim3(1,(dyNext+K-1)/K,dzNext);
numThreads = dim3((T/K),K,1);
mem = K*2*dx*sizeof(_data_t);
cu_iwt3df_col <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_current_vx,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen);
cuda_sync();
//************WCY************
// Inverse transform for the vy component: lor1/hir1 along y (rows).
// Update Pointers
if (level==numLevels)
dev_current_vy = dev_out_vy;
// IWT Depths
K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,dy,1);
numThreads = dim3(K,1,(T/K));
mem = K*2*dz*sizeof(_data_t);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxLy,dev_LxLyLz_df2,dev_LxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxLy,dev_HxLyLz_df2,dev_HxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxHy,dev_LxHyLz_df2,dev_LxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxHy,dev_HxHyLz_df2,dev_HxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen);
cuda_sync();
// IWT Rows
K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,1,dzNext);
numThreads = dim3(K,(T/K),1);
mem = K*2*dy*sizeof(_data_t);
cu_iwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen);
cu_iwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen);
cuda_sync();
// IWT Columns
K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t));
numBlocks = dim3(1,(dyNext+K-1)/K,dzNext);
numThreads = dim3((T/K),K,1);
mem = K*2*dx*sizeof(_data_t);
cu_iwt3df_col <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_current_vy,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
//************WCZ************
// Inverse transform for the vz component: lor1/hir1 along z (depths).
// Update Pointers
if (level==numLevels)
dev_current_vz = dev_out_vz;
// IWT Depths
K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,dy,1);
numThreads = dim3(K,1,(T/K));
mem = K*2*dz*sizeof(_data_t);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxLy,dev_LxLyLz_n,dev_LxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxLy,dev_HxLyLz_n,dev_HxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLxHy,dev_LxHyLz_n,dev_LxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
cu_iwt3df_dep <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHxHy,dev_HxHyLz_n,dev_HxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen);
cuda_sync();
// IWT Rows
K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t));
numBlocks = dim3((dx+K-1)/K,1,dzNext);
numThreads = dim3(K,(T/K),1);
mem = K*2*dy*sizeof(_data_t);
cu_iwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cu_iwt3df_row <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
// IWT Columns
K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t));
numBlocks = dim3(1,(dyNext+K-1)/K,dzNext);
numThreads = dim3((T/K),K,1);
mem = K*2*dx*sizeof(_data_t);
cu_iwt3df_col <<< numBlocks,numThreads,mem, cuda_get_stream() >>>(dev_current_vz,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen);
cuda_sync();
// Advance past this level's 7 detail subbands for the next iteration.
dev_HxLyLz_df1 += 7*blockSize;
dev_HxLyLz_df2 += 7*blockSize;
dev_HxLyLz_n += 7*blockSize;
}
cuda(Free( dev_filters ));
cuda(Free( dev_temp1 ));
cuda(Free( dev_temp2 ));
// Undo the random circular shift applied before the forward transform.
circunshift_gpu(plan,out_vx);
circunshift_gpu(plan,out_vy);
circunshift_gpu(plan,out_vz);
}
/* Return a uniformly distributed random integer in [0, limit].
 * Uses rejection sampling on top of rand() to avoid modulo bias:
 * the rand() range is partitioned into equal buckets of size
 * RAND_MAX/(limit+1), and draws landing past the last full bucket
 * are discarded and retried. */
int rand_lim(int limit) {
	const int bucket = RAND_MAX / (limit + 1);
	int draw;
	do {
		draw = rand() / bucket;
	} while (draw > limit);
	return draw;
}
/* Draw a fresh random circular shift for every dimension of the plan.
 * The shift for dimension i is uniform in [0, maxShift], where maxShift is
 * dim/2^numLevels rounded up to a power of two, capped at 8 so the shift
 * never exceeds the coarsest-level support.
 *
 * Fix: the original executed `i = rand();` and immediately overwrote i with
 * the loop initializer — a dead store whose only effect was discarding one
 * rand() draw. Removed. */
void dfwavelet_new_randshift_gpu (struct dfwavelet_plan_s* plan) {
	for (int i = 0; i < plan->numdims; i++) {
		// Smallest log2dim with 2^log2dim >= imSize[i].
		int log2dim = 1;
		while( (1<<log2dim) < plan->imSize[i]) {
			log2dim++;
		}
		// Largest useful shift at the coarsest decomposition level.
		int maxShift = 1 << (log2dim-plan->numLevels);
		if (maxShift > 8) {
			maxShift = 8;
		}
		// Generate random shift value between 0 and maxShift (inclusive).
		plan->randShift[i] = rand_lim(maxShift);
	}
}
/* Denoise a 3-D velocity field on the GPU: forward divergence-free wavelet
 * transform, soft-threshold the coefficients (dfthresh for the two
 * divergence-free channels, nthresh for the non-divergence-free one), then
 * inverse transform back into out_vx/out_vy/out_vz. Temporary coefficient
 * buffers are allocated on the device for the duration of the call. */
extern "C" void dfwavthresh3_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
	data_t *wc_df1, *wc_df2, *wc_n;
	cuda(Malloc( (void**)&wc_df1, plan->numCoeff*sizeof(_data_t) ));
	cuda(Malloc( (void**)&wc_df2, plan->numCoeff*sizeof(_data_t) ));
	cuda(Malloc( (void**)&wc_n, plan->numCoeff*sizeof(_data_t) ));
	// Analyze, shrink, synthesize.
	dffwt3_gpu(plan,wc_df1,wc_df2,wc_n,in_vx,in_vy,in_vz);
	dfsoftthresh_gpu(plan,dfthresh,nthresh,wc_df1,wc_df2,wc_n);
	dfiwt3_gpu(plan,out_vx,out_vy,out_vz,wc_df1,wc_df2,wc_n);
	cuda(Free( wc_df1 ));
	cuda(Free( wc_df2 ));
	cuda(Free( wc_n ));
}
/* Soft-threshold the detail (non-coarse) wavelet coefficients in place.
 * The first numCoarse entries of each array are the coarse approximation and
 * are left untouched; dfthresh is applied to both divergence-free channels,
 * nthresh to the non-divergence-free channel.
 *
 * Fix: launch on cuda_get_stream() like every other kernel launch in this
 * file (previously these three ran on the default stream, unordered with the
 * library stream), and synchronize before returning for consistency with the
 * surrounding wrappers. */
extern "C" void dfsoftthresh_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn)
{
	assert(plan->use_gpu==1||plan->use_gpu==2);
	_data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn;
	dev_wcdf1 = (_data_t*) out_wcdf1;
	dev_wcdf2 = (_data_t*) out_wcdf2;
	dev_wcn = (_data_t*) out_wcn;
	int const T = 512;
	dim3 numBlocks, numThreads;
	// Only the detail coefficients are thresholded.
	int numMax = plan->numCoeff-plan->numCoarse;
	numBlocks = dim3((numMax+T-1)/T,1,1);
	numThreads = dim3(T,1,1);
	cu_soft_thresh <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_wcdf1+plan->numCoarse,dfthresh,numMax);
	cu_soft_thresh <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_wcdf2+plan->numCoarse,dfthresh,numMax);
	cu_soft_thresh <<< numBlocks,numThreads, 0, cuda_get_stream() >>> (dev_wcn+plan->numCoarse,nthresh,numMax);
	cuda_sync();
}
/********** Aux functions **********/
/* Apply the plan's per-dimension random circular shift to the device array
 * data_c in place. A no-op when every shift is zero. The shift is done out
 * of place: the input is snapshotted into a scratch device buffer which the
 * cu_circshift kernel then scatters back shifted. */
extern "C" void circshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) {
	// Nothing to do when all shifts are zero.
	int allZero = 1;
	for (int d = 0; d < plan->numdims; d++)
		allZero &= (plan->randShift[d] == 0);
	if (allZero)
		return;
	_data_t* dst = (_data_t*) data_c;
	// Snapshot the unshifted input.
	_data_t* src;
	cuda(Malloc((void**)&src, plan->numPixel*sizeof(_data_t)));
	cuda(Memcpy(src, dst, plan->numPixel*sizeof(_data_t), cudaMemcpyDeviceToDevice));
	int T = 512;
	if (plan->numdims==2)
	{
		// 2-D case: depth dimension collapses to 1 with zero shift.
		cu_circshift <<< (plan->numPixel+T-1)/T, T>>>(dst,src,plan->imSize[0],plan->imSize[1],1,plan->randShift[0],plan->randShift[1],0);
	} else if (plan->numdims==3)
	{
		cu_circshift <<< (plan->numPixel+T-1)/T, T>>>(dst,src,plan->imSize[0],plan->imSize[1],plan->imSize[2],plan->randShift[0],plan->randShift[1],plan->randShift[2]);
	}
	cuda(Free(src));
}
/* Undo the plan's per-dimension random circular shift on the device array
 * data_c in place (inverse of circshift_gpu). A no-op when every shift is
 * zero. Works out of place via a scratch snapshot of the input, which the
 * cu_circunshift kernel scatters back unshifted. */
extern "C" void circunshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) {
	// Nothing to do when all shifts are zero.
	int allZero = 1;
	for (int d = 0; d < plan->numdims; d++)
		allZero &= (plan->randShift[d] == 0);
	if (allZero)
		return;
	_data_t* dst = (_data_t*) data_c;
	// Snapshot the shifted input.
	_data_t* src;
	cuda(Malloc((void**)&src, plan->numPixel*sizeof(_data_t)));
	cuda(Memcpy(src, dst, plan->numPixel*sizeof(_data_t), cudaMemcpyDeviceToDevice));
	int T = 512;
	if (plan->numdims==2)
	{
		// 2-D case: depth dimension collapses to 1 with zero shift.
		cu_circunshift <<< (plan->numPixel+T-1)/T, T>>>(dst,src,plan->imSize[0],plan->imSize[1],1,plan->randShift[0],plan->randShift[1],0);
	} else if (plan->numdims==3)
	{
		cu_circunshift <<< (plan->numPixel+T-1)/T, T>>>(dst,src,plan->imSize[0],plan->imSize[1],plan->imSize[2],plan->randShift[0],plan->randShift[1],plan->randShift[2]);
	}
	cuda(Free(src));
}
// ############################################################################
// CUDA function of fwt column convolution
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Output: Lx, Hx
// Input: in, dx, dy, dz, dxNext, lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen)
{
	// Dynamic shared memory: blockDim.y columns of length dx.
	extern __shared__ _data_t cols [];
	int ti = threadIdx.x;
	int tj = threadIdx.y;
	int j = blockIdx.y*blockDim.y+threadIdx.y;
	int k = blockIdx.z*blockDim.z+threadIdx.z;
	// Fix: the original returned here when j >= dy, letting some threads of a
	// partially-filled block skip the block-wide __syncthreads() below —
	// undefined behavior per the CUDA programming guide. Guard the work
	// instead so every thread reaches the barrier.
	int active = (j < dy);
	// Stage this thread's column of the input into shared memory.
	if (active) {
		for (int i = ti; i < dx; i += blockDim.x)
			cols[i + tj*dx] = in[i + j*dx + k*dx*dy];
	}
	__syncthreads();
	if (!active)
		return;
	// Low-pass and high-pass convolution with downsample-by-2 along x;
	// out-of-range filter taps are mirrored (symmetric boundary extension).
	int ind, lessThan, greaThan;
	for (int i = ti; i < dxNext; i += blockDim.x){
		_data_t y = cols[0]-cols[0];	// zero of _data_t without assuming its layout
		_data_t z = cols[0]-cols[0];
		#pragma unroll
		for (int f = 0; f < filterLen; f++){
			ind = 2*i+1 - (filterLen-1)+f;
			// Branchless mirror of indices falling off either end.
			lessThan = (int) (ind<0);
			greaThan = (int) (ind>=dx);
			ind = -1*lessThan+ind*(-2*lessThan+1);
			ind = (2*dx-1)*greaThan+ind*(-2*greaThan+1);
			y += cols[ind + tj*dx] * lod[filterLen-1-f];
			z += cols[ind + tj*dx] * hid[filterLen-1-f];
		}
		Lx[i + j*dxNext + k*dxNext*dy] = y;
		Hx[i + j*dxNext + k*dxNext*dy] = z;
	}
}
// ############################################################################
// CUDA function of fwt row convolution. Assumes fwt_col() has already been called
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Output: LxLy, LxHy / HxLy, HxHy
// Input: Lx/Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen)
{
	// Dynamic shared memory: K rows of length dy (K = blockDim.x).
	extern __shared__ _data_t rows [];
	int const K = blockDim.x;
	int ti = threadIdx.x;
	int tj = threadIdx.y;
	int i = blockIdx.x*blockDim.x+threadIdx.x;
	int k = blockIdx.z*blockDim.z+threadIdx.z;
	// Fix: the original returned here when i >= dxNext, letting some threads
	// of a partially-filled block skip the block-wide __syncthreads() below —
	// undefined behavior. Guard the work instead so every thread reaches the
	// barrier.
	int active = (i < dxNext);
	// Stage this thread's row of the input into shared memory.
	if (active) {
		for (int j = tj; j < dy; j += blockDim.y)
			rows[ti + j*K] = in[i + j*dxNext + k*dxNext*dy];
	}
	__syncthreads();
	if (!active)
		return;
	// Low-pass and high-pass convolution with downsample-by-2 along y;
	// out-of-range filter taps are mirrored (symmetric boundary extension).
	int ind, lessThan, greaThan;
	for (int j = tj; j < dyNext; j += blockDim.y){
		_data_t y = rows[0]-rows[0];	// zero of _data_t without assuming its layout
		_data_t z = rows[0]-rows[0];
		#pragma unroll
		for (int f = 0; f < filterLen; f++){
			ind = 2*j+1 - (filterLen-1)+f;
			// Branchless mirror of indices falling off either end.
			lessThan = (int) (ind<0);
			greaThan = (int) (ind>=dy);
			ind = -1*lessThan+ind*(-2*lessThan+1);
			ind = (2*dy-1)*greaThan+ind*(-2*greaThan+1);
			y += rows[ti + ind*K] * lod[filterLen-1-f];
			z += rows[ti + ind*K] * hid[filterLen-1-f];
		}
		Ly[i + j*dxNext + k*dxNext*dyNext] = y;
		Hy[i + j*dxNext + k*dxNext*dyNext] = z;
	}
}
// ############################################################################
// CUDA function of fwt depth convolution. Assumes fwt_row() has already been called
// Loads data to scratchpad (shared memory) and convolves w/ low pass and high pass
// Output: Lz, Hz (low/high-pass depth subbands of the given input subband)
// Input: one of LxLy/HxLy/LxHy/HxHy, dx, dy, dz, dxNext, dyNext, dzNext, lod, hid, filterLen
// ############################################################################
extern "C" __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t *Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen)
{
	// Dynamic shared memory: K depth-lines of length dz (K = blockDim.x).
	extern __shared__ _data_t deps [];
	int const K = blockDim.x;
	int ti = threadIdx.x;
	int tk = threadIdx.z;
	int i = blockIdx.x*blockDim.x+threadIdx.x;
	int j = blockIdx.y*blockDim.y+threadIdx.y;
	// Fix: the original returned here when i >= dxNext, letting some threads
	// of a partially-filled block skip the block-wide __syncthreads() below —
	// undefined behavior. Guard the work instead so every thread reaches the
	// barrier.
	int active = (i < dxNext);
	// Stage this thread's depth-line of the input into shared memory.
	if (active) {
		for (int k = tk; k < dz; k += blockDim.z)
			deps[ti + k*K] = in[i + j*dxNext + k*dxNext*dyNext];
	}
	__syncthreads();
	if (!active)
		return;
	// Low-pass and high-pass convolution with downsample-by-2 along z;
	// out-of-range filter taps are mirrored (symmetric boundary extension).
	int ind, lessThan, greaThan;
	for (int k = tk; k < dzNext; k += blockDim.z){
		_data_t y = deps[0]-deps[0];	// zero of _data_t without assuming its layout
		_data_t z = deps[0]-deps[0];
		#pragma unroll
		for (int f = 0; f < filterLen; f++){
			ind = 2*k+1 - (filterLen-1)+f;
			// Branchless mirror of indices falling off either end.
			lessThan = (int) (ind<0);
			greaThan = (int) (ind>=dz);
			ind = -1*lessThan+ind*(-2*lessThan+1);
			ind = (2*dz-1)*greaThan+ind*(-2*greaThan+1);
			y += deps[ti + ind*K] * lod[filterLen-1-f];
			z += deps[ti + ind*K] * hid[filterLen-1-f];
		}
		Lz[i + j*dxNext + k*dxNext*dyNext] = y;
		Hz[i + j*dxNext + k*dxNext*dyNext] = z;
	}
}
// Recombine the single-axis highpass subbands (HLL, LHL, LLH) of the three
// components into divergence-free (df1, df2) and non-divergence-free (n)
// channels. One thread per voxel; the *GreatZero flags zero out neighbor
// contributions on the boundary planes.
// Fix: the LLH line used double literals (0.25) while the rest of the kernel
// uses float literals — now 0.25f throughout to avoid double promotion and
// match the kernel's own convention.
extern "C" __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext)
{
	int i = blockIdx.x*blockDim.x+threadIdx.x;
	int j = blockIdx.y*blockDim.y+threadIdx.y;
	int k = blockIdx.z*blockDim.z+threadIdx.z;
	_data_t x,y,z;
	scalar_t xGreatZero,yGreatZero,zGreatZero;
	if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
	{
		return;
	}
	//HLL
	x = HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext];
	y = HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext];
	z = HxLyLz_n[i+j*dxNext+k*dxNext*dyNext];
	HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext] = y;
	HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z;
	yGreatZero = j>0;
	zGreatZero = k>0;
	HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] = x + yGreatZero*0.25f*y + zGreatZero*0.25f*z;
	//LHL
	// NOTE(review): LxHyLz_df1 is deliberately left holding x here (unlike the
	// HLL/LLH cases which permute all three) — confirm against the
	// divergence-free wavelet recombination scheme.
	x = LxHyLz_df1[i+j*dxNext+k*dxNext*dyNext];
	y = LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext];
	z = LxHyLz_n[i+j*dxNext+k*dxNext*dyNext];
	LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z;
	xGreatZero = i>0;
	zGreatZero = k>0;
	LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = y + xGreatZero*0.25f*x + zGreatZero*0.25f*z;
	//LLH
	x = LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext];
	y = LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext];
	z = LxLyHz_n[i+j*dxNext+k*dxNext*dyNext];
	LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = y;
	LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x;
	yGreatZero = j>0;
	xGreatZero = i>0;
	LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = z + yGreatZero*0.25f*y + xGreatZero*0.25f*x;
}
// Finite-difference correction pass for the HLL/LHL/LLH recombination:
// subtracts 0.25 times the lagged-by-one neighbor (along the relevant axis)
// from the n channel; boundary voxels (index 0) use zero instead.
// Must run after cu_fwt3df_LC1 (reads the already-permuted df1/df2 arrays).
// Fix: float literals (0.25f) instead of double literals (0.25) to avoid
// double promotion, matching cu_fwt3df_LC1.
extern "C" __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext)
{
	int i = blockIdx.x*blockDim.x+threadIdx.x;
	int j = blockIdx.y*blockDim.y+threadIdx.y;
	int k = blockIdx.z*blockDim.z+threadIdx.z;
	_data_t x,y,z;
	_data_t zero = make_float2(0.f,0.f);
	if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
	{
		return;
	}
	//HLL: neighbors lagged along y (df1) and z (df2)
	if (j>0)
		y = HxLyLz_df1[i+(j-1)*dxNext+k*dxNext*dyNext];
	else
		y = zero;
	if (k>0)
		z = HxLyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext];
	else
		z = zero;
	HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25f*y - 0.25f*z;
	//LHL: neighbors lagged along x (df1) and z (df2)
	if (i>0)
		x = LxHyLz_df1[(i-1)+j*dxNext+k*dxNext*dyNext];
	else
		x = zero;
	if (k>0)
		z = LxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext];
	else
		z = zero;
	LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25f*x - 0.25f*z;
	//LLH: neighbors lagged along y (df1) and x (df2)
	if (j>0)
		y = LxLyHz_df1[i+(j-1)*dxNext+k*dxNext*dyNext];
	else
		y = zero;
	if (i>0)
		x = LxLyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext];
	else
		x = zero;
	LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25f*y - 0.25f*x;
}
// Forward 3-D wavelet linear-combination pass for the doubly-high subbands
// (HHL, HLH, LHH).  In-place 3x3-style mixing at each voxel: all three values
// are loaded before any store, so the statement order within a subband is
// safe.  The boundary flags (i/j/k > 0) zero the 0.125 coupling term on the
// first row/column/slice.  NOTE(review): scheme inferred from symmetry with
// cu_iwt3df_LC2 — confirm against the host wavelet driver.
extern "C" __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
// 0/1 multipliers used instead of branches to mask the boundary term.
scalar_t xGreatZero,yGreatZero,zGreatZero;
if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
{
return;
}
//HHL
x = HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext];
y = HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext];
z = HxHyLz_n[i+j*dxNext+k*dxNext*dyNext];
HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x-y);
HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z;
zGreatZero = k>0;
HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x+y) + zGreatZero*0.125*z;
//HLH
x = HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext];
y = HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext];
z = HxLyHz_n[i+j*dxNext+k*dxNext*dyNext];
HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z-x);
HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = y;
yGreatZero = j>0;
HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z+x) + yGreatZero*0.125*y;
//LHH
x = LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext];
y = LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext];
z = LxHyHz_n[i+j*dxNext+k*dxNext*dyNext];
LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y-z);
LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x;
xGreatZero = i>0;
LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y+z) + xGreatZero*0.125*x;
}
// Neighbor-correction companion to cu_fwt3df_LC2: subtracts 0.125 * df2/df1
// of the previous slice/row/column from each voxel's *_n entry (zero at the
// boundary).  Must run after cu_fwt3df_LC2 since it reads values that kernel
// stored; as a separate launch there is no intra-kernel race.
extern "C" __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
_data_t zero = make_float2(0.f,0.f);
if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext))
{
return;
}
//HHL
if (k>0)
z = HxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext];
else
z = zero;
HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*z;
//HLH
if (j>0)
y = HxLyHz_df2[i+(j-1)*dxNext+k*dxNext*dyNext];
else
y = zero;
HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*y;
//LHH
if (i>0)
x = LxHyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext];
else
x = zero;
LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*x;
}
// Forward 3-D wavelet linear-combination pass for the triply-high (HHH)
// subband: an in-place 3x3 mix of (df1, df2, n) at every voxel.  All three
// values are read into registers before any store, so the in-place update is
// well defined.  One thread per voxel of the next-level grid.
extern "C" __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext)
{
    const int ix = blockIdx.x*blockDim.x+threadIdx.x;
    const int iy = blockIdx.y*blockDim.y+threadIdx.y;
    const int iz = blockIdx.z*blockDim.z+threadIdx.z;
    if (ix >= dxNext || iy >= dyNext || iz >= dzNext)
        return;
    // Flat index of this voxel inside the HHH subband.
    const int idx = ix + iy*dxNext + iz*dxNext*dyNext;
    const _data_t a = HxHyHz_df1[idx];
    const _data_t b = HxHyHz_df2[idx];
    const _data_t c = HxHyHz_n[idx];
    HxHyHz_df1[idx] = 1.0/3.0*(-2.0*a+b+c);
    HxHyHz_df2[idx] = 1.0/3.0*(2*b-a-c);
    HxHyHz_n[idx] = 1.0/3.0*(a+b+c);
}
// ############################################################################
// CUDA function of iwt depth convolution.
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Scratchpad size: K x 2*dy
// Output: Lz/Hz
// Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen
// ############################################################################
// Inverse wavelet transform along the depth (z) axis.  Each block stages a
// full z-column pair (Lz then Hz) of its (i,j) positions into dynamic shared
// memory (size K * 2*dz * sizeof(_data_t), K = blockDim.x), then upsamples
// and convolves with the low-pass (lod) and high-pass (hid) reconstruction
// filters.
// NOTE(review): threads with i >= dx return before __syncthreads(); that is
// only safe if dx is a multiple of blockDim.x — confirm the launch config.
extern "C" __global__ void cu_iwt3df_dep(_data_t *out, _data_t *Lz, _data_t *Hz, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset,int zOffset,scalar_t *lod, scalar_t *hid, int filterLen)
{
extern __shared__ _data_t deps [];
int const K = blockDim.x;
int ti = threadIdx.x;
int tk = threadIdx.z;
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
if (i>=dx){
return;
}
// Stage: deps[ti + k*K] = low band, deps[ti + (k+dz)*K] = high band.
for (int k = tk; k < dz; k += blockDim.z){
deps[ti + k*K] = Lz[i + j*dx + k*dx*dy];
deps[ti + (k+dz)*K] = Hz[i + j*dx + k*dx*dy];
}
__syncthreads();
// Low-Pass and High Pass Downsample
int ind;
for (int k = tk+zOffset; k < dzNext+zOffset; k += blockDim.z){
// _data_t has no zero literal; x - x produces a zero of the right type.
_data_t y = deps[0]-deps[0];
#pragma unroll
for (int f = (k-(filterLen-1)) % 2; f < filterLen; f+=2){
// In C++ the % of a negative value can be -1; such f values produce a
// negative ind below and are skipped by the bounds check, so parity of
// the (output - filter tap) alignment is still correct.
ind = (k-(filterLen-1)+f)>>1;
if ((ind >= 0) && (ind < dz)) {
y += deps[ti + ind*K] * lod[filterLen-1-f];
y += deps[ti + (ind+dz)*K] * hid[filterLen-1-f];
}
}
out[i + j*dx + (k-zOffset)*dx*dy] = y;
}
}
// ############################################################################
// CUDA function of iwt row convolution. Assumes fwt_col() has already been called.
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Scratchpad size: K x 2*dy
// Output: Lx/Hx
// Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen
// ############################################################################
// Inverse wavelet transform along the row (y) axis: stages the Ly/Hy bands of
// a whole y-line into shared memory (K * 2*dy samples, K = blockDim.x), then
// upsamples + convolves with the reconstruction filters, writing a dyNext-long
// output line.
// NOTE(review): the early return for i >= dx happens before __syncthreads();
// safe only when dx is a multiple of blockDim.x — confirm launch config.
extern "C" __global__ void cu_iwt3df_row(_data_t *out, _data_t *Ly, _data_t *Hy, int dx, int dy,int dz,int dxNext, int dyNext,int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen)
{
extern __shared__ _data_t rows [];
int const K = blockDim.x;
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x*blockDim.x+threadIdx.x;
int k = blockIdx.z*blockDim.z+threadIdx.z;
if (i>=dx){
return;
}
// Stage: rows[ti + j*K] = low band, rows[ti + (j+dy)*K] = high band.
for (int j = tj; j < dy; j += blockDim.y){
rows[ti + j*K] = Ly[i + j*dx + k*dx*dy];
rows[ti + (j+dy)*K] = Hy[i + j*dx + k*dx*dy];
}
__syncthreads();
// Low-Pass and High Pass Downsample
int ind;
for (int j = tj+yOffset; j < dyNext+yOffset; j += blockDim.y){
// Typed zero: _data_t has no literal form.
_data_t y = rows[0]-rows[0];
#pragma unroll
for (int f = (j-(filterLen-1)) % 2; f < filterLen; f+=2){
ind = (j-(filterLen-1)+f)>>1;
if ((ind >= 0) && (ind < dy)) {
y += rows[ti + ind*K] * lod[filterLen-1-f];
y += rows[ti + (ind+dy)*K] * hid[filterLen-1-f];
}
}
out[i + (j-yOffset)*dx + k*dx*dyNext] = y;
}
}
// ############################################################################
// CUDA function of iwt column convolution
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass
// Scratchpad size: 2*dx x K
// Output: out
// Input: Lx, Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen
// ############################################################################
// Inverse wavelet transform along the column (x) axis: stages Lx then Hx for a
// whole x-line into shared memory (2*dx samples per tj lane), then upsamples +
// convolves with the reconstruction filters, producing a dxNext-long output
// column.
// NOTE(review): early return for j >= dyNext precedes __syncthreads(); safe
// only when dyNext is a multiple of blockDim.y — confirm launch config.
extern "C" __global__ void cu_iwt3df_col(_data_t *out, _data_t *Lx, _data_t *Hx, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen)
{
extern __shared__ _data_t cols [];
int ti = threadIdx.x;
int tj = threadIdx.y;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
if (j>=dyNext){
return;
}
// Each tj lane owns a 2*dx stripe: [0,dx) = low band, [dx,2*dx) = high band.
int dx2 = 2*dx;
// Load Input to Temp Array
for (int i = ti; i < dx; i += blockDim.x){
cols[i + tj*dx2] = Lx[i + j*dx + k*dx*dyNext];
cols[dx+i + tj*dx2] = Hx[i + j*dx + k*dx*dyNext];
}
__syncthreads();
// Low-Pass and High Pass Downsample
int ind;
for (int i = ti+xOffset; i < dxNext+xOffset; i += blockDim.x){
// Typed zero accumulator.
_data_t y = cols[0]-cols[0];
#pragma unroll
for (int f = (i-(filterLen-1)) % 2; f < filterLen; f+=2){
ind = (i-(filterLen-1)+f)>>1;
if (ind >= 0 && ind < dx) {
y += cols[ind + tj*dx2] * lod[filterLen-1-f];
y += cols[dx+ind + tj*dx2] * hid[filterLen-1-f];
}
}
out[(i-xOffset) + j*dxNext + k*dxNext*dyNext] = y;
}
}
// Inverse of cu_fwt3df_LC1: undoes the in-place rotation/coupling of the
// singly-high subbands (HLL, LHL, LLH).  All three values are loaded into
// registers before any store, so the in-place permutation + subtraction is
// well defined.  Boundary masks (i/j/k > 0) match the forward pass.
extern "C" __global__ void cu_iwt3df_LC1 (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t df1,df2,n;
// 0/1 multipliers masking the boundary coupling terms.
scalar_t xGreatZero,yGreatZero,zGreatZero;
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HLL
df1 = HxLyLz_df1[i+j*dx+k*dx*dy];
df2 = HxLyLz_df2[i+j*dx+k*dx*dy];
n = HxLyLz_n[i+j*dx+k*dx*dy];
HxLyLz_df2[i+j*dx+k*dx*dy] = df1;
HxLyLz_n[i+j*dx+k*dx*dy] = df2;
yGreatZero = j>0;
zGreatZero = k>0;
HxLyLz_df1[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - zGreatZero*0.25*df2;
//LHL
df1 = LxHyLz_df1[i+j*dx+k*dx*dy];
df2 = LxHyLz_df2[i+j*dx+k*dx*dy];
n = LxHyLz_n[i+j*dx+k*dx*dy];
LxHyLz_n[i+j*dx+k*dx*dy] = df2;
xGreatZero = i>0;
zGreatZero = k>0;
LxHyLz_df2[i+j*dx+k*dx*dy] = n - xGreatZero*0.25*df1 - zGreatZero*0.25*df2;
//LLH
df1 = LxLyHz_df1[i+j*dx+k*dx*dy];
df2 = LxLyHz_df2[i+j*dx+k*dx*dy];
n = LxLyHz_n[i+j*dx+k*dx*dy];
LxLyHz_df1[i+j*dx+k*dx*dy] = df2;
LxLyHz_df2[i+j*dx+k*dx*dy] = df1;
yGreatZero = j>0;
xGreatZero = i>0;
LxLyHz_n[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - xGreatZero*0.25*df2;
}
// Neighbor-correction pass inverting cu_fwt3df_LC1_diff: adds back the
// 0.25 * (previous row/column/slice) coupling terms into the target array of
// each singly-high subband; boundary voxels use zero.  Separate launch from
// cu_iwt3df_LC1, so there is no intra-kernel read/write race.
extern "C" __global__ void cu_iwt3df_LC1_diff (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
_data_t zero = make_float2(0.f,0.f);
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HLL
if (j>0)
y = HxLyLz_df2[i+(j-1)*dx+k*dx*dy];
else
y = zero;
if (k>0)
z = HxLyLz_n[i+j*dx+(k-1)*dx*dy];
else
z = zero;
HxLyLz_df1[i+j*dx+k*dx*dy] += 0.25*y + 0.25*z;
//LHL
if (i>0)
x = LxHyLz_df1[(i-1)+j*dx+k*dx*dy];
else
x = zero;
if (k>0)
z = LxHyLz_n[i+j*dx+(k-1)*dx*dy];
else
z = zero;
LxHyLz_df2[i+j*dx+k*dx*dy] += 0.25*x + 0.25*z;
//LLH
if (j>0)
y = LxLyHz_df2[i+(j-1)*dx+k*dx*dy];
else
y = zero;
if (i>0)
x = LxLyHz_df1[(i-1)+j*dx+k*dx*dy];
else
x = zero;
LxLyHz_n[i+j*dx+k*dx*dy] += 0.25*y + 0.25*x;
}
// Inverse of cu_fwt3df_LC2 for the doubly-high subbands (HHL, HLH, LHH):
// in-place un-mixing of (df1, df2, n) at each voxel.  All three values are
// read before any store.  The 0.125 term is masked out on the boundary to
// match the forward pass.
extern "C" __global__ void cu_iwt3df_LC2 (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t df1,df2,n;
// 0/1 boundary masks.
scalar_t xGreatZero,yGreatZero,zGreatZero;
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HHL
df1 = HxHyLz_df1[i+j*dx+k*dx*dy];
df2 = HxHyLz_df2[i+j*dx+k*dx*dy];
n = HxHyLz_n[i+j*dx+k*dx*dy];
HxHyLz_n[i+j*dx+k*dx*dy] = df2;
zGreatZero = k>0;
HxHyLz_df1[i+j*dx+k*dx*dy] = df1+n-zGreatZero*0.125*df2;
HxHyLz_df2[i+j*dx+k*dx*dy] = n-df1-zGreatZero*0.125*df2;
//HLH
df1 = HxLyHz_df1[i+j*dx+k*dx*dy];
df2 = HxLyHz_df2[i+j*dx+k*dx*dy];
n = HxLyHz_n[i+j*dx+k*dx*dy];
HxLyHz_df2[i+j*dx+k*dx*dy] = df2;
yGreatZero = j>0;
HxLyHz_n[i+j*dx+k*dx*dy] = df1+n-yGreatZero*0.125*df2;
HxLyHz_df1[i+j*dx+k*dx*dy] = n-df1-yGreatZero*0.125*df2;
//LHH
df1 = LxHyHz_df1[i+j*dx+k*dx*dy];
df2 = LxHyHz_df2[i+j*dx+k*dx*dy];
n = LxHyHz_n[i+j*dx+k*dx*dy];
LxHyHz_df1[i+j*dx+k*dx*dy] = df2;
xGreatZero = i>0;
LxHyHz_df2[i+j*dx+k*dx*dy] = df1+n-xGreatZero*0.125*df2;
LxHyHz_n[i+j*dx+k*dx*dy] = n-df1-xGreatZero*0.125*df2;
}
// Neighbor-correction pass inverting cu_fwt3df_LC2_diff: adds back the
// 0.125 * (previous slice/row/column) terms into two arrays of each
// doubly-high subband; boundary voxels use zero.  Separate launch from
// cu_iwt3df_LC2, so no intra-kernel race.
extern "C" __global__ void cu_iwt3df_LC2_diff (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
int j = blockIdx.y*blockDim.y+threadIdx.y;
int k = blockIdx.z*blockDim.z+threadIdx.z;
_data_t x,y,z;
_data_t zero = make_float2(0.f,0.f);
if ((i>=dx)||(j>=dy)||(k>=dz))
{
return;
}
//HHL
if (k>0)
z = HxHyLz_n[i+j*dx+(k-1)*dx*dy];
else
z = zero;
HxHyLz_df1[i+j*dx+k*dx*dy] += 0.125*z;
HxHyLz_df2[i+j*dx+k*dx*dy] += 0.125*z;
//HLH
if (j>0)
y = HxLyHz_df2[i+(j-1)*dx+k*dx*dy];
else
y = zero;
HxLyHz_df1[i+j*dx+k*dx*dy] += 0.125*y;
HxLyHz_n[i+j*dx+k*dx*dy] += 0.125*y;
//LHH
if (i>0)
x = LxHyHz_df1[(i-1)+j*dx+k*dx*dy];
else
x = zero;
LxHyHz_df2[i+j*dx+k*dx*dy] += 0.125*x;
LxHyHz_n[i+j*dx+k*dx*dy] += 0.125*x;
}
// Inverse of cu_fwt3df_LC3: undoes the in-place 3x3 mix of the triply-high
// (HHH) subband.  All three values are read into registers before any store.
// One thread per voxel.
extern "C" __global__ void cu_iwt3df_LC3 (_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz)
{
    const int ix = blockIdx.x*blockDim.x+threadIdx.x;
    const int iy = blockIdx.y*blockDim.y+threadIdx.y;
    const int iz = blockIdx.z*blockDim.z+threadIdx.z;
    if (ix >= dx || iy >= dy || iz >= dz)
        return;
    // Flat voxel index.
    const int idx = ix + iy*dx + iz*dx*dy;
    const _data_t a = HxHyHz_df1[idx];
    const _data_t b = HxHyHz_df2[idx];
    const _data_t c = HxHyHz_n[idx];
    HxHyHz_df1[idx] = c-a;
    HxHyHz_df2[idx] = b+c;
    HxHyHz_n[idx] = a-b+c;
}
// Scales every element of `in` by `mult` in place.
// NOTE(review): maxInd is treated as the last *valid* index (inclusive bound);
// confirm callers pass count-1 rather than count.
extern "C" __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd)
{
    const int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx <= maxInd)
        in[idx] = in[idx]*mult;
}
// Blends `in` into `out` element-wise: out = out + (in - out) * mult.
// NOTE(review): maxInd is an inclusive last-valid-index bound, consistent with
// cu_mult — confirm callers pass count-1.
//
// BUG FIX: the original computed `i + (out[ind] - i) * mult` with
// i = out[ind], which is identically a no-op and left the `in` argument
// completely unused.  The second read was clearly meant to be in[ind].
extern "C" __global__ void cu_add_mult(_data_t* out, _data_t* in, _data_t mult, int maxInd)
{
    int ind = blockIdx.x*blockDim.x+threadIdx.x;
    if (ind > maxInd)
    {
        return;
    }
    _data_t i = out[ind];
    out[ind] = i+(in[ind]-i)*mult;
}
// Complex soft-thresholding (shrinkage): scales each sample toward zero by
// `thresh` in magnitude; samples with |x| <= thresh become exactly zero.
// Division by `norm` is safe: red > 0 implies norm > thresh >= 0.
// NOTE(review): numMax is treated as an inclusive last index (i > numMax
// returns), matching cu_mult/cu_add_mult — confirm callers pass count-1.
__global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax)
{
int const i = threadIdx.x + blockDim.x*blockIdx.x;
if (i>numMax)
return;
scalar_t norm = abs(in[i]);
scalar_t red = norm - thresh;
// in[i]-in[i] produces a typed zero for the below-threshold case.
in[i] = (red > 0.f) ? ((red / norm) * (in[i])) : in[i]-in[i];
}
// Circularly shifts a dx*dy*dz volume by (shift1, shift2, shift3) voxels,
// scattering from the unshifted copy into `data`.  The three per-axis shifts
// are folded into one flat offset modulo the total element count.
// NOTE(review): negative shifts would make the C++ % result negative —
// presumably callers pass non-negative shifts; confirm.
__global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3) {
    const int total = dx*dy*dz;
    const int src = blockIdx.x*blockDim.x + threadIdx.x;
    if (src >= total) {
        return;
    }
    const int offset = shift1 + shift2*dx + shift3*dx*dy;
    data[(src + offset) % total] = dataCopy[src];
}
// Inverse of cu_circshift: gathers each output voxel from its shifted
// location in the unshifted copy, undoing a prior circular shift by
// (shift1, shift2, shift3).
// NOTE(review): as in cu_circshift, negative shifts would make % negative —
// confirm callers pass non-negative shifts.
__global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3) {
    const int total = dx*dy*dz;
    const int dst = blockIdx.x*blockDim.x + threadIdx.x;
    if (dst >= total) {
        return;
    }
    const int offset = shift1 + shift2*dx + shift3*dx*dy;
    data[dst] = dataCopy[(dst + offset) % total];
}
|
92bab93958c54c79fb551fa7637a0f6877e6a0b4.hip | // !!! This is a file automatically generated by hipify!!!
/**
\file
This MEX function computes the mean and standard deviation of a vector
given in prhs[0] and returns the results in CPU. plhs[0] is the mean and
plhs[1] is the standard variation. The input can be on CPU or GPU and it
can be double or single precision. The computation is done where the input
is, but the results are always returned in CPU. If the input is double or
single the computation is in double or single precision, and the results
are double or single, respectively.
If the input is empty both outputs are 0. If it has one entry, only the mean is
computed and the standard deviation is set to zero.
*/
#include "mex.h"
#include "mex_gpu_tools.h"
#include "CudaDevInfo.h"
#include "cuda_sum_mean_var.h"
#include "cc_sum_mean_var.h"
#include "mex_assert.h"
#include "fast_heap.h"
#include "mex_context.h"
#include "timers.h"
// Computes mean and standard deviation of a GPU-resident vector and writes
// the two scalars into the (host) output mxArrays.
// - n_vec > 1: runs h_mean_stdv_vec on the device (scratch from the device
//   fast heap holds the two results back-to-back) and copies them to host.
// - n_vec == 1: the mean is the single element; stdv is defined as 0.
// - n_vec == 0: both results are 0.
template<class Float>
static void calcGPU(const mxGPUArray *pr,
size_t n_vec,
mxArray *output[2]
)
{
const Float *p_src_vec = (const Float*)mxGPUGetDataReadOnly(pr);
Float *pmean = (Float *) mxGetData(output[0]);
Float *pstdv = (Float *) mxGetData(output[1]);
if(n_vec > 1) {
// Device scratch: res[0] = mean, res[1] = stdv (written by h_mean_stdv_vec).
size_t sz = (n_vec+1) * sizeof(Float);
GenericHeapElement &pres = d_fast_heap->get(sz);
Float *res = static_cast<Float*>(*pres);
h_mean_stdv_vec(n_vec, p_src_vec, res);
gpuErrChk(hipMemcpy(pmean, res, sizeof(Float), hipMemcpyDeviceToHost),
"cuda_mean_stdv_mex:memcpy", "");
gpuErrChk(hipMemcpy(pstdv, res+1, sizeof(Float), hipMemcpyDeviceToHost),
"cuda_mean_stdv_mex:memcpy", "");
pres.discard();
}
else if(n_vec == 1){
gpuErrChk(hipMemcpy(pmean, p_src_vec, sizeof(Float), hipMemcpyDeviceToHost),
"cuda_mean_stdv_mex:memcpy", "");
*pstdv = 0;
}
else { // n_vec == 0
*pmean = 0;
*pstdv = 0;
}
}
// Computes mean and standard deviation of a CPU-resident vector and writes
// the two scalars into the output mxArrays.  Empty input yields (0, 0);
// a single element yields (element, 0).
template<class Float>
static void calcCPU(const mxArray *pr,
		    size_t n_vec,
		    mxArray *output[2])
{
    const Float *src = (const Float*) mxGetData(pr);
    Float *mean_out = (Float *) mxGetData(output[0]);
    Float *stdv_out = (Float *) mxGetData(output[1]);
    if (n_vec > 1) {
	*mean_out = c_mean_vec(n_vec, src);
	*stdv_out = c_stdv_vec(n_vec, src, *mean_out);
    }
    else {
	// n_vec is 0 or 1; *src is dereferenced only in the 1-element case.
	*mean_out = (n_vec == 1) ? *src : Float(0);
	*stdv_out = 0;
    }
}
static const char *errId = "cuda_mean_stdv_mex:mex_assert";
/** MEX entry point.  prhs[0] is a real float/double vector (CPU or GPU);
    returns its mean in plhs[0] and standard deviation in plhs[1], always as
    CPU scalars of the input's class.  Computation happens wherever the input
    lives. */
void mexFunction(int nlhs, mxArray *plhs[],
		 int nrhs, mxArray const *prhs[])
{
  mxClassID class_id;
  size_t n_vec;
  const mxGPUArray *pr;
  TIMER_START(Timers::TIMER_MEAN_STDV);
  // Using Macro to avoid unnecessary nvcc warning (defined but not referenced)
  // Check correctness and get input information.
  // BUG FIX: the original condition was (nlhs == 2 || nrhs == 1), which lets
  // e.g. a call with 2 outputs and 0 inputs through; the error message and the
  // unconditional use of prhs[0]/plhs[0]/plhs[1] below require BOTH conditions.
  mex_assert((nlhs == 2 && nrhs == 1),
	     (errId,
	      "%s:%d cuda_mean_stdv_mex got %d input and %d output arguments\n"
	      "should have 1 input and 2 output arguments",
	      __FILE__, __LINE__, nrhs, nlhs));
  int is_gpu = mxIsGPUArray(prhs[0]);
  if(is_gpu) {
    pr = mxGPUCreateFromMxArray(prhs[0]);
    mex_assert((mxGPUGetComplexity(pr) == mxREAL),
	       (errId, "Input to cuda_mean_mex must be real"));
    class_id = mxGPUGetClassID(pr);
    n_vec = (size_t) mxGPUGetNumberOfElements(pr);
  }
  else {
    mex_assert(!mxIsComplex(prhs[0]), (errId, "Input must be real"));
    class_id = mxGetClassID(prhs[0]);
    n_vec = (size_t) mxGetNumberOfElements(prhs[0]);
  }
  mex_assert((class_id == mxSINGLE_CLASS || class_id == mxDOUBLE_CLASS),
	     (errId, "Input must be float (%d) of double (%d). Currnt type %d",
	      mxSINGLE_CLASS, mxDOUBLE_CLASS, class_id));
  if(is_gpu)
    mex_assert(mxGPUisVector(pr),
	       (errId, "Input should be a vector"));
  else
    mex_assert(mxIsVector(prhs[0]),
	       (errId, "Input should be a vector"));
  // Outputs are 1x1 CPU scalars of the same class as the input.
  plhs[0] = mxCreateNumericMatrix(1, 1, class_id, mxREAL);
  plhs[1] = mxCreateNumericMatrix(1, 1, class_id, mxREAL);
  if(is_gpu) {
    if(class_id == mxSINGLE_CLASS)
      calcGPU<float>(pr, n_vec, plhs);
    else
      calcGPU<double>(pr, n_vec, plhs);
    mxGPUDestroyGPUArray(pr);
  }
  else {
    if(class_id == mxSINGLE_CLASS)
      calcCPU<float>(prhs[0], n_vec, plhs);
    else
      calcCPU<double>(prhs[0], n_vec, plhs);
  }
  TIMER_STOP(Timers::TIMER_MEAN_STDV);
}
| 92bab93958c54c79fb551fa7637a0f6877e6a0b4.cu |
/**
\file
This MEX function computes the mean and standard deviation of a vector
given in prhs[0] and returns the results in CPU. plhs[0] is the mean and
plhs[1] is the standard variation. The input can be on CPU or GPU and it
can be double or single precision. The computation is done where the input
is, but the results are always returned in CPU. If the input is double or
single the computation is in double or single precision, and the results
are double or single, respectively.
If the input is empty both outputs are 0. If it has one entry, only the mean is
computed and the standard deviation is set to zero.
*/
#include "mex.h"
#include "mex_gpu_tools.h"
#include "CudaDevInfo.h"
#include "cuda_sum_mean_var.h"
#include "cc_sum_mean_var.h"
#include "mex_assert.h"
#include "fast_heap.h"
#include "mex_context.h"
#include "timers.h"
// Computes mean and standard deviation of a GPU-resident vector and writes
// the two scalars into the (host) output mxArrays.
// - n_vec > 1: runs h_mean_stdv_vec on the device (scratch from the device
//   fast heap holds the two results back-to-back) and copies them to host.
// - n_vec == 1: the mean is the single element; stdv is defined as 0.
// - n_vec == 0: both results are 0.
template<class Float>
static void calcGPU(const mxGPUArray *pr,
size_t n_vec,
mxArray *output[2]
)
{
const Float *p_src_vec = (const Float*)mxGPUGetDataReadOnly(pr);
Float *pmean = (Float *) mxGetData(output[0]);
Float *pstdv = (Float *) mxGetData(output[1]);
if(n_vec > 1) {
// Device scratch: res[0] = mean, res[1] = stdv (written by h_mean_stdv_vec).
size_t sz = (n_vec+1) * sizeof(Float);
GenericHeapElement &pres = d_fast_heap->get(sz);
Float *res = static_cast<Float*>(*pres);
h_mean_stdv_vec(n_vec, p_src_vec, res);
gpuErrChk(cudaMemcpy(pmean, res, sizeof(Float), cudaMemcpyDeviceToHost),
"cuda_mean_stdv_mex:memcpy", "");
gpuErrChk(cudaMemcpy(pstdv, res+1, sizeof(Float), cudaMemcpyDeviceToHost),
"cuda_mean_stdv_mex:memcpy", "");
pres.discard();
}
else if(n_vec == 1){
gpuErrChk(cudaMemcpy(pmean, p_src_vec, sizeof(Float), cudaMemcpyDeviceToHost),
"cuda_mean_stdv_mex:memcpy", "");
*pstdv = 0;
}
else { // n_vec == 0
*pmean = 0;
*pstdv = 0;
}
}
// Computes mean and standard deviation of a CPU-resident vector and writes
// the two scalars into the output mxArrays.  Empty input yields (0, 0);
// a single element yields (element, 0).
template<class Float>
static void calcCPU(const mxArray *pr,
		    size_t n_vec,
		    mxArray *output[2])
{
    const Float *src = (const Float*) mxGetData(pr);
    Float *mean_out = (Float *) mxGetData(output[0]);
    Float *stdv_out = (Float *) mxGetData(output[1]);
    if (n_vec > 1) {
	*mean_out = c_mean_vec(n_vec, src);
	*stdv_out = c_stdv_vec(n_vec, src, *mean_out);
    }
    else {
	// n_vec is 0 or 1; *src is dereferenced only in the 1-element case.
	*mean_out = (n_vec == 1) ? *src : Float(0);
	*stdv_out = 0;
    }
}
static const char *errId = "cuda_mean_stdv_mex:mex_assert";
/** MEX entry point.  prhs[0] is a real float/double vector (CPU or GPU);
    returns its mean in plhs[0] and standard deviation in plhs[1], always as
    CPU scalars of the input's class.  Computation happens wherever the input
    lives. */
void mexFunction(int nlhs, mxArray *plhs[],
		 int nrhs, mxArray const *prhs[])
{
  mxClassID class_id;
  size_t n_vec;
  const mxGPUArray *pr;
  TIMER_START(Timers::TIMER_MEAN_STDV);
  // Using Macro to avoid unnecessary nvcc warning (defined but not referenced)
  // Check correctness and get input information.
  // BUG FIX: the original condition was (nlhs == 2 || nrhs == 1), which lets
  // e.g. a call with 2 outputs and 0 inputs through; the error message and the
  // unconditional use of prhs[0]/plhs[0]/plhs[1] below require BOTH conditions.
  mex_assert((nlhs == 2 && nrhs == 1),
	     (errId,
	      "%s:%d cuda_mean_stdv_mex got %d input and %d output arguments\n"
	      "should have 1 input and 2 output arguments",
	      __FILE__, __LINE__, nrhs, nlhs));
  int is_gpu = mxIsGPUArray(prhs[0]);
  if(is_gpu) {
    pr = mxGPUCreateFromMxArray(prhs[0]);
    mex_assert((mxGPUGetComplexity(pr) == mxREAL),
	       (errId, "Input to cuda_mean_mex must be real"));
    class_id = mxGPUGetClassID(pr);
    n_vec = (size_t) mxGPUGetNumberOfElements(pr);
  }
  else {
    mex_assert(!mxIsComplex(prhs[0]), (errId, "Input must be real"));
    class_id = mxGetClassID(prhs[0]);
    n_vec = (size_t) mxGetNumberOfElements(prhs[0]);
  }
  mex_assert((class_id == mxSINGLE_CLASS || class_id == mxDOUBLE_CLASS),
	     (errId, "Input must be float (%d) of double (%d). Currnt type %d",
	      mxSINGLE_CLASS, mxDOUBLE_CLASS, class_id));
  if(is_gpu)
    mex_assert(mxGPUisVector(pr),
	       (errId, "Input should be a vector"));
  else
    mex_assert(mxIsVector(prhs[0]),
	       (errId, "Input should be a vector"));
  // Outputs are 1x1 CPU scalars of the same class as the input.
  plhs[0] = mxCreateNumericMatrix(1, 1, class_id, mxREAL);
  plhs[1] = mxCreateNumericMatrix(1, 1, class_id, mxREAL);
  if(is_gpu) {
    if(class_id == mxSINGLE_CLASS)
      calcGPU<float>(pr, n_vec, plhs);
    else
      calcGPU<double>(pr, n_vec, plhs);
    mxGPUDestroyGPUArray(pr);
  }
  else {
    if(class_id == mxSINGLE_CLASS)
      calcCPU<float>(prhs[0], n_vec, plhs);
    else
      calcCPU<double>(prhs[0], n_vec, plhs);
  }
  TIMER_STOP(Timers::TIMER_MEAN_STDV);
}
|
8e0325715d4693ef1cdd2ff2cf211abb62440729.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/transpose_op.h"
#include <limits>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
// Cuda memory is precious so let's do a lower ndim limit.
#define COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS 5
namespace {
// TODO(jiayq): one possible optimization is to copy the buffer into a shared
// memory location to speed up access.
// Generic N-dimensional transpose.  `buffer` packs three int arrays of length
// num_axes: input dims, output dims, and the axis permutation.  Each thread
// decomposes its flat input index into per-axis coordinates, permutes them,
// and recomposes the flat output index.  num_axes must be at most
// COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS (size of the local coordinate array).
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data,
Dtype* to_data, const int* buffer, const int num_axes) {
int from_inds[COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS];
const int* from_counts = buffer;
const int* to_counts = buffer + num_axes;
const int* axes = buffer + num_axes * 2;
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int from_index = index, to_index = 0;
// Decompose the flat index into per-axis coordinates (innermost first).
for (int i = num_axes - 1; i >= 0; --i) {
from_inds[i] = from_index % from_counts[i];
from_index = from_index / from_counts[i];
}
// Horner-style recomposition of the permuted coordinates.
for (int i = 0; i < num_axes - 1; i++) {
to_index = (to_index + from_inds[axes[i]]) * to_counts[i + 1];
}
to_index += from_inds[axes[num_axes - 1]];
to_data[to_index] = from_data[index];
}
}
} // namespace
// GPU Transpose: packs (input dims, output dims, axis permutation) into one
// CPU int buffer, copies it to the device, and launches transpose_gpu.
// Element count must fit in int32; ndim must stay below the kernel's
// compile-time coordinate-array size.
// NOTE(review): output dims are read here, so the output tensor is presumably
// already resized by shared base-class code before this runs — confirm.
template <>
template <typename T>
bool TransposeOp<CUDAContext>::DoRunWithType() {
const auto& input = Input(0);
auto* output = Output(0);
int count = input.size();
int ndim = input.ndim();
CAFFE_ENFORCE(count < std::numeric_limits<int>::max(),
"Transpose op on GPU only supports int32");
CAFFE_ENFORCE(ndim < COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS,
"Input ndim exceeds compile time max.");
// Buffer contains the following data:
// (1) the dimensions of the inputs
// (2) the dimension of the outputs
// (3) the axis mapping from inputs to outputs
TensorCPU buffer_cpu(vector<int>{3 * ndim});
int* buffer_data = buffer_cpu.mutable_data<int>();
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = input.dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = output->dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = axes_[i];
}
// Copy the dimension information to GPU.
buffer_.CopyFrom(buffer_cpu, &context_);
hipLaunchKernelGGL(( transpose_gpu<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
count, input.template data<T>(), output->template mutable_data<T>(),
buffer_.data<int>(), ndim);
return true;
}
REGISTER_CUDA_OPERATOR(Transpose, TransposeOp<CUDAContext>);
} // namespace caffe2
| 8e0325715d4693ef1cdd2ff2cf211abb62440729.cu | #include "caffe2/operators/transpose_op.h"
#include <limits>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
// Cuda memory is precious so let's do a lower ndim limit.
#define COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS 5
namespace {
// TODO(jiayq): one possible optimization is to copy the buffer into a shared
// memory location to speed up access.
// Generic N-dimensional transpose.  `buffer` packs three int arrays of length
// num_axes: input dims, output dims, and the axis permutation.  Each thread
// decomposes its flat input index into per-axis coordinates, permutes them,
// and recomposes the flat output index.  num_axes must be at most
// COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS (size of the local coordinate array).
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data,
Dtype* to_data, const int* buffer, const int num_axes) {
int from_inds[COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS];
const int* from_counts = buffer;
const int* to_counts = buffer + num_axes;
const int* axes = buffer + num_axes * 2;
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int from_index = index, to_index = 0;
// Decompose the flat index into per-axis coordinates (innermost first).
for (int i = num_axes - 1; i >= 0; --i) {
from_inds[i] = from_index % from_counts[i];
from_index = from_index / from_counts[i];
}
// Horner-style recomposition of the permuted coordinates.
for (int i = 0; i < num_axes - 1; i++) {
to_index = (to_index + from_inds[axes[i]]) * to_counts[i + 1];
}
to_index += from_inds[axes[num_axes - 1]];
to_data[to_index] = from_data[index];
}
}
} // namespace
// GPU Transpose: packs (input dims, output dims, axis permutation) into one
// CPU int buffer, copies it to the device, and launches transpose_gpu.
// Element count must fit in int32; ndim must stay below the kernel's
// compile-time coordinate-array size.
// NOTE(review): output dims are read here, so the output tensor is presumably
// already resized by shared base-class code before this runs — confirm.
template <>
template <typename T>
bool TransposeOp<CUDAContext>::DoRunWithType() {
const auto& input = Input(0);
auto* output = Output(0);
int count = input.size();
int ndim = input.ndim();
CAFFE_ENFORCE(count < std::numeric_limits<int>::max(),
"Transpose op on GPU only supports int32");
CAFFE_ENFORCE(ndim < COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS,
"Input ndim exceeds compile time max.");
// Buffer contains the following data:
// (1) the dimensions of the inputs
// (2) the dimension of the outputs
// (3) the axis mapping from inputs to outputs
TensorCPU buffer_cpu(vector<int>{3 * ndim});
int* buffer_data = buffer_cpu.mutable_data<int>();
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = input.dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = output->dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = axes_[i];
}
// Copy the dimension information to GPU.
buffer_.CopyFrom(buffer_cpu, &context_);
transpose_gpu<T><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
count, input.template data<T>(), output->template mutable_data<T>(),
buffer_.data<int>(), ndim);
return true;
}
REGISTER_CUDA_OPERATOR(Transpose, TransposeOp<CUDAContext>);
} // namespace caffe2
|
1c3ca0718efa9bde1d50e86f6fb360e0599d7975.hip | // !!! This is a file automatically generated by hipify!!!
#include "md.h"
#include <cstdio>
#include <cmath>
#include <string>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <sm_20_atomic_functions.h>
#include <device_launch_parameters.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/sort.h>
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include "hip/device_functions.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "memory.h"
#include "random.h"
// ---- Simulation parameters (host globals) ----
// Particle mass and its reciprocal (cached for the integrator).
float m = 1; float inv_m = 1. / m;
// Timestep and half-timestep (velocity-Verlet style half kicks).
float dt = 0.0005;
float hdt = 0.5 * dt;
// Lennard-Jones parameters; A..D are presumably derived force/energy
// coefficients filled in at setup time — TODO confirm where they are set.
float sigma = 1, epsilon = 1, A, B, C, D;
// Pair-interaction cutoff (and its square, used in the force kernel).
float dcut = 2.5 * sigma;
float dcut2 = dcut * dcut;
// Simulation clock and thermostat coupling rate.
float t = 0.0;
float tau = 0.01 / dt;
// Boltzmann constant (reduced units), kinetic and potential energy tallies.
float kB = 1., ke = 0.0, pe = 0.0;
// Instantaneous temperature; T0/Tt are the start/target temperatures of a
// heating ramp at rate Crate.
float T;
float T0 = 0.5, Tt = 10.5;
int Crate = 20;
// Output frequency, steps per ramp stage, and number of stages.
int ifreq = 10, nstep = 100;
int N = (Tt - T0) / (Crate * dt * nstep);
int nunit = 4;
int nall = 4;
// Element label used when writing trajectory frames.
char Atom[3] = "Cr";
Memory* M = new Memory();;
RanPark* rnd = new RanPark(1234);;
// Box: ncell unit cells of edge a0 per axis; L is the box length, hL its half
// (minimum-image convention in the force kernel).
float ncell[3] = { 8.0f, 8.0f, 8.0f };
float a0[3] = { 1.5f, 1.5f, 1.5f };
glm::vec3 L(ncell[0] * a0[0], ncell[1] * a0[1], ncell[2] * a0[2]);
glm::vec3 hL(L[0] / 2, L[1] / 2, L[2] / 2);
// Device/host particle state arrays.
glm::vec3 *dev_vel = nullptr;
glm::vec3 *dev_pos = nullptr;
glm::vec3 *dev_force = nullptr;
glm::vec3 *h_vel = nullptr;
glm::vec3 *h_pos = nullptr;
glm::vec3 *h_force = nullptr;
// Scratch buffers for the kinetic-energy reduction and the device PE tally.
float *ke_idata = nullptr;
float *ke_odata = nullptr;
float *dev_pe = nullptr;
// Threads per block; the reduction kernels require this to be a power of two.
const int threads = 256;
// Reordered copies used by the uniform-grid neighbor structure.
glm::vec3 *dev_vel_reorder = nullptr;
glm::vec3 *dev_pos_reorder = nullptr;
glm::vec3 *dev_force_reorder = nullptr;
// Uniform-grid bookkeeping for spatial binning.
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
int *dev_gridCellStartIndices;
int *dev_gridCellEndIndices;
int *dev_particleArrayIndices; // What index in dev_pos and dev_vel represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
// Host-side RNG and per-thread device RNG states.
thrust::minstd_rand rng;
thrust::uniform_real_distribution<float> unitDistrib(-0.5, 0.5);
// Events used to time the simulation loop.
hipEvent_t startSim;
hipEvent_t endSim;
hiprandState_t* states = nullptr;
////////////////////////////////////////////// Velocity //////////////////////////////////////////////
// Initializes each particle's velocity with three independent uniform draws
// in [-0.5, 0.5), using that particle's hiprand state.  Draw order is x, y, z.
__global__ void kernInitVel(int n, glm::vec3 *vel, hiprandState_t* states)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    hiprandState_t *st = &(states[idx]);
    const float vx = hiprand_uniform(st) - 0.5f;
    const float vy = hiprand_uniform(st) - 0.5f;
    const float vz = hiprand_uniform(st) - 0.5f;
    vel[idx] = glm::vec3(vx, vy, vz);
}
// Subtracts a constant vector from every velocity (used to remove the
// center-of-mass drift).
__global__ void kernVelMinus(int n, glm::vec3 *vel, glm::vec3 mon) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n)
        vel[idx] -= mon;
}
// Scales every velocity by a scalar factor (thermostat rescaling).
__global__ void kernVelMultiply(int n, glm::vec3 *vel, float gama) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n)
        vel[idx] *= gama;
}
// Writes |v|^2 for every velocity into d_odata (input to the kinetic-energy
// reduction).
__global__ void kernComputeDotProduct(int n, glm::vec3 *vel, float *d_odata)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    const glm::vec3 v = vel[idx];
    d_odata[idx] = glm::dot(v, v);
}
//////////////////////////////////// reduce energy //////////////////////////////////////////////////////////
// One block-level pass of a shared-memory tree reduction (sum) over floats.
// Requires blockDim.x to be a power of two and dynamic shared memory of
// blockDim.x * sizeof(float).  Writes one partial sum per block to d_odata.
//
// BUG FIX: threads with idx >= n previously left their shared-memory slot
// uninitialized, so the tree sum in the last (partial) block folded garbage
// into the result.  They now contribute an explicit 0.
__global__ void reduce_energy(const float* d_idata, float* d_odata, int n)
{
    extern __shared__ float shm[];
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    shm[threadIdx.x] = (idx < n) ? d_idata[idx] : 0.0f;
    __syncthreads();
    // Pairwise tree reduction; every thread reaches every barrier.
    for (int c = blockDim.x / 2; c > 0; c >>= 1)
    {
        if (threadIdx.x < c)
            shm[threadIdx.x] += shm[threadIdx.x + c];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        d_odata[blockIdx.x] = shm[0];
}
// Recursively sums `elements` device floats: each kernel pass shrinks the
// problem by a factor of blockDim, and once fewer than `threads` partial sums
// remain they are copied to the host and added there.  d_odata must hold at
// least ceil(elements / threads) floats.
//
// BUG FIX: the base case previously copied from d_odata, which on a FIRST
// call with elements < threads has never been written.  It now copies from
// d_idata (the current input); recursive calls pass d_idata == d_odata, so
// their behavior is unchanged.
float reduce_energy_wrapper(const float *d_idata, float *d_odata, const int elements)
{
    int dimThreads = threads;
    int dimBlocks = (elements + dimThreads - 1) / (dimThreads);
    if (elements < dimThreads) {
        // Few enough values left: finish the sum on the host.
        float *h_blocks = (float *)malloc(elements * sizeof(float));
        hipMemcpy(h_blocks, d_idata, elements * sizeof(float), hipMemcpyDeviceToHost);
        float gpu_result = 0;
        for (int i = 0; i < elements; i++)
            gpu_result += h_blocks[i];
        free(h_blocks);
        return gpu_result;
    }
    else {
        reduce_energy << <dimBlocks, dimThreads, sizeof(float) * dimThreads >> >(d_idata, d_odata, elements);
        // Reduce the per-block partial sums in place.
        return reduce_energy_wrapper(d_odata, d_odata, dimBlocks);
    }
}
/////////////////////////////////////////// Reduce velocity //////////////////////////////////////////////
// One block-level pass of a shared-memory tree reduction (vector sum) over
// glm::vec3 values.  Requires blockDim.x to be a power of two and dynamic
// shared memory of blockDim.x * sizeof(glm::vec3).  One partial sum per block.
//
// BUG FIX: threads with idx >= n previously left their shared-memory slot
// uninitialized, so the last (partial) block summed garbage.  They now
// contribute an explicit zero vector.
__global__ void vel_reduce(const glm::vec3* d_idata, glm::vec3* d_odata, int n)
{
    extern __shared__ glm::vec3 smem[];
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    smem[threadIdx.x] = (idx < n) ? d_idata[idx] : glm::vec3(0.0f, 0.0f, 0.0f);
    __syncthreads();
    // Pairwise tree reduction; every thread reaches every barrier.
    for (int c = blockDim.x / 2; c > 0; c >>= 1)
    {
        if (threadIdx.x < c)
            smem[threadIdx.x] += smem[threadIdx.x + c];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        d_odata[blockIdx.x] = smem[0];
}
// Recursively sums `elements` device glm::vec3 values (same scheme as
// reduce_energy_wrapper): kernel passes shrink the problem; the final partial
// sums are added on the host.
//
// BUG FIX: the base case previously copied from d_odata, which on a FIRST
// call with elements < threads has never been written.  It now copies from
// d_idata; recursive calls pass d_idata == d_odata, so their behavior is
// unchanged.
glm::vec3 vel_reduce_wrapper(const glm::vec3 *d_idata, glm::vec3 *d_odata, const int elements)
{
    int dimThreads = threads;
    int dimBlocks = (elements + dimThreads - 1) / (dimThreads);
    if (elements < dimThreads) {
        // Few enough values left: finish the sum on the host.
        glm::vec3 *h_blocks = (glm::vec3 *)malloc(elements * sizeof(glm::vec3));
        hipMemcpy(h_blocks, d_idata, elements * sizeof(glm::vec3), hipMemcpyDeviceToHost);
        glm::vec3 gpu_result = glm::vec3(0.0f, 0.0f, 0.0f);
        for (int i = 0; i < elements; i++)
            gpu_result += h_blocks[i];
        free(h_blocks);
        return gpu_result;
    }
    else {
        vel_reduce << <dimBlocks, dimThreads, sizeof(glm::vec3) * dimThreads >> >(d_idata, d_odata, elements);
        // Reduce the per-block partial sums in place.
        return vel_reduce_wrapper(d_odata, d_odata, dimBlocks);
    }
}
////////////////////////////////////////////////////
// Half-kick of velocity-Verlet: v += F * (1/m) * (dt/2).
__global__ void kernNaiveVelocityIntegration(int n, glm::vec3 *vel, glm::vec3 *force, float inv_m, float hdt)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		vel[i] += force[i] * inv_m * hdt;
}
// Drift step of velocity-Verlet: x += v * dt.
__global__ void kernNaivePositionIntegration(int n, glm::vec3 *pos, glm::vec3 *vel, float dt)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		pos[i] += dt * vel[i];
}
// All-pairs Lennard-Jones + Langevin force kernel, one thread per particle.
// Each thread accumulates its OWN total force by looping over every other
// particle, so the symmetric update "force[i] += dx" in the original both
// double-counted the pair force and raced with other threads writing
// force[i]; it is removed here.  Note *pe still counts every pair twice
// (each member of a pair adds it once).
__global__ void kernNaiveForce(int n, glm::vec3 *pos, glm::vec3 *force, glm::vec3 *vel, float coef, glm::vec3 hL, glm::vec3 L,
	float dcut2, float A, float B, float C, float D, float m, float tau, float *pe, hiprandState_t* states)
{
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	if (tid >= n) return;
	// One uniform sample per component for the thermostat's random kick.
	float wx = hiprand_uniform(&(states[tid])) - 0.5f;
	float wy = hiprand_uniform(&(states[tid])) - 0.5f;
	float wz = hiprand_uniform(&(states[tid])) - 0.5f;
	glm::vec3 w = glm::vec3(wx, wy, wz);
	for (int i = 0; i < n; i++) {
		if (i == tid) continue;
		glm::vec3 dx = pos[i] - pos[tid];
		// Minimum-image convention for the periodic box.
		while (dx.x > hL.x) dx.x -= L.x;
		while (dx.y > hL.y) dx.y -= L.y;
		while (dx.z > hL.z) dx.z -= L.z;
		while (-dx.x > hL.x) dx.x += L.x;
		while (-dx.y > hL.y) dx.y += L.y;
		while (-dx.z > hL.z) dx.z += L.z;
		float r2 = glm::dot(dx, dx);
		float r6 = r2 * r2 *r2;
		float r12 = r6 * r6;
		if (r2 < dcut2) {
			// LJ force divided by r^2, applied along the separation vector.
			dx *= (A * 1. / r12 + B * 1. / r6) / r2;
			force[tid] -= dx;
			atomicAdd(pe, C * 1. / r12 + D * 1. / r6);
		}
	}
	// The original __syncthreads() here was removed: no shared memory is
	// used, and a barrier after the divergent early return above is
	// undefined behavior when n is not a multiple of blockDim.x.
	force[tid] += -m * tau * vel[tid] + coef * w;
}
// CPU reference for the pairwise LJ force plus Langevin kick.  Unlike the
// GPU kernels this visits each (i, j) pair exactly once, so `pe` here is
// the true (not double-counted) potential energy.
void forceCPU(int k) {
	float TT = T0;
	float Gc = (Tt - T0) / (N - 1);	// temperature increment per ramp stage
	TT = TT + k * Gc;
	pe = 0.0;
	float coef = sqrt(24. * tau * m * kB * TT / dt);	// thermostat noise amplitude
	float dcut = 2.5 * sigma;
	float dcut2 = dcut * dcut;
	for (int i = 0; i < nall; i++){
		h_force[i] = glm::vec3(0.0f, 0.0f, 0.0f);
	}
	for (int i = 0; i < nall - 1; i++) {
		for (int j = i + 1; j < nall; j++) {
			glm::vec3 dx = h_pos[j] - h_pos[i];
			// Minimum-image wrap.  The original repeated these while-loops
			// inside a redundant 3-iteration loop whose index shadowed the
			// parameter k; one pass suffices since each while already runs
			// to convergence.
			while (dx.x > hL.x) dx.x -= L.x;
			while (dx.y > hL.y) dx.y -= L.y;
			while (dx.z > hL.z) dx.z -= L.z;
			while (dx.x < -hL.x) dx.x += L.x;
			while (dx.y < -hL.y) dx.y += L.y;
			while (dx.z < -hL.z) dx.z += L.z;
			float r2 = glm::dot(dx, dx);
			float r6 = r2 * r2 *r2;
			float r12 = r6 * r6;
			if (r2 < dcut2) {
				dx *= (A * 1. / r12 + B * 1. / r6) / r2;
				h_force[i] -= dx;
				h_force[j] += dx;	// Newton's third law
				pe += C * 1. / r12 + D * 1. / r6;
			}
		}
	}
	for (int i = 0; i < nall; i++) {
		float a = (float)unitDistrib(rng);
		float b = (float)unitDistrib(rng);
		float c = (float)unitDistrib(rng);
		glm::vec3 w(a, b, c);
		w *= coef;
		h_force[i] += -m * tau * h_vel[i] + w;
	}
}
// Wrap every position back into the primary periodic box [0, L).
__global__ void kernUpdatePos(int n, glm::vec3 *pos, glm::vec3 L) {
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	// Original guard was `tid > n`, which let thread tid == n read and
	// write one element past the end of pos.
	if (tid >= n) return;
	while (pos[tid].x > L.x) pos[tid].x -= L.x;
	while (pos[tid].y > L.y) pos[tid].y -= L.y;
	while (pos[tid].z > L.z) pos[tid].z -= L.z;
	while (pos[tid].x < 0) pos[tid].x += L.x;
	while (pos[tid].y < 0) pos[tid].y += L.y;
	while (pos[tid].z < 0) pos[tid].z += L.z;
}
// Fill the first N slots of intBuffer with `value` (used to reset the
// grid-cell start/end tables to their "empty" sentinels).
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	if (tid >= N)
		return;
	intBuffer[tid] = value;
}
// Map a 3D cell coordinate to a flat index, wrapping each axis into
// [0, gridResolution) so periodic neighbor cells stay in range.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
	const int r = gridResolution;
	const int wx = ((x % r) + r) % r;
	const int wy = ((y % r) + r) % r;
	const int wz = ((z % r) + r) % r;
	return wx + r * (wy + r * wz);
}
// Flat grid-cell id of a particle position, measured from gridMin.
__device__ int computerGridID(int gridResolution, glm::vec3 &gridMin,
	glm::vec3 *m_pos, float inverseCellWidth) {
	glm::vec3 scaled = (*m_pos - gridMin) * inverseCellWidth;
	const int cx = static_cast<int>(scaled.x);
	const int cy = static_cast<int>(scaled.y);
	const int cz = static_cast<int>(scaled.z);
	return gridIndex3Dto1D(cx, cy, cz, gridResolution);
}
// For each particle, record its own index and the id of the grid cell
// containing it (inputs to the sort that builds the cell list).
__global__ void kernComputeIndices(int N, int gridResolution,
	glm::vec3 gridMin, float inverseCellWidth,
	glm::vec3 *pos, int *indices, int *gridIndices) {
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= N)
		return;
	indices[tid] = tid;
	gridIndices[tid] = computerGridID(gridResolution, gridMin, &pos[tid], inverseCellWidth);
}
// Gather pass: pos_reorder[i] = pos[particleArrayIndices[i]].
// `mark` is accepted but unused (caller passes a debug tag).
__global__ void kernReorderPos(int N, glm::vec3 *pos_reorder, glm::vec3 *pos, int *particleArrayIndices, int mark)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= N)
		return;
	pos_reorder[tid] = pos[particleArrayIndices[tid]];
}
// Record, for every occupied grid cell, the first and last index of the
// cell-sorted particle array.  particleGridIndices must be sorted ascending.
// The original returned early at index >= N - 1, so the end index of the
// last cell was never written (it kept the -2 sentinel and its particles
// were skipped); a one-particle first cell also never got its end set.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
	int *gridCellStartIndices, int *gridCellEndIndices) {
	int index = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (index >= N)
		return;
	int cell = particleGridIndices[index];
	if (index == 0)
		gridCellStartIndices[cell] = 0;
	if (index == N - 1)
		gridCellEndIndices[cell] = N - 1;
	else if (cell != particleGridIndices[index + 1])
	{
		// Boundary between two cells: close this one, open the next.
		gridCellStartIndices[particleGridIndices[index + 1]] = index + 1;
		gridCellEndIndices[cell] = index;
	}
}
// Fill cells[0..7] with the flat ids of the 2x2x2 block of grid cells
// nearest to m_pos: the home cell plus its neighbors in the direction of
// whichever half of the cell the particle occupies.
__device__ void computeNeighbors(int *cells, glm::vec3 &m_pos, glm::vec3 &gridMin,
	float inverseCellWidth, int gridResolution)
{
	glm::vec3 scaled = (m_pos - gridMin) * inverseCellWidth;
	const int cx = (int)scaled.x;
	const int cy = (int)scaled.y;
	const int cz = (int)scaled.z;
	// Step toward the closer side of the cell along each axis.
	const int sx = (scaled.x - cx >= 0.5f) ? 1 : -1;
	const int sy = (scaled.y - cy >= 0.5f) ? 1 : -1;
	const int sz = (scaled.z - cz >= 0.5f) ? 1 : -1;
	cells[0] = gridIndex3Dto1D(cx, cy, cz, gridResolution);
	cells[1] = gridIndex3Dto1D(cx + sx, cy, cz, gridResolution);
	cells[2] = gridIndex3Dto1D(cx, cy + sy, cz, gridResolution);
	cells[3] = gridIndex3Dto1D(cx, cy, cz + sz, gridResolution);
	cells[4] = gridIndex3Dto1D(cx + sx, cy + sy, cz, gridResolution);
	cells[5] = gridIndex3Dto1D(cx, cy + sy, cz + sz, gridResolution);
	cells[6] = gridIndex3Dto1D(cx + sx, cy, cz + sz, gridResolution);
	cells[7] = gridIndex3Dto1D(cx + sx, cy + sy, cz + sz, gridResolution);
}
// Cell-list LJ + Langevin force kernel.  pos/vel must be in cell-sorted
// order.  Visits the 2x2x2 cell neighborhood of each particle.  *pe counts
// every pair twice (once per member), matching the pe / 2.0 at the print
// site in stepCoherent.
__global__ void kernCoherentforce(int n, glm::vec3 *pos, glm::vec3 *force, glm::vec3 *vel, float coef, glm::vec3 hL, glm::vec3 L,
	float dcut2, float A, float B, float C, float D, float m, float tau, float *pe, hiprandState_t* states,
	int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices)
{
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	// Original guard was `tid > n`, letting thread tid == n index out of bounds.
	if (tid >= n) return;
	glm::vec3 m_pos = pos[tid];
	int cells[8];
	computeNeighbors(cells, m_pos, gridMin, inverseCellWidth, gridResolution);
#pragma unroll
	for (int i = 0; i < 8; i++) {
		int gridCellCount = gridResolution * gridResolution * gridResolution;
		if (cells[i] < 0 || cells[i] >= gridCellCount) continue;
		// Empty cells carry start = -1, end = -2, so the loop below is skipped.
		int start_idx = gridCellStartIndices[cells[i]];
		int end_idx = gridCellEndIndices[cells[i]];
		for (int j = start_idx; j <= end_idx; j++) {
			if (j == tid) continue;
			glm::vec3 dx = pos[j] - pos[tid];
			// Minimum-image convention for the periodic box.
			while (dx.x > hL.x) dx.x -= L.x;
			while (dx.y > hL.y) dx.y -= L.y;
			while (dx.z > hL.z) dx.z -= L.z;
			while (-dx.x > hL.x) dx.x += L.x;
			while (-dx.y > hL.y) dx.y += L.y;
			while (-dx.z > hL.z) dx.z += L.z;
			float r2 = glm::dot(dx, dx);
			float r6 = r2 * r2 *r2;
			float r12 = r6 * r6;
			if (r2 < dcut2) {
				dx *= (A * 1. / r12 + B * 1. / r6) / r2;
				force[tid] -= dx;
				atomicAdd(pe, C * 1. / r12 + D * 1. / r6);
			}
		}
	}
	// The original __syncthreads() here was removed: no shared memory is
	// used, and the divergent early return above makes the barrier UB.
	float wx = hiprand_uniform(&(states[tid])) - 0.5f;
	float wy = hiprand_uniform(&(states[tid])) - 0.5f;
	float wz = hiprand_uniform(&(states[tid])) - 0.5f;
	glm::vec3 w = glm::vec3(wx, wy, wz);
	force[tid] += -m * tau * vel[tid] + coef * w;
}
// Build the uniform grid, sort particles into cell order, and evaluate
// forces with the cell-list kernel.  The kernel writes forces in reordered
// layout, so the reordered pos/vel buffers are swapped in as primary.
void coherentForce(int k)
{
	int dimThreads = threads;
	int dimBlocks = (gridCellCount + threads - 1) / threads;
	// Reset cell tables to "empty" sentinels (start = -1 > end = -2).
	kernResetIntBuffer << <dimBlocks, dimThreads >> >(gridCellCount, dev_gridCellStartIndices, -1);
	kernResetIntBuffer << <dimBlocks, dimThreads >> >(gridCellCount, dev_gridCellEndIndices, -2);
	dimBlocks = (nall + dimThreads - 1) / dimThreads;
	kernComputeIndices << <dimBlocks, dimThreads >> >(nall, gridSideCount, gridMinimum,
		gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
	// Sort particle indices by cell id, then gather pos/vel into that order.
	thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + nall, dev_thrust_particleArrayIndices);
	kernReorderPos << <dimBlocks, dimThreads >> > (nall, dev_pos_reorder, dev_pos, dev_particleArrayIndices, 1);
	kernReorderPos << <dimBlocks, dimThreads >> > (nall, dev_vel_reorder, dev_vel, dev_particleArrayIndices, 2);
	kernIdentifyCellStartEnd << <dimBlocks, dimThreads >> >(nall, dev_particleGridIndices,
		dev_gridCellStartIndices, dev_gridCellEndIndices);
	// hipMemset takes an int byte value; pass 0 instead of the misleading
	// 0.0f the original used (it silently converted to int 0 anyway).
	hipMemset(dev_force, 0, sizeof(glm::vec3) * nall);
	hipMemset(dev_pe, 0, sizeof(float));
	float TT = T0 + (Tt - T0) / (N - 1) * k;	// ramped target temperature
	float coef = sqrt(24. * tau * m * kB * TT / dt);
	kernCoherentforce << <dimBlocks, dimThreads >> > (nall, dev_pos_reorder, dev_force, dev_vel_reorder, coef, hL, L, dcut2, A, B, C, D, m, tau, dev_pe, states,
		gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices);
	// Adopt the reordered buffers as current; the old ones become scratch.
	glm::vec3 *tmp = dev_vel;
	dev_vel = dev_vel_reorder;
	dev_vel_reorder = tmp;
	tmp = dev_pos;
	dev_pos = dev_pos_reorder;
	dev_pos_reorder = tmp;
}
// Seed one RNG state per particle (sequence id = thread index).
__global__ void kernInitRandom(int n, unsigned int seed, hiprandState_t* states)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		hiprand_init(seed, i, 0, &states[i]);
}
// Allocate all device/host buffers, lay out an FCC lattice of particles,
// draw initial velocities, rescale them to the start temperature T0, and
// compute the initial forces.  Must be called once before MD_run().
// `ratio` sets sigma = 1/ratio; `cellsize` is the unit-cell count per axis.
void MD::MD_init(int ratio, int cellsize)
{
	//init variables
	ncell[0] = (float)cellsize;
	ncell[1] = (float)cellsize;
	ncell[2] = (float)cellsize;
	sigma = 1.f / ratio;
	float sigma3 = sigma * sigma * sigma;
	float sigma6 = sigma3 * sigma3;
	float sigma12 = sigma6 * sigma6;
	// Precomputed Lennard-Jones coefficients: A,B for the force, C,D for
	// the potential energy.
	A = 48. * epsilon * sigma12;
	B = -24. * epsilon * sigma6;
	C = 4. * epsilon * sigma12;
	D = -4. * epsilon * sigma6;
	hipEventCreate(&startSim);
	hipEventCreate(&endSim);
	// nall starts at nunit (4 atoms per FCC cell) and scales by cell counts.
	for (int i = 0; i < 3; i++) nall *= ncell[i];
	int dimThreads = threads;
	int dimBlocks = (nall + dimThreads - 1) / (dimThreads);
	hipMalloc((void**)&dev_pe, sizeof(float));
	hipMalloc((void**)&dev_pos, nall * sizeof(glm::vec3));
	hipMalloc((void**)&dev_vel, nall * sizeof(glm::vec3));
	hipMalloc((void**)&dev_force, nall * sizeof(glm::vec3));
	hipMalloc((void**)&ke_idata, nall * sizeof(glm::vec3));
	hipMalloc((void**)&ke_odata, dimBlocks * sizeof(glm::vec3));
	hipMalloc((void**)&states, nall * sizeof(hiprandState_t));
	hipMalloc((void **)&dev_pos_reorder, nall * sizeof(glm::vec3));
	hipMalloc((void **)&dev_vel_reorder, nall * sizeof(glm::vec3));
	hipMalloc((void **)&dev_force_reorder, nall * sizeof(glm::vec3));
	kernInitRandom << <dimBlocks, dimThreads >> > (nall, 0, states);
	// Uniform-grid parameters for the cell-list path.
	gridCellWidth = 2.0f * dcut;
	gridSideCount = (int)(L.x / gridCellWidth) + 1;
	gridCellCount = gridSideCount * gridSideCount * gridSideCount;
	gridInverseCellWidth = 1.0f / gridCellWidth;
	gridMinimum.x = 0.f;
	gridMinimum.y = 0.f;
	gridMinimum.z = 0.f;
	hipMalloc((void**)&dev_particleGridIndices, nall * sizeof(int));
	hipMalloc((void**)&dev_particleArrayIndices, nall * sizeof(int));
	hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
	hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
	dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
	dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
	h_pos = new glm::vec3[nall];
	h_vel = new glm::vec3[nall];
	h_force = new glm::vec3[nall];
	//init cell and pos
	// FCC basis: four atoms per unit cell at the corner and face centers.
	float **cell = nullptr;
	M->create(cell, 4, 3, "cell");
	cell[0][0] = cell[0][1] = cell[0][2] = 0.0;
	cell[1][0] = 0.0; cell[1][1] = 0.5 * a0[1]; cell[1][2] = 0.5 * a0[2];
	cell[2][0] = 0.5 * a0[0]; cell[2][1] = 0.0; cell[2][2] = 0.5 * a0[2];
	cell[3][0] = 0.5 * a0[0]; cell[3][1] = 0.5 * a0[1]; cell[3][2] = 0.0;
	int ii = 0;
	for (int ix = 0; ix < ncell[0]; ix++) {
		for (int iy = 0; iy < ncell[1]; iy++) {
			for (int iz = 0; iz < ncell[2]; iz++) {
				for (int iu = 0; iu < nunit; iu++) {
					h_pos[ii].x = float(ix) * a0[0] + cell[iu][0];
					h_pos[ii].y = float(iy) * a0[1] + cell[iu][1];
					h_pos[ii].z = float(iz) * a0[2] + cell[iu][2];
					++ii;
				}
			}
		}
	}
	hipMemcpy(dev_pos, h_pos, nall * sizeof(glm::vec3), hipMemcpyHostToDevice);
	M->destroy(cell);
	//////////////////////////////
	// Random initial velocities, then subtract the mean so net momentum is zero.
	kernInitVel << <dimBlocks, dimThreads >> > (nall, dev_vel, states);
	glm::vec3 *vel_odata = nullptr;
	hipMalloc((void**)&vel_odata, dimBlocks * sizeof(glm::vec3));
	glm::vec3 mon = vel_reduce_wrapper(dev_vel, vel_odata, nall);
	hipFree(vel_odata);
	mon /= float(nall);
	kernVelMinus << <dimBlocks, dimThreads >> >(nall, dev_vel, mon);
	// Measure kinetic energy / temperature, then rescale velocities to T0.
	kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
	ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
	ke *= 0.5 * m; T = ke / (1.5 * float(nall) * kB);
	float gamma = sqrt(T0 / T);
	ke = 0.0;
	kernVelMultiply << <dimBlocks, dimThreads >> > (nall, dev_vel, gamma);
	kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
	ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
	ke *= 0.5 * m;
	T = ke / (1.5 * float(nall) * kB);
	// Initial force evaluation so the first velocity half-kick is valid.
	hipMemset(dev_force, 0, sizeof(glm::vec3) * nall);
	hipMemset(dev_pe, 0, sizeof(float));
	float coef = sqrt(24. * tau * m * kB * T0 / dt);
	kernNaiveForce << <dimBlocks, dimThreads >> >(nall, dev_pos, dev_force, dev_vel, coef, hL, L, dcut2, A, B, C, D, m, tau, dev_pe, states);
	hipMemcpy(&pe, dev_pe, sizeof(float), hipMemcpyDeviceToHost);
	printf("ke = %f pe = %f T = %f\n", ke, pe, T);
}
// Tear down every allocation made in MD_init.
void MD::MD_free()
{
	// Particle state buffers (primary + reorder scratch).
	hipFree(dev_pos);
	hipFree(dev_vel);
	hipFree(dev_force);
	hipFree(dev_pos_reorder);
	hipFree(dev_vel_reorder);
	hipFree(dev_force_reorder);
	// Reduction scratch, energy accumulator, RNG states.
	hipFree(ke_idata);
	hipFree(ke_odata);
	hipFree(dev_pe);
	hipFree(states);
	// Uniform-grid bookkeeping.
	hipFree(dev_particleGridIndices);
	hipFree(dev_particleArrayIndices);
	hipFree(dev_gridCellStartIndices);
	hipFree(dev_gridCellEndIndices);
	// Host mirrors and helper objects.
	delete[] h_pos;
	delete[] h_vel;
	delete[] h_force;
	delete M;
	delete rnd;
}
// Drive the full temperature ramp: one MD_Loop call per stage.
void MD::MD_run()
{
	for (int stage = 1; stage < N; ++stage)
		MD_Loop(stage);
}
// Run `nstep` velocity-Verlet steps for ramp stage k using the cell-list
// (coherent) force path, printing energies every `ifreq` steps.
void stepCoherent(int k) {
	int dimThreads = threads;
	int dimBlocks = (nall + dimThreads - 1) / dimThreads;
	hipEventRecord(startSim);
	for (int i = 0; i < nstep; i++) {
		// Half-kick, drift, wrap into box, recompute forces, half-kick.
		kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
		kernNaivePositionIntegration << <dimBlocks, dimThreads >> > (nall, dev_pos, dev_vel, dt);
		kernUpdatePos << <dimBlocks, dimThreads >> > (nall, dev_pos, L);
		coherentForce(k);
		kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
		if (i % ifreq == 0) {
			// Diagnostics: kinetic energy via reduction, potential from dev_pe.
			float TT = T0 + (Tt - T0) / (N - 1) * k;
			float coef = sqrt(24. * tau * m * kB * TT / dt);
			kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
			ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
			ke *= 0.5 * m; t += dt;
			T = ke / (1.5 * float(nall) * kB);
			hipMemcpy(&pe, dev_pe, sizeof(float), hipMemcpyDeviceToHost);
			// pe is halved here because the kernel counts each pair twice.
			printf("step %d ke %f pe %f T %f TT %f coef %f\n", i, ke, pe / 2.0, T, TT, coef);
		}
	}
	hipEventRecord(endSim);
	hipEventSynchronize(endSim);
	float timeElapsedMilliseconds = 0.0f;
	hipEventElapsedTime(&timeElapsedMilliseconds, startSim, endSim);
	printf("time run: %f\n", timeElapsedMilliseconds);
}
// Run `nstep` velocity-Verlet steps for ramp stage k using the O(n^2)
// all-pairs force kernel, printing energies every `ifreq` steps.
void stepNaive(int k) {
	int dimThreads = threads;
	int dimBlocks = (nall + dimThreads - 1) / (dimThreads);
	hipEventRecord(startSim);
	for (int i = 0; i < nstep; i++) {
		// Half-kick then drift.
		kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
		kernNaivePositionIntegration << <dimBlocks, dimThreads >> > (nall, dev_pos, dev_vel, dt);
		// Clear accumulators, then recompute forces at the ramped temperature.
		hipMemset(dev_force, 0.0f, sizeof(glm::vec3) * nall);
		hipMemset(dev_pe, 0.0f, sizeof(float));
		float TT = T0 + (Tt - T0) / (N - 1) * k;
		float coef = sqrt(24. * tau * m * kB * TT / dt);
		kernNaiveForce << <dimBlocks, dimThreads >> > (nall, dev_pos, dev_force, dev_vel,
			coef, hL, L, dcut2, A, B, C, D, m, tau, dev_pe, states);
		// Second half-kick with the new forces.
		kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
		if (i % ifreq == 0) {
			kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
			ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
			ke *= 0.5 * m; t += dt;
			T = ke / (1.5 * float(nall) * kB);
			hipMemcpy(&pe, dev_pe, sizeof(float), hipMemcpyDeviceToHost);
			// NOTE(review): the kernel accumulates pe once per pair member,
			// so the value printed here is 2x the per-pair sum — confirm
			// whether it should be halved like stepCoherent does.
			printf("step %d ke %f pe %f T %f TT %f coef %f\n", i, ke, pe, T, TT, coef);
		}
	}
	hipEventRecord(endSim);
	hipEventSynchronize(endSim);
	float timeElapsedMilliseconds = 0.0f;
	hipEventElapsedTime(&timeElapsedMilliseconds, startSim, endSim);
	printf("time run: %f\n", timeElapsedMilliseconds);
}
// One ramp stage: currently runs the O(n^2) naive integrator; switch to
// stepCoherent(k) to use the uniform-grid cell-list path instead.
void MD::MD_Loop(int k)
{
	//stepCoherent(k);
	stepNaive(k);
}
| 1c3ca0718efa9bde1d50e86f6fb360e0599d7975.cu | #include "md.h"
#include <cstdio>
#include <cmath>
#include <string>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sm_20_atomic_functions.h>
#include <device_launch_parameters.h>
#include <curand_kernel.h>
#include <thrust/sort.h>
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include "device_functions.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "memory.h"
#include "random.h"
// ------------- Simulation parameters and global state -------------
float m = 1; float inv_m = 1. / m;           // particle mass and its inverse
float dt = 0.0005;                           // integration timestep
float hdt = 0.5 * dt;                        // half step for velocity-Verlet
float sigma = 1, epsilon = 1, A, B, C, D;    // LJ parameters; A..D set in MD_init
float dcut = 2.5 * sigma;                    // interaction cutoff radius
float dcut2 = dcut * dcut;
float t = 0.0;                               // simulated time
float tau = 0.01 / dt;                       // Langevin friction coefficient
float kB = 1., ke = 0.0, pe = 0.0;           // Boltzmann const; kinetic/potential energy
float T;                                     // instantaneous temperature
float T0 = 0.5, Tt = 10.5;                   // start / final ramp temperature
int Crate = 20;                              // heating rate
int ifreq = 10, nstep = 100;                 // report frequency; steps per stage
int N = (Tt - T0) / (Crate * dt * nstep);    // number of ramp stages
int nunit = 4;                               // atoms per FCC unit cell
int nall = 4;                                // total atom count (scaled in MD_init)
char Atom[3] = "Cr";
Memory* M = new Memory();                    // (stray ';;' removed)
RanPark* rnd = new RanPark(1234);            // (stray ';;' removed)
float ncell[3] = { 8.0f, 8.0f, 8.0f };       // unit cells per axis (reset in MD_init)
float a0[3] = { 1.5f, 1.5f, 1.5f };          // lattice constants
glm::vec3 L(ncell[0] * a0[0], ncell[1] * a0[1], ncell[2] * a0[2]);  // box lengths
glm::vec3 hL(L[0] / 2, L[1] / 2, L[2] / 2);  // half box (minimum-image threshold)
glm::vec3 *dev_vel = nullptr;
glm::vec3 *dev_pos = nullptr;
glm::vec3 *dev_force = nullptr;
glm::vec3 *h_vel = nullptr;
glm::vec3 *h_pos = nullptr;
glm::vec3 *h_force = nullptr;
float *ke_idata = nullptr;                   // per-particle |v|^2 scratch
float *ke_odata = nullptr;                   // per-block reduction scratch
float *dev_pe = nullptr;
const int threads = 256;                     // block size for all launches
glm::vec3 *dev_vel_reorder = nullptr;
glm::vec3 *dev_pos_reorder = nullptr;
glm::vec3 *dev_force_reorder = nullptr;
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
int *dev_gridCellStartIndices;
int *dev_gridCellEndIndices;
int *dev_particleArrayIndices; // What index in dev_pos and dev_vel represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
thrust::minstd_rand rng;
thrust::uniform_real_distribution<float> unitDistrib(-0.5, 0.5);
cudaEvent_t startSim;
cudaEvent_t endSim;
curandState_t* states = nullptr;
////////////////////////////////////////////// Velocity //////////////////////////////////////////////
// Draw each component of the initial velocity from (-0.5, 0.5]
// (curand_uniform returns values in (0, 1]).
__global__ void kernInitVel(int n, glm::vec3 *vel, curandState_t* states)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	curandState_t *rs = &states[i];
	const float vx = curand_uniform(rs) - 0.5f;
	const float vy = curand_uniform(rs) - 0.5f;
	const float vz = curand_uniform(rs) - 0.5f;
	vel[i] = glm::vec3(vx, vy, vz);
}
// Subtract `mon` (the mean velocity) from every particle, zeroing net drift.
__global__ void kernVelMinus(int n, glm::vec3 *vel, glm::vec3 mon) {
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		vel[i] -= mon;
}
// Scale every velocity by `gama` (temperature rescaling factor).
__global__ void kernVelMultiply(int n, glm::vec3 *vel, float gama) {
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		vel[i] *= gama;
}
// d_odata[i] = |vel[i]|^2, the per-particle input to the KE reduction.
__global__ void kernComputeDotProduct(int n, glm::vec3 *vel, float *d_odata)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	const glm::vec3 v = vel[i];
	d_odata[i] = glm::dot(v, v);
}
//////////////////////////////////// reduce energy //////////////////////////////////////////////////////////
// Block-level tree reduction: sums blockDim.x floats into d_odata[blockIdx.x].
// Launch with dynamic shared memory of blockDim.x * sizeof(float);
// blockDim.x must be a power of two.
__global__ void reduce_energy(const float* d_idata, float* d_odata, int n)
{
	extern __shared__ float shm[];
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Zero-fill slots past the end of the input so the tail block does not
	// fold uninitialized shared memory into its partial sum (the original
	// left shm garbage for threads with idx >= n).
	shm[threadIdx.x] = (idx < n) ? d_idata[idx] : 0.0f;
	__syncthreads();
	for (int c = blockDim.x / 2; c > 0; c >>= 1)
	{
		if (threadIdx.x < c)
			shm[threadIdx.x] += shm[threadIdx.x + c];
		__syncthreads();
	}
	if (threadIdx.x == 0)
		d_odata[blockIdx.x] = shm[0];
}
// Recursively reduce `elements` floats in d_idata to a single host-side sum.
// Each GPU pass shrinks the data by a factor of blockDim.x; the final tail
// (< threads elements) is summed on the host.
float reduce_energy_wrapper(const float *d_idata, float *d_odata, const int elements)
{
	int dimThreads = threads;
	int dimBlocks = (elements + dimThreads - 1) / (dimThreads);
	if (elements < dimThreads) {
		// Copy the tail from d_idata, not d_odata: the two alias on recursive
		// calls, but a direct call with a small input never wrote d_odata,
		// so the original read uninitialized device memory in that case.
		float *h_blocks = (float *)malloc(elements * sizeof(float));
		cudaMemcpy(h_blocks, d_idata, elements * sizeof(float), cudaMemcpyDeviceToHost);
		float gpu_result = 0;
		for (int i = 0; i < elements; i++)
			gpu_result += h_blocks[i];
		free(h_blocks);
		return gpu_result;
	}
	else {
		reduce_energy << <dimBlocks, dimThreads, sizeof(float) * dimThreads >> >(d_idata, d_odata, elements);
		return reduce_energy_wrapper(d_odata, d_odata, dimBlocks);
	}
}
/////////////////////////////////////////// Reduce velocity //////////////////////////////////////////////
// Block-level tree reduction of glm::vec3 values; one partial sum per block.
// Launch with dynamic shared memory of blockDim.x * sizeof(glm::vec3);
// blockDim.x must be a power of two.
__global__ void vel_reduce(const glm::vec3* d_idata, glm::vec3* d_odata, int n)
{
	extern __shared__ glm::vec3 smem[];
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Zero-fill slots past the end of the input so the tail block does not
	// fold uninitialized shared memory into its partial sum.
	smem[threadIdx.x] = (idx < n) ? d_idata[idx] : glm::vec3(0.0f, 0.0f, 0.0f);
	__syncthreads();
	for (int c = blockDim.x / 2; c > 0; c >>= 1)
	{
		if (threadIdx.x < c)
			smem[threadIdx.x] += smem[threadIdx.x + c];
		__syncthreads();
	}
	if (threadIdx.x == 0)
		d_odata[blockIdx.x] = smem[0];
}
// Recursively reduce `elements` vec3 values in d_idata to a single host-side
// sum (used to compute total momentum).  Tail is summed on the host.
glm::vec3 vel_reduce_wrapper(const glm::vec3 *d_idata, glm::vec3 *d_odata, const int elements)
{
	int dimThreads = threads;
	int dimBlocks = (elements + dimThreads - 1) / (dimThreads);
	if (elements < dimThreads) {
		// Copy from d_idata, not d_odata: they alias on recursive calls, but
		// a direct call with a small input never wrote d_odata.
		glm::vec3 *h_blocks = (glm::vec3 *)malloc(elements * sizeof(glm::vec3));
		cudaMemcpy(h_blocks, d_idata, elements * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
		glm::vec3 gpu_result = glm::vec3(0.0f, 0.0f, 0.0f);
		for (int i = 0; i < elements; i++)
			gpu_result += h_blocks[i];
		free(h_blocks);
		return gpu_result;
	}
	else {
		vel_reduce << <dimBlocks, dimThreads, sizeof(glm::vec3) * dimThreads >> >(d_idata, d_odata, elements);
		return vel_reduce_wrapper(d_odata, d_odata, dimBlocks);
	}
}
////////////////////////////////////////////////////
// Half-kick of velocity-Verlet: v += F * (1/m) * (dt/2).
__global__ void kernNaiveVelocityIntegration(int n, glm::vec3 *vel, glm::vec3 *force, float inv_m, float hdt)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		vel[i] += force[i] * inv_m * hdt;
}
// Drift step of velocity-Verlet: x += v * dt.
__global__ void kernNaivePositionIntegration(int n, glm::vec3 *pos, glm::vec3 *vel, float dt)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		pos[i] += dt * vel[i];
}
// All-pairs Lennard-Jones + Langevin force kernel, one thread per particle.
// Each thread accumulates its OWN total force by looping over every other
// particle, so the symmetric update "force[i] += dx" in the original both
// double-counted the pair force and raced with other threads writing
// force[i]; it is removed here.  Note *pe still counts every pair twice
// (each member of a pair adds it once).
__global__ void kernNaiveForce(int n, glm::vec3 *pos, glm::vec3 *force, glm::vec3 *vel, float coef, glm::vec3 hL, glm::vec3 L,
	float dcut2, float A, float B, float C, float D, float m, float tau, float *pe, curandState_t* states)
{
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	if (tid >= n) return;
	// One uniform sample per component for the thermostat's random kick.
	float wx = curand_uniform(&(states[tid])) - 0.5f;
	float wy = curand_uniform(&(states[tid])) - 0.5f;
	float wz = curand_uniform(&(states[tid])) - 0.5f;
	glm::vec3 w = glm::vec3(wx, wy, wz);
	for (int i = 0; i < n; i++) {
		if (i == tid) continue;
		glm::vec3 dx = pos[i] - pos[tid];
		// Minimum-image convention for the periodic box.
		while (dx.x > hL.x) dx.x -= L.x;
		while (dx.y > hL.y) dx.y -= L.y;
		while (dx.z > hL.z) dx.z -= L.z;
		while (-dx.x > hL.x) dx.x += L.x;
		while (-dx.y > hL.y) dx.y += L.y;
		while (-dx.z > hL.z) dx.z += L.z;
		float r2 = glm::dot(dx, dx);
		float r6 = r2 * r2 *r2;
		float r12 = r6 * r6;
		if (r2 < dcut2) {
			// LJ force divided by r^2, applied along the separation vector.
			dx *= (A * 1. / r12 + B * 1. / r6) / r2;
			force[tid] -= dx;
			atomicAdd(pe, C * 1. / r12 + D * 1. / r6);
		}
	}
	// The original __syncthreads() here was removed: no shared memory is
	// used, and a barrier after the divergent early return above is
	// undefined behavior when n is not a multiple of blockDim.x.
	force[tid] += -m * tau * vel[tid] + coef * w;
}
// CPU reference for the pairwise LJ force plus Langevin kick.  Unlike the
// GPU kernels this visits each (i, j) pair exactly once, so `pe` here is
// the true (not double-counted) potential energy.
void forceCPU(int k) {
	float TT = T0;
	float Gc = (Tt - T0) / (N - 1);	// temperature increment per ramp stage
	TT = TT + k * Gc;
	pe = 0.0;
	float coef = sqrt(24. * tau * m * kB * TT / dt);	// thermostat noise amplitude
	float dcut = 2.5 * sigma;
	float dcut2 = dcut * dcut;
	for (int i = 0; i < nall; i++){
		h_force[i] = glm::vec3(0.0f, 0.0f, 0.0f);
	}
	for (int i = 0; i < nall - 1; i++) {
		for (int j = i + 1; j < nall; j++) {
			glm::vec3 dx = h_pos[j] - h_pos[i];
			// Minimum-image wrap.  The original repeated these while-loops
			// inside a redundant 3-iteration loop whose index shadowed the
			// parameter k; one pass suffices since each while already runs
			// to convergence.
			while (dx.x > hL.x) dx.x -= L.x;
			while (dx.y > hL.y) dx.y -= L.y;
			while (dx.z > hL.z) dx.z -= L.z;
			while (dx.x < -hL.x) dx.x += L.x;
			while (dx.y < -hL.y) dx.y += L.y;
			while (dx.z < -hL.z) dx.z += L.z;
			float r2 = glm::dot(dx, dx);
			float r6 = r2 * r2 *r2;
			float r12 = r6 * r6;
			if (r2 < dcut2) {
				dx *= (A * 1. / r12 + B * 1. / r6) / r2;
				h_force[i] -= dx;
				h_force[j] += dx;	// Newton's third law
				pe += C * 1. / r12 + D * 1. / r6;
			}
		}
	}
	for (int i = 0; i < nall; i++) {
		float a = (float)unitDistrib(rng);
		float b = (float)unitDistrib(rng);
		float c = (float)unitDistrib(rng);
		glm::vec3 w(a, b, c);
		w *= coef;
		h_force[i] += -m * tau * h_vel[i] + w;
	}
}
// Wrap every position back into the primary periodic box [0, L).
__global__ void kernUpdatePos(int n, glm::vec3 *pos, glm::vec3 L) {
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	// Original guard was `tid > n`, which let thread tid == n read and
	// write one element past the end of pos.
	if (tid >= n) return;
	while (pos[tid].x > L.x) pos[tid].x -= L.x;
	while (pos[tid].y > L.y) pos[tid].y -= L.y;
	while (pos[tid].z > L.z) pos[tid].z -= L.z;
	while (pos[tid].x < 0) pos[tid].x += L.x;
	while (pos[tid].y < 0) pos[tid].y += L.y;
	while (pos[tid].z < 0) pos[tid].z += L.z;
}
// Fill the first N slots of intBuffer with `value` (used to reset the
// grid-cell start/end tables to their "empty" sentinels).
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	if (tid >= N)
		return;
	intBuffer[tid] = value;
}
// Map a 3D cell coordinate to a flat index, wrapping each axis into
// [0, gridResolution) so periodic neighbor cells stay in range.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
	const int r = gridResolution;
	const int wx = ((x % r) + r) % r;
	const int wy = ((y % r) + r) % r;
	const int wz = ((z % r) + r) % r;
	return wx + r * (wy + r * wz);
}
// Flat grid-cell id of a particle position, measured from gridMin.
__device__ int computerGridID(int gridResolution, glm::vec3 &gridMin,
	glm::vec3 *m_pos, float inverseCellWidth) {
	glm::vec3 scaled = (*m_pos - gridMin) * inverseCellWidth;
	const int cx = static_cast<int>(scaled.x);
	const int cy = static_cast<int>(scaled.y);
	const int cz = static_cast<int>(scaled.z);
	return gridIndex3Dto1D(cx, cy, cz, gridResolution);
}
// For each particle, record its own index and the id of the grid cell
// containing it (inputs to the sort that builds the cell list).
__global__ void kernComputeIndices(int N, int gridResolution,
	glm::vec3 gridMin, float inverseCellWidth,
	glm::vec3 *pos, int *indices, int *gridIndices) {
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= N)
		return;
	indices[tid] = tid;
	gridIndices[tid] = computerGridID(gridResolution, gridMin, &pos[tid], inverseCellWidth);
}
// Gather pass: pos_reorder[i] = pos[particleArrayIndices[i]].
// `mark` is accepted but unused (caller passes a debug tag).
__global__ void kernReorderPos(int N, glm::vec3 *pos_reorder, glm::vec3 *pos, int *particleArrayIndices, int mark)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= N)
		return;
	pos_reorder[tid] = pos[particleArrayIndices[tid]];
}
// Record, for every occupied grid cell, the first and last index of the
// cell-sorted particle array.  particleGridIndices must be sorted ascending.
// The original returned early at index >= N - 1, so the end index of the
// last cell was never written (it kept the -2 sentinel and its particles
// were skipped); a one-particle first cell also never got its end set.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
	int *gridCellStartIndices, int *gridCellEndIndices) {
	int index = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (index >= N)
		return;
	int cell = particleGridIndices[index];
	if (index == 0)
		gridCellStartIndices[cell] = 0;
	if (index == N - 1)
		gridCellEndIndices[cell] = N - 1;
	else if (cell != particleGridIndices[index + 1])
	{
		// Boundary between two cells: close this one, open the next.
		gridCellStartIndices[particleGridIndices[index + 1]] = index + 1;
		gridCellEndIndices[cell] = index;
	}
}
// Fill cells[0..7] with the flat ids of the 2x2x2 block of grid cells
// nearest to m_pos: the home cell plus its neighbors in the direction of
// whichever half of the cell the particle occupies.
__device__ void computeNeighbors(int *cells, glm::vec3 &m_pos, glm::vec3 &gridMin,
	float inverseCellWidth, int gridResolution)
{
	glm::vec3 scaled = (m_pos - gridMin) * inverseCellWidth;
	const int cx = (int)scaled.x;
	const int cy = (int)scaled.y;
	const int cz = (int)scaled.z;
	// Step toward the closer side of the cell along each axis.
	const int sx = (scaled.x - cx >= 0.5f) ? 1 : -1;
	const int sy = (scaled.y - cy >= 0.5f) ? 1 : -1;
	const int sz = (scaled.z - cz >= 0.5f) ? 1 : -1;
	cells[0] = gridIndex3Dto1D(cx, cy, cz, gridResolution);
	cells[1] = gridIndex3Dto1D(cx + sx, cy, cz, gridResolution);
	cells[2] = gridIndex3Dto1D(cx, cy + sy, cz, gridResolution);
	cells[3] = gridIndex3Dto1D(cx, cy, cz + sz, gridResolution);
	cells[4] = gridIndex3Dto1D(cx + sx, cy + sy, cz, gridResolution);
	cells[5] = gridIndex3Dto1D(cx, cy + sy, cz + sz, gridResolution);
	cells[6] = gridIndex3Dto1D(cx + sx, cy, cz + sz, gridResolution);
	cells[7] = gridIndex3Dto1D(cx + sx, cy + sy, cz + sz, gridResolution);
}
// Cell-list LJ + Langevin force kernel.  pos/vel must be in cell-sorted
// order.  Visits the 2x2x2 cell neighborhood of each particle.  *pe counts
// every pair twice (once per member), matching the pe / 2.0 at the print
// site in stepCoherent.
__global__ void kernCoherentforce(int n, glm::vec3 *pos, glm::vec3 *force, glm::vec3 *vel, float coef, glm::vec3 hL, glm::vec3 L,
	float dcut2, float A, float B, float C, float D, float m, float tau, float *pe, curandState_t* states,
	int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices)
{
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	// Original guard was `tid > n`, letting thread tid == n index out of bounds.
	if (tid >= n) return;
	glm::vec3 m_pos = pos[tid];
	int cells[8];
	computeNeighbors(cells, m_pos, gridMin, inverseCellWidth, gridResolution);
#pragma unroll
	for (int i = 0; i < 8; i++) {
		int gridCellCount = gridResolution * gridResolution * gridResolution;
		if (cells[i] < 0 || cells[i] >= gridCellCount) continue;
		// Empty cells carry start = -1, end = -2, so the loop below is skipped.
		int start_idx = gridCellStartIndices[cells[i]];
		int end_idx = gridCellEndIndices[cells[i]];
		for (int j = start_idx; j <= end_idx; j++) {
			if (j == tid) continue;
			glm::vec3 dx = pos[j] - pos[tid];
			// Minimum-image convention for the periodic box.
			while (dx.x > hL.x) dx.x -= L.x;
			while (dx.y > hL.y) dx.y -= L.y;
			while (dx.z > hL.z) dx.z -= L.z;
			while (-dx.x > hL.x) dx.x += L.x;
			while (-dx.y > hL.y) dx.y += L.y;
			while (-dx.z > hL.z) dx.z += L.z;
			float r2 = glm::dot(dx, dx);
			float r6 = r2 * r2 *r2;
			float r12 = r6 * r6;
			if (r2 < dcut2) {
				dx *= (A * 1. / r12 + B * 1. / r6) / r2;
				force[tid] -= dx;
				atomicAdd(pe, C * 1. / r12 + D * 1. / r6);
			}
		}
	}
	// The original __syncthreads() here was removed: no shared memory is
	// used, and the divergent early return above makes the barrier UB.
	float wx = curand_uniform(&(states[tid])) - 0.5f;
	float wy = curand_uniform(&(states[tid])) - 0.5f;
	float wz = curand_uniform(&(states[tid])) - 0.5f;
	glm::vec3 w = glm::vec3(wx, wy, wz);
	force[tid] += -m * tau * vel[tid] + coef * w;
}
// Build the uniform grid, sort particles into cell order, and evaluate
// forces with the cell-list kernel.  The kernel writes forces in reordered
// layout, so the reordered pos/vel buffers are swapped in as primary.
void coherentForce(int k)
{
	int dimThreads = threads;
	int dimBlocks = (gridCellCount + threads - 1) / threads;
	// Reset cell tables to "empty" sentinels (start = -1 > end = -2).
	kernResetIntBuffer << <dimBlocks, dimThreads >> >(gridCellCount, dev_gridCellStartIndices, -1);
	kernResetIntBuffer << <dimBlocks, dimThreads >> >(gridCellCount, dev_gridCellEndIndices, -2);
	dimBlocks = (nall + dimThreads - 1) / dimThreads;
	kernComputeIndices << <dimBlocks, dimThreads >> >(nall, gridSideCount, gridMinimum,
		gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
	// Sort particle indices by cell id, then gather pos/vel into that order.
	thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + nall, dev_thrust_particleArrayIndices);
	kernReorderPos << <dimBlocks, dimThreads >> > (nall, dev_pos_reorder, dev_pos, dev_particleArrayIndices, 1);
	kernReorderPos << <dimBlocks, dimThreads >> > (nall, dev_vel_reorder, dev_vel, dev_particleArrayIndices, 2);
	kernIdentifyCellStartEnd << <dimBlocks, dimThreads >> >(nall, dev_particleGridIndices,
		dev_gridCellStartIndices, dev_gridCellEndIndices);
	// cudaMemset takes an int byte value; pass 0 instead of the misleading
	// 0.0f the original used (it silently converted to int 0 anyway).
	cudaMemset(dev_force, 0, sizeof(glm::vec3) * nall);
	cudaMemset(dev_pe, 0, sizeof(float));
	float TT = T0 + (Tt - T0) / (N - 1) * k;	// ramped target temperature
	float coef = sqrt(24. * tau * m * kB * TT / dt);
	kernCoherentforce << <dimBlocks, dimThreads >> > (nall, dev_pos_reorder, dev_force, dev_vel_reorder, coef, hL, L, dcut2, A, B, C, D, m, tau, dev_pe, states,
		gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices);
	// Adopt the reordered buffers as current; the old ones become scratch.
	glm::vec3 *tmp = dev_vel;
	dev_vel = dev_vel_reorder;
	dev_vel_reorder = tmp;
	tmp = dev_pos;
	dev_pos = dev_pos_reorder;
	dev_pos_reorder = tmp;
}
__global__ void kernInitRandom(int n, unsigned int seed, curandState_t* states)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= n) return;
curand_init(seed, tid, 0, &states[tid]);
}
void MD::MD_init(int ratio, int cellsize)
{
//init variables
ncell[0] = (float)cellsize;
ncell[1] = (float)cellsize;
ncell[2] = (float)cellsize;
sigma = 1.f / ratio;
float sigma3 = sigma * sigma * sigma;
float sigma6 = sigma3 * sigma3;
float sigma12 = sigma6 * sigma6;
A = 48. * epsilon * sigma12;
B = -24. * epsilon * sigma6;
C = 4. * epsilon * sigma12;
D = -4. * epsilon * sigma6;
cudaEventCreate(&startSim);
cudaEventCreate(&endSim);
for (int i = 0; i < 3; i++) nall *= ncell[i];
int dimThreads = threads;
int dimBlocks = (nall + dimThreads - 1) / (dimThreads);
cudaMalloc((void**)&dev_pe, sizeof(float));
cudaMalloc((void**)&dev_pos, nall * sizeof(glm::vec3));
cudaMalloc((void**)&dev_vel, nall * sizeof(glm::vec3));
cudaMalloc((void**)&dev_force, nall * sizeof(glm::vec3));
cudaMalloc((void**)&ke_idata, nall * sizeof(glm::vec3));
cudaMalloc((void**)&ke_odata, dimBlocks * sizeof(glm::vec3));
cudaMalloc((void**)&states, nall * sizeof(curandState_t));
cudaMalloc((void **)&dev_pos_reorder, nall * sizeof(glm::vec3));
cudaMalloc((void **)&dev_vel_reorder, nall * sizeof(glm::vec3));
cudaMalloc((void **)&dev_force_reorder, nall * sizeof(glm::vec3));
kernInitRandom << <dimBlocks, dimThreads >> > (nall, 0, states);
gridCellWidth = 2.0f * dcut;
gridSideCount = (int)(L.x / gridCellWidth) + 1;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
gridMinimum.x = 0.f;
gridMinimum.y = 0.f;
gridMinimum.z = 0.f;
cudaMalloc((void**)&dev_particleGridIndices, nall * sizeof(int));
cudaMalloc((void**)&dev_particleArrayIndices, nall * sizeof(int));
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
h_pos = new glm::vec3[nall];
h_vel = new glm::vec3[nall];
h_force = new glm::vec3[nall];
//init cell and pos
float **cell = nullptr;
M->create(cell, 4, 3, "cell");
cell[0][0] = cell[0][1] = cell[0][2] = 0.0;
cell[1][0] = 0.0; cell[1][1] = 0.5 * a0[1]; cell[1][2] = 0.5 * a0[2];
cell[2][0] = 0.5 * a0[0]; cell[2][1] = 0.0; cell[2][2] = 0.5 * a0[2];
cell[3][0] = 0.5 * a0[0]; cell[3][1] = 0.5 * a0[1]; cell[3][2] = 0.0;
int ii = 0;
for (int ix = 0; ix < ncell[0]; ix++) {
for (int iy = 0; iy < ncell[1]; iy++) {
for (int iz = 0; iz < ncell[2]; iz++) {
for (int iu = 0; iu < nunit; iu++) {
h_pos[ii].x = float(ix) * a0[0] + cell[iu][0];
h_pos[ii].y = float(iy) * a0[1] + cell[iu][1];
h_pos[ii].z = float(iz) * a0[2] + cell[iu][2];
++ii;
}
}
}
}
cudaMemcpy(dev_pos, h_pos, nall * sizeof(glm::vec3), cudaMemcpyHostToDevice);
M->destroy(cell);
//////////////////////////////
kernInitVel << <dimBlocks, dimThreads >> > (nall, dev_vel, states);
glm::vec3 *vel_odata = nullptr;
cudaMalloc((void**)&vel_odata, dimBlocks * sizeof(glm::vec3));
glm::vec3 mon = vel_reduce_wrapper(dev_vel, vel_odata, nall);
cudaFree(vel_odata);
mon /= float(nall);
kernVelMinus << <dimBlocks, dimThreads >> >(nall, dev_vel, mon);
kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
ke *= 0.5 * m; T = ke / (1.5 * float(nall) * kB);
float gamma = sqrt(T0 / T);
ke = 0.0;
kernVelMultiply << <dimBlocks, dimThreads >> > (nall, dev_vel, gamma);
kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
ke *= 0.5 * m;
T = ke / (1.5 * float(nall) * kB);
cudaMemset(dev_force, 0, sizeof(glm::vec3) * nall);
cudaMemset(dev_pe, 0, sizeof(float));
float coef = sqrt(24. * tau * m * kB * T0 / dt);
kernNaiveForce << <dimBlocks, dimThreads >> >(nall, dev_pos, dev_force, dev_vel, coef, hL, L, dcut2, A, B, C, D, m, tau, dev_pe, states);
cudaMemcpy(&pe, dev_pe, sizeof(float), cudaMemcpyDeviceToHost);
printf("ke = %f pe = %f T = %f\n", ke, pe, T);
}
void MD::MD_free()
{
cudaFree(dev_pe);
cudaFree(dev_force);
cudaFree(dev_vel);
cudaFree(dev_pos);
cudaFree(dev_force_reorder);
cudaFree(dev_vel_reorder);
cudaFree(dev_pos_reorder);
cudaFree(states);
cudaFree(ke_idata);
cudaFree(ke_odata);
cudaFree(dev_particleGridIndices);
cudaFree(dev_particleArrayIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
delete[] h_force;
delete[] h_pos;
delete[] h_vel;
delete M;
delete rnd;
}
void MD::MD_run()
{
for (int i = 1; i < N; i++) {
MD_Loop(i);
}
}
void stepCoherent(int k) {
int dimThreads = threads;
int dimBlocks = (nall + dimThreads - 1) / dimThreads;
cudaEventRecord(startSim);
for (int i = 0; i < nstep; i++) {
kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
kernNaivePositionIntegration << <dimBlocks, dimThreads >> > (nall, dev_pos, dev_vel, dt);
kernUpdatePos << <dimBlocks, dimThreads >> > (nall, dev_pos, L);
coherentForce(k);
kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
if (i % ifreq == 0) {
float TT = T0 + (Tt - T0) / (N - 1) * k;
float coef = sqrt(24. * tau * m * kB * TT / dt);
kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
ke *= 0.5 * m; t += dt;
T = ke / (1.5 * float(nall) * kB);
cudaMemcpy(&pe, dev_pe, sizeof(float), cudaMemcpyDeviceToHost);
printf("step %d ke %f pe %f T %f TT %f coef %f\n", i, ke, pe / 2.0, T, TT, coef);
}
}
cudaEventRecord(endSim);
cudaEventSynchronize(endSim);
float timeElapsedMilliseconds = 0.0f;
cudaEventElapsedTime(&timeElapsedMilliseconds, startSim, endSim);
printf("time run: %f\n", timeElapsedMilliseconds);
}
void stepNaive(int k) {
int dimThreads = threads;
int dimBlocks = (nall + dimThreads - 1) / (dimThreads);
cudaEventRecord(startSim);
for (int i = 0; i < nstep; i++) {
kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
kernNaivePositionIntegration << <dimBlocks, dimThreads >> > (nall, dev_pos, dev_vel, dt);
cudaMemset(dev_force, 0.0f, sizeof(glm::vec3) * nall);
cudaMemset(dev_pe, 0.0f, sizeof(float));
float TT = T0 + (Tt - T0) / (N - 1) * k;
float coef = sqrt(24. * tau * m * kB * TT / dt);
kernNaiveForce << <dimBlocks, dimThreads >> > (nall, dev_pos, dev_force, dev_vel,
coef, hL, L, dcut2, A, B, C, D, m, tau, dev_pe, states);
kernNaiveVelocityIntegration << <dimBlocks, dimThreads >> > (nall, dev_vel, dev_force, inv_m, hdt);
if (i % ifreq == 0) {
kernComputeDotProduct << <dimBlocks, dimThreads >> > (nall, dev_vel, ke_idata);
ke = reduce_energy_wrapper(ke_idata, ke_odata, nall);
ke *= 0.5 * m; t += dt;
T = ke / (1.5 * float(nall) * kB);
cudaMemcpy(&pe, dev_pe, sizeof(float), cudaMemcpyDeviceToHost);
printf("step %d ke %f pe %f T %f TT %f coef %f\n", i, ke, pe, T, TT, coef);
}
}
cudaEventRecord(endSim);
cudaEventSynchronize(endSim);
float timeElapsedMilliseconds = 0.0f;
cudaEventElapsedTime(&timeElapsedMilliseconds, startSim, endSim);
printf("time run: %f\n", timeElapsedMilliseconds);
}
void MD::MD_Loop(int k)
{
//stepCoherent(k);
stepNaive(k);
}
|
f5204d8522a2cce22e3250521ee47529492a5ecf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Name: Paul Talaga
Date: Nov 27, 2017
Desc: Program to add two arrays using the GPU
Only 1 thread in 1 block, so this will NOT be fast,
but it is using the GPU
To compile this, do: nvcc add-single.cu
*/
#include <iostream>
using namespace std;
// CUDA kernel function to add to arrays element by element
// This will add all elements in the array in 1 call.
__global__
void add(int size, int* x, int* y, int* z){
for(int i = 0; i < size; i++){
z[i] = x[i] + y[i];
}
}
int main(){
// Size of the arrays we'll be adding
const unsigned N = 100;
// To used unified memory (CUDA takes care of data movement)
// all memory must be allocated via the hipMallocManaged call below.
int* x;
int* y;
int* z;
hipMallocManaged(&x, N * sizeof(int));
hipMallocManaged(&y, N * sizeof(int));
hipMallocManaged(&z, N * sizeof(int));
// Fill the arrays with numbers
for(int i = 0; i < N; i++){
x[i] = i;
y[i] = 2 * i;
}
// Call the add function, with 1 block, and 1 thread
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, N, x, y, z);
// Wait until the device is done before proceeding, otherwise we'd be
// accessing x, y, and z in the loop below before the add function completes
// on the device.
hipDeviceSynchronize();
// Check to see if the math is correct
int errors = 0;
for(int i = 0; i < N; i++){
if(z[i] != x[i] + y[i]){
cout << i << " did not add correctly!" << endl;
errors++;
}
}
if(!errors)cout << "All good!" << endl;
return 0;
} | f5204d8522a2cce22e3250521ee47529492a5ecf.cu | /*
Name: Paul Talaga
Date: Nov 27, 2017
Desc: Program to add two arrays using the GPU
Only 1 thread in 1 block, so this will NOT be fast,
but it is using the GPU
To compile this, do: nvcc add-single.cu
*/
#include <iostream>
using namespace std;
// CUDA kernel function to add to arrays element by element
// This will add all elements in the array in 1 call.
__global__
void add(int size, int* x, int* y, int* z){
for(int i = 0; i < size; i++){
z[i] = x[i] + y[i];
}
}
int main(){
// Size of the arrays we'll be adding
const unsigned N = 100;
// To used unified memory (CUDA takes care of data movement)
// all memory must be allocated via the cudaMallocManaged call below.
int* x;
int* y;
int* z;
cudaMallocManaged(&x, N * sizeof(int));
cudaMallocManaged(&y, N * sizeof(int));
cudaMallocManaged(&z, N * sizeof(int));
// Fill the arrays with numbers
for(int i = 0; i < N; i++){
x[i] = i;
y[i] = 2 * i;
}
// Call the add function, with 1 block, and 1 thread
add<<<1,1>>>(N, x, y, z);
// Wait until the device is done before proceeding, otherwise we'd be
// accessing x, y, and z in the loop below before the add function completes
// on the device.
cudaDeviceSynchronize();
// Check to see if the math is correct
int errors = 0;
for(int i = 0; i < N; i++){
if(z[i] != x[i] + y[i]){
cout << i << " did not add correctly!" << endl;
errors++;
}
}
if(!errors)cout << "All good!" << endl;
return 0;
} |
75bb3036dd66c0fab03d921ff29a5ecec2fa2d97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_supp.h"
//using namespace std;
namespace Arnoldi
{
//random normal distribution
double rand_normal(double mean, double stddev)
{//Box muller method
static double n2 = 0.0;
static int n2_cached = 0;
if (!n2_cached)
{
double x, y, r;
do
{
x = 2.0*rand()/RAND_MAX - 1;
y = 2.0*rand()/RAND_MAX - 1;
r = x*x + y*y;
}
while (r == 0.0 || r > 1.0);
{
double d = sqrt(-2.0*log(r)/r);
double n1 = x*d;
n2 = y*d;
double result = n1*stddev + mean;
n2_cached = 1;
return result;
}
}
else
{
n2_cached = 0;
return n2*stddev + mean;
}
}
bool InitCUDA(int GPU_number)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no compartable device found.\n");
return false;
}
int deviceNumber=0;
int deviceNumberTemp=0;
if(count>1){
for(i = 0; i < count; i++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, i);
printf( "#%i: %s, pci-bus id:%i %i %i \n", i, &deviceProp,deviceProp.pciBusID,deviceProp.pciDeviceID,deviceProp.pciDomainID);
}
if(GPU_number==-1){
printf("Device number for it to use>>>\n",i);
scanf("%i", &deviceNumberTemp);
}
else{
printf("Using device number %i\n",GPU_number);
deviceNumberTemp=GPU_number;
}
deviceNumber=deviceNumberTemp;
}
else{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, deviceNumber);
printf( "#%i: %s, pci-bus id:%i %i %i \n", deviceNumber, &deviceProp,deviceProp.pciBusID,deviceProp.pciDeviceID,deviceProp.pciDomainID);
printf( " using it...\n");
}
hipSetDevice(deviceNumber);
return true;
}
void device_host_real_cpy(real* device, real* host, int Nx, int Ny){
int mem_size=sizeof(real)*Nx*Ny;
hipError_t cuerr=hipMemcpy(device, host, mem_size, hipMemcpyHostToDevice);
if (cuerr != hipSuccess)
{
fprintf(stderr, "Cannot copy real array from host to device because: %s\n",
hipGetErrorString(cuerr));
exit(-1);
}
}
void host_device_real_cpy(real* host, real* device, int Nx, int Ny){
int mem_size=sizeof(real)*Nx*Ny;
hipError_t cuerr=hipMemcpy(host, device, mem_size, hipMemcpyDeviceToHost);
if (cuerr != hipSuccess)
{
printf("Cannot copy real array from device to host because: %s\n",
hipGetErrorString(cuerr));
exit(-1);
}
}
void check_for_nans(char message[], int Size, real *array){
real Array_CPU[2]={0,0};
hipError_t cuerr=hipMemcpy(Array_CPU, array, 2*sizeof(real), hipMemcpyDeviceToHost);
if (cuerr != hipSuccess)
{
printf("Cannot copy real array from device to host because: %s\n",
hipGetErrorString(cuerr));
exit(-1);
}
if(Array_CPU[0]!=Array_CPU[0]){
std::cerr << "NANS!!!";
std::cerr << message << "\n";
exit(1);
}
}
void checkError(hipblasStatus_t status, const char *msg)
{
if (status != HIPBLAS_STATUS_SUCCESS)
{
printf("%s", msg);
switch(status){
case HIPBLAS_STATUS_NOT_INITIALIZED:
printf(" the library was not initialized!\n");
break;
case HIPBLAS_STATUS_INVALID_VALUE:
printf(" the parameters m,n<0 or incx,incy=0!\n");
break;
case HIPBLAS_STATUS_ALLOC_FAILED:
printf(" the reduction buffer could not be allocated!\n");
break;
case HIPBLAS_STATUS_ARCH_MISMATCH:
printf(" the device does not support double-precision!\n");
break;
case HIPBLAS_STATUS_MAPPING_ERROR:
printf(" An access to GPU memory space failed.!\n");
break;
case HIPBLAS_STATUS_EXECUTION_FAILED:
printf(" the function failed to launch on the GPU!\n");
break;
case HIPBLAS_STATUS_INTERNAL_ERROR:
printf(" An internal cuBLAS operation failed. This error is usually caused by a hipMemcpyAsync() failure!\n");
break;
default:
printf(" Unknown error!\n");
break;
}
exit(EXIT_FAILURE);
}
}
void vectors_add_GPU(hipblasHandle_t handle, int N, real alpha, real *x, real *y){
/*
hipblasStatus_t hipblasSaxpy(hipblasHandle_t handle, int n,
const float *alpha,
const float *x, int incx,
float *y, int incy)
hipblasStatus_t hipblasDaxpy(hipblasHandle_t handle, int n,
const double *alpha,
const double *x, int incx,
double *y, int incy)
This function multiplies the vector x by the scalar and adds it to the vector y overwriting the latest vector with the result.
*/
hipblasStatus_t ret;
#ifdef real_float
ret=hipblasSaxpy(handle, N, &alpha, x, 1, y, 1);
#endif
#ifdef real_double
ret=hipblasDaxpy(handle, N, &alpha, x, 1, y, 1);
#endif
checkError(ret, " vectors_add_GPU(). ");
}
void normalize_vector_GPU(hipblasHandle_t handle, int N, real *x){
/*
hipblasStatus_t hipblasSscal(hipblasHandle_t handle, int n,
const float *alpha,
float *x, int incx)
hipblasStatus_t hipblasDscal(hipblasHandle_t handle, int n,
const double *alpha,
double *x, int incx)
This function scales the vector x by the scalar and overwrites it with the result.
*/
hipblasStatus_t ret;
real norm2=0.0;
norm2=Arnoldi::vector_norm2_GPU(handle, N, x);
//if(norm2>1E-15){
norm2=1.0/norm2;
#ifdef real_float
ret=hipblasSscal(handle, N, &norm2, x, 1);
#endif
#ifdef real_double
ret=hipblasDscal(handle, N, &norm2, x, 1);
#endif
checkError(ret, " normalize_vector_GPU(). ");
//}
//else{
// printf("\nVector length is less than 1E-15!\n");
// exit(-1);
//}
}
real vector_norm2_GPU(hipblasHandle_t handle, int N, real *x){
/*
hipblasStatus_t hipblasSnrm2(hipblasHandle_t handle, int n,
const float *x, int incx, float *result)
hipblasStatus_t hipblasDnrm2(hipblasHandle_t handle, int n,
const double *x, int incx, double *result)
*/
hipblasStatus_t ret;
real result;
#ifdef real_float
ret=hipblasSnrm2(handle, N, x, 1, &result);
#endif
#ifdef real_double
ret=hipblasDnrm2(handle, N, x, 1, &result);
#endif
checkError(ret, " vector_norm2_GPU(). ");
return result;
}
void vector_copy_GPU(hipblasHandle_t handle, int N, real *vec_source, real *vec_dest){
/*
hipblasStatus_t hipblasScopy(hipblasHandle_t handle, int n,
const float *x, int incx,
float *y, int incy)
hipblasStatus_t hipblasDcopy(hipblasHandle_t handle, int n,
const double *x, int incx,
double *y, int incy)
This function copies the vector x into the vector y
*/
hipblasStatus_t ret;
#ifdef real_float
ret=hipblasScopy(handle, N, vec_source, 1, vec_dest, 1);
#endif
#ifdef real_double
ret=hipblasDcopy(handle, N, vec_source, 1, vec_dest, 1);
#endif
checkError(ret, " vector_copy_GPU(). ");
}
real vector_dot_product_GPU(hipblasHandle_t handle, int N, real *vec1, real *vec2){
/*
hipblasStatus_t hipblasSdot (hipblasHandle_t handle, int n,
const float *x, int incx,
const float *y, int incy,
float *result)
hipblasStatus_t hipblasDdot (hipblasHandle_t handle, int n,
const double *x, int incx,
const double *y, int incy,
double *result)
*/
hipblasStatus_t ret;
real result;
#ifdef real_float
ret=hipblasSdot(handle, N, vec1, 1, vec2, 1, &result);
#endif
#ifdef real_double
ret=hipblasDdot(handle, N, vec1, 1, vec2, 1, &result);
#endif
checkError(ret, " vector_dot_product_GPU(). ");
return result;
}
void matrixMultVector_GPU(hipblasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, real beta, real *res){ // res=*A*x+*res){
hipblasStatus_t ret;
/*
hipblasStatus_t hipblasSgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const float *alpha,
const float *A, int lda,
const float *x, int incx,
const float *beta,
float *y, int incy)
hipblasStatus_t hipblasDgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const double *alpha,
const double *A, int lda,
const double *x, int incx,
const double *beta,
double *y, int incy)
This function performs the matrix-vector multiplication
y = op ( A ) x + y
where A is a m n matrix stored in column-major format, x and y are vectors, and and are scalars. Also, for matrix A
A if transa == HIPBLAS_OP_N
A^T if transa == HIPBLAS_OP_T
A^H if transa == CUBLAS_OP_H
handle input handle to the cuBLAS library context.
trans input operation op(A) that is non- or (conj.) transpose.
m input number of rows of matrix A.
n input number of columns of matrix A.
host or device input <type> scalar used for multiplication.
A device input <type> array of dimension lda x n with lda >= max(1,m) if transa==HIPBLAS_OP_N and lda x m with lda >= max(1,n) otherwise.
lda input leading dimension of two-dimensional array used to store matrix A.
x device input <type> vector with n elements if transa==HIPBLAS_OP_N and m elements otherwise.
incx input stride between consecutive elements of x.
host or device input <type> scalar used for multiplication, if beta==0 then y does not have to be a valid input.
y device in/out <type> vector with m elements if transa==HIPBLAS_OP_N and n elements otherwise.
incy input stride between consecutive elements of .y
*/
int LDA=RowA;
#ifdef real_float
ret=hipblasSgemv(handle, HIPBLAS_OP_N, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=hipblasDgemv(handle, HIPBLAS_OP_N, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixMultVector_GPU(). ");
}
__global__ void set_matrix_colomn_kernel(int Row, int Col, real* matrix, real *vec, int col_number){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if((i<Row)&&(col_number<Col)){
matrix[I2(i,col_number,Row)]=vec[i];
}
}
__global__ void get_matrix_colomn_kernel(int Row, int Col, real* matrix, real *vec, int col_number){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if((i<Row)&&(col_number<Col)){
vec[i]=matrix[I2(i,col_number,Row)];
}
}
void set_matrix_colomn_GPU(int Row, int Col, real *mat, real *vec, int col_number){
dim3 threads(BLOCKSIZE);
int blocks_x=(Row+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( set_matrix_colomn_kernel), dim3(blocks), dim3(threads), 0, 0, Row, Col, mat, vec, col_number);
}
void get_matrix_colomn_GPU(int Row, int Col, real *mat, real *vec, int col_number){
dim3 threads(BLOCKSIZE);
int blocks_x=(Row+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( get_matrix_colomn_kernel), dim3(blocks), dim3(threads), 0, 0, Row, Col, mat, vec, col_number);
}
__global__ void set_vector_value_kernel(int N, real val, real *vec){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec[i]=val;
}
}
__global__ void set_initial_Krylov_vector_value_kernel(int N, real *vec){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec[i]=0.0;
}
vec[0]=0.0;
vec[1]=1.0;
vec[N/4]=1.5;
vec[N/2]=0.5;
vec[N-1]=1.0;
vec[N-4]=1.0;
}
void set_vector_value_GPU(int N, real val, real *vec){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( set_vector_value_kernel), dim3(blocks), dim3(threads), 0, 0, N, val,vec);
}
void set_initial_Krylov_vector_value_GPU(int N, real *vec){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( set_initial_Krylov_vector_value_kernel), dim3(blocks), dim3(threads), 0, 0, N, vec);
}
__global__ void set_vector_inverce_kernel(int N, real *vec){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec[i]=-vec[i];
}
}
void set_vector_inverce_GPU(int N, real *vec){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( set_vector_inverce_kernel), dim3(blocks), dim3(threads), 0, 0, N, vec);
}
void matrixMultVector_part_GPU(hipblasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, int part_Cols, real beta, real *res){ // res=*A*x+*res){
hipblasStatus_t ret;
/*
hipblasStatus_t hipblasSgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const float *alpha,
const float *A, int lda,
const float *x, int incx,
const float *beta,
float *y, int incy)
hipblasStatus_t hipblasDgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const double *alpha,
const double *A, int lda,
const double *x, int incx,
const double *beta,
double *y, int incy)
This function performs the matrix-vector multiplication
y = op ( A ) x + y
where A is a m n matrix stored in column-major format, x and y are vectors, and and are scalars. Also, for matrix A
A if transa == HIPBLAS_OP_N
A^T if transa == HIPBLAS_OP_T
A^H if transa == CUBLAS_OP_H
handle input handle to the cuBLAS library context.
trans input operation op(A) that is non- or (conj.) transpose.
m input number of rows of matrix A.
n input number of columns of matrix A.
host or device input <type> scalar used for multiplication.
A device input <type> array of dimension lda x n with lda >= max(1,m) if transa==HIPBLAS_OP_N and lda x m with lda >= max(1,n) otherwise.
lda input leading dimension of two-dimensional array used to store matrix A.
x device input <type> vector with n elements if transa==HIPBLAS_OP_N and m elements otherwise.
incx input stride between consecutive elements of x.
host or device input <type> scalar used for multiplication, if beta==0 then y does not have to be a valid input.
y device in/out <type> vector with m elements if transa==HIPBLAS_OP_N and n elements otherwise.
incy input stride between consecutive elements of .y
*/
int LDA=RowA;
#ifdef real_float
ret=hipblasSgemv(handle, HIPBLAS_OP_N, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=hipblasDgemv(handle, HIPBLAS_OP_N, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixMultVector_part_GPU(). ");
}
void matrixDotVector_GPU(hipblasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, real beta, real *res){ // res=*A*x+*res){
hipblasStatus_t ret;
/*
hipblasStatus_t hipblasSgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const float *alpha,
const float *A, int lda,
const float *x, int incx,
const float *beta,
float *y, int incy)
hipblasStatus_t hipblasDgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const double *alpha,
const double *A, int lda,
const double *x, int incx,
const double *beta,
double *y, int incy)
This function performs the matrix-vector multiplication
y = op ( A ) x + y
where A is a m n matrix stored in column-major format, x and y are vectors, and and are scalars. Also, for matrix A
A if transa == HIPBLAS_OP_N
A^T if transa == HIPBLAS_OP_T
A^H if transa == CUBLAS_OP_H
handle input handle to the cuBLAS library context.
trans input operation op(A) that is non- or (conj.) transpose.
m input number of rows of matrix A.
n input number of columns of matrix A.
host or device input <type> scalar used for multiplication.
A device input <type> array of dimension lda x n with lda >= max(1,m) if transa==HIPBLAS_OP_N and lda x m with lda >= max(1,n) otherwise.
lda input leading dimension of two-dimensional array used to store matrix A.
x device input <type> vector with n elements if transa==HIPBLAS_OP_N and m elements otherwise.
incx input stride between consecutive elements of x.
host or device input <type> scalar used for multiplication, if beta==0 then y does not have to be a valid input.
y device in/out <type> vector with m elements if transa==HIPBLAS_OP_N and n elements otherwise.
incy input stride between consecutive elements of .y
*/
int LDA=RowA;
#ifdef real_float
ret=hipblasSgemv(handle, HIPBLAS_OP_T, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=hipblasDgemv(handle, HIPBLAS_OP_T, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixDotVector_GPU(). ");
}
void matrixDotVector_part_GPU(hipblasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, int part_Cols, real beta, real *res){ // res=*A*x+*res)
hipblasStatus_t ret;
/*
hipblasStatus_t hipblasSgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const float *alpha,
const float *A, int lda,
const float *x, int incx,
const float *beta,
float *y, int incy)
hipblasStatus_t hipblasDgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const double *alpha,
const double *A, int lda,
const double *x, int incx,
const double *beta,
double *y, int incy)
This function performs the matrix-vector multiplication
y = op ( A ) x + y
where A is a m n matrix stored in column-major format, x and y are vectors, and and are scalars. Also, for matrix A
A if transa == HIPBLAS_OP_N
A^T if transa == HIPBLAS_OP_T
A^H if transa == CUBLAS_OP_H
handle input handle to the cuBLAS library context.
trans input operation op(A) that is non- or (conj.) transpose.
m input number of rows of matrix A.
n input number of columns of matrix A.
host or device input <type> scalar used for multiplication.
A device input <type> array of dimension lda x n with lda >= max(1,m) if transa==HIPBLAS_OP_N and lda x m with lda >= max(1,n) otherwise.
lda input leading dimension of two-dimensional array used to store matrix A.
x device input <type> vector with n elements if transa==HIPBLAS_OP_N and m elements otherwise.
incx input stride between consecutive elements of x.
host or device input <type> scalar used for multiplication, if beta==0 then y does not have to be a valid input.
y device in/out <type> vector with m elements if transa==HIPBLAS_OP_N and n elements otherwise.
incy input stride between consecutive elements of .y
*/
int LDA=RowA;
#ifdef real_float
ret=hipblasSgemv(handle, HIPBLAS_OP_T, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=hipblasDgemv(handle, HIPBLAS_OP_T, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixDotVector_part_GPU(). ");
}
void matrixMultMatrix_GPU(hipblasHandle_t handle, int RowAC, int ColBC, int ColA, real *A, real alpha, real *B, real beta, real *C){
// C = op ( A ) op ( B ) + C
/*
hipblasStatus_t hipblasSgemm(hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const float *alpha,
const float *A, int lda,
const float *B, int ldb,
const float *beta,
float *C, int ldc)
hipblasStatus_t hipblasDgemm(hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double *alpha,
const double *A, int lda,
const double *B, int ldb,
const double *beta,
double *C, int ldc)
This function performs the matrix-matrix multiplication
C = op ( A ) op ( B ) + C
where and are scalars, and A , B and C are matrices stored in column-major format with dimensions op ( A ) m k , op ( B ) k n and C m n , respectively. Also, for matrix A
op ( A ) = A if transa == HIPBLAS_OP_N
A^T if transa == HIPBLAS_OP_T
A^H if transa == HIPBLAS_OP_C
and op ( B ) is defined similarly for matrix B
handle input handle to the cuBLAS library context.
transa input operation op(A) that is non- or (conj.) transpose.
transb input operation op(B) that is non- or (conj.) transpose.
m input number of rows of matrix op(A) and C.
n input number of columns of matrix op(B) and C.
k input number of columns of op(A) and rows of op(B).
alpha host or device input <type> scalar used for multiplication.
A device input <type> array of dimensions lda x k with lda>=max(1,m) if transa == HIPBLAS_OP_N and lda x m with lda>=max(1,k) otherwise.
lda input leading dimension of two-dimensional array used to store the matrix A.
B device input <type> array of dimension ldb x n with ldb>=max(1,k) if transa == HIPBLAS_OP_N and ldb x k with ldb>=max(1,n) otherwise.
ldb input leading dimension of two-dimensional array used to store matrix B.
beta host or device input <type> scalar used for multiplication. If beta==0, C does not have to be a valid input.
C device in/out <type> array of dimensions ldc x n with ldc>=max(1,m).
ldc input leading dimension of a two-dimensional array used to store the matrix C.
*/
int LDA=RowAC;
int LDB=ColA;
int LDC=RowAC;
hipblasStatus_t ret;
#ifdef real_float
ret=hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
#ifdef real_double
ret=hipblasDgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
checkError(ret, " matrixMultMatrix_GPU(). ");
}
void matrixTMultMatrix_GPU(hipblasHandle_t handle, int RowAC, int ColBC, int ColA, real *A, real alpha, real *B, real beta, real *C){
// C = op ( A ) op ( B ) + C
/*
hipblasStatus_t hipblasSgemm(hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const float *alpha,
const float *A, int lda,
const float *B, int ldb,
const float *beta,
float *C, int ldc)
hipblasStatus_t hipblasDgemm(hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double *alpha,
const double *A, int lda,
const double *B, int ldb,
const double *beta,
double *C, int ldc)
This function performs the matrix-matrix multiplication
C = op ( A ) op ( B ) + C
where and are scalars, and A , B and C are matrices stored in column-major format with dimensions op ( A ) m k , op ( B ) k n and C m n , respectively. Also, for matrix A
op ( A ) = A if transa == HIPBLAS_OP_N
A^T if transa == HIPBLAS_OP_T
A^H if transa == HIPBLAS_OP_C
and op ( B ) is defined similarly for matrix B
handle input handle to the cuBLAS library context.
transa input operation op(A) that is non- or (conj.) transpose.
transb input operation op(B) that is non- or (conj.) transpose.
m input number of rows of matrix op(A) and C.
n input number of columns of matrix op(B) and C.
k input number of columns of op(A) and rows of op(B).
alpha host or device input <type> scalar used for multiplication.
A device input <type> array of dimensions lda x k with lda>=max(1,m) if transa == HIPBLAS_OP_N and lda x m with lda>=max(1,k) otherwise.
lda input leading dimension of two-dimensional array used to store the matrix A.
B device input <type> array of dimension ldb x n with ldb>=max(1,k) if transa == HIPBLAS_OP_N and ldb x k with ldb>=max(1,n) otherwise.
ldb input leading dimension of two-dimensional array used to store matrix B.
beta host or device input <type> scalar used for multiplication. If beta==0, C does not have to be a valid input.
C device in/out <type> array of dimensions ldc x n with ldc>=max(1,m).
ldc input leading dimension of a two-dimensional array used to store the matrix C.
*/
int LDA=ColA;
int LDB=ColA;
int LDC=RowAC;
hipblasStatus_t ret;
#ifdef real_float
ret=hipblasSgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
#ifdef real_double
ret=hipblasDgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
checkError(ret, " matrixTMultMatrix_GPU(). ");
}
// Complex GEMM wrapper (hipBLAS backend): C = A * B with alpha = 1, beta = 0.
// All matrices are column-major on the device: A is RowAC x ColA, B is
// ColA x ColBC and C is RowAC x ColBC. Single/double complex precision
// (Cgemm/Zgemm) is selected at compile time via real_float / real_double.
// Exits via checkError() on any hipBLAS failure.
void matrixMultComplexMatrix_GPU(hipblasHandle_t handle, int RowAC, int ColBC, int ColA, cublasComplex *A, cublasComplex *B, cublasComplex *C){
// Leading dimensions equal the row counts of the untransposed operands.
int LDA=RowAC;
int LDB=ColA;
int LDC=RowAC;
hipblasStatus_t ret;
cublasComplex alpha;
cublasComplex beta;
#ifdef real_float
alpha=make_cuComplex(1.0, 0.0);
beta=make_cuComplex(0.0, 0.0);
ret=hipblasCgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
#ifdef real_double
alpha=make_cuDoubleComplex(1.0, 0.0);
beta=make_cuDoubleComplex(0.0, 0.0);
ret=hipblasZgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
checkError(ret, " matrixMultComplexMatrix_GPU(). ");
}
//namespace!
}
| 75bb3036dd66c0fab03d921ff29a5ecec2fa2d97.cu | #include "cuda_supp.h"
//using namespace std;
namespace Arnoldi
{
//random normal distribution
// Draw one sample from N(mean, stddev^2) using the Marsaglia polar variant of
// the Box-Muller transform. Each accepted (x, y) pair yields two unit-normal
// deviates; the second is cached in function-local static state and returned by
// the next call. Not thread-safe: both the cache and rand() are shared state.
double rand_normal(double mean, double stddev)
{
    static double cached_deviate = 0.0;  // spare unit-normal from the previous pair
    static int have_cached = 0;          // non-zero when cached_deviate is valid

    if (have_cached)
    {
        have_cached = 0;
        return cached_deviate * stddev + mean;
    }

    // Rejection-sample a point uniformly inside the unit disc (excluding the origin).
    double x, y, r;
    do
    {
        x = 2.0 * rand() / RAND_MAX - 1;
        y = 2.0 * rand() / RAND_MAX - 1;
        r = x * x + y * y;
    } while (r == 0.0 || r > 1.0);

    const double d = sqrt(-2.0 * log(r) / r);
    cached_deviate = y * d;  // keep the second deviate for the next call
    have_cached = 1;
    return (x * d) * stddev + mean;
}
// Select and activate a CUDA device.
// GPU_number == -1 -> when several devices exist, list them and ask the user
// for an index; otherwise use the given index directly. Returns false when no
// CUDA device is present, true after cudaSetDevice() has been issued.
bool InitCUDA(int GPU_number)
{
    int count = 0;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no compartable device found.\n");
        return false;
    }

    int deviceNumber = 0;
    if (count > 1) {
        // List every device so the user (or the caller's index) can be checked
        // against something meaningful.
        for (int i = 0; i < count; i++) {
            cudaDeviceProp deviceProp;
            cudaGetDeviceProperties(&deviceProp, i);
            // %s must receive the device-name string; the original passed
            // &deviceProp (a pointer to the whole struct), which is undefined
            // behavior and prints garbage.
            printf("#%i: %s, pci-bus id:%i %i %i \n", i, deviceProp.name,
                   deviceProp.pciBusID, deviceProp.pciDeviceID, deviceProp.pciDomainID);
        }
        if (GPU_number == -1) {
            printf("Device number for it to use>>>\n");
            int choice = 0;
            // Validate the interactive input instead of passing garbage on to
            // cudaSetDevice().
            if (scanf("%i", &choice) != 1 || choice < 0 || choice >= count) {
                fprintf(stderr, "Invalid device number, falling back to device 0.\n");
                choice = 0;
            }
            deviceNumber = choice;
        } else {
            printf("Using device number %i\n", GPU_number);
            deviceNumber = GPU_number;
        }
    } else {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, deviceNumber);
        printf("#%i: %s, pci-bus id:%i %i %i \n", deviceNumber, deviceProp.name,
               deviceProp.pciBusID, deviceProp.pciDeviceID, deviceProp.pciDomainID);
        printf(" using it...\n");
    }
    cudaSetDevice(deviceNumber);
    return true;
}
// Copy an Nx*Ny array of `real` from host memory to device memory.
// Aborts the process (exit(-1)) with a message on stderr if the copy fails.
void device_host_real_cpy(real* device, real* host, int Nx, int Ny){
    int mem_size = sizeof(real) * Nx * Ny;
    cudaError_t status = cudaMemcpy(device, host, mem_size, cudaMemcpyHostToDevice);
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cannot copy real array from host to device because: %s\n",
            cudaGetErrorString(status));
    exit(-1);
}
// Copy an Nx*Ny array of `real` from device memory back to host memory.
// Aborts the process (exit(-1)) with a message if the copy fails.
// Consistency fix: the error is now reported on stderr like in the sibling
// device_host_real_cpy(); the original wrote this diagnostic to stdout.
void host_device_real_cpy(real* host, real* device, int Nx, int Ny){
    int mem_size = sizeof(real) * Nx * Ny;
    cudaError_t cuerr = cudaMemcpy(host, device, mem_size, cudaMemcpyDeviceToHost);
    if (cuerr != cudaSuccess)
    {
        fprintf(stderr, "Cannot copy real array from device to host because: %s\n",
                cudaGetErrorString(cuerr));
        exit(-1);
    }
}
// Cheap NaN sentinel: copies the first two elements of the device array to the
// host and aborts (exit(1)) with `message` if element [0] fails the x == x test.
// NOTE(review): `Size` is unused, and only element [0] is inspected even though
// two elements are copied — confirm whether a full-array (or two-element) scan
// was intended.
void check_for_nans(char message[], int Size, real *array){
real Array_CPU[2]={0,0};
cudaError_t cuerr=cudaMemcpy(Array_CPU, array, 2*sizeof(real), cudaMemcpyDeviceToHost);
if (cuerr != cudaSuccess)
{
printf("Cannot copy real array from device to host because: %s\n",
cudaGetErrorString(cuerr));
exit(-1);
}
// NaN is the only value for which x != x holds.
if(Array_CPU[0]!=Array_CPU[0]){
std::cerr << "NANS!!!";
std::cerr << message << "\n";
exit(1);
}
}
// Translate a cuBLAS status code into a human-readable diagnostic.
// On any status other than CUBLAS_STATUS_SUCCESS the caller-supplied message is
// printed, followed by a description of the failure, and the process exits
// with EXIT_FAILURE. On success it is a no-op.
void checkError(cublasStatus_t status, const char *msg)
{
    if (status == CUBLAS_STATUS_SUCCESS)
        return;

    printf("%s", msg);
    const char *detail;
    switch (status) {
    case CUBLAS_STATUS_NOT_INITIALIZED:
        detail = " the library was not initialized!\n";
        break;
    case CUBLAS_STATUS_INVALID_VALUE:
        detail = " the parameters m,n<0 or incx,incy=0!\n";
        break;
    case CUBLAS_STATUS_ALLOC_FAILED:
        detail = " the reduction buffer could not be allocated!\n";
        break;
    case CUBLAS_STATUS_ARCH_MISMATCH:
        detail = " the device does not support double-precision!\n";
        break;
    case CUBLAS_STATUS_MAPPING_ERROR:
        detail = " An access to GPU memory space failed.!\n";
        break;
    case CUBLAS_STATUS_EXECUTION_FAILED:
        detail = " the function failed to launch on the GPU!\n";
        break;
    case CUBLAS_STATUS_INTERNAL_ERROR:
        detail = " An internal cuBLAS operation failed. This error is usually caused by a cudaMemcpyAsync() failure!\n";
        break;
    default:
        detail = " Unknown error!\n";
        break;
    }
    printf("%s", detail);
    exit(EXIT_FAILURE);
}
// AXPY wrapper: y = alpha * x + y for N-element unit-stride device vectors.
// The single/double-precision cuBLAS call is chosen at compile time via the
// real_float / real_double macros; exits via checkError() on failure.
void vectors_add_GPU(cublasHandle_t handle, int N, real alpha, real *x, real *y){
cublasStatus_t ret;
#ifdef real_float
ret=cublasSaxpy(handle, N, &alpha, x, 1, y, 1);
#endif
#ifdef real_double
ret=cublasDaxpy(handle, N, &alpha, x, 1, y, 1);
#endif
checkError(ret, " vectors_add_GPU(). ");
}
// Scale the N-element device vector x to unit Euclidean length: x /= ||x||_2.
// NOTE(review): the guard against a (near-)zero norm is commented out below, so
// a zero vector yields a division by zero and an inf/nan scale factor — confirm
// this is intentional.
void normalize_vector_GPU(cublasHandle_t handle, int N, real *x){
cublasStatus_t ret;
real norm2=0.0;
norm2=Arnoldi::vector_norm2_GPU(handle, N, x);
//if(norm2>1E-15){
// The reciprocal of the norm becomes the cublas<t>scal scaling factor.
norm2=1.0/norm2;
#ifdef real_float
ret=cublasSscal(handle, N, &norm2, x, 1);
#endif
#ifdef real_double
ret=cublasDscal(handle, N, &norm2, x, 1);
#endif
checkError(ret, " normalize_vector_GPU(). ");
//}
//else{
// printf("\nVector length is less than 1E-15!\n");
// exit(-1);
//}
}
// Return the Euclidean norm ||x||_2 of an N-element unit-stride device vector.
// Precision (Snrm2/Dnrm2) is selected at compile time; exits on cuBLAS failure.
real vector_norm2_GPU(cublasHandle_t handle, int N, real *x){
cublasStatus_t ret;
real result;
#ifdef real_float
ret=cublasSnrm2(handle, N, x, 1, &result);
#endif
#ifdef real_double
ret=cublasDnrm2(handle, N, x, 1, &result);
#endif
checkError(ret, " vector_norm2_GPU(). ");
return result;
}
// Copy the N-element unit-stride device vector vec_source into vec_dest
// (device-to-device, via cublas<t>copy). Exits via checkError() on failure.
void vector_copy_GPU(cublasHandle_t handle, int N, real *vec_source, real *vec_dest){
cublasStatus_t ret;
#ifdef real_float
ret=cublasScopy(handle, N, vec_source, 1, vec_dest, 1);
#endif
#ifdef real_double
ret=cublasDcopy(handle, N, vec_source, 1, vec_dest, 1);
#endif
checkError(ret, " vector_copy_GPU(). ");
}
// Return the dot product <vec1, vec2> of two N-element unit-stride device
// vectors. Precision (Sdot/Ddot) is selected at compile time; exits on failure.
real vector_dot_product_GPU(cublasHandle_t handle, int N, real *vec1, real *vec2){
cublasStatus_t ret;
real result;
#ifdef real_float
ret=cublasSdot(handle, N, vec1, 1, vec2, 1, &result);
#endif
#ifdef real_double
ret=cublasDdot(handle, N, vec1, 1, vec2, 1, &result);
#endif
checkError(ret, " vector_dot_product_GPU(). ");
return result;
}
// GEMV wrapper: res = alpha * A * x + beta * res, where A is a RowA x ColA
// column-major device matrix (leading dimension RowA) and x, res are
// unit-stride device vectors of lengths ColA and RowA respectively.
void matrixMultVector_GPU(cublasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, real beta, real *res){
cublasStatus_t ret;
int LDA=RowA;
#ifdef real_float
ret=cublasSgemv(handle, CUBLAS_OP_N, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=cublasDgemv(handle, CUBLAS_OP_N, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixMultVector_GPU(). ");
}
// One thread per row: writes vec[i] into column `col_number` of the Row x Col
// device matrix. I2 presumably maps (row, col, lda) to a column-major offset —
// it is defined elsewhere; confirm against its definition.
__global__ void set_matrix_colomn_kernel(int Row, int Col, real* matrix, real *vec, int col_number){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if((i<Row)&&(col_number<Col)){
matrix[I2(i,col_number,Row)]=vec[i];
}
}
// One thread per row: reads column `col_number` of the Row x Col device matrix
// into vec. Mirror of set_matrix_colomn_kernel.
__global__ void get_matrix_colomn_kernel(int Row, int Col, real* matrix, real *vec, int col_number){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if((i<Row)&&(col_number<Col)){
vec[i]=matrix[I2(i,col_number,Row)];
}
}
// Launcher for set_matrix_colomn_kernel: one thread per row.
// (Row+BLOCKSIZE)/BLOCKSIZE over-provisions by up to one block compared with
// ceil-division (Row+BLOCKSIZE-1)/BLOCKSIZE; harmless because the kernel
// bounds-checks, but slightly wasteful.
void set_matrix_colomn_GPU(int Row, int Col, real *mat, real *vec, int col_number){
dim3 threads(BLOCKSIZE);
int blocks_x=(Row+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
set_matrix_colomn_kernel<<< blocks, threads>>>(Row, Col, mat, vec, col_number);
}
// Launcher for get_matrix_colomn_kernel: one thread per row (grid may include
// up to one extra block; the kernel's bounds check makes that harmless).
void get_matrix_colomn_GPU(int Row, int Col, real *mat, real *vec, int col_number){
dim3 threads(BLOCKSIZE);
int blocks_x=(Row+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
get_matrix_colomn_kernel<<< blocks, threads>>>(Row, Col, mat, vec, col_number);
}
// One thread per element: vec[i] = val for every i < N.
__global__ void set_vector_value_kernel(int N, real val, real *vec){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec[i]=val;
}
}
// Build a deterministic, non-trivial initial Krylov vector: zeros everywhere
// except fixed seed positions 1, N/4, N/2, N-1 and N-4.
// Each element is written exactly once, by the thread that owns it. The
// original had every launched thread re-store all seed values (a redundant
// global-memory race) and wrote vec[1]/vec[N-4] without bounds checks, which
// is out of range for N < 5. When seed positions coincide (small N), the later
// assignment below wins, matching the original's store order.
__global__ void set_initial_Krylov_vector_value_kernel(int N, real *vec){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    real value = 0.0;
    if (i == 1)     value = 1.0;
    if (i == N / 4) value = 1.5;
    if (i == N / 2) value = 0.5;
    if (i == N - 1) value = 1.0;
    if (i == N - 4) value = 1.0;
    vec[i] = value;
}
// Launcher for set_vector_value_kernel: one thread per element (grid may
// include up to one extra block; the kernel's bounds check makes it harmless).
void set_vector_value_GPU(int N, real val, real *vec){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
set_vector_value_kernel<<< blocks, threads>>>(N, val,vec);
}
// Launcher for set_initial_Krylov_vector_value_kernel: one thread per element.
void set_initial_Krylov_vector_value_GPU(int N, real *vec){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
set_initial_Krylov_vector_value_kernel<<< blocks, threads>>>(N, vec);
}
// One thread per element: negate the vector in place, vec[i] = -vec[i].
// ("inverce" = sign inversion, not reciprocal.)
__global__ void set_vector_inverce_kernel(int N, real *vec){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec[i]=-vec[i];
}
}
// Launcher for set_vector_inverce_kernel: one thread per element.
void set_vector_inverce_GPU(int N, real *vec){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
set_vector_inverce_kernel<<< blocks, threads>>>(N, vec);
}
// Partial GEMV wrapper: res = alpha * A(:,1:part_Cols) * x + beta * res, i.e.
// only the first part_Cols columns of the RowA-row column-major matrix A are
// used; x has part_Cols elements and res has RowA.
// NOTE(review): ColA is unused here (the leading dimension is RowA) — it is
// kept only for signature symmetry with matrixMultVector_GPU.
void matrixMultVector_part_GPU(cublasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, int part_Cols, real beta, real *res){
cublasStatus_t ret;
int LDA=RowA;
#ifdef real_float
ret=cublasSgemv(handle, CUBLAS_OP_N, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=cublasDgemv(handle, CUBLAS_OP_N, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixMultVector_part_GPU(). ");
}
// Transposed GEMV wrapper: res = alpha * A^T * x + beta * res, where A is a
// RowA x ColA column-major device matrix; x has RowA elements, res has ColA.
void matrixDotVector_GPU(cublasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, real beta, real *res){
cublasStatus_t ret;
int LDA=RowA;
#ifdef real_float
ret=cublasSgemv(handle, CUBLAS_OP_T, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=cublasDgemv(handle, CUBLAS_OP_T, RowA, ColA, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixDotVector_GPU(). ");
}
// Partial transposed GEMV wrapper: res = alpha * A(:,1:part_Cols)^T * x
// + beta * res; x has RowA elements and res has part_Cols.
// NOTE(review): ColA is unused here (the leading dimension is RowA) — kept for
// signature symmetry with matrixDotVector_GPU.
void matrixDotVector_part_GPU(cublasHandle_t handle, int RowA, real *A, int ColA, real alpha, real *x, int part_Cols, real beta, real *res){
cublasStatus_t ret;
int LDA=RowA;
#ifdef real_float
ret=cublasSgemv(handle, CUBLAS_OP_T, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
#ifdef real_double
ret=cublasDgemv(handle, CUBLAS_OP_T, RowA, part_Cols, &alpha, A, LDA, x, 1, &beta, res, 1);
#endif
checkError(ret, " matrixDotVector_part_GPU(). ");
}
// GEMM wrapper: C = alpha * A * B + beta * C.
// Column-major device matrices: A is RowAC x ColA, B is ColA x ColBC and C is
// RowAC x ColBC; precision (Sgemm/Dgemm) is selected at compile time via the
// real_float / real_double macros. Exits via checkError() on failure.
void matrixMultMatrix_GPU(cublasHandle_t handle, int RowAC, int ColBC, int ColA, real *A, real alpha, real *B, real beta, real *C){
// Leading dimensions equal the row counts of the untransposed operands.
int LDA=RowAC;
int LDB=ColA;
int LDC=RowAC;
cublasStatus_t ret;
#ifdef real_float
ret=cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
#ifdef real_double
ret=cublasDgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
checkError(ret, " matrixMultMatrix_GPU(). ");
}
// GEMM wrapper with A transposed: C = alpha * A^T * B + beta * C.
// Column-major storage: A is held as ColA x RowAC (so op(A) = A^T is
// RowAC x ColA), B is ColA x ColBC and C is RowAC x ColBC.
void matrixTMultMatrix_GPU(cublasHandle_t handle, int RowAC, int ColBC, int ColA, real *A, real alpha, real *B, real beta, real *C){
// LDA is ColA because A is stored transposed (ColA rows in memory).
int LDA=ColA;
int LDB=ColA;
int LDC=RowAC;
cublasStatus_t ret;
#ifdef real_float
ret=cublasSgemm(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
#ifdef real_double
ret=cublasDgemm(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
checkError(ret, " matrixTMultMatrix_GPU(). ");
}
// Complex GEMM wrapper: C = A * B with alpha = 1, beta = 0.
// Column-major device matrices: A is RowAC x ColA, B is ColA x ColBC and C is
// RowAC x ColBC; complex single/double precision (Cgemm/Zgemm) is selected at
// compile time via the real_float / real_double macros.
void matrixMultComplexMatrix_GPU(cublasHandle_t handle, int RowAC, int ColBC, int ColA, cublasComplex *A, cublasComplex *B, cublasComplex *C){
// Leading dimensions equal the row counts of the untransposed operands.
int LDA=RowAC;
int LDB=ColA;
int LDC=RowAC;
cublasStatus_t ret;
cublasComplex alpha;
cublasComplex beta;
#ifdef real_float
alpha=make_cuComplex(1.0, 0.0);
beta=make_cuComplex(0.0, 0.0);
ret=cublasCgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
#ifdef real_double
alpha=make_cuDoubleComplex(1.0, 0.0);
beta=make_cuDoubleComplex(0.0, 0.0);
ret=cublasZgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
RowAC, ColBC, ColA,
&alpha, A, LDA,
B, LDB,
&beta,
C, LDC);
#endif
checkError(ret, " matrixMultComplexMatrix_GPU(). ");
}
//namespace!
}
|
b1f52a0edf27ca7d6a9e0e1d1f8f19fa96cc714f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "macro.h"
#include "common_hip.cuh"
#include "utilities.cuh"
#include <kat/on_device/builtins.cuh>
#include <kat/on_device/non-builtins.cuh>
#include <kat/on_device/printing.cuh>
#include <limits>
/*
To test:
T multiplication_high_bits(T x, T y);
F divide(F dividend, F divisor);
T absolute_value(T x);
T minimum(T x, T y) = delete; // don't worry, it's not really deleted for all types
T maximum(T x, T y) = delete; // don't worry, it's not really deleted for all types
template <typename T, typename S> S sum_with_absolute_difference(T x, T y, S addend);
int population_count(I x);
T bit_reverse(T x) = delete;
unsigned find_last_non_sign_bit(I x) = delete;
T load_global_with_non_coherent_cache(const T* ptr);
int count_leading_zeros(I x) = delete;
T extract(T bit_field, unsigned int start_pos, unsigned int num_bits);
T insert(T original_bit_field, T bits_to_insert, unsigned int start_pos, unsigned int num_bits);
T select_bytes(T x, T y, unsigned byte_selector);
native_word_t funnel_shift(native_word_t low_word, native_word_t high_word, native_word_t shift_amount);
typename std::conditional<Signed, int, unsigned>::type average(
typename std::conditional<Signed, int, unsigned>::type x,
typename std::conditional<Signed, int, unsigned>::type y);
unsigned special_registers::lane_index();
unsigned special_registers::symmetric_multiprocessor_index();
unsigned long long special_registers::grid_index();
unsigned int special_registers::dynamic_shared_memory_size();
unsigned int special_registers::total_shared_memory_size();
} // namespace special_registers
#if (__CUDACC_VER_MAJOR__ >= 9)
lane_mask_t ballot (int condition, lane_mask_t lane_mask = full_warp_mask);
int all_lanes_satisfy (int condition, lane_mask_t lane_mask = full_warp_mask);
int some_lanes_satisfy(int condition, lane_mask_t lane_mask = full_warp_mask);
int all_lanes_agree (int condition, lane_mask_t lane_mask = full_warp_mask);
#else
lane_mask_t ballot (int condition);
int all_lanes_satisfy (int condition);
int some_lanes_satisfy(int condition);
#endif
#if (__CUDACC_VER_MAJOR__ >= 9)
bool is_uniform_across_lanes(T value, lane_mask_t lane_mask = full_warp_mask);
bool is_uniform_across_warp(T value);
lane_mask_t matching_lanes(T value, lane_mask_t lanes = full_warp_mask);
#endif
unsigned int mask_of_lanes::preceding();
unsigned int mask_of_lanes::preceding_and_self();
unsigned int mask_of_lanes::self();
unsigned int mask_of_lanes::succeeding_and_self();
unsigned int mask_of_lanes::succeeding();
lane_mask_t mask_of_lanes::matching_value(lane_mask_t lane_mask, T value);
lane_mask_t mask_of_lanes::matching_value(T value);
int find_first_set(I x);
int count_trailing_zeros(I x) { return find_first_set<I>(x) - 1; }
int count_leading_zeros(I x);
*/
// Compile-time conditional invocation: calls f() only when the tag argument is
// std::integral_constant<bool, true>; the false overload discards the callable.
// Fixes: the original spelled the tag std::integral_constant<false> (missing
// the `bool` type parameter) and omitted the template header on the second
// overload, leaving `F` undeclared — neither overload compiled.
template <typename F>
void invoke_if(F, std::integral_constant<bool, false>) { }
template <typename F>
void invoke_if(F f, std::integral_constant<bool, true>) { f(); }
namespace kernels {

// Element-wise high-bits-of-product kernel:
// results[pos] = high word of lhs[pos] * rhs[pos].
// Launched with a single block; threadIdx.x indexes the test case directly.
// Fixes: `__restrict__` must follow the pointer declarator (`const I* __restrict__`);
// the original `const __restrict__ I*` / `__restrict__ I*` placements do not
// compile. A bounds guard on num_tests is added for launches whose block size
// exceeds the number of test cases.
template <typename I>
__global__ void multiplication_high_bits(
    I*       __restrict__ results,
    const I* __restrict__ lhs,
    const I* __restrict__ rhs,
    size_t num_tests)
{
    // Note: This kernel will only be run with one block
    auto pos = threadIdx.x;
    if (pos < num_tests) {
        results[pos] = kat::builtins::multiplication_high_bits<I>(lhs[pos], rhs[pos]);
    }
}

} // namespace kernels
namespace kernels {

// Exercises kat's integral math/builtin wrappers on the device, recording
// both the computed value and the expected value for every test line; the
// host side then compares the two arrays element by element.
//
// Launch expectation: every thread that runs this kernel writes the entire
// results/expected sequence, so a single-thread, single-block launch
// suffices (more threads would be redundant but harmless — all threads
// write identical values).
//
// `results` and `expected` must each have room for NUM_TEST_LINES elements.
//
// Fixes vs. original: the running index `i` was used throughout but never
// declared, and the NUM_TEST_LINES definition (needed by the host-side
// test) was commented out.
template <typename I>
__global__ void try_out_integral_builtins(I* __restrict__ results, I* __restrict__ expected)
{
	int i { 0 }; // index of the next test line; advanced by every results/expected pair

	// When enabled, prints the index at which each function's test lines
	// begin, to help map a failing index (reported host-side) back to the
	// function under test.
	bool print_first_indices_for_each_function { false };
	auto maybe_print = [&](const char* section_title) {
		if (print_first_indices_for_each_function) {
			printf("%-30s tests start at index %3d\n", section_title, i);
		}
	};

	maybe_print("strictly_between");
	results[i] = kat::strictly_between<I>( I{   0 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   1 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   4 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   5 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   6 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{   8 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{   9 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{  10 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{  11 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 123 }, I{  5 }, I{ 10 } ); expected[i++] = false;

	maybe_print("between_or_equal");
	results[i] = kat::between_or_equal<I>( I{   1 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{   4 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{   5 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{   6 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{   8 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{   9 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{  10 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{  11 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{ 123 }, I{  5 }, I{ 10 } ); expected[i++] = false;

	maybe_print("is_power_of_2");
	results[i] = kat::is_power_of_2<I>(I{ 1}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 2}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 4}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 7}); expected[i++] = false;
	results[i] = kat::is_power_of_2<I>(I{32}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{33}); expected[i++] = false;

	maybe_print("modular_increment");
	results[i] = kat::modular_increment<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 0}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_increment<I>(I{ 1}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_increment<I>(I{ 2}, I{ 3}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 3}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_increment<I>(I{ 4}, I{ 3}); expected[i++] = I{ 2 };

	maybe_print("modular_decrement");
	results[i] = kat::modular_decrement<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 0}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_decrement<I>(I{ 1}, I{ 3}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 2}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_decrement<I>(I{ 3}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_decrement<I>(I{ 4}, I{ 3}); expected[i++] = I{ 0 };

	maybe_print("ipow");
	results[i] = kat::ipow<I>(I{ 0 },   1 ); expected[i++] = I{  0 };
	results[i] = kat::ipow<I>(I{ 0 },   2 ); expected[i++] = I{  0 };
	results[i] = kat::ipow<I>(I{ 0 }, 100 ); expected[i++] = I{  0 };
	results[i] = kat::ipow<I>(I{ 1 },   0 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 1 },   1 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 1 },   2 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 1 }, 100 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 3 },   0 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 3 },   1 ); expected[i++] = I{  3 };
	results[i] = kat::ipow<I>(I{ 3 },   2 ); expected[i++] = I{  9 };
	results[i] = kat::ipow<I>(I{ 3 },   4 ); expected[i++] = I{ 81 };

	maybe_print("unsafe div_rounding_up");
	results[i] = kat::unsafe::div_rounding_up<I>( I{   0 }, I{   1 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   0 }, I{   2 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   1 }, I{   1 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   1 }, I{   2 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };

	maybe_print("div_rounding_up");
	results[i] = kat::div_rounding_up<I>( I{   0 }, I{   1 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{   0 }, I{   2 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{   0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{   1 }, I{   1 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{   1 }, I{   2 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	// Duplicate of the previous line; kept as-is so NUM_TEST_LINES stays correct.
	results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max()    , std::numeric_limits<I>::max() - 1 ); expected[i++] = I{ 2 };
	results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max()     ); expected[i++] = I{ 1 };

	maybe_print("round_down");
	results[i] = kat::round_down<I>( I{   0 }, I{   2 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{   0 }, I{ 123 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{   1 }, I{   2 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 123 };
	results[i] = kat::round_down<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 123 };

	maybe_print("round_down_to_full_warps");
	results[i] = kat::round_down_to_full_warps<I>( I{   0 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{   1 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{   8 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{  16 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{  31 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{  32 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_down_to_full_warps<I>( I{  33 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 125 } ); expected[i++] = I{ 96 };

	// TODO: Consider testing rounding-up with negative dividends

	maybe_print("unsafe round_up");
	results[i] = kat::unsafe::round_up<I>( I{  0 }, I{   1 } ); expected[i++] = I{  0 };
	results[i] = kat::unsafe::round_up<I>( I{  0 }, I{   2 } ); expected[i++] = I{  0 };
	results[i] = kat::unsafe::round_up<I>( I{  0 }, I{ 123 } ); expected[i++] = I{  0 };
	results[i] = kat::unsafe::round_up<I>( I{  1 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::unsafe::round_up<I>( I{  1 }, I{   2 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up<I>( I{ 63 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::unsafe::round_up<I>( I{ 64 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::unsafe::round_up<I>( I{ 65 }, I{  32 } ); expected[i++] = I{ 96 };

	maybe_print("round_up");
	results[i] = kat::round_up<I>( I{  0 }, I{   1 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up<I>( I{  0 }, I{   2 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up<I>( I{  0 }, I{ 123 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up<I>( I{  1 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::round_up<I>( I{  1 }, I{   2 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up<I>( I{ 63 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up<I>( I{ 64 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up<I>( I{ 65 }, I{  32 } ); expected[i++] = I{ 96 };
	results[i] = kat::round_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ std::numeric_limits<I>::max() };

	maybe_print("round_down_to_power_of_2");
	results[i] = kat::round_down_to_power_of_2<I>( I{   1 }, I{ 1 } ); expected[i++] = I{   1 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   2 }, I{ 1 } ); expected[i++] = I{   2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   3 }, I{ 1 } ); expected[i++] = I{   3 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   4 }, I{ 1 } ); expected[i++] = I{   4 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 1 } ); expected[i++] = I{ 123 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   1 }, I{ 2 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   2 }, I{ 2 } ); expected[i++] = I{   2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   3 }, I{ 2 } ); expected[i++] = I{   2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   4 }, I{ 2 } ); expected[i++] = I{   4 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 2 } ); expected[i++] = I{ 122 };

	maybe_print("round_up_to_power_of_2");
	results[i] = kat::round_up_to_power_of_2<I>( I{  1 }, I{ 1 } ); expected[i++] = I{  1 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  2 }, I{ 1 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  3 }, I{ 1 } ); expected[i++] = I{  3 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  4 }, I{ 1 } ); expected[i++] = I{  4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  1 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  2 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  3 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  4 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };

	maybe_print("unsafe round_up_to_power_of_2");
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  1 }, I{ 1 } ); expected[i++] = I{  1 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  2 }, I{ 1 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  3 }, I{ 1 } ); expected[i++] = I{  3 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  4 }, I{ 1 } ); expected[i++] = I{  4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  1 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  2 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  3 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  4 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };

	maybe_print("round_up_to_full_warps");
	results[i] = kat::round_up_to_full_warps<I>( I{  0 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up_to_full_warps<I>( I{  1 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{  8 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 31 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 63 } ); expected[i++] = I{ 64 };

	maybe_print("gcd");
	results[i] = kat::gcd<I>( I{   1 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::gcd<I>( I{   2 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::gcd<I>( I{   1 }, I{   2 } ); expected[i++] = I{  1 };
	results[i] = kat::gcd<I>( I{   2 }, I{   2 } ); expected[i++] = I{  2 };
	results[i] = kat::gcd<I>( I{   8 }, I{   4 } ); expected[i++] = I{  4 };
	results[i] = kat::gcd<I>( I{   4 }, I{   8 } ); expected[i++] = I{  4 };
	results[i] = kat::gcd<I>( I{  10 }, I{   6 } ); expected[i++] = I{  2 };
	results[i] = kat::gcd<I>( I{ 120 }, I{  70 } ); expected[i++] = I{ 10 };
	results[i] = kat::gcd<I>( I{  70 }, I{ 120 } ); expected[i++] = I{ 10 };
	results[i] = kat::gcd<I>( I{  97 }, I{ 120 } ); expected[i++] = I{  1 };

	maybe_print("lcm");
	results[i] = kat::lcm<I>( I{  1 }, I{ 1 } ); expected[i++] = I{  1 };
	results[i] = kat::lcm<I>( I{  2 }, I{ 1 } ); expected[i++] = I{  2 };
	results[i] = kat::lcm<I>( I{  1 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::lcm<I>( I{  2 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::lcm<I>( I{  5 }, I{ 3 } ); expected[i++] = I{ 15 };
	results[i] = kat::lcm<I>( I{  8 }, I{ 4 } ); expected[i++] = I{  8 };
	results[i] = kat::lcm<I>( I{  4 }, I{ 8 } ); expected[i++] = I{  8 };
	results[i] = kat::lcm<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 30 };

	maybe_print("is_even");
	results[i] = kat::is_even<I>( I{   0 } ); expected[i++] = true;
	results[i] = kat::is_even<I>( I{   1 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{   2 } ); expected[i++] = true;
	results[i] = kat::is_even<I>( I{   3 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{ 123 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{ 124 } ); expected[i++] = true;

	maybe_print("is_odd");
	results[i] = kat::is_odd<I>( I{   0 } ); expected[i++] = false;
	results[i] = kat::is_odd<I>( I{   1 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{   2 } ); expected[i++] = false;
	results[i] = kat::is_odd<I>( I{   3 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{ 123 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{ 124 } ); expected[i++] = false;

	maybe_print("log2");
	results[i] = kat::log2<I>( I{   1 } ); expected[i++] = 0;
	results[i] = kat::log2<I>( I{   2 } ); expected[i++] = 1;
	results[i] = kat::log2<I>( I{   3 } ); expected[i++] = 1;
	results[i] = kat::log2<I>( I{   4 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{   6 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{   7 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{   8 } ); expected[i++] = 3;
	results[i] = kat::log2<I>( I{ 127 } ); expected[i++] = 6;

	// We don't have a goot integer sqrt() implementation to offer here. Perhaps
	// we could offer something based on casting to float?
	//
	// results[i] = kat::sqrt<I>( I{   0 } ); expected[i++] =  0;
	// results[i] = kat::sqrt<I>( I{   1 } ); expected[i++] =  1;
	// results[i] = kat::sqrt<I>( I{   2 } ); expected[i++] =  1;
	// results[i] = kat::sqrt<I>( I{   3 } ); expected[i++] =  1;
	// results[i] = kat::sqrt<I>( I{   4 } ); expected[i++] =  2;
	// results[i] = kat::sqrt<I>( I{   5 } ); expected[i++] =  2;
	// results[i] = kat::sqrt<I>( I{   9 } ); expected[i++] =  3;
	// results[i] = kat::sqrt<I>( I{  10 } ); expected[i++] =  3;
	// results[i] = kat::sqrt<I>( I{ 127 } ); expected[i++] = 11;

	maybe_print("div_by_power_of_2");
	results[i] = kat::div_by_power_of_2<I>( I{   0 }, I {  1 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   1 }, I {  1 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I {  1 }); expected[i++] = I{ 111 };
	results[i] = kat::div_by_power_of_2<I>( I{   0 }, I {  2 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   1 }, I {  2 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   2 }, I {  2 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{   3 }, I {  2 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{   4 }, I {  2 }); expected[i++] = I{   2 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I {  2 }); expected[i++] = I{  55 };
	results[i] = kat::div_by_power_of_2<I>( I{   0 }, I { 16 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   1 }, I { 16 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{  15 }, I { 16 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{  16 }, I { 16 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{  17 }, I { 16 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{  32 }, I { 16 }); expected[i++] = I{   2 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 16 }); expected[i++] = I{   6 };

	maybe_print("divides");
	results[i] = kat::divides<I>( I{  1 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  2 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  3 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  2 }, I{  1 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  3 }, I{  1 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  1 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  3 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  4 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  6 }, I{  9 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  9 }, I{  6 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  4 }, I{ 24 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 24 }, I{  4 } ); expected[i++] = false;

	maybe_print("is_divisible_by");
	results[i] = kat::is_divisible_by<I>( I{  0 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  0 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  0 }, I{  3 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  1 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  1 }, I{  3 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  3 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  4 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  9 }, I{  6 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  6 }, I{  9 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 24 }, I{  4 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  4 }, I{ 24 } ); expected[i++] = false;

	maybe_print("is_divisible_by_power_of_2");
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  0 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  0 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  1 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  2 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  2 }, I{  4 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 24 }, I{  4 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 72 }, I{ 16 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 64 }, I{ 16 } ); expected[i++] = true;

	maybe_print("power_of_2_divides");
	results[i] = kat::power_of_2_divides<I>( I{  1 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  2 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  2 }, I{  1 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{  1 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  4 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{  4 }, I{ 24 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 72 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 64 } ); expected[i++] = true;

	maybe_print("log2_of_power_of_2");
	results[i] = kat::log2_of_power_of_2<I>( I{  1 } ); expected[i++] = I{ 0 };
	results[i] = kat::log2_of_power_of_2<I>( I{  2 } ); expected[i++] = I{ 1 };
	results[i] = kat::log2_of_power_of_2<I>( I{  4 } ); expected[i++] = I{ 2 };
	results[i] = kat::log2_of_power_of_2<I>( I{  8 } ); expected[i++] = I{ 3 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 16 } ); expected[i++] = I{ 4 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 32 } ); expected[i++] = I{ 5 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 64 } ); expected[i++] = I{ 6 };

	maybe_print("modulo_power_of_2");
	results[i] = kat::modulo_power_of_2<I>( I{  0 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  1 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  2 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  3 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  4 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  5 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  1 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  2 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  3 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  4 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  5 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  0 }, I{ 4 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  1 }, I{ 4 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  2 }, I{ 4 } ); expected[i++] = I{ 2 };
	results[i] = kat::modulo_power_of_2<I>( I{  3 }, I{ 4 } ); expected[i++] = I{ 3 };
	results[i] = kat::modulo_power_of_2<I>( I{  4 }, I{ 4 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  5 }, I{ 4 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 4 } ); expected[i++] = I{ 3 };
}

// The number of (result, expected) pairs written by try_out_integral_builtins
// above; the host-side test sizes its buffers with this. Keep in sync with
// the test lines. (Was commented out in the original, leaving the host test
// without a definition.)
#define NUM_TEST_LINES 268

} // namespace kernels
// TODO:
// * Test between_or_equal and strictly_between with differing types for all 3 arguments
// * Some floating-point tests
// * gcd tests with values of different types
// * Some tests with negative values
// Instantiates a compile_time_execution_results<> object for type _tp,
// forcing compile-time (constexpr) evaluation of the functions under test.
// Note: the original had a stray trailing line-continuation backslash after
// the macro body, which spliced the following #define into this macro's
// body (ill-formed: '#' may not appear there).
#define INSTANTIATE_CONSTEXPR_MATH_TEST(_tp) \
	compile_time_execution_results<_tp> UNIQUE_IDENTIFIER(test_struct_);

// All integral types for which the test cases below are instantiated.
#define INTEGER_TYPES \
	int8_t, int16_t, int32_t, int64_t, \
	uint8_t, uint16_t, uint32_t, uint64_t, \
	char, short, int, long, long long, \
	signed char, signed short, signed int, signed long, signed long long, \
	unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long
TEST_SUITE("builtins (and non-builtins)") {

// Launches the device-side kernel which exercises the integral builtin
// wrappers, copies back both the computed results and the kernel-recorded
// expected values, and checks that they agree line by line.
//
// Fixes vs. original: the kernel actually defined in this file is
// kernels::try_out_integral_builtins (the original launched a nonexistent
// try_out_integral_math_functions); the test-case name and the suite's
// closing comment were updated to match.
TEST_CASE_TEMPLATE("integral builtins", I, int, unsigned, unsigned long, unsigned long long)
{
	cuda::device_t<> device { cuda::device::current::get() };
	// The kernel writes its entire test sequence from every thread, so a
	// single-thread, single-block launch suffices.
	auto block_size { 1 };
	auto num_grid_blocks { 1 };
	// NOTE(review): make_launch_config takes grid dimensions before block
	// dimensions; with both equal to 1 the order is inconsequential here,
	// but the conventional order is used to avoid copy-paste surprises.
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto device_side_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
	auto device_side_expected_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
	auto host_side_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
	auto host_side_expected_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
	cuda::launch(
		kernels::try_out_integral_builtins<I>,
		launch_config,
		device_side_results.get(), device_side_expected_results.get());
	cuda::memory::copy(host_side_results.get(), device_side_results.get(), sizeof(I) * NUM_TEST_LINES);
	cuda::memory::copy(host_side_expected_results.get(), device_side_expected_results.get(), sizeof(I) * NUM_TEST_LINES);
	for(auto i { 0 }; i < NUM_TEST_LINES; i++) {
		CHECK(host_side_results.get()[i] == host_side_expected_results.get()[i]);
		if (host_side_results.get()[i] != host_side_expected_results.get()[i]) {
			// CHECK does not abort, so report which test line failed
			MESSAGE("index of failure was: " << i);
		}
	}
}

} // TEST_SUITE("builtins (and non-builtins)")
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "macro.h"
#include "common.cuh"
#include "utilities.cuh"
#include <kat/on_device/builtins.cuh>
#include <kat/on_device/non-builtins.cuh>
#include <kat/on_device/printing.cuh>
#include <limits>
/*
To test:
T multiplication_high_bits(T x, T y);
F divide(F dividend, F divisor);
T absolute_value(T x);
T minimum(T x, T y) = delete; // don't worry, it's not really deleted for all types
T maximum(T x, T y) = delete; // don't worry, it's not really deleted for all types
template <typename T, typename S> S sum_with_absolute_difference(T x, T y, S addend);
int population_count(I x);
T bit_reverse(T x) = delete;
unsigned find_last_non_sign_bit(I x) = delete;
T load_global_with_non_coherent_cache(const T* ptr);
int count_leading_zeros(I x) = delete;
T extract(T bit_field, unsigned int start_pos, unsigned int num_bits);
T insert(T original_bit_field, T bits_to_insert, unsigned int start_pos, unsigned int num_bits);
T select_bytes(T x, T y, unsigned byte_selector);
native_word_t funnel_shift(native_word_t low_word, native_word_t high_word, native_word_t shift_amount);
typename std::conditional<Signed, int, unsigned>::type average(
typename std::conditional<Signed, int, unsigned>::type x,
typename std::conditional<Signed, int, unsigned>::type y);
unsigned special_registers::lane_index();
unsigned special_registers::symmetric_multiprocessor_index();
unsigned long long special_registers::grid_index();
unsigned int special_registers::dynamic_shared_memory_size();
unsigned int special_registers::total_shared_memory_size();
} // namespace special_registers
#if (__CUDACC_VER_MAJOR__ >= 9)
lane_mask_t ballot (int condition, lane_mask_t lane_mask = full_warp_mask);
int all_lanes_satisfy (int condition, lane_mask_t lane_mask = full_warp_mask);
int some_lanes_satisfy(int condition, lane_mask_t lane_mask = full_warp_mask);
int all_lanes_agree (int condition, lane_mask_t lane_mask = full_warp_mask);
#else
lane_mask_t ballot (int condition);
int all_lanes_satisfy (int condition);
int some_lanes_satisfy(int condition);
#endif
#if (__CUDACC_VER_MAJOR__ >= 9)
bool is_uniform_across_lanes(T value, lane_mask_t lane_mask = full_warp_mask);
bool is_uniform_across_warp(T value);
lane_mask_t matching_lanes(T value, lane_mask_t lanes = full_warp_mask);
#endif
unsigned int mask_of_lanes::preceding();
unsigned int mask_of_lanes::preceding_and_self();
unsigned int mask_of_lanes::self();
unsigned int mask_of_lanes::succeeding_and_self();
unsigned int mask_of_lanes::succeeding();
lane_mask_t mask_of_lanes::matching_value(lane_mask_t lane_mask, T value);
lane_mask_t mask_of_lanes::matching_value(T value);
int find_first_set(I x);
int count_trailing_zeros(I x) { return find_first_set<I>(x) - 1; }
int count_leading_zeros(I x);
*/
// Invokes f() only when the compile-time condition is true; the false-case
// overload is a deliberate no-op. This is a pre-C++17 stand-in for
// `if constexpr`, dispatched on the tag type of the second argument.
//
// Fixes vs. original: std::integral_constant requires its type argument
// (std::integral_constant<bool, ...>), and the true-case overload was
// missing its own `template <typename F>` header.
template <typename F>
void invoke_if(F, std::integral_constant<bool, false>) { }
template <typename F>
void invoke_if(F f, std::integral_constant<bool, true>) { f(); }
namespace kernels {

// Element-wise computation of the high bits of lhs[pos] * rhs[pos], via the
// kat builtin wrapper under test.
//
// Launch expectation: a single block whose x-dimension covers num_tests
// (one thread per test case).
//
// Fixes vs. original: `__restrict__` must follow the `*` it qualifies
// (`const __restrict__ I*` is ill-formed — restrict cannot qualify a
// non-pointer type), and the previously-unused num_tests parameter now
// guards against out-of-bounds access when blockDim.x > num_tests.
template <typename I>
__global__ void multiplication_high_bits(
	I*       __restrict__ results,
	const I* __restrict__ lhs,
	const I* __restrict__ rhs,
	size_t num_tests)
{
	// Note: This kernel will only be run with one block
	auto pos = threadIdx.x;
	if (pos < num_tests) {
		results[pos] = kat::builtins::multiplication_high_bits<I>(lhs[pos], rhs[pos]);
	}
}

} // namespace kernels
namespace kernels {

/**
 * Device-side driver for kat's integral math helpers.
 *
 * For each test line the device-computed value is written to results[k] and
 * the known-correct value to expected[k], so the host can compare the two
 * arrays index-by-index. Both arrays must have room for every test line
 * (NUM_TEST_LINES entries).
 *
 * Launch configuration: a single thread suffices (additional threads would
 * redundantly write the same values).
 *
 * Fixed: the running index `i` was never declared, so the kernel could not
 * compile; also added the missing maybe_print() call for the first section.
 */
template <typename I>
__global__ void try_out_integral_builtins(I* results, I* __restrict expected)
{
	bool print_first_indices_for_each_function { false };
	// Running test-line index, shared by every section below.
	int i { 0 };
	auto maybe_print = [&](const char* section_title) {
		if (print_first_indices_for_each_function) {
			printf("%-30s tests start at index %3d\n", section_title, i);
		}
	};
	maybe_print("strictly_between"); // added for consistency with the sections below
	results[i] = kat::strictly_between<I>( I{   0 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   1 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   4 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   5 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{   6 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{   8 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{   9 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{  10 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{  11 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 123 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	maybe_print("between_or_equal");
	results[i] = kat::between_or_equal<I>( I{   1 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{   4 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{   5 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{   6 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{   8 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{   9 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{  10 }, I{  5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{  11 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{ 123 }, I{  5 }, I{ 10 } ); expected[i++] = false;
	maybe_print("is_power_of_2");
	results[i] = kat::is_power_of_2<I>(I{ 1}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 2}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 4}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 7}); expected[i++] = false;
	results[i] = kat::is_power_of_2<I>(I{32}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{33}); expected[i++] = false;
	maybe_print("modular_increment");
	results[i] = kat::modular_increment<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 0}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_increment<I>(I{ 1}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_increment<I>(I{ 2}, I{ 3}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 3}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_increment<I>(I{ 4}, I{ 3}); expected[i++] = I{ 2 };
	maybe_print("modular_decrement");
	results[i] = kat::modular_decrement<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 0}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_decrement<I>(I{ 1}, I{ 3}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 2}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_decrement<I>(I{ 3}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_decrement<I>(I{ 4}, I{ 3}); expected[i++] = I{ 0 };
	maybe_print("ipow");
	results[i] = kat::ipow<I>(I{ 0 },   1 ); expected[i++] = I{  0 };
	results[i] = kat::ipow<I>(I{ 0 },   2 ); expected[i++] = I{  0 };
	results[i] = kat::ipow<I>(I{ 0 }, 100 ); expected[i++] = I{  0 };
	results[i] = kat::ipow<I>(I{ 1 },   0 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 1 },   1 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 1 },   2 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 1 }, 100 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 3 },   0 ); expected[i++] = I{  1 };
	results[i] = kat::ipow<I>(I{ 3 },   1 ); expected[i++] = I{  3 };
	results[i] = kat::ipow<I>(I{ 3 },   2 ); expected[i++] = I{  9 };
	results[i] = kat::ipow<I>(I{ 3 },   4 ); expected[i++] = I{ 81 };
	maybe_print("unsafe div_rounding_up");
	results[i] = kat::unsafe::div_rounding_up<I>( I{   0 }, I{   1 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   0 }, I{   2 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   1 }, I{   1 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{   1 }, I{   2 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	maybe_print("div_rounding_up");
	results[i] = kat::div_rounding_up<I>( I{   0 }, I{   1 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{   0 }, I{   2 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{   0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{   1 }, I{   1 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{   1 }, I{   2 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max()    , std::numeric_limits<I>::max() - 1 ); expected[i++] = I{ 2 };
	results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max()     ); expected[i++] = I{ 1 };
	maybe_print("round_down");
	results[i] = kat::round_down<I>( I{   0 }, I{   2 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{   0 }, I{ 123 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{   1 }, I{   2 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 123 };
	results[i] = kat::round_down<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 123 };
	maybe_print("round_down_to_full_warps");
	results[i] = kat::round_down_to_full_warps<I>( I{   0 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{   1 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{   8 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{  16 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{  31 } ); expected[i++] = I{  0 };
	results[i] = kat::round_down_to_full_warps<I>( I{  32 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_down_to_full_warps<I>( I{  33 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 125 } ); expected[i++] = I{ 96 };
	// TODO: Consider testing rounding-up with negative dividends
	maybe_print("unsafe round_up");
	results[i] = kat::unsafe::round_up<I>( I{  0 }, I{   1 } ); expected[i++] = I{  0 };
	results[i] = kat::unsafe::round_up<I>( I{  0 }, I{   2 } ); expected[i++] = I{  0 };
	results[i] = kat::unsafe::round_up<I>( I{  0 }, I{ 123 } ); expected[i++] = I{  0 };
	results[i] = kat::unsafe::round_up<I>( I{  1 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::unsafe::round_up<I>( I{  1 }, I{   2 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up<I>( I{ 63 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::unsafe::round_up<I>( I{ 64 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::unsafe::round_up<I>( I{ 65 }, I{  32 } ); expected[i++] = I{ 96 };
	maybe_print("round_up");
	results[i] = kat::round_up<I>( I{  0 }, I{   1 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up<I>( I{  0 }, I{   2 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up<I>( I{  0 }, I{ 123 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up<I>( I{  1 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::round_up<I>( I{  1 }, I{   2 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up<I>( I{ 63 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up<I>( I{ 64 }, I{  64 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up<I>( I{ 65 }, I{  32 } ); expected[i++] = I{ 96 };
	results[i] = kat::round_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ std::numeric_limits<I>::max() };
	maybe_print("round_down_to_power_of_2");
	results[i] = kat::round_down_to_power_of_2<I>( I{   1 }, I{ 1 } ); expected[i++] = I{   1 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   2 }, I{ 1 } ); expected[i++] = I{   2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   3 }, I{ 1 } ); expected[i++] = I{   3 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   4 }, I{ 1 } ); expected[i++] = I{   4 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 1 } ); expected[i++] = I{ 123 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   1 }, I{ 2 } ); expected[i++] = I{   0 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   2 }, I{ 2 } ); expected[i++] = I{   2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   3 }, I{ 2 } ); expected[i++] = I{   2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{   4 }, I{ 2 } ); expected[i++] = I{   4 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 2 } ); expected[i++] = I{ 122 };
	maybe_print("round_up_to_power_of_2");
	results[i] = kat::round_up_to_power_of_2<I>( I{  1 }, I{ 1 } ); expected[i++] = I{  1 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  2 }, I{ 1 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  3 }, I{ 1 } ); expected[i++] = I{  3 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  4 }, I{ 1 } ); expected[i++] = I{  4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  1 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  2 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  3 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{  4 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };
	maybe_print("unsafe round_up_to_power_of_2");
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  1 }, I{ 1 } ); expected[i++] = I{  1 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  2 }, I{ 1 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  3 }, I{ 1 } ); expected[i++] = I{  3 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  4 }, I{ 1 } ); expected[i++] = I{  4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  1 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  2 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  3 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{  4 }, I{ 2 } ); expected[i++] = I{  4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };
	maybe_print("round_up_to_full_warps");
	results[i] = kat::round_up_to_full_warps<I>( I{  0 } ); expected[i++] = I{  0 };
	results[i] = kat::round_up_to_full_warps<I>( I{  1 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{  8 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 31 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 63 } ); expected[i++] = I{ 64 };
	maybe_print("gcd");
	results[i] = kat::gcd<I>( I{   1 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::gcd<I>( I{   2 }, I{   1 } ); expected[i++] = I{  1 };
	results[i] = kat::gcd<I>( I{   1 }, I{   2 } ); expected[i++] = I{  1 };
	results[i] = kat::gcd<I>( I{   2 }, I{   2 } ); expected[i++] = I{  2 };
	results[i] = kat::gcd<I>( I{   8 }, I{   4 } ); expected[i++] = I{  4 };
	results[i] = kat::gcd<I>( I{   4 }, I{   8 } ); expected[i++] = I{  4 };
	results[i] = kat::gcd<I>( I{  10 }, I{   6 } ); expected[i++] = I{  2 };
	results[i] = kat::gcd<I>( I{ 120 }, I{  70 } ); expected[i++] = I{ 10 };
	results[i] = kat::gcd<I>( I{  70 }, I{ 120 } ); expected[i++] = I{ 10 };
	results[i] = kat::gcd<I>( I{  97 }, I{ 120 } ); expected[i++] = I{  1 };
	maybe_print("lcm");
	results[i] = kat::lcm<I>( I{  1 }, I{ 1 } ); expected[i++] = I{  1 };
	results[i] = kat::lcm<I>( I{  2 }, I{ 1 } ); expected[i++] = I{  2 };
	results[i] = kat::lcm<I>( I{  1 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::lcm<I>( I{  2 }, I{ 2 } ); expected[i++] = I{  2 };
	results[i] = kat::lcm<I>( I{  5 }, I{ 3 } ); expected[i++] = I{ 15 };
	results[i] = kat::lcm<I>( I{  8 }, I{ 4 } ); expected[i++] = I{  8 };
	results[i] = kat::lcm<I>( I{  4 }, I{ 8 } ); expected[i++] = I{  8 };
	results[i] = kat::lcm<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 30 };
	maybe_print("is_even");
	results[i] = kat::is_even<I>( I{   0 } ); expected[i++] = true;
	results[i] = kat::is_even<I>( I{   1 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{   2 } ); expected[i++] = true;
	results[i] = kat::is_even<I>( I{   3 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{ 123 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{ 124 } ); expected[i++] = true;
	maybe_print("is_odd");
	results[i] = kat::is_odd<I>( I{   0 } ); expected[i++] = false;
	results[i] = kat::is_odd<I>( I{   1 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{   2 } ); expected[i++] = false;
	results[i] = kat::is_odd<I>( I{   3 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{ 123 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{ 124 } ); expected[i++] = false;
	maybe_print("log2");
	results[i] = kat::log2<I>( I{   1 } ); expected[i++] = 0;
	results[i] = kat::log2<I>( I{   2 } ); expected[i++] = 1;
	results[i] = kat::log2<I>( I{   3 } ); expected[i++] = 1;
	results[i] = kat::log2<I>( I{   4 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{   6 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{   7 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{   8 } ); expected[i++] = 3;
	results[i] = kat::log2<I>( I{ 127 } ); expected[i++] = 6;
	// We don't have a goot integer sqrt() implementation to offer here. Perhaps
	// we could offer something based on casting to float?
	//
	// results[i] = kat::sqrt<I>( I{   0 } ); expected[i++] =  0;
	// results[i] = kat::sqrt<I>( I{   1 } ); expected[i++] =  1;
	// results[i] = kat::sqrt<I>( I{   2 } ); expected[i++] =  1;
	// results[i] = kat::sqrt<I>( I{   3 } ); expected[i++] =  1;
	// results[i] = kat::sqrt<I>( I{   4 } ); expected[i++] =  2;
	// results[i] = kat::sqrt<I>( I{   5 } ); expected[i++] =  2;
	// results[i] = kat::sqrt<I>( I{   9 } ); expected[i++] =  3;
	// results[i] = kat::sqrt<I>( I{  10 } ); expected[i++] =  3;
	// results[i] = kat::sqrt<I>( I{ 127 } ); expected[i++] = 11;
	maybe_print("div_by_power_of_2");
	results[i] = kat::div_by_power_of_2<I>( I{   0 }, I {  1 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   1 }, I {  1 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I {  1 }); expected[i++] = I{ 111 };
	results[i] = kat::div_by_power_of_2<I>( I{   0 }, I {  2 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   1 }, I {  2 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   2 }, I {  2 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{   3 }, I {  2 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{   4 }, I {  2 }); expected[i++] = I{   2 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I {  2 }); expected[i++] = I{  55 };
	results[i] = kat::div_by_power_of_2<I>( I{   0 }, I { 16 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{   1 }, I { 16 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{  15 }, I { 16 }); expected[i++] = I{   0 };
	results[i] = kat::div_by_power_of_2<I>( I{  16 }, I { 16 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{  17 }, I { 16 }); expected[i++] = I{   1 };
	results[i] = kat::div_by_power_of_2<I>( I{  32 }, I { 16 }); expected[i++] = I{   2 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 16 }); expected[i++] = I{   6 };
	maybe_print("divides");
	results[i] = kat::divides<I>( I{  1 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  2 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  3 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  2 }, I{  1 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  3 }, I{  1 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  1 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{  3 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  4 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  6 }, I{  9 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  9 }, I{  6 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{  4 }, I{ 24 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 24 }, I{  4 } ); expected[i++] = false;
	maybe_print("is_divisible_by");
	results[i] = kat::is_divisible_by<I>( I{  0 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  0 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  0 }, I{  3 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  1 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  1 }, I{  3 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  3 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  2 }, I{  4 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  9 }, I{  6 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{  6 }, I{  9 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 24 }, I{  4 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{  4 }, I{ 24 } ); expected[i++] = false;
	maybe_print("is_divisible_by_power_of_2");
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  0 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  0 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  1 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  2 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{  2 }, I{  4 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 24 }, I{  4 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 72 }, I{ 16 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 64 }, I{ 16 } ); expected[i++] = true;
	maybe_print("power_of_2_divides");
	results[i] = kat::power_of_2_divides<I>( I{  1 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  2 }, I{  0 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  1 }, I{  1 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  2 }, I{  1 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{  1 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  2 }, I{  2 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{  4 }, I{  2 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{  4 }, I{ 24 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 72 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 64 } ); expected[i++] = true;
	maybe_print("log2_of_power_of_2");
	results[i] = kat::log2_of_power_of_2<I>( I{  1 } ); expected[i++] = I{ 0 };
	results[i] = kat::log2_of_power_of_2<I>( I{  2 } ); expected[i++] = I{ 1 };
	results[i] = kat::log2_of_power_of_2<I>( I{  4 } ); expected[i++] = I{ 2 };
	results[i] = kat::log2_of_power_of_2<I>( I{  8 } ); expected[i++] = I{ 3 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 16 } ); expected[i++] = I{ 4 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 32 } ); expected[i++] = I{ 5 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 64 } ); expected[i++] = I{ 6 };
	maybe_print("modulo_power_of_2");
	results[i] = kat::modulo_power_of_2<I>( I{  0 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  1 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  2 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  3 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  4 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  5 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  1 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  2 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  3 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  4 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  5 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  0 }, I{ 4 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  1 }, I{ 4 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{  2 }, I{ 4 } ); expected[i++] = I{ 2 };
	results[i] = kat::modulo_power_of_2<I>( I{  3 }, I{ 4 } ); expected[i++] = I{ 3 };
	results[i] = kat::modulo_power_of_2<I>( I{  4 }, I{ 4 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{  5 }, I{ 4 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 4 } ); expected[i++] = I{ 3 };
	// #define NUM_TEST_LINES 268
}

} // namespace kernels
// TODO:
// * Test between_or_equal and strictly_between with differing types for all 3 arguments
// * Some floating-point tests
// * gcd tests with values of different types
// * Some tests with negative values
// Instantiates the compile-time test fixture for a single type.
// Fixed: the replacement list previously ended in a line-continuation
// backslash, which spliced the following #define into this macro's body
// (a '#' inside a function-like macro's replacement list is a stringize
// operator and must precede a parameter - a preprocessing error).
#define INSTANTIATE_CONSTEXPR_MATH_TEST(_tp) \
	compile_time_execution_results<_tp> UNIQUE_IDENTIFIER(test_struct_);

// Every integer type the math tests are instantiated over.
#define INTEGER_TYPES \
	int8_t, int16_t, int32_t, int64_t, \
	uint8_t, uint16_t, uint32_t, uint64_t, \
	char, short, int, long, long long, \
	signed char, signed short, signed int, signed long, signed long long, \
	unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long
TEST_SUITE("builtins (and non-builtins)") {

// NOTE(review): despite its name, this test case launches the integral
// math-functions kernel, not the high-bits-multiplication kernel; confirm
// the intended target and rename the case if appropriate.
TEST_CASE_TEMPLATE("multiplication high bits", I, int, unsigned, unsigned long, unsigned long long)
{
	// Data arrays here
	cuda::device_t<> device { cuda::device::current::get() };
	auto block_size { 1 };
	auto num_grid_blocks { 1 };
	// NOTE(review): both dimensions are 1 here, so their order in
	// make_launch_config is harmless; confirm the order if either value
	// is ever changed.
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	// Device- and host-side buffers for actual vs. expected results.
	auto device_side_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
	auto device_side_expected_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
	auto host_side_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
	auto host_side_expected_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
	cuda::launch(
		// Fixed: the kernel defined in this file is try_out_integral_builtins;
		// the previous name (try_out_integral_math_functions) does not exist here.
		kernels::try_out_integral_builtins<I>,
		launch_config,
		device_side_results.get(), device_side_expected_results.get());
	cuda::memory::copy(host_side_results.get(), device_side_results.get(), sizeof(I) * NUM_TEST_LINES);
	cuda::memory::copy(host_side_expected_results.get(), device_side_expected_results.get(), sizeof(I) * NUM_TEST_LINES);
	// Compare element-wise, reporting the first differing index on failure.
	for(auto i { 0 }; i < NUM_TEST_LINES; i++) {
		CHECK(host_side_results.get()[i] == host_side_expected_results.get()[i]);
		if (host_side_results.get()[i] != host_side_expected_results.get()[i]) {
			MESSAGE("index of failure was: " << i);
		}
	}
}

} // TEST_SUITE("constexpr_math")
|
e213536cde7ec25e21a50d3ff1264bb0bc51fb38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions mixed zc -> ds
*/
#include "magmasparse_internal.h"
#define blksize 512
// TODO get rid of global variable!
__device__ int flag = 0;
// Element-wise widening copy: converts the first M entries of the
// single-precision complex array SA into the double-precision complex
// array A. N, ldsa, lda and RMAX are accepted but not used by this
// kernel - the data is treated as a flat vector of length M.
__global__ void
magmaint_clag2z_sparse( int M, int N,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda,
double RMAX )
{
// Each block covers a window of outer_bsize = blockDim.x * 512 consecutive
// elements; its threads stride through the window blockDim.x at a time.
int inner_bsize = blockDim.x;
int outer_bsize = inner_bsize * 512;
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
// global thread index
// NOTE(review): the guard compares the flat thread index against M while
// the loop below already clamps to min(M, ...); presumably it filters out
// whole blocks past the end - confirm for launches where the grid
// overshoots M.
if( thread_id < M ){
for( int i= outer_bsize * blockIdx.x + threadIdx.x;
i<min( M, outer_bsize * ( blockIdx.x + 1)); i+=inner_bsize){
// NOTE(review): cuComplexFloatToDouble is the CUDA complex helper; in a
// HIP build this usually maps to a hip equivalent - confirm it resolves.
A[i] = cuComplexFloatToDouble( SA[i] );
}
}
}
/**
Purpose
-------
CLAG2Z converts a COMPLEX matrix SA to a COMPLEX_16
matrix A.
RMAX is the overflow for the COMPLEX arithmetic.
CLAG2Z checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
M INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
N INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA COMPLEX array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA.  LDSA >= max(1,M).
@param[out]
A COMPLEX_16 array, dimension (LDA,N)
On exit, if INFO=0, the M-by-N coefficient matrix A; if
INFO>0, the content of A is unspecified.
@param[in]
lda INTEGER
The leading dimension of the array A.  LDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the COMPLEX
overflow threshold, in this case, the content
of SA in exit is unspecified.
@ingroup magmasparse_caux
********************************************************************/
extern "C" void
magmablas_clag2z_sparse(
    magma_int_t M, magma_int_t N,
    const magmaFloatComplex *SA, magma_int_t ldsa,
    magmaDoubleComplex *A, magma_int_t lda,
    magma_queue_t queue,
    magma_int_t *info )
{
    /*
    (TODO note from original dense source)
    Note
    ----
          - We have to provide INFO at the end that zlag2c isn't doable now.
          - Transfer a single value TO/FROM CPU/GPU
          - SLAMCH that's needed is called from underlying BLAS
          - Only used in iterative refinement
          - Do we want to provide this in the release?
    */
    // Validate dimensions and leading dimensions.
    *info = 0;
    if ( M < 0 )
        *info = -1;
    else if ( N < 0 )
        *info = -2;
    else if ( lda < max(1,M) )
        *info = -4;
    else if ( ldsa < max(1,M) )
        *info = -6;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        //return *info;
    }

    double RMAX = (double)lapackf77_slamch("O");

    // One block handles blksize * blksize consecutive elements (the kernel's
    // outer window is blockDim.x * 512 with blockDim.x == blksize == 512).
    int block;
    dim3 dimBlock(blksize); // Number of Threads per Block
    block = (M/blksize)/blksize;
    if (block*blksize*blksize < M)
        block++;
    dim3 dimGrid(block); // Number of Blocks

    // NOTE(review): HIP's hipMemcpyToSymbol conventionally takes
    // HIP_SYMBOL(flag) rather than the bare device variable - confirm this
    // compiles against the HIP runtime in use.
    hipMemcpyToSymbol( flag, info, sizeof(flag) );    // flag = 0
    // Fixed: the kernel's parameter order is (SA, ldsa, A, lda); the previous
    // call passed lda and ldsa swapped (harmless only because the kernel
    // currently ignores both leading dimensions).
    hipLaunchKernelGGL(( magmaint_clag2z_sparse), dim3(dimGrid) , dim3(dimBlock), 0, queue->cuda_stream ,
        M, N, SA, ldsa, A, lda, RMAX );
    hipMemcpyFromSymbol( info, flag, sizeof(flag) );  // info = flag
}
| e213536cde7ec25e21a50d3ff1264bb0bc51fb38.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions mixed zc -> ds
*/
#include "magmasparse_internal.h"
#define blksize 512
// TODO get rid of global variable!
__device__ int flag = 0;
// Element-wise widening copy: converts the first M entries of the
// single-precision complex array SA into the double-precision complex
// array A. N, ldsa, lda and RMAX are accepted but not used by this
// kernel - the data is treated as a flat vector of length M.
__global__ void
magmaint_clag2z_sparse( int M, int N,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda,
double RMAX )
{
// Each block covers a window of outer_bsize = blockDim.x * 512 consecutive
// elements; its threads stride through the window blockDim.x at a time.
int inner_bsize = blockDim.x;
int outer_bsize = inner_bsize * 512;
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
// global thread index
// NOTE(review): the guard compares the flat thread index against M while
// the loop below already clamps to min(M, ...); presumably it filters out
// whole blocks past the end - confirm for launches where the grid
// overshoots M.
if( thread_id < M ){
for( int i= outer_bsize * blockIdx.x + threadIdx.x;
i<min( M, outer_bsize * ( blockIdx.x + 1)); i+=inner_bsize){
A[i] = cuComplexFloatToDouble( SA[i] );
}
}
}
/**
Purpose
-------
CLAG2Z converts a COMPLEX matrix SA to a COMPLEX_16
matrix A.
RMAX is the overflow for the COMPLEX arithmetic.
CLAG2Z checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
M INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
N INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA COMPLEX array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA.  LDSA >= max(1,M).
@param[out]
A COMPLEX_16 array, dimension (LDA,N)
On exit, if INFO=0, the M-by-N coefficient matrix A; if
INFO>0, the content of A is unspecified.
@param[in]
lda INTEGER
The leading dimension of the array A.  LDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the COMPLEX
overflow threshold, in this case, the content
of SA in exit is unspecified.
@ingroup magmasparse_caux
********************************************************************/
extern "C" void
magmablas_clag2z_sparse(
    magma_int_t M, magma_int_t N,
    const magmaFloatComplex *SA, magma_int_t ldsa,
    magmaDoubleComplex *A, magma_int_t lda,
    magma_queue_t queue,
    magma_int_t *info )
{
    /*
    (TODO note from original dense source)
    Note
    ----
          - We have to provide INFO at the end that zlag2c isn't doable now.
          - Transfer a single value TO/FROM CPU/GPU
          - SLAMCH that's needed is called from underlying BLAS
          - Only used in iterative refinement
          - Do we want to provide this in the release?
    */
    // Validate dimensions and leading dimensions.
    *info = 0;
    if ( M < 0 )
        *info = -1;
    else if ( N < 0 )
        *info = -2;
    else if ( lda < max(1,M) )
        *info = -4;
    else if ( ldsa < max(1,M) )
        *info = -6;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        //return *info;
    }

    double RMAX = (double)lapackf77_slamch("O");

    // One block handles blksize * blksize consecutive elements (the kernel's
    // outer window is blockDim.x * 512 with blockDim.x == blksize == 512).
    int block;
    dim3 dimBlock(blksize); // Number of Threads per Block
    block = (M/blksize)/blksize;
    if (block*blksize*blksize < M)
        block++;
    dim3 dimGrid(block); // Number of Blocks

    cudaMemcpyToSymbol( flag, info, sizeof(flag) );    // flag = 0
    // Fixed: the kernel's parameter order is (SA, ldsa, A, lda); the previous
    // call passed lda and ldsa swapped (harmless only because the kernel
    // currently ignores both leading dimensions).
    magmaint_clag2z_sparse<<< dimGrid , dimBlock, 0, queue->cuda_stream >>>
        ( M, N, SA, ldsa, A, lda, RMAX );
    cudaMemcpyFromSymbol( info, flag, sizeof(flag) );  // info = flag
}
|
6c9780aa4f63e6c8f30ecce628d5ab823daeda4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Per-thread vector add: thread t sets A[t] = 0 and B[t] = t, then stores
// their sum into C[t]. Intended for a single-block launch where blockDim.x
// equals the vector length.
__global__ void vecAdd(float* A, float* B, float* C) {
	const int idx = threadIdx.x;
	A[idx] = 0;
	B[idx] = idx;
	C[idx] = A[idx] + B[idx];
}
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define SIZE 10
// Maps grid coordinates (x, y) to diagonal coordinates: the column index is
// preserved and the row index becomes the anti-diagonal number x + y - 1.
// Inverse of map_off.
__device__ __host__
void map_on( int x, int y, int* mx, int* my ) {
	const int diag = x + y - 1;
	*mx = x;
	*my = diag;
}
// Inverse of map_on: recovers logical (x, y) from anti-diagonal
// storage coordinates (mx, my).
__device__ __host__
void map_off( int mx, int my, int* x, int* y ) {
    *x = mx;
    *y = 1 + my - mx;
}
/*
void transform(int m, int n) {
// alloc d x n grid
// copy each (x,y) => (d-y+1, d-x+1)
int num_diag = m + n - 1;
int nm = num_diag, nn = n;
printf("tranforming %i x %i to %i x %i\n", m, n, nm, nn);
int x, y;
for(y = 1; y <= m; y++) {
for(x = 1; x <= n; x++) {
int mx, my;
map_on( x, y, &mx, &my );
//int ox = 0;
//int oy = 0;
//map_off( nx, ny, &ox, &oy );
printf("(%i, %i) => (%i, %i) => (%i, %i)\n", x, y, mx, my);
}
}
}
*/
// Reverses `string` in place and returns it.
// Fix: the original strcpy'd from the scratch buffer unconditionally,
// dereferencing NULL when malloc failed; now the input is returned
// unchanged in that case.
char* strrev(char * string) {
    int length = strlen(string);
    char * result = (char*)malloc(length+1);
    if( result == NULL )
        return string;              // out of memory: leave input as-is
    result[length] = '\0';
    int i, j;
    for ( i = length-1, j=0; i >= 0; i--, j++ )
        result[j] = string[i];
    strcpy(string, result);
    free(result);
    return string;
}
// Reads the entire contents of `filename` into a freshly malloc'd
// buffer (*s), storing its size in *len. The buffer is NOT
// NUL-terminated. Returns 0 on success, -1 on failure; on failure *s
// and *len are left untouched (callers rely on this for fallbacks).
// Fixes: the original ignored malloc failure and the fread result.
int read_string(char* filename, char** s, int* len) {
    FILE* fd = fopen( filename, "r" );
    if( fd == NULL ) {
        fprintf( stderr, "Could not open file %s for reading\n", filename );
        return -1;
    }
    fseek(fd, 0, SEEK_END);
    long size = ftell(fd);
    fseek(fd, 0, SEEK_SET);
    char* buf = (char*)malloc( size * sizeof(char) );
    if( buf == NULL ) {                              // was unchecked
        fclose( fd );
        return -1;
    }
    if( size > 0 && fread(buf, size, 1, fd) != 1 ) { // was ignored
        free( buf );
        fclose( fd );
        return -1;
    }
    fclose( fd );
    *s = buf;
    *len = (int)size;
    return 0;
}
// Current wall-clock time as seconds since the epoch, with microsecond
// resolution folded into the fractional part.
double get_timeofday() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + (now.tv_usec / 1000000.0);
}
// grid API
// Direction a cell's LCS value was derived from (used by backtracking).
typedef enum { none = 0, up, left, upleft } dir_t;
// One cell of the LCS dynamic-programming table.
typedef struct cell_t {
    int len;    // LCS length at this cell
    dir_t dir;  // predecessor direction that produced `len`
} cell_t;
// Device-resident LCS grid: the DP table plus both input strings.
// All pointer members are device pointers (see grid_init).
typedef struct grid_t {
    int w;          // width  (length of sx)
    int h;          // height (length of sy)
    int d;          // number of anti-diagonals: w + h - 1
    char* sx;       // first input string (device)
    char* sy;       // second input string (device)
    cell_t* cells;  // (d+1) x (w+1) cell table (device), zero-initialized
} grid_t;
// Allocates `size` bytes of device memory into *pptr; on failure
// prints the caller-supplied tag `code` and terminates the process.
void cudaMallocOrDie( void** pptr, const size_t size, const char* code ) {
    if( hipMalloc(pptr, size) == hipSuccess )
        return;
    printf("hipMalloc Failed: %s\n", code);
    exit(-1);
}
// Copies `size` bytes in direction `kind`; on failure prints the
// caller-supplied tag `code` and terminates the process.
// Fix: the failure message previously said "hipMalloc Failed" even
// though this wraps hipMemcpy, which made failures misleading to debug.
void cudaMemcpyOrDie( void* dst, const void* src, const size_t size, const enum hipMemcpyKind kind, const char* code ) {
    if( hipMemcpy( dst, src, size, kind ) != hipSuccess) {
        printf("hipMemcpy Failed: %s\n", code);
        exit(-1);
    }
}
// Fills `size` bytes of device memory at `dst` with `value`; on
// failure prints the caller-supplied tag `code` and terminates.
void cudaMemsetOrDie( void* dst, char value, size_t size, const char* code ) {
    const bool ok = ( hipMemset( dst, value, size ) == hipSuccess );
    if( !ok ) {
        printf("hipMemset Failed: %s\n", code);
        exit(-1);
    }
}
// Builds the device-resident LCS grid: copies both input strings to
// the device, allocates a zeroed (d+1) x (w+1) cell table, then copies
// the filled-in grid_t descriptor to device memory at `dgrid`.
// Fix: `size` is a size_t and was printed with %i (undefined behavior,
// truncates on LP64); now printed with %zu.
void grid_init(grid_t* dgrid, const char* s1, const int m, const char* s2, const int n) {
    grid_t grid;
    // set sizes
    grid.w = m;
    grid.h = n;
    grid.d = grid.w + grid.h - 1;
    // copy strings to device
    cudaMallocOrDie( (void**)&(grid.sx), m*sizeof(char), "grid_init 1" );
    cudaMemcpyOrDie( grid.sx, s1, m*sizeof(char), hipMemcpyHostToDevice, "grid_init 2" );
    cudaMallocOrDie( (void**)&(grid.sy), n*sizeof(char), "grid_init 3" );
    cudaMemcpyOrDie( grid.sy, s2, n*sizeof(char), hipMemcpyHostToDevice, "grid_init 4" );
    // create zeroed cell grid on device
    size_t size = (grid.d + 1) * (grid.w + 1) * sizeof(cell_t);
    printf("size=%zu\n", size);
    cudaMallocOrDie( (void**)&(grid.cells), size, "grid_init 5" );
    cudaMemsetOrDie( grid.cells, 0, size, "grid_init 6" );
    // copy the descriptor we just filled to the device. NOTE all
    // pointer members are device pointers.
    cudaMemcpyOrDie( dgrid, &grid, sizeof(grid_t), hipMemcpyHostToDevice, "grid_init 7" );
}
// Returns a pointer to cell (x, y) of the DP table; rows are laid out
// with stride (w + 1).
// Fix: removed a dead map_on() call whose outputs (mx, my) were
// computed but never used by the index expression.
// NOTE(review): it is possible the index was *intended* to use the
// diagonal mapping (mx, my) -- confirm against the storage layout in
// grid_init before enabling the kernels.
__device__
cell_t* grid_cell(const grid_t* grid, const int x, const int y) {
    return &(grid->cells[ (grid->w + 1)*x + y ]);
}
/*
void build_table(int m, int n) {
int num_diag = m + n - 1;
int x, y;
for(y = 1; y <= m; y++) {
for(x = 1; x <= n; x++) {
int mx, my;
// get neighboring values
map_on( x, y - 1, &mx, &my );
int val_up = grid( mx, my );
map_on( x - 1, y, &mx, &my );
int val_left = grid( mx, my );
map_on( x - 1, y - 1, &mx, &my );
int val_upleft = grid( mx, my );
// set value of cell
int val;
if( s2[x-1] == s1[y-1] ) {
val = val_upleft + 1;
} else {
val = max( val_up, val_left );
}
map_on( x, y, &mx, &my );
grid( mx, my ) = val;
// same LCS build table code, but with nx and ny mapping.
// x = thread. maybe x/p loop.
}
}
}
*/
/*
Computes a block of the LCS length matrix. Before the call,
all cells immediately above and to the left of the block of interest
must be calculated. that is, the cells above as ([x, x+w], y-1) and
to the left as ([y, y+h], x-1). These are the cells represented by #.
x x+w
+-------+-------+-------+-------+
| * * * | * * * | * * * | |
| * * * | * * # | # # # | |
+-------+-------+-------+-------+ y
| * * * | * * # | 0 0 0 | |
| * * * | * * # | 0 0 0 | |
+-------+-------+-------+-------+ y+h
| | | | |
| | | | |
+-------+-------+-------+-------+
This structure allows for usage by either message passing or shared memory based
formulations.
*/
// Kernel: computes `bh` consecutive anti-diagonals of the LCS length
// table starting at diagonal `d`. Each thread owns one column x and
// walks its cells in diagonal order, so the (i-1,j), (i,j-1) and
// (i-1,j-1) neighbors it reads were produced by earlier diagonals.
// NOTE(review): `bw` is currently unused, and there is no bounds check
// on x against the valid column range of each diagonal -- the
// (commented-out) launch site must size the grid accordingly; confirm
// before enabling.
__global__
void lcs_length_row_cuda( const grid_t* grid, int d, int bw, int bh ) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y;
    for(y = d; y < d + bh; y++) {
        int i, j;
        // translate (column, diagonal) back to logical coordinates
        map_off( x, y, &i, &j );
        //printf( "process [%i, %i] => [%i, %i]\n", d, x, j, i );
        cell_t* cell = grid_cell( grid, i, j );
        //printf("(%i,%i)\n", i, j);
        if(grid->sx[i-1] == grid->sy[j-1]) {
            // characters match: extend the diagonal predecessor by one
            cell->len = grid_cell( grid, i-1, j-1 )->len + 1;
            cell->dir = upleft;
            //printf("ul: [%i, %i] = %c\n", j, i, grid->sx[i-1]);
        } else {
            // no match: inherit the longer of the up/left neighbors
            cell_t* cell_up = grid_cell( grid, i-1, j );
            cell_t* cell_left = grid_cell( grid, i, j-1 );
            if(cell_up->len >= cell_left->len) {
                cell->len = cell_up->len;
                cell->dir = up;
            } else {
                cell->len = cell_left->len;
                cell->dir = left;
            }
        }
    }
}
// Host driver for the (work-in-progress) diagonal-wavefront LCS length
// computation: fetches the grid descriptor to learn its dimensions,
// then iterates over bands of `bh` diagonals. The kernel launch itself
// is still commented out, so this currently only prints the schedule.
// NOTE(review): nd = d/bh truncates, so the last (d % bh) diagonals
// would never be processed -- verify before enabling the launch.
void lcs_length_cuda( const grid_t* dgrid, int maxb, int maxt ) {
    // FIXME
    grid_t lgrid;
    grid_t* grid = &lgrid;
    cudaMemcpyOrDie( grid, dgrid, sizeof(grid_t), hipMemcpyDeviceToHost, "lcs_length_cuda 1" );
    int bw = ceil(grid->w / (double)maxb);  // columns handled per block
    int bh = 10;                            // diagonals per band
    int nd = grid->d/bh;
    int d;
    for(d = 0; d < nd; d ++) {
        // first/last column indices intersecting diagonal d
        int sx = max(d - grid->h, 0);
        int fx = min(d, grid->w - 1);
        int nt = min(bw, maxt);
        int nb = min(ceil((fx - sx)/ (double)bw), (double)maxb);
        printf("sx=%i, fx=%i, nb=%i, nt=%i, bw=%i, bh=%i\n", sx, fx, nb, nt, bw, bh);
        //lcs_length_row_cuda<<<nb, nt>>>( dgrid, d, bw, bh );
    }
}
// Single-threaded kernel: follows the direction links from (i, j) back
// toward the table origin, collecting matched characters into `res`.
// The collected string is the LCS in reverse order, NUL-terminated.
__global__
void lcs_backtrack_cuda(const grid_t* grid, int i, int j, const int maxlen, char* res) {
    int out = 0;
    while (i > 0 && j > 0) {
        const cell_t* cell = grid_cell(grid, i, j);
        switch (cell->dir) {
        case upleft:
            res[out++] = grid->sx[i - 1];
            --i;
            --j;
            break;
        case up:
            --i;
            break;
        case left:
            --j;
            break;
        default:
            // malformed table entry; the original also fell through
            // without advancing
            break;
        }
    }
    res[out] = 0;
}
// Computes the LCS of s1 (length m) and s2 (length n) into the host
// buffer `res` (at least `maxlen` bytes): builds the device grid, runs
// the (currently stubbed-out) length and backtrack stages, then copies
// the result back and reverses it.
// Fixes: the device result buffer is zeroed so `res` is a valid
// (empty) string while the backtrack launch is commented out, and all
// device allocations (previously leaked) are released.
void lcs_cuda(const char* s1, const int m, const char* s2, const int n, const int maxlen, char* res) {
    // create grid on device
    grid_t* grid;
    cudaMallocOrDie( (void**)&grid, sizeof(grid_t), "lcs_cuda 1");
    grid_init( grid, s1, m, s2, n );
    // run length algorithm (makes kernel calls)
    lcs_length_cuda( grid, 20, 32 );
    // create a string on the device for result, and calculate result
    char* dres;
    cudaMallocOrDie( (void**)&dres, maxlen*sizeof(char), "lcs_cuda 2");
    cudaMemsetOrDie( dres, 0, maxlen*sizeof(char), "lcs_cuda 2b" );  // was uninitialized garbage
    // FIXME use grid h/w
    //lcs_backtrack_cuda<<<1, 1>>>(grid, m, n, maxlen, dres);
    // copy result back, and reverse it (calculated backwards).
    cudaMemcpyOrDie( res, dres, maxlen, hipMemcpyDeviceToHost, "lcs_cuda 3" );
    strrev(res);
    // release device memory (previously leaked): fetch the descriptor
    // back so grid_init's per-member allocations can be freed too.
    grid_t hgrid;
    cudaMemcpyOrDie( &hgrid, grid, sizeof(grid_t), hipMemcpyDeviceToHost, "lcs_cuda 4" );
    hipFree( hgrid.sx );
    hipFree( hgrid.sy );
    hipFree( hgrid.cells );
    hipFree( grid );
    hipFree( dres );
}
// Driver: loads the two input strings (falling back to the baked-in
// literals when the files are missing -- read_string leaves its
// outputs untouched on failure), runs the CUDA LCS, and prints timing.
// Fixes: strlen() returns size_t and was printed with %i (UB on LP64);
// malloc is now checked; res is force-terminated so strlen cannot read
// past the buffer while the backtrack kernel remains stubbed out.
int main() {
    char* s1;
    char* s2;
    int m, n;
    s1="MZJAWXUMZJAWXUMZJAWXUMZJAWXU";
    s2="XMJYAUZXMJYAUZXMJYAUZXMJYAUZ";
    m = strlen(s1);
    n = strlen(s2);
    read_string("small_test1.txt", &s1, &m);
    read_string("small_test2.txt", &s2, &n);
    // allocate a result string (max possible LCS length + NUL)
    int reslen = max(m, n) + 1;
    char* res = (char*)malloc( reslen*sizeof(char) );
    if (res == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    double start = get_timeofday();
    lcs_cuda( s1, m, s2, n, reslen, res );
    double end = get_timeofday();
    res[reslen - 1] = '\0';   // bound strlen even if the result was not terminated
    printf("Time Taken: %f sec %zu %s\n", end-start, strlen(res), res);
    free(res);
    return 0;
}
/*
int main() {
char* s1;
char* s2;
int m, n;
//s1="MZJAWXU";
//s2="XMJYAUZ";
//m = strlen(s1);
//n = strlen(s2);
read_string("large_test1.txt", &s1, &m);
read_string("large_test2.txt", &s2, &n);
// copy strings to device
char* ds1;
char* ds2;
hipMalloc( &ds1, m );
hipMalloc( &ds2, n );
hipMemcpy( ds1, s1, m, hipMemcpyHostToDevice );
hipMemcpy( ds2, s2, n, hipMemcpyHostToDevice );
// allocate space for the grid on the device
int d = m + n - 1;
int mh = d, mw = m;
size_t size = (mh + 1) * (mw + 1) * sizeof(cell_t);
cell_t* cells;
hipMalloc( &cells, size );
hipMemset( cells, 0, size );
// allocate a result string on the device.
int reslen;
char* res;
reslen = max(m, n) + 1;
hipMalloc(&res, reslen*sizeof(char));
double start = get_timeofday();
lcs_cuda( ds1, m, ds2, n, reslen, res );
double end = get_timeofday();
char* rres = (char*)malloc(reslen);
hipMemcpy( rres, res, reslen, hipMemcpyDeviceToHost );
printf("Time Taken: %f sec %i %s\n", end-start, strlen(rres), rres);
}
*/
| 6c9780aa4f63e6c8f30ecce628d5ab823daeda4c.cu | __global__ void vecAdd(float* A, float* B, float* C) {
int i = threadIdx.x;
A[i] = 0;
B[i] = i;
C[i] = A[i] + B[i];
}
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define SIZE 10
// Folds table coordinates (x, y) onto diagonal storage coordinates:
// column unchanged, row replaced by the anti-diagonal index of (x, y).
__device__ __host__
void map_on( int x, int y, int* mx, int* my ) {
    *mx = x;
    *my = y + x - 1;
}
// Unfolds diagonal storage coordinates (mx, my) back to the logical
// table coordinates (inverse of map_on).
__device__ __host__
void map_off( int mx, int my, int* x, int* y ) {
    const int col = mx;
    *x = col;
    *y = my - col + 1;
}
/*
void transform(int m, int n) {
// alloc d x n grid
// copy each (x,y) => (d-y+1, d-x+1)
int num_diag = m + n - 1;
int nm = num_diag, nn = n;
printf("tranforming %i x %i to %i x %i\n", m, n, nm, nn);
int x, y;
for(y = 1; y <= m; y++) {
for(x = 1; x <= n; x++) {
int mx, my;
map_on( x, y, &mx, &my );
//int ox = 0;
//int oy = 0;
//map_off( nx, ny, &ox, &oy );
printf("(%i, %i) => (%i, %i) => (%i, %i)\n", x, y, mx, my);
}
}
}
*/
// Reverses `string` in place and returns it.
// Fix: the original strcpy'd from the scratch buffer even when malloc
// returned NULL; now the input is returned unchanged in that case.
char* strrev(char * string) {
    int length = strlen(string);
    char * result = (char*)malloc(length+1);
    if( result == NULL )
        return string;              // allocation failed: input untouched
    for ( int i = 0; i < length; ++i )
        result[i] = string[length - 1 - i];
    result[length] = '\0';
    strcpy(string, result);
    free(result);
    return string;
}
// Reads the whole of `filename` into a malloc'd buffer (*s), storing
// its size in *len (buffer is NOT NUL-terminated). Returns 0 on
// success, -1 on failure; outputs are untouched on failure (callers
// rely on this to fall back to defaults).
// Fixes: the original ignored malloc failure and the fread result.
int read_string(char* filename, char** s, int* len) {
    FILE* fd = fopen( filename, "r" );
    if( fd == NULL ) {
        fprintf( stderr, "Could not open file %s for reading\n", filename );
        return -1;
    }
    fseek(fd, 0, SEEK_END);
    long size = ftell(fd);
    fseek(fd, 0, SEEK_SET);
    char* buf = (char*)malloc( size * sizeof(char) );
    if( buf == NULL ) {                              // was unchecked
        fclose( fd );
        return -1;
    }
    if( size > 0 && fread(buf, size, 1, fd) != 1 ) { // was ignored
        free( buf );
        fclose( fd );
        return -1;
    }
    fclose( fd );
    *s = buf;
    *len = (int)size;
    return 0;
}
// Wall-clock time in seconds (microsecond resolution) as a double.
double get_timeofday() {
    struct timeval t;
    gettimeofday(&t, NULL);
    double seconds = t.tv_sec;
    double fraction = t.tv_usec / 1000000.0;
    return seconds + fraction;
}
// grid API
// Direction a cell's LCS value came from (consumed by backtracking).
typedef enum { none = 0, up, left, upleft } dir_t;
// One cell of the LCS dynamic-programming table.
typedef struct cell_t {
    int len;    // LCS length at this cell
    dir_t dir;  // predecessor direction that produced `len`
} cell_t;
// Device-resident LCS grid: DP table plus the two input strings.
// All pointer members are device pointers (see grid_init).
typedef struct grid_t {
    int w;          // width  (length of sx)
    int h;          // height (length of sy)
    int d;          // number of anti-diagonals: w + h - 1
    char* sx;       // first input string (device)
    char* sy;       // second input string (device)
    cell_t* cells;  // (d+1) x (w+1) cell table (device), zero-initialized
} grid_t;
// Allocates `size` bytes of device memory into *pptr; on failure
// prints the caller-supplied tag `code` and terminates the process.
void cudaMallocOrDie( void** pptr, const size_t size, const char* code ) {
    if( cudaMalloc(pptr, size) == cudaSuccess )
        return;
    printf("cudaMalloc Failed: %s\n", code);
    exit(-1);
}
// Copies `size` bytes in direction `kind`; on failure prints the
// caller-supplied tag `code` and terminates the process.
// Fix: the failure message previously said "cudaMalloc Failed" even
// though this wraps cudaMemcpy, which made failures misleading to debug.
void cudaMemcpyOrDie( void* dst, const void* src, const size_t size, const enum cudaMemcpyKind kind, const char* code ) {
    if( cudaMemcpy( dst, src, size, kind ) != cudaSuccess) {
        printf("cudaMemcpy Failed: %s\n", code);
        exit(-1);
    }
}
// Fills `size` bytes of device memory at `dst` with `value`; on
// failure prints the caller-supplied tag `code` and terminates.
void cudaMemsetOrDie( void* dst, char value, size_t size, const char* code ) {
    const bool ok = ( cudaMemset( dst, value, size ) == cudaSuccess );
    if( !ok ) {
        printf("cudaMemset Failed: %s\n", code);
        exit(-1);
    }
}
// Builds the device-resident LCS grid: copies both input strings to
// the device, allocates a zeroed (d+1) x (w+1) cell table, then copies
// the filled-in grid_t descriptor to device memory at `dgrid`.
// Fix: `size` is a size_t and was printed with %i (undefined behavior,
// truncates on LP64); now printed with %zu.
void grid_init(grid_t* dgrid, const char* s1, const int m, const char* s2, const int n) {
    grid_t grid;
    // set sizes
    grid.w = m;
    grid.h = n;
    grid.d = grid.w + grid.h - 1;
    // copy strings to device
    cudaMallocOrDie( (void**)&(grid.sx), m*sizeof(char), "grid_init 1" );
    cudaMemcpyOrDie( grid.sx, s1, m*sizeof(char), cudaMemcpyHostToDevice, "grid_init 2" );
    cudaMallocOrDie( (void**)&(grid.sy), n*sizeof(char), "grid_init 3" );
    cudaMemcpyOrDie( grid.sy, s2, n*sizeof(char), cudaMemcpyHostToDevice, "grid_init 4" );
    // create zeroed cell grid on device
    size_t size = (grid.d + 1) * (grid.w + 1) * sizeof(cell_t);
    printf("size=%zu\n", size);
    cudaMallocOrDie( (void**)&(grid.cells), size, "grid_init 5" );
    cudaMemsetOrDie( grid.cells, 0, size, "grid_init 6" );
    // copy the descriptor we just filled to the device. NOTE all
    // pointer members are device pointers.
    cudaMemcpyOrDie( dgrid, &grid, sizeof(grid_t), cudaMemcpyHostToDevice, "grid_init 7" );
}
// Returns a pointer to cell (x, y) of the DP table; rows are laid out
// with stride (w + 1).
// Fix: removed a dead map_on() call whose outputs (mx, my) were
// computed but never used by the index expression.
// NOTE(review): the index may have been *intended* to use the diagonal
// mapping (mx, my) -- confirm against the storage layout in grid_init
// before enabling the kernels.
__device__
cell_t* grid_cell(const grid_t* grid, const int x, const int y) {
    return &(grid->cells[ (grid->w + 1)*x + y ]);
}
/*
void build_table(int m, int n) {
int num_diag = m + n - 1;
int x, y;
for(y = 1; y <= m; y++) {
for(x = 1; x <= n; x++) {
int mx, my;
// get neighboring values
map_on( x, y - 1, &mx, &my );
int val_up = grid( mx, my );
map_on( x - 1, y, &mx, &my );
int val_left = grid( mx, my );
map_on( x - 1, y - 1, &mx, &my );
int val_upleft = grid( mx, my );
// set value of cell
int val;
if( s2[x-1] == s1[y-1] ) {
val = val_upleft + 1;
} else {
val = max( val_up, val_left );
}
map_on( x, y, &mx, &my );
grid( mx, my ) = val;
// same LCS build table code, but with nx and ny mapping.
// x = thread. maybe x/p loop.
}
}
}
*/
/*
Computes a block of the LCS length matrix. Before the call,
all cells immediately above and to the left of the block of interest
must be calculated. that is, the cells above as ([x, x+w], y-1) and
to the left as ([y, y+h], x-1). These are the cells represented by #.
x x+w
+-------+-------+-------+-------+
| * * * | * * * | * * * | |
| * * * | * * # | # # # | |
+-------+-------+-------+-------+ y
| * * * | * * # | 0 0 0 | |
| * * * | * * # | 0 0 0 | |
+-------+-------+-------+-------+ y+h
| | | | |
| | | | |
+-------+-------+-------+-------+
This structure allows for usage by either message passing or shared memory based
formulations.
*/
// Kernel: computes `bh` consecutive anti-diagonals of the LCS length
// table starting at diagonal `d`; each thread owns one column x and
// visits its cells in diagonal order so the up/left/up-left neighbors
// were produced by earlier diagonals.
// NOTE(review): `bw` is currently unused, and x is not bounds-checked
// against the valid column range of each diagonal -- the (commented)
// launch site must size the grid accordingly; confirm before enabling.
__global__
void lcs_length_row_cuda( const grid_t* grid, int d, int bw, int bh ) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y;
    for(y = d; y < d + bh; y++) {
        int i, j;
        // translate (column, diagonal) back to logical coordinates
        map_off( x, y, &i, &j );
        //printf( "process [%i, %i] => [%i, %i]\n", d, x, j, i );
        cell_t* cell = grid_cell( grid, i, j );
        //printf("(%i,%i)\n", i, j);
        if(grid->sx[i-1] == grid->sy[j-1]) {
            // characters match: extend the diagonal predecessor by one
            cell->len = grid_cell( grid, i-1, j-1 )->len + 1;
            cell->dir = upleft;
            //printf("ul: [%i, %i] = %c\n", j, i, grid->sx[i-1]);
        } else {
            // no match: inherit the longer of the up/left neighbors
            cell_t* cell_up = grid_cell( grid, i-1, j );
            cell_t* cell_left = grid_cell( grid, i, j-1 );
            if(cell_up->len >= cell_left->len) {
                cell->len = cell_up->len;
                cell->dir = up;
            } else {
                cell->len = cell_left->len;
                cell->dir = left;
            }
        }
    }
}
// Host driver for the (work-in-progress) diagonal-wavefront LCS length
// computation: fetches the grid descriptor to learn its dimensions,
// then iterates over bands of `bh` diagonals. The kernel launch itself
// is still commented out, so this currently only prints the schedule.
// NOTE(review): nd = d/bh truncates, so the last (d % bh) diagonals
// would never be processed -- verify before enabling the launch.
void lcs_length_cuda( const grid_t* dgrid, int maxb, int maxt ) {
    // FIXME
    grid_t lgrid;
    grid_t* grid = &lgrid;
    cudaMemcpyOrDie( grid, dgrid, sizeof(grid_t), cudaMemcpyDeviceToHost, "lcs_length_cuda 1" );
    int bw = ceil(grid->w / (double)maxb);  // columns handled per block
    int bh = 10;                            // diagonals per band
    int nd = grid->d/bh;
    int d;
    for(d = 0; d < nd; d ++) {
        // first/last column indices intersecting diagonal d
        int sx = max(d - grid->h, 0);
        int fx = min(d, grid->w - 1);
        int nt = min(bw, maxt);
        int nb = min(ceil((fx - sx)/ (double)bw), (double)maxb);
        printf("sx=%i, fx=%i, nb=%i, nt=%i, bw=%i, bh=%i\n", sx, fx, nb, nt, bw, bh);
        //lcs_length_row_cuda<<<nb, nt>>>( dgrid, d, bw, bh );
    }
}
// Single-threaded kernel that follows the direction links from (i, j)
// back to the table origin, collecting matched characters into `res`.
// The collected string is the LCS in reverse order, NUL-terminated.
__global__
void lcs_backtrack_cuda(const grid_t* grid, int i, int j, const int maxlen, char* res) {
    int n = 0;
    while (i > 0 && j > 0) {
        const dir_t step = grid_cell(grid, i, j)->dir;
        if (step == upleft) {
            res[n++] = grid->sx[i - 1];
            i -= 1;
            j -= 1;
        } else if (step == up) {
            i -= 1;
        } else if (step == left) {
            j -= 1;
        }
        // any other value indicates a malformed table; the original
        // likewise fell through without advancing
    }
    res[n] = 0;
}
// Computes the LCS of s1 (length m) and s2 (length n) into the host
// buffer `res` (at least `maxlen` bytes): builds the device grid, runs
// the (currently stubbed-out) length and backtrack stages, then copies
// the result back and reverses it.
// Fixes: the device result buffer is zeroed so `res` is a valid
// (empty) string while the backtrack launch is commented out, and all
// device allocations (previously leaked) are released.
void lcs_cuda(const char* s1, const int m, const char* s2, const int n, const int maxlen, char* res) {
    // create grid on device
    grid_t* grid;
    cudaMallocOrDie( (void**)&grid, sizeof(grid_t), "lcs_cuda 1");
    grid_init( grid, s1, m, s2, n );
    // run length algorithm (makes kernel calls)
    lcs_length_cuda( grid, 20, 32 );
    // create a string on the device for result, and calculate result
    char* dres;
    cudaMallocOrDie( (void**)&dres, maxlen*sizeof(char), "lcs_cuda 2");
    cudaMemsetOrDie( dres, 0, maxlen*sizeof(char), "lcs_cuda 2b" );  // was uninitialized garbage
    // FIXME use grid h/w
    //lcs_backtrack_cuda<<<1, 1>>>(grid, m, n, maxlen, dres);
    // copy result back, and reverse it (calculated backwards).
    cudaMemcpyOrDie( res, dres, maxlen, cudaMemcpyDeviceToHost, "lcs_cuda 3" );
    strrev(res);
    // release device memory (previously leaked): fetch the descriptor
    // back so grid_init's per-member allocations can be freed too.
    grid_t hgrid;
    cudaMemcpyOrDie( &hgrid, grid, sizeof(grid_t), cudaMemcpyDeviceToHost, "lcs_cuda 4" );
    cudaFree( hgrid.sx );
    cudaFree( hgrid.sy );
    cudaFree( hgrid.cells );
    cudaFree( grid );
    cudaFree( dres );
}
// Driver: loads the two input strings (falling back to the baked-in
// literals when the files are missing -- read_string leaves its
// outputs untouched on failure), runs the CUDA LCS, and prints timing.
// Fixes: strlen() returns size_t and was printed with %i (UB on LP64);
// malloc is now checked; res is force-terminated so strlen cannot read
// past the buffer while the backtrack kernel remains stubbed out.
int main() {
    char* s1;
    char* s2;
    int m, n;
    s1="MZJAWXUMZJAWXUMZJAWXUMZJAWXU";
    s2="XMJYAUZXMJYAUZXMJYAUZXMJYAUZ";
    m = strlen(s1);
    n = strlen(s2);
    read_string("small_test1.txt", &s1, &m);
    read_string("small_test2.txt", &s2, &n);
    // allocate a result string (max possible LCS length + NUL)
    int reslen = max(m, n) + 1;
    char* res = (char*)malloc( reslen*sizeof(char) );
    if (res == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    double start = get_timeofday();
    lcs_cuda( s1, m, s2, n, reslen, res );
    double end = get_timeofday();
    res[reslen - 1] = '\0';   // bound strlen even if the result was not terminated
    printf("Time Taken: %f sec %zu %s\n", end-start, strlen(res), res);
    free(res);
    return 0;
}
/*
int main() {
char* s1;
char* s2;
int m, n;
//s1="MZJAWXU";
//s2="XMJYAUZ";
//m = strlen(s1);
//n = strlen(s2);
read_string("large_test1.txt", &s1, &m);
read_string("large_test2.txt", &s2, &n);
// copy strings to device
char* ds1;
char* ds2;
cudaMalloc( &ds1, m );
cudaMalloc( &ds2, n );
cudaMemcpy( ds1, s1, m, cudaMemcpyHostToDevice );
cudaMemcpy( ds2, s2, n, cudaMemcpyHostToDevice );
// allocate space for the grid on the device
int d = m + n - 1;
int mh = d, mw = m;
size_t size = (mh + 1) * (mw + 1) * sizeof(cell_t);
cell_t* cells;
cudaMalloc( &cells, size );
cudaMemset( cells, 0, size );
// allocate a result string on the device.
int reslen;
char* res;
reslen = max(m, n) + 1;
cudaMalloc(&res, reslen*sizeof(char));
double start = get_timeofday();
lcs_cuda( ds1, m, ds2, n, reslen, res );
double end = get_timeofday();
char* rres = (char*)malloc(reslen);
cudaMemcpy( rres, res, reslen, cudaMemcpyDeviceToHost );
printf("Time Taken: %f sec %i %s\n", end-start, strlen(rres), rres);
}
*/
|
51a056e462dceb33efcf9fd967269633be96e33c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <df/prediction/raycast.h>
#include <assert.h>
#include <df/util/cudaHelpers.h>
namespace df {
// Elementwise minimum of two fixed-size Eigen vectors, computed via
// const Array maps over the raw coefficient storage (Array::min is the
// coefficient-wise operation; Matrix has no such operator directly).
template <typename Scalar, uint D>
__device__ inline Eigen::Matrix<Scalar,D,1> componentwiseMin(const Eigen::Matrix<Scalar,D,1> & a,
                                                             const Eigen::Matrix<Scalar,D,1> & b) {
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > aMap(a.data());
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > bMap(b.data());
    return aMap.min(bMap);
}
// Elementwise maximum of two fixed-size Eigen vectors (counterpart of
// componentwiseMin), via const Array maps over the coefficient storage.
template <typename Scalar, uint D>
__device__ inline Eigen::Matrix<Scalar,D,1> componentwiseMax(const Eigen::Matrix<Scalar,D,1> & a,
                                                             const Eigen::Matrix<Scalar,D,1> & b) {
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > aMap(a.data());
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > bMap(b.data());
    return aMap.max(bMap);
}
// Ray/AABB intersection via the slab method. Returns (tEntry, tExit):
// the latest per-axis entrance time and the earliest per-axis exit time
// along rayOrigin + t * rayDirection. The ray misses the box iff
// tEntry > tExit. A zero direction component produces +/-inf slab
// times, which the min/max combination is expected to absorb under
// IEEE arithmetic.
template <typename Scalar>
__device__ inline Eigen::Matrix<Scalar,2,1> boxIntersections(const Eigen::Matrix<Scalar,3,1> & rayOrigin,
                                                             const Eigen::Matrix<Scalar,3,1> & rayDirection,
                                                             const Eigen::Matrix<Scalar,3,1> & boxMin,
                                                             const Eigen::Matrix<Scalar,3,1> & boxMax) {
    typedef Eigen::Matrix<Scalar,2,1> Vec2;
    typedef Eigen::Matrix<Scalar,3,1> Vec3;
    const Vec3 inverseRayDirection = rayDirection.unaryExpr([](const Scalar val){ return Scalar(1) / val; });
    // per-axis times at which the ray crosses each pair of box planes
    const Vec3 boxMinIntersections = inverseRayDirection.cwiseProduct(boxMin - rayOrigin);
    const Vec3 boxMaxIntersections = inverseRayDirection.cwiseProduct(boxMax - rayOrigin);
    const Vec3 minTimeIntersections = componentwiseMin<Scalar,3>(boxMinIntersections,boxMaxIntersections);
    const Vec3 maxTimeIntersections = componentwiseMax<Scalar,3>(boxMinIntersections,boxMaxIntersections);
    const Scalar maximalEntranceTime = minTimeIntersections.maxCoeff();
    const Scalar minimalExitTime = maxTimeIntersections.minCoeff();
    return Vec2(maximalEntranceTime,minimalExitTime);
}
// One thread per output pixel: unprojects the pixel to a ray, clips it
// against the voxel grid's bounding box, and (eventually) marches the
// TSDF for a surface crossing. The marching body is still stubbed out;
// the fallback writes the box-exit point and a placeholder normal.
// A z of 0 in predictedVertices marks "ray missed the volume".
// Fix: the original while-loop never advanced currentT, so any ray
// that entered the volume spun forever and hung the kernel. We now
// step one unit per iteration (one world unit along the normalized
// direction) as a placeholder until the TSDF interpolation is enabled.
template <typename Scalar,
          typename VoxelT,
          typename CameraModelT>
__global__ void raycastKernel(Tensor<3,Scalar,DeviceResident> predictedVertices,
                              Tensor<3,Scalar,DeviceResident> predictedNormals,
                              const VoxelGrid<Scalar,VoxelT,DeviceResident> voxelGrid, // TODO: if we dont end up using scale/offset, we can just pass in the Tensor portion
                              const CameraModelT cameraModel,
                              const Sophus::SE3Group<Scalar> transformWorldToPrediction, // TODO: faster to pass in both or do in-kernel?
                              const Sophus::SE3Group<Scalar> transformPredictionToWorld) {
    typedef Eigen::Matrix<Scalar,2,1> Vec2;
    typedef Eigen::Matrix<Scalar,3,1> Vec3;
    const uint x = threadIdx.x + blockDim.x * blockIdx.x;
    const uint y = threadIdx.y + blockDim.y * blockIdx.y;
    if ( (x < predictedVertices.dimensionSize(1)) && (y < predictedVertices.dimensionSize(2)) ) {
        const Vec2 pixel(x,y);
        const Vec3 predictionRayDirection = cameraModel.unproject(pixel,Scalar(1)).normalized();
        // TODO: can we change the frame some of the computation is done in to eliminate the need for both transforms?
        const Vec3 worldRayOrigin = transformPredictionToWorld.translation();
        const Vec3 worldRayDirection = transformPredictionToWorld.rotationMatrix()*predictionRayDirection;
        const Vec3 gridRayOrigin = voxelGrid.worldToGrid(worldRayOrigin);
        const Vec3 & gridRayDirection = worldRayDirection;
        const Vec3 volumeMin = Vec3(0,0,0);
        const Vec3 volumeMax = voxelGrid.dimensions().template cast<Scalar>() - Vec3(1,1,1); //voxelGrid.max();
        const Vec2 volumeEntranceExit = boxIntersections(gridRayOrigin,gridRayDirection,
                                                         volumeMin,volumeMax);
        Eigen::Map<Vec3> predictedVertex(&predictedVertices(0,x,y));
        Eigen::Map<Vec3> predictedNormal(&predictedNormals(0,x,y));
        if (volumeEntranceExit(0) > volumeEntranceExit(1)) {
            // the ray does not enter the volume at any time
            predictedVertex(2) = 0;
            return;
        }
        Scalar currentT = max(Scalar(0),volumeEntranceExit(0));
        while (currentT < volumeEntranceExit(1)) {
            const Vec3 currentPoint = gridRayOrigin + currentT * gridRayDirection;
            // const VoxelT interpolatedVoxel = voxelGrid.grid().interpolate(currentPoint(0),currentPoint(1),currentPoint(2));
            currentT += Scalar(1); // was missing: advance along the ray (placeholder step)
        }
        // placeholder result until TSDF marching lands
        predictedVertex = worldRayOrigin + volumeEntranceExit(1) * worldRayDirection;
        predictedNormal = Vec3(0,0,-1);
    }
}
// Host wrapper: validates the 3 x W x H layout of both output tensors,
// picks a 32x16 thread block, covers the image with a 2-D grid, and
// launches raycastKernel with both world<->prediction transforms (the
// inverse is computed here on the host).
template <typename Scalar,
          typename VoxelT,
          typename CameraModelT>
void raycast(Tensor<3,Scalar,DeviceResident> & predictedVertices,
             Tensor<3,Scalar,DeviceResident> & predictedNormals,
             const VoxelGrid<Scalar,VoxelT,DeviceResident> & voxelGrid,
             const CameraModelT & cameraModel,
             const Sophus::SE3Group<Scalar> & transformWorldToPrediction) {
    const uint width = predictedVertices.dimensionSize(1);
    const uint height = predictedVertices.dimensionSize(2);
    // both outputs must be 3-channel images of the same size
    assert(predictedVertices.dimensionSize(0) == 3);
    assert(width == predictedNormals.dimensionSize(1));
    assert(height == predictedNormals.dimensionSize(2));
    assert(predictedNormals.dimensionSize(0) == 3);
    const dim3 block(32,16,1);
    const dim3 grid(intDivideAndCeil(width,block.x),intDivideAndCeil(height,block.y),1);
    hipLaunchKernelGGL(( raycastKernel), dim3(grid),dim3(block), 0, 0, predictedVertices,predictedNormals,
                                  voxelGrid,cameraModel,
                                  transformWorldToPrediction,
                                  transformWorldToPrediction.inverse());
}
} // namespace df
#include <df/camera/poly3.h>
#include <df/voxel/tsdf.h>
namespace df {
template void raycast(Tensor<3,float,DeviceResident> &,
Tensor<3,float,DeviceResident> &,
const VoxelGrid<float,TsdfVoxel,DeviceResident> &,
const Poly3CameraModel<float> &,
const Sophus::SE3f &);
} // namespace df
| 51a056e462dceb33efcf9fd967269633be96e33c.cu | #include <df/prediction/raycast.h>
#include <assert.h>
#include <df/util/cudaHelpers.h>
namespace df {
// Elementwise minimum of two fixed-size Eigen vectors, computed via
// const Array maps over the raw coefficient storage (Array::min is the
// coefficient-wise operation; Matrix has no such operator directly).
template <typename Scalar, uint D>
__device__ inline Eigen::Matrix<Scalar,D,1> componentwiseMin(const Eigen::Matrix<Scalar,D,1> & a,
                                                             const Eigen::Matrix<Scalar,D,1> & b) {
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > aMap(a.data());
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > bMap(b.data());
    return aMap.min(bMap);
}
// Elementwise maximum of two fixed-size Eigen vectors (counterpart of
// componentwiseMin), via const Array maps over the coefficient storage.
template <typename Scalar, uint D>
__device__ inline Eigen::Matrix<Scalar,D,1> componentwiseMax(const Eigen::Matrix<Scalar,D,1> & a,
                                                             const Eigen::Matrix<Scalar,D,1> & b) {
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > aMap(a.data());
    const Eigen::Map<const Eigen::Array<Scalar,D,1> > bMap(b.data());
    return aMap.max(bMap);
}
// Ray/AABB intersection via the slab method. Returns (tEntry, tExit):
// the latest per-axis entrance time and the earliest per-axis exit time
// along rayOrigin + t * rayDirection. The ray misses the box iff
// tEntry > tExit. A zero direction component produces +/-inf slab
// times, which the min/max combination is expected to absorb under
// IEEE arithmetic.
template <typename Scalar>
__device__ inline Eigen::Matrix<Scalar,2,1> boxIntersections(const Eigen::Matrix<Scalar,3,1> & rayOrigin,
                                                             const Eigen::Matrix<Scalar,3,1> & rayDirection,
                                                             const Eigen::Matrix<Scalar,3,1> & boxMin,
                                                             const Eigen::Matrix<Scalar,3,1> & boxMax) {
    typedef Eigen::Matrix<Scalar,2,1> Vec2;
    typedef Eigen::Matrix<Scalar,3,1> Vec3;
    const Vec3 inverseRayDirection = rayDirection.unaryExpr([](const Scalar val){ return Scalar(1) / val; });
    // per-axis times at which the ray crosses each pair of box planes
    const Vec3 boxMinIntersections = inverseRayDirection.cwiseProduct(boxMin - rayOrigin);
    const Vec3 boxMaxIntersections = inverseRayDirection.cwiseProduct(boxMax - rayOrigin);
    const Vec3 minTimeIntersections = componentwiseMin<Scalar,3>(boxMinIntersections,boxMaxIntersections);
    const Vec3 maxTimeIntersections = componentwiseMax<Scalar,3>(boxMinIntersections,boxMaxIntersections);
    const Scalar maximalEntranceTime = minTimeIntersections.maxCoeff();
    const Scalar minimalExitTime = maxTimeIntersections.minCoeff();
    return Vec2(maximalEntranceTime,minimalExitTime);
}
// One thread per output pixel: unprojects the pixel to a ray, clips it
// against the voxel grid's bounding box, and (eventually) marches the
// TSDF for a surface crossing. The marching body is still stubbed out;
// the fallback writes the box-exit point and a placeholder normal.
// A z of 0 in predictedVertices marks "ray missed the volume".
// Fix: the original while-loop never advanced currentT, so any ray
// that entered the volume spun forever and hung the kernel. We now
// step one unit per iteration (one world unit along the normalized
// direction) as a placeholder until the TSDF interpolation is enabled.
template <typename Scalar,
          typename VoxelT,
          typename CameraModelT>
__global__ void raycastKernel(Tensor<3,Scalar,DeviceResident> predictedVertices,
                              Tensor<3,Scalar,DeviceResident> predictedNormals,
                              const VoxelGrid<Scalar,VoxelT,DeviceResident> voxelGrid, // TODO: if we dont end up using scale/offset, we can just pass in the Tensor portion
                              const CameraModelT cameraModel,
                              const Sophus::SE3Group<Scalar> transformWorldToPrediction, // TODO: faster to pass in both or do in-kernel?
                              const Sophus::SE3Group<Scalar> transformPredictionToWorld) {
    typedef Eigen::Matrix<Scalar,2,1> Vec2;
    typedef Eigen::Matrix<Scalar,3,1> Vec3;
    const uint x = threadIdx.x + blockDim.x * blockIdx.x;
    const uint y = threadIdx.y + blockDim.y * blockIdx.y;
    if ( (x < predictedVertices.dimensionSize(1)) && (y < predictedVertices.dimensionSize(2)) ) {
        const Vec2 pixel(x,y);
        const Vec3 predictionRayDirection = cameraModel.unproject(pixel,Scalar(1)).normalized();
        // TODO: can we change the frame some of the computation is done in to eliminate the need for both transforms?
        const Vec3 worldRayOrigin = transformPredictionToWorld.translation();
        const Vec3 worldRayDirection = transformPredictionToWorld.rotationMatrix()*predictionRayDirection;
        const Vec3 gridRayOrigin = voxelGrid.worldToGrid(worldRayOrigin);
        const Vec3 & gridRayDirection = worldRayDirection;
        const Vec3 volumeMin = Vec3(0,0,0);
        const Vec3 volumeMax = voxelGrid.dimensions().template cast<Scalar>() - Vec3(1,1,1); //voxelGrid.max();
        const Vec2 volumeEntranceExit = boxIntersections(gridRayOrigin,gridRayDirection,
                                                         volumeMin,volumeMax);
        Eigen::Map<Vec3> predictedVertex(&predictedVertices(0,x,y));
        Eigen::Map<Vec3> predictedNormal(&predictedNormals(0,x,y));
        if (volumeEntranceExit(0) > volumeEntranceExit(1)) {
            // the ray does not enter the volume at any time
            predictedVertex(2) = 0;
            return;
        }
        Scalar currentT = max(Scalar(0),volumeEntranceExit(0));
        while (currentT < volumeEntranceExit(1)) {
            const Vec3 currentPoint = gridRayOrigin + currentT * gridRayDirection;
            // const VoxelT interpolatedVoxel = voxelGrid.grid().interpolate(currentPoint(0),currentPoint(1),currentPoint(2));
            currentT += Scalar(1); // was missing: advance along the ray (placeholder step)
        }
        // placeholder result until TSDF marching lands
        predictedVertex = worldRayOrigin + volumeEntranceExit(1) * worldRayDirection;
        predictedNormal = Vec3(0,0,-1);
    }
}
// Host wrapper: validates the 3 x W x H layout of both output tensors,
// picks a 32x16 thread block, covers the image with a 2-D grid, and
// launches raycastKernel with both world<->prediction transforms (the
// inverse is computed here on the host).
template <typename Scalar,
          typename VoxelT,
          typename CameraModelT>
void raycast(Tensor<3,Scalar,DeviceResident> & predictedVertices,
             Tensor<3,Scalar,DeviceResident> & predictedNormals,
             const VoxelGrid<Scalar,VoxelT,DeviceResident> & voxelGrid,
             const CameraModelT & cameraModel,
             const Sophus::SE3Group<Scalar> & transformWorldToPrediction) {
    const uint width = predictedVertices.dimensionSize(1);
    const uint height = predictedVertices.dimensionSize(2);
    // both outputs must be 3-channel images of the same size
    assert(predictedVertices.dimensionSize(0) == 3);
    assert(width == predictedNormals.dimensionSize(1));
    assert(height == predictedNormals.dimensionSize(2));
    assert(predictedNormals.dimensionSize(0) == 3);
    const dim3 block(32,16,1);
    const dim3 grid(intDivideAndCeil(width,block.x),intDivideAndCeil(height,block.y),1);
    raycastKernel<<<grid,block>>>(predictedVertices,predictedNormals,
                                  voxelGrid,cameraModel,
                                  transformWorldToPrediction,
                                  transformWorldToPrediction.inverse());
}
#include <df/camera/poly3.h>
#include <df/voxel/tsdf.h>
namespace df {
template void raycast(Tensor<3,float,DeviceResident> &,
Tensor<3,float,DeviceResident> &,
const VoxelGrid<float,TsdfVoxel,DeviceResident> &,
const Poly3CameraModel<float> &,
const Sophus::SE3f &);
} // namespace df
|
aac909b44802a14199ec72f0fdc617cd3a5fdbd7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "patchmatch2_conv_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps matrix sizes and launch
// geometries for patchmatch2_conv_kernel, timing 1000 launches after a
// 10-launch warm-up.
// Fixes: argv[1] is now checked before use (previously a NULL deref
// when run with no arguments); the float/int device buffers are sized
// in bytes (the original passed element counts, under-allocating 4x);
// and the per-configuration buffers are freed (previously leaked on
// every one of the up-to-140 iterations).
// NOTE(review): the timed loop has no hipDeviceSynchronize before
// `end`, so it measures launch/queueing overhead, not kernel runtime;
// preserved as-is to keep reported numbers comparable.
int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        printf("usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *A = NULL;
            hipMalloc(&A, XSIZE*YSIZE*sizeof(float));
            float *B = NULL;
            hipMalloc(&B, XSIZE*YSIZE*sizeof(float));
            float *AP = NULL;
            hipMalloc(&AP, XSIZE*YSIZE*sizeof(float));
            float *BP = NULL;
            hipMalloc(&BP, XSIZE*YSIZE*sizeof(float));
            float *conv = NULL;
            hipMalloc(&conv, XSIZE*YSIZE*sizeof(float));
            int *prev_corrAB_upsampled = NULL;
            hipMalloc(&prev_corrAB_upsampled, XSIZE*YSIZE*sizeof(int));
            int patch = 1;
            int s_rad = 1;
            int c = 2;
            int h = YSIZE;
            int w = XSIZE;
            // round the iteration space up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            hipLaunchKernelGGL(( patchmatch2_conv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,AP,BP,conv,prev_corrAB_upsampled,patch,s_rad,c,h,w);
            hipDeviceSynchronize();
            // warm-up launches (not timed)
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
                patchmatch2_conv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,AP,BP,conv,prev_corrAB_upsampled,patch,s_rad,c,h,w);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
                patchmatch2_conv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,AP,BP,conv,prev_corrAB_upsampled,patch,s_rad,c,h,w);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // release per-configuration buffers (previously leaked)
            hipFree(A); hipFree(B); hipFree(AP); hipFree(BP);
            hipFree(conv); hipFree(prev_corrAB_upsampled);
        }
    }
    return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "patchmatch2_conv_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps matrix sizes and launch
// geometries for patchmatch2_conv_kernel, timing 1000 launches after a
// 10-launch warm-up.
// Fixes: argv[1] is now checked before use (previously a NULL deref
// when run with no arguments); the float/int device buffers are sized
// in bytes (the original passed element counts, under-allocating 4x);
// and the per-configuration buffers are freed (previously leaked on
// every one of the up-to-140 iterations).
// NOTE(review): the timed loop has no cudaDeviceSynchronize before
// `end`, so it measures launch/queueing overhead, not kernel runtime;
// preserved as-is to keep reported numbers comparable.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        printf("usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *A = NULL;
            cudaMalloc(&A, XSIZE*YSIZE*sizeof(float));
            float *B = NULL;
            cudaMalloc(&B, XSIZE*YSIZE*sizeof(float));
            float *AP = NULL;
            cudaMalloc(&AP, XSIZE*YSIZE*sizeof(float));
            float *BP = NULL;
            cudaMalloc(&BP, XSIZE*YSIZE*sizeof(float));
            float *conv = NULL;
            cudaMalloc(&conv, XSIZE*YSIZE*sizeof(float));
            int *prev_corrAB_upsampled = NULL;
            cudaMalloc(&prev_corrAB_upsampled, XSIZE*YSIZE*sizeof(int));
            int patch = 1;
            int s_rad = 1;
            int c = 2;
            int h = YSIZE;
            int w = XSIZE;
            // round the iteration space up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            patchmatch2_conv_kernel<<<gridBlock,threadBlock>>>(A,B,AP,BP,conv,prev_corrAB_upsampled,patch,s_rad,c,h,w);
            cudaDeviceSynchronize();
            // warm-up launches (not timed)
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                patchmatch2_conv_kernel<<<gridBlock,threadBlock>>>(A,B,AP,BP,conv,prev_corrAB_upsampled,patch,s_rad,c,h,w);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                patchmatch2_conv_kernel<<<gridBlock,threadBlock>>>(A,B,AP,BP,conv,prev_corrAB_upsampled,patch,s_rad,c,h,w);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // release per-configuration buffers (previously leaked)
            cudaFree(A); cudaFree(B); cudaFree(AP); cudaFree(BP);
            cudaFree(conv); cudaFree(prev_corrAB_upsampled);
        }
    }
    return 0;
}
93929b83d63f6218237d3caece5cb758c1a0e763.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define cfd_NBLOCKS 16*6*2
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
// Neighbor lists are randomized within a "super block" of this many thread
// blocks to create non-uniform data sharing (see cfd_myBuildNeighborList_blkSchedule).
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS; // total number of elements
const int cfd_maxNeighbors = 8; // neighbors stored per element
// 1-D float texture references over d_my / d_mz / d_density_energy (bound in
// main()); cfd_kernel reads momentum-y/z and energy through these.
texture<float,1,hipReadModeElementType> tex_my;
texture<float,1,hipReadModeElementType> tex_mz;
texture<float,1,hipReadModeElementType> tex_energy;
// Builds a randomized neighbor list: each element's cfd_maxNeighbors
// neighbors are drawn (with possible repeats) from the same "super block" of
// cfd_nBlksPerCluster thread blocks, after shuffling the block order so that
// tasks sharing data are not adjacent in launch order.
// Layout written: neighborList[k*nAtom + elem] = k-th neighbor of elem.
// Returns a malloc'd copy of the shuffled block order.
// NOTE(review): the caller in this file ignores the returned buffer, so it
// leaks; draws are not de-duplicated, so an element may list the same
// neighbor more than once — presumably acceptable for this benchmark.
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing
//but avoid that tasks sharing the same data are neighbor tasks by randomization
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
// atomInds maps sequential position -> element index under the shuffled block order
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz; // draw a candidate within the super block
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
// Recovers velocity from conserved variables: v = momentum / density,
// component-wise, written into `velocity`.
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.z = momentum.z / density;
velocity.y = momentum.y / density;
velocity.x = momentum.x / density;
}
// Squared magnitude of the velocity vector, |v|^2.
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
float sq = velocity.x*velocity.x;
sq += velocity.y*velocity.y;
sq += velocity.z*velocity.z;
return sq;
}
// Ideal-gas pressure from conserved variables: p = (gamma - 1) * (E - 0.5*rho*|v|^2).
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
const float kinetic = float(0.5f)*density*speed_sqd;
return (float(GAMMA)-float(1.0f))*(density_energy - kinetic);
}
// Speed of sound for an ideal gas: c = sqrt(gamma * p / rho).
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
const float c_sqd = float(GAMMA)*pressure/density;
return sqrtf(c_sqd);
}
// Fills the per-cell flux contribution tensors from the primitive/conserved
// state: fc_momentum_{x,y,z} are the rows of (v ⊗ m + p*I), and
// fc_density_energy is v*(E + p).
// BUGFIX: removed the duplicated `__host__` qualifier
// (`__host__ __device__ __host__`) from the declaration.
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y; // tensor is symmetric: reuse off-diagonals
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
// Host-side reference check: recomputes every element's flux exactly as
// cfd_kernel does and compares against the GPU result in `fluxes`.
// Prints "GOOD! passed!" on success; on the first mismatch prints the element
// index plus GPU/CPU values for all five variables and returns.
// A value mismatches only when BOTH its relative and absolute error exceed 0.01.
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f); // unused: `factor` below is hard-coded to 1.3
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
// Accumulate artificial-viscosity and cell-centered flux terms over all neighbors.
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); // unused below: factor is hard-coded
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
// Fail only when relative error > 1% AND absolute error > 0.01 for some variable.
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
// One thread per element: accumulates the artificial-viscosity term
// (factor = 1.3) and the cell-centered flux over cfd_maxNeighbors neighbors,
// writing the five flux variables into `fluxes` (SoA layout, stride nelr).
// Preconditions: tex_my / tex_mz / tex_energy bound to d_my / d_mz /
// d_density_energy (see main()); 1-D launch covering nelr threads.
// BUGFIX: removed the stray statement `mz[i];` after the tex_mz fetch — it
// was a dead global load (the commented-out fallback on the neighboring
// `my`/`mz` lines shows a comment marker was intended). Also added a
// grid-tail bounds guard so launches not divisible by the block size are safe.
__global__ void cfd_kernel(int nelr,const int* __restrict__ elements_surrounding_elements, const float* __restrict__
normals, float* density, const float* __restrict__ mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
const float smoothing_coefficient = float(0.2f); // unused: `factor` below is hard-coded
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if (i >= nelr) return; // guard the grid tail
int j, nb;
float3 normal; float normal_len;
float factor;
float density_i = density[i];
float3 momentum_i;
momentum_i.x = mx[i];
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i);//mz[i];
float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); // unused: factor is hard-coded
if(nb >= 0) // a legitimate neighbor
{
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z = tex1Dfetch(tex_mz,nb);//mz[nb];
density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
// Write out the five flux variables (SoA, stride nelr).
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
// Driver: builds a random CFD problem of cfd_nAtom elements, uploads it,
// times one cfd_kernel launch with HIP events, then verifies the result on
// the host with check_cfd().
// NOTE(review): no HIP call here is error-checked; hipSetDeviceFlags is
// called after allocations (flags are normally set before any context work —
// verify it takes effect); device/pinned memory, events and texture bindings
// are never released before return.
int main(int argc, char **argv) {
hipSetDevice(2); // hard-coded device index — presumably machine-specific; verify
srand(2013); // fixed seed for reproducible inputs
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
// Pinned host allocations for all host-side arrays.
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
// NOTE(review): the returned block-order buffer is ignored here (leaks).
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
// Zero-copy (mapped) completion flag; kernel receives its device alias.
hipSetDeviceFlags(hipDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
// Bind the read-only inputs consumed by cfd_kernel through textures.
hipBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// Ceil-divide so the grid covers all cfd_nAtom elements.
int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE;
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(cfd_BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
// Host-side verification against a CPU recomputation of the fluxes.
check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
| 93929b83d63f6218237d3caece5cb758c1a0e763.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define cfd_NBLOCKS 16*6*2
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
// Neighbor lists are randomized within a "super block" of this many thread
// blocks to create non-uniform data sharing (see cfd_myBuildNeighborList_blkSchedule).
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS; // total number of elements
const int cfd_maxNeighbors = 8; // neighbors stored per element
// 1-D float texture references used by cfd_kernel for cached read-only loads
// of momentum-y/z and energy; bound to the device arrays before launch.
texture<float,1,cudaReadModeElementType> tex_my;
texture<float,1,cudaReadModeElementType> tex_mz;
texture<float,1,cudaReadModeElementType> tex_energy;
// Builds a randomized neighbor list: each element's cfd_maxNeighbors
// neighbors are drawn (with possible repeats) from the same "super block" of
// cfd_nBlksPerCluster thread blocks, after shuffling the block order so that
// tasks sharing data are not adjacent in launch order.
// Layout written: neighborList[k*nAtom + elem] = k-th neighbor of elem.
// Returns a malloc'd copy of the shuffled block order.
// NOTE(review): the caller in this file ignores the returned buffer, so it
// leaks; draws are not de-duplicated, so an element may list the same
// neighbor more than once — presumably acceptable for this benchmark.
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing
//but avoid that tasks sharing the same data are neighbor tasks by randomization
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
// atomInds maps sequential position -> element index under the shuffled block order
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz; // draw a candidate within the super block
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
// Recovers velocity from conserved variables: v = momentum / density,
// component-wise, written into `velocity`.
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.z = momentum.z / density;
velocity.y = momentum.y / density;
velocity.x = momentum.x / density;
}
// Squared magnitude of the velocity vector, |v|^2.
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
float sq = velocity.x*velocity.x;
sq += velocity.y*velocity.y;
sq += velocity.z*velocity.z;
return sq;
}
// Ideal-gas pressure from conserved variables: p = (gamma - 1) * (E - 0.5*rho*|v|^2).
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
const float kinetic = float(0.5f)*density*speed_sqd;
return (float(GAMMA)-float(1.0f))*(density_energy - kinetic);
}
// Speed of sound for an ideal gas: c = sqrt(gamma * p / rho).
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
const float c_sqd = float(GAMMA)*pressure/density;
return sqrtf(c_sqd);
}
// Fills the per-cell flux contribution tensors from the primitive/conserved
// state: fc_momentum_{x,y,z} are the rows of (v ⊗ m + p*I), and
// fc_density_energy is v*(E + p).
// BUGFIX: removed the duplicated `__host__` qualifier
// (`__host__ __device__ __host__`) from the declaration.
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y; // tensor is symmetric: reuse off-diagonals
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
// Host-side reference check: recomputes every element's flux exactly as
// cfd_kernel does and compares against the GPU result in `fluxes`.
// Prints "GOOD! passed!" on success; on the first mismatch prints the element
// index plus GPU/CPU values for all five variables and returns.
// A value mismatches only when BOTH its relative and absolute error exceed 0.01.
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f); // unused: `factor` below is hard-coded to 1.3
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
// Accumulate artificial-viscosity and cell-centered flux terms over all neighbors.
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); // unused below: factor is hard-coded
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
// Fail only when relative error > 1% AND absolute error > 0.01 for some variable.
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
// Per-cell flux computation for the CFD benchmark (one thread per cell).
//
// For cell i: gathers the cell's conserved variables (momentum y/z and the
// energy come through the tex_my/tex_mz/tex_energy texture references bound
// in main()), then loops over its cfd_maxNeighbors neighbors accumulating an
// artificial-viscosity term plus cell-centered flux contributions, and writes
// the five results into fluxes (layout: fluxes[var*nelr + i]).
//
// d_flag is only used by the commented-out completion counter at the bottom.
__global__ void cfd_kernel(int nelr, const int* __restrict__ elements_surrounding_elements,
                           const float* __restrict__ normals, float* density,
                           const float* __restrict__ mx, float* my, float* __restrict__ mz,
                           float* density_energy, float* fluxes, int *d_flag)
{
    const float smoothing_coefficient = 0.2f; // used only by the commented-out viscosity formula
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);
    // Grid-tail guard: main() rounds the grid up to whole blocks, so without
    // this check trailing threads would read/write past the array ends.
    if (i >= nelr)
        return;
    int j, nb;
    float3 normal; float normal_len;
    float factor;

    float density_i = density[i];
    float3 momentum_i;
    momentum_i.x = mx[i];
    momentum_i.y = tex1Dfetch(tex_my, i);        // my[i] via the read-only texture path
    momentum_i.z = tex1Dfetch(tex_mz, i);        // mz[i] (fixed: original left a dead "mz[i];" statement here)
    float density_energy_i = tex1Dfetch(tex_energy, i); // density_energy[i]

    float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
    float speed_sqd_i = compute_speed_sqd(velocity_i);
    float speed_i = sqrtf(speed_sqd_i);          // only needed by the commented-out viscosity formula
    float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
    float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
    float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
    float3 flux_contribution_i_density_energy;
    compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i,
                              flux_contribution_i_momentum_x, flux_contribution_i_momentum_y,
                              flux_contribution_i_momentum_z, flux_contribution_i_density_energy);

    // accumulators (all float literals; the original initialized one with a double 0.0)
    float flux_i_density = 0.0f;
    float3 flux_i_momentum;
    flux_i_momentum.x = 0.0f;
    flux_i_momentum.y = 0.0f;
    flux_i_momentum.z = 0.0f;
    float flux_i_density_energy = 0.0f;

    float3 velocity_nb;
    float density_nb, density_energy_nb;
    float3 momentum_nb;
    float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
    float3 flux_contribution_nb_density_energy;
    float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
    #pragma unroll
    for(j = 0; j < cfd_maxNeighbors; j++)
    {
        nb = elements_surrounding_elements[i + j*nelr];
        // normals layout (already coalesced):
        // | X for neighbor 0, X for neighbor 1, ... | Y for all neighbors | Z for all neighbors |
        normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
        normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
        normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
        normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
        if(nb >= 0) // a legitimate neighbor (negative entries mark missing neighbors)
        {
            density_nb        = density[nb];
            momentum_nb.x     = mx[nb];
            momentum_nb.y     = tex1Dfetch(tex_my, nb);     // my[nb]
            momentum_nb.z     = tex1Dfetch(tex_mz, nb);     // mz[nb]
            density_energy_nb = tex1Dfetch(tex_energy, nb); // density_energy[nb]
            compute_velocity(density_nb, momentum_nb, velocity_nb);
            speed_sqd_nb = compute_speed_sqd(velocity_nb);
            pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
            speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
            compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb,
                                      flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y,
                                      flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
            // artificial viscosity — the benchmark pins the factor; physical formula kept for reference:
            //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
            factor = 1.3f;
            flux_i_density        += factor*(density_i-density_nb);
            flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
            flux_i_momentum.x     += factor*(momentum_i.x-momentum_nb.x);
            flux_i_momentum.y     += factor*(momentum_i.y-momentum_nb.y);
            flux_i_momentum.z     += factor*(momentum_i.z-momentum_nb.z);
            // accumulate cell-centered fluxes, one normal component at a time
            factor = 0.5f*normal.x;
            flux_i_density        += factor*(momentum_nb.x+momentum_i.x);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
            flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
            flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
            flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
            factor = 0.5f*normal.y;
            flux_i_density        += factor*(momentum_nb.y+momentum_i.y);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
            flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
            flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
            flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
            factor = 0.5f*normal.z;
            flux_i_density        += factor*(momentum_nb.z+momentum_i.z);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
            flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
            flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
            flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
        }
    }
    // publish the five flux components for this cell
    fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
    fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
    fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
    fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
    fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
    //if (threadIdx.x==0) atomicAdd(d_flag,1);
}
// Host driver: builds a random CFD problem, uploads it, runs cfd_kernel once,
// reports the kernel time, and validates the device fluxes against the
// host-side check_cfd() reference.
int main(int argc, char **argv) {
    cudaSetDevice(2); // NOTE: hard-coded device index, preserved from the original
    // cudaDeviceMapHost must be set before the CUDA context is created (i.e.
    // before the first allocation). The original called this just before
    // cudaHostAlloc — after many allocations — where it cannot take effect.
    cudaSetDeviceFlags(cudaDeviceMapHost);
    srand(2013);

    // ---- pinned host buffers (pinned for fast H2D/D2H copies) ----
    float *density;
    float *mx;
    float *my;
    float *mz;
    float *density_energy;
    float *normals;
    float *fluxes;
    int* cfd_neighborList;
    cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
    cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
    cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));

    // ---- device buffers ----
    float *d_density;
    float *d_mx;
    float *d_my;
    float *d_mz;
    float *d_density_energy;
    float *d_normals;
    float *d_fluxes;
    cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
    cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
    cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
    int* d_cfd_neighborList;
    cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));

    // Initialize the problem with random values (seed fixed above for reproducibility)
    for (int i = 0; i < cfd_nAtom; i++)
    {
        density[i]        = (float)(drand48());
        density_energy[i] = (float)(drand48());
        mx[i]             = (float)(drand48());
        my[i]             = (float)(drand48());
        mz[i]             = (float)(drand48());
    }
    for (int i = 0; i < cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
        normals[i] = (float)(drand48());
    cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);

    // Copy data to GPU
    cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);

    // Zero-copy completion flag (mapped host memory; requires cudaDeviceMapHost above)
    int *flag_cfd, *d_flag_cfd;
    cudaHostAlloc((void**)&flag_cfd, sizeof(int), cudaHostAllocMapped);
    cudaHostGetDevicePointer((void**)&d_flag_cfd, (void*)flag_cfd, 0);

    // Bind the read-only inputs to the texture references read by cfd_kernel
    cudaBindTexture(0, tex_my, d_my, cfd_nAtom*sizeof(float));
    cudaBindTexture(0, tex_mz, d_mz, cfd_nAtom*sizeof(float));
    cudaBindTexture(0, tex_energy, d_density_energy, cfd_nAtom*sizeof(float));

    // Time a single kernel launch with CUDA events
    cudaEvent_t kernel_start, kernel_stop;
    cudaEventCreate(&kernel_start);
    cudaEventCreate(&kernel_stop);
    float kernel_time = 0.0f;
    cudaEventRecord(kernel_start, 0);
    int cfd_gridSize = (cfd_nAtom - 1 + cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE; // ceil(n / blockSize)
    cfd_kernel<<<cfd_gridSize, cfd_BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals,
            d_density, d_mx, d_my, d_mz, d_density_energy, d_fluxes, d_flag_cfd);
    cudaDeviceSynchronize();
    cudaEventRecord(kernel_stop, 0);
    cudaEventSynchronize(kernel_stop);
    cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    kernel_time *= 1.e-3f; // milliseconds -> seconds
    cout << "kernel exe time: " << kernel_time << endl;

    // Validate the device results against the host reference implementation
    cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
    check_cfd(cfd_nAtom, cfd_neighborList, normals, density, mx, my, mz, density_energy, fluxes);

    // Release all resources (the original leaked every allocation, both events,
    // and left the textures bound)
    cudaUnbindTexture(tex_my);
    cudaUnbindTexture(tex_mz);
    cudaUnbindTexture(tex_energy);
    cudaEventDestroy(kernel_start);
    cudaEventDestroy(kernel_stop);
    cudaFreeHost(flag_cfd);
    cudaFree(d_cfd_neighborList);
    cudaFree(d_fluxes);
    cudaFree(d_normals);
    cudaFree(d_density_energy);
    cudaFree(d_mz);
    cudaFree(d_my);
    cudaFree(d_mx);
    cudaFree(d_density);
    cudaFreeHost(cfd_neighborList);
    cudaFreeHost(fluxes);
    cudaFreeHost(normals);
    cudaFreeHost(density_energy);
    cudaFreeHost(mz);
    cudaFreeHost(my);
    cudaFreeHost(mx);
    cudaFreeHost(density);
    return 0;
}
|
c46ab6f6e4e76349ecd21a849ce4d6cd4f42958c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "custring_view.cuh"
#include "custring.cuh"
#include "NVText.h"
//static void printCudaError( hipError_t err, const char* prefix="\t" )
//{
// if( err != hipSuccess )
// fprintf(stderr,"%s: %s(%d):%s\n",prefix,hipGetErrorName(err),(int)err,hipGetErrorString(err));
//}
// Return the unique set of tokens found across all strings in 'strs',
// splitting on the specified delimiter. The result NVStrings is built from
// (pointer,length) pairs that point back into the original strings' device
// data. NOTE(review): delimiter is assumed non-null and non-empty — strlen()
// and split_size() are called on it unconditionally; confirm with callers.
NVStrings* NVText::unique_tokens(NVStrings& strs, const char* delimiter )
{
// stage the delimiter bytes on the device so the __device__ lambdas can use them
int bytes = (int)strlen(delimiter);
char* d_delimiter = nullptr;
auto execpol = rmm::exec_policy(0);
RMM_ALLOC(&d_delimiter,bytes,0);
hipMemcpy(d_delimiter,delimiter,bytes,hipMemcpyHostToDevice);
// need to count how many output strings per string
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
// d_counts[idx] = number of tokens in string idx (left 0 for null strings)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiter, bytes, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_counts[idx] = dstr->split_size(d_delimiter,bytes,0,-1);
});
// the widest row determines how many token "columns" must be visited
int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// build an index for each column and then sort/unique it
rmm::device_vector< thrust::pair<const char*,size_t> > vocab;
for( int col=0; col < columnsCount; ++col )
{
// first, build a vector of pair<char*,int>'s' for each column
// each pair points to a string for this column for each row
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, col, d_delimiter, bytes, d_counts, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
if( !dstr )
return;
// dcount already accounts for the maxsplit value
int dcount = d_counts[idx];
if( col >= dcount )
return; // passed the end for this string
// skip delimiters until we reach this column
int spos = 0, nchars = dstr->chars_count();
int epos = nchars;
for( int c=0; c < (dcount-1); ++c )
{
epos = dstr->find(d_delimiter,bytes,spos);
if( epos < 0 )
{
epos = nchars;
break;
}
if( c==col ) // found our column
break;
spos = epos + bytes;
epos = nchars;
}
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
});
//hipError_t err = hipDeviceSynchronize();
//if( err != hipSuccess )
//{
// fprintf(stderr,"unique_tokens:col=%d\n",col);
// printCudaError(err);
//}
// add column values to vocab list
vocab.insert(vocab.end(),indexes.begin(),indexes.end());
//printf("vocab size = %lu\n",vocab.size());
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
// sort the list (the whole accumulated vocab is re-sorted every column pass)
thrust::sort(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return lhs.first==0; // non-null > null
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// unique the list: same pointer, or equal length + equal bytes, are duplicates
thrust::pair<const char*,size_t>* newend = thrust::unique(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
if( lhs.first==rhs.first )
return true;
if( lhs.second != rhs.second )
return false;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
// truncate list to the unique set
// the above unique() call does an implicit dev-sync
vocab.resize((size_t)(newend - d_vocab));
}
// remove the inevitable 'null' token
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
auto end = thrust::remove_if(execpol->on(0), d_vocab, d_vocab + vocab.size(), [] __device__ ( thrust::pair<const char*,size_t> w ) { return w.first==0; } );
unsigned int vsize = (unsigned int)(end - d_vocab); // may need new size
// done
RMM_FREE(d_delimiter,0);
// build strings object from vocab elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_vocab,vsize);
}
// Count, for every string in 'strs', how many tokens the given delimiter
// splits it into. One count per string is written to 'results' (device memory
// when bdevmem is true, host memory otherwise). Null and empty strings yield
// a count of zero. Always returns 0.
unsigned int NVText::token_count( NVStrings& strs, const char* delimiter, unsigned int* results, bool bdevmem )
{
    auto execpol = rmm::exec_policy(0);
    // stage the delimiter bytes on the device for the lambda below
    int delim_bytes = (int)strlen(delimiter);
    char* d_delim = nullptr;
    RMM_ALLOC(&d_delim,delim_bytes,0);
    hipMemcpy(d_delim,delimiter,delim_bytes,hipMemcpyHostToDevice);
    unsigned int count = strs.size();
    // write straight into the caller's buffer when it is already device memory
    unsigned int* d_out = results;
    if( !bdevmem )
        RMM_ALLOC(&d_out,count*sizeof(unsigned int),0);
    // build a device-side view of the strings
    rmm::device_vector<custring_view*> strings(count,nullptr);
    custring_view** d_strings = strings.data().get();
    strs.create_custring_index(d_strings);
    // one thread per string: null -> 0, empty -> 0, otherwise split_size()
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_delim, delim_bytes, d_out] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            int tokens = 0;
            if( dstr && !dstr->empty() )
                tokens = dstr->split_size(d_delim,delim_bytes,0,-1);
            d_out[idx] = tokens;
        });
    if( !bdevmem )
    {   // hand the counts back to the caller's host buffer
        hipMemcpy(results,d_out,count*sizeof(unsigned int),hipMemcpyDeviceToHost);
        RMM_FREE(d_out,0);
    }
    RMM_FREE(d_delim,0);
    return 0;
}
// For each (string, token) pair, record whether the token occurs anywhere in
// the string. Output is a count x tcount boolean matrix written row-major
// into 'results' (device memory when todevice is true). A null string or a
// null token yields false. Returns 0; does nothing for empty inputs or a
// null output pointer.
unsigned int NVText::contains_strings( NVStrings& strs, NVStrings& tkns, bool* results, bool todevice )
{
    unsigned int count = strs.size();
    unsigned int tcount = tkns.size();
    // nothing to compute without both inputs and an output buffer
    if( results==0 || count==0 || tcount==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    bool* d_matrix = results;
    if( !todevice ) // caller gave host memory; stage the matrix on the device
        RMM_ALLOC(&d_matrix,tcount*count*sizeof(bool),0);
    // device-side views of both string sets
    rmm::device_vector<custring_view*> strings(count,nullptr);
    rmm::device_vector<custring_view*> tokens(tcount,nullptr);
    custring_view** d_strings = strings.data().get();
    custring_view** d_tokens = tokens.data().get();
    strs.create_custring_index(d_strings);
    tkns.create_custring_index(d_tokens);
    // one thread per string; each thread fills its entire output row
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_tokens, tcount, d_matrix] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            for( int t=0; t < tcount; ++t )
            {
                custring_view* dtgt = d_tokens[t];
                // find() is negative when not found; -2 stands in for null operands
                d_matrix[(idx*tcount)+t] = ((dstr && dtgt) ? dstr->find(*dtgt) : -2) >= 0;
            }
        });
    if( !todevice )
    {   // return the finished matrix to the caller's host buffer
        hipMemcpy(results,d_matrix,sizeof(bool)*count*tcount,hipMemcpyDeviceToHost);
        RMM_FREE(d_matrix,0);
    }
    return 0;
}
// Count occurrences of each token inside each string (plain substring
// matches, scanning left-to-right and advancing one token-length past each
// hit). Output is a count x tcount matrix written row-major into 'results',
// e.g.:
//            'aa' 'bbb' 'c' ...
// "aaaabc"     2    0    1
// "aabbcc"     1    0    2
// "abbbbc"     0    1    1
// ...
unsigned int NVText::strings_counts( NVStrings& strs, NVStrings& tkns, unsigned int* results, bool todevice )
{
    unsigned int count = strs.size();
    unsigned int tcount = tkns.size();
    if( results==0 || count==0 || tcount==0 )
        return 0; // nothing to count or nowhere to put it
    auto execpol = rmm::exec_policy(0);
    unsigned int* d_matrix = results;
    if( !todevice ) // stage on device when the caller's buffer is host memory
        RMM_ALLOC(&d_matrix,tcount*count*sizeof(unsigned int),0);
    // device-side views of both string sets
    rmm::device_vector<custring_view*> strings(count,nullptr);
    rmm::device_vector<custring_view*> tokens(tcount,nullptr);
    custring_view** d_strings = strings.data().get();
    custring_view** d_tokens = tokens.data().get();
    strs.create_custring_index(d_strings);
    tkns.create_custring_index(d_tokens);
    // one thread per string; each thread fills its row of the matrix
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_tokens, tcount, d_matrix] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            for( int t=0; t < tcount; ++t )
            {
                custring_view* dtgt = d_tokens[t];
                int hits = 0;
                if( dstr && dtgt )
                {
                    // walk the string, jumping one token-length past each match
                    int where = dstr->find(*dtgt);
                    while( where >= 0 )
                    {
                        where = dstr->find(*dtgt,where+dtgt->chars_count());
                        ++hits;
                    }
                }
                d_matrix[(idx*tcount)+t] = hits;
            }
        });
    if( !todevice )
    {   // copy the finished matrix back and drop the staging buffer
        hipMemcpy(results,d_matrix,sizeof(unsigned int)*count*tcount,hipMemcpyDeviceToHost);
        RMM_FREE(d_matrix,0);
    }
    return 0;
}
// return the number of occurrences of each string within a set of strings,
// counting only whole tokens: a match counts only when it is bounded by the
// delimiter (or the string's start/end) on both sides.
// this will fill in the provided memory as a matrix:
// 'aa' 'bbb' 'c' ...
// "aa aa b c" 2 0 1
// "aa bb c c" 1 0 2
// "a bbb ccc" 0 1 0
// ...
unsigned int NVText::tokens_counts( NVStrings& strs, NVStrings& tkns, const char* delimiter, unsigned int* results, bool todevice )
{
unsigned int count = strs.size();
unsigned int tcount = tkns.size();
// nothing to count, or nowhere to put the answer
if( results==0 || count==0 || tcount==0 )
return 0;
//
auto execpol = rmm::exec_policy(0);
// write straight into the caller's buffer when it is device memory
unsigned int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,tcount*count*sizeof(unsigned int),0);
// stage the delimiter bytes on the device for the lambda below
int dellen = (int)strlen(delimiter);
char* d_delimiter = nullptr;
RMM_ALLOC(&d_delimiter,dellen,0);
hipMemcpy(d_delimiter,delimiter,dellen,hipMemcpyHostToDevice);
//
// device-side views of both string sets
rmm::device_vector<custring_view*> strings(count,nullptr);
rmm::device_vector<custring_view*> tokens(tcount,nullptr);
custring_view** d_strings = strings.data().get();
custring_view** d_tokens = tokens.data().get();
strs.create_custring_index(d_strings);
tkns.create_custring_index(d_tokens);
// one thread per string; each thread fills its whole row of the matrix
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tokens, tcount, d_delimiter, dellen, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
for( int jdx=0; jdx < tcount; ++jdx )
{
custring_view* dtgt = d_tokens[jdx];
int fnd = 0;
if( dstr && dtgt )
{
// scan for every substring match; count only whole-token matches
int pos = dstr->find(*dtgt);
while( pos >= 0 )
{
int epos = pos + dtgt->chars_count();
// whole-token test: the char before 'pos' (if any) must start the
// delimiter, and the char at 'epos' (if any) must start the delimiter
if( ((pos==0) || (dstr->find(d_delimiter,dellen,pos-1)==(pos-1))) &&
((epos>=dstr->chars_count()) || (dstr->find(d_delimiter,dellen,epos)==epos)) )
++fnd;
pos = dstr->find(*dtgt,pos+dtgt->chars_count());
}
}
d_rtn[(idx*tcount)+jdx] = fnd;
}
});
//
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(unsigned int)*count*tcount,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
// Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm
// And here: https://en.wikipedia.org/wiki/Levenshtein_distances
// Functor computing one Levenshtein distance per thread, either against a
// single shared target (d_tgt) or element-wise against a parallel array of
// targets (d_tgts). The DP table is evaluated anti-diagonal by anti-diagonal
// (every cell on diagonal i has x + y == i), keeping only three diagonals
// (line2/line1/line0) in a caller-provided scratch buffer of
// 3 * min(lenA, lenB) shorts per thread.
struct editdistance_levenshtein_algorithm
{
custring_view** d_strings; // trying match
custring_view* d_tgt; // match with this
custring_view** d_tgts; // or these
short* d_buffer; // compute buffer
size_t* d_offsets; // locate sub-buffer
unsigned int* d_results; // edit-distances
// single string
editdistance_levenshtein_algorithm( custring_view** strings, custring_view* tgt, short* buffer, size_t* offsets, unsigned int* results )
: d_strings(strings), d_tgt(tgt), d_tgts(0), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
// multiple strings
editdistance_levenshtein_algorithm( custring_view** strings, custring_view** tgts, short* buffer, size_t* offsets, unsigned int* results )
: d_strings(strings), d_tgt(0), d_tgts(tgts), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
__device__ void operator() (unsigned int idx)
{
custring_view* dstr = d_strings[idx];
// each thread owns the scratch sub-buffer starting at its precomputed offset
short* buf = (short*)d_buffer + d_offsets[idx];
custring_view* dtgt = d_tgt;
if( !d_tgt ) // pairwise mode: compare with the idx-th target instead
dtgt = d_tgts[idx];
d_results[idx] = compute_distance(dstr,dtgt,buf);
}
__device__ unsigned int compute_distance( custring_view* dstr, custring_view* dtgt, short* buf )
{
// null/empty operand: the distance is the other operand's length
if( !dstr || dstr->empty() )
return dtgt ? dtgt->chars_count() : 0;
if( !dtgt || dtgt->empty() )
return dstr->chars_count();
//
// make strA the shorter string so the scratch rows are only lenA wide
custring_view* strA = dstr;
custring_view* strB = dtgt;
int lenA = (int)dstr->chars_count();
int lenB = (int)dtgt->chars_count();
if( lenA > lenB )
{
lenB = lenA;
lenA = dtgt->chars_count();
strA = dtgt;
strB = dstr;
}
//
// three rolling diagonals, each lenA shorts wide
short* line2 = buf;
short* line1 = line2 + lenA;
short* line0 = line1 + lenA;
// number of anti-diagonals of the lenA x lenB table
int range = lenA + lenB - 1;
for (int i = 0; i < range; i++)
{
// rotate the diagonal buffers: line0 becomes the diagonal being filled
short* tmp = line2;
line2 = line1;
line1 = line0;
line0 = tmp;
// x runs over the cells of anti-diagonal i that lie inside the table
for(int x = (i < lenB ? 0 : i - lenB + 1); (x < lenA) && (x < i+1); x++)
{
int y = i - x;
// u/v come from the previous diagonal, w from two diagonals back;
// boundary cells fall back to their implicit first-row/column values
short u = y > 0 ? line1[x] : x + 1;
short v = x > 0 ? line1[x - 1] : y + 1;
short w;
if((x > 0) && (y > 0))
w = line2[x - 1];
else if(x > y)
w = x;
else
w = y;
// u -> deletion, v -> insertion, w -> substitution (free on a match)
u++; v++;
Char c1 = strA->at(x);
Char c2 = strB->at(y);
if(c1 != c2)
w++;
// cell value = cheapest of the three edit operations
short value = u;
if(v < value)
value = v;
if(w < value)
value = w;
line0[x] = value;
}
}
// bottom-right corner of the DP table holds the final distance
return (unsigned int)line0[lenA-1];
}
};
// Compute the Levenshtein edit distance from every string in 'strs' to the
// single host string 'str'. One distance per string is written to 'results'
// (device memory when bdevmem is true, host memory otherwise). Returns 0.
// Throws std::invalid_argument for an unsupported algorithm or null arguments.
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs, const char* str, unsigned int* results, bool bdevmem )
{
if( algo != levenshtein || str==0 || results==0 )
throw std::invalid_argument("invalid algorithm");
unsigned int count = strs.size();
if( count==0 )
return 0; // nothing to do
auto execpol = rmm::exec_policy(0);
// build a device-side custring_view for the shared target string
unsigned int len = strlen(str);
unsigned int alcsz = custring_view::alloc_size(str,len);
custring_view* d_tgt = nullptr;
RMM_ALLOC(&d_tgt,alcsz,0);
custring_view::create_from_host(d_tgt,str,len);
// setup results vector (staged on device when the caller passed host memory)
unsigned int* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
// get the string pointers
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// calculate the per-row scratch size: the edit-distance functor keeps three
// rows of shorts, each as wide as the shorter of (string, target) — i.e.
// 3 shorts (6 bytes) per character; rows for null strings stay 0
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tgt, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int len = dstr->chars_count();
if( d_tgt->chars_count() < len )
len = d_tgt->chars_count();
d_sizes[idx] = len * 3;
});
//
// one shared scratch buffer, carved into per-row sub-buffers by a scan
size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
rmm::device_vector<short> buffer(bufsize,0);
short* d_buffer = buffer.data().get();
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
// compute edit distance
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
editdistance_levenshtein_algorithm(d_strings, d_tgt, d_buffer, d_offsets, d_rtn));
//
if( !bdevmem )
{
// copy results back to the caller's host buffer and drop the staging copy
hipMemcpy(results,d_rtn,count*sizeof(unsigned int),hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
RMM_FREE(d_tgt,0);
return 0;
}
// Compute the Levenshtein edit distance between strs1[i] and strs2[i] for
// every row i. One distance per pair is written to 'results' (device memory
// when bdevmem is true, host memory otherwise). Returns 0.
// Throws std::invalid_argument for an unsupported algorithm, a null results
// pointer, or mismatched input sizes.
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs1, NVStrings& strs2, unsigned int* results, bool bdevmem )
{
    // validate like the (NVStrings, const char*) overload does; the original
    // skipped the null-results check and would have written through null in
    // the device functor below
    if( algo != levenshtein || results==0 )
        throw std::invalid_argument("invalid algorithm");
    unsigned int count = strs1.size();
    if( count != strs2.size() )
        throw std::invalid_argument("sizes must match");
    if( count==0 )
        return 0; // nothing to do
    // setup results vector (staged on device when the caller passed host memory)
    auto execpol = rmm::exec_policy(0);
    unsigned int* d_rtn = results;
    if( !bdevmem )
        RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
    // device-side views of both string columns
    rmm::device_vector<custring_view*> strings1(count,nullptr);
    custring_view** d_strings1 = strings1.data().get();
    strs1.create_custring_index(d_strings1);
    rmm::device_vector<custring_view*> strings2(count,nullptr);
    custring_view** d_strings2 = strings2.data().get();
    strs2.create_custring_index(d_strings2);
    // per-row scratch size: the edit-distance functor keeps three rows of
    // shorts, each as wide as the shorter string of the pair — i.e. 3 shorts
    // (6 bytes) per character; rows with a null string stay 0
    rmm::device_vector<size_t> sizes(count,0);
    size_t* d_sizes = sizes.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings1, d_strings2, d_sizes] __device__(unsigned int idx){
            custring_view* dstr1 = d_strings1[idx];
            custring_view* dstr2 = d_strings2[idx];
            if( !dstr1 || !dstr2 )
                return;
            int len1 = dstr1->chars_count();
            int len = dstr2->chars_count();
            if( len1 < len )
                len = len1;
            d_sizes[idx] = len * 3;
        });
    // one shared scratch buffer, carved into per-row sub-buffers by a scan
    size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
    rmm::device_vector<short> buffer(bufsize,0);
    short* d_buffer = buffer.data().get();
    rmm::device_vector<size_t> offsets(count,0);
    size_t* d_offsets = offsets.data().get();
    thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
    // compute the edit distance for every pair
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        editdistance_levenshtein_algorithm(d_strings1, d_strings2, d_buffer, d_offsets, d_rtn));
    if( !bdevmem )
    {   // hand results back to the caller's host buffer
        hipMemcpy(results,d_rtn,count*sizeof(unsigned int),hipMemcpyDeviceToHost);
        RMM_FREE(d_rtn,0);
    }
    return 0;
}
| c46ab6f6e4e76349ecd21a849ce4d6cd4f42958c.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "custring_view.cuh"
#include "custring.cuh"
#include "NVText.h"
//static void printCudaError( cudaError_t err, const char* prefix="\t" )
//{
// if( err != cudaSuccess )
// fprintf(stderr,"%s: %s(%d):%s\n",prefix,cudaGetErrorName(err),(int)err,cudaGetErrorString(err));
//}
// Return the unique set of tokens found across all strings in 'strs',
// splitting on the specified delimiter. The result NVStrings is built from
// (pointer,length) pairs that point back into the original strings' device
// data. NOTE(review): delimiter is assumed non-null and non-empty — strlen()
// and split_size() are called on it unconditionally; confirm with callers.
NVStrings* NVText::unique_tokens(NVStrings& strs, const char* delimiter )
{
// stage the delimiter bytes on the device so the __device__ lambdas can use them
int bytes = (int)strlen(delimiter);
char* d_delimiter = nullptr;
auto execpol = rmm::exec_policy(0);
RMM_ALLOC(&d_delimiter,bytes,0);
cudaMemcpy(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice);
// need to count how many output strings per string
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
// d_counts[idx] = number of tokens in string idx (left 0 for null strings)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiter, bytes, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_counts[idx] = dstr->split_size(d_delimiter,bytes,0,-1);
});
// the widest row determines how many token "columns" must be visited
int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// build an index for each column and then sort/unique it
rmm::device_vector< thrust::pair<const char*,size_t> > vocab;
for( int col=0; col < columnsCount; ++col )
{
// first, build a vector of pair<char*,int>'s' for each column
// each pair points to a string for this column for each row
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, col, d_delimiter, bytes, d_counts, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
if( !dstr )
return;
// dcount already accounts for the maxsplit value
int dcount = d_counts[idx];
if( col >= dcount )
return; // passed the end for this string
// skip delimiters until we reach this column
int spos = 0, nchars = dstr->chars_count();
int epos = nchars;
for( int c=0; c < (dcount-1); ++c )
{
epos = dstr->find(d_delimiter,bytes,spos);
if( epos < 0 )
{
epos = nchars;
break;
}
if( c==col ) // found our column
break;
spos = epos + bytes;
epos = nchars;
}
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
});
//cudaError_t err = cudaDeviceSynchronize();
//if( err != cudaSuccess )
//{
// fprintf(stderr,"unique_tokens:col=%d\n",col);
// printCudaError(err);
//}
// add column values to vocab list
vocab.insert(vocab.end(),indexes.begin(),indexes.end());
//printf("vocab size = %lu\n",vocab.size());
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
// sort the list (the whole accumulated vocab is re-sorted every column pass)
thrust::sort(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return lhs.first==0; // non-null > null
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// unique the list: same pointer, or equal length + equal bytes, are duplicates
thrust::pair<const char*,size_t>* newend = thrust::unique(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
if( lhs.first==rhs.first )
return true;
if( lhs.second != rhs.second )
return false;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
// truncate list to the unique set
// the above unique() call does an implicit dev-sync
vocab.resize((size_t)(newend - d_vocab));
}
// remove the inevitable 'null' token
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
auto end = thrust::remove_if(execpol->on(0), d_vocab, d_vocab + vocab.size(), [] __device__ ( thrust::pair<const char*,size_t> w ) { return w.first==0; } );
unsigned int vsize = (unsigned int)(end - d_vocab); // may need new size
// done
RMM_FREE(d_delimiter,0);
// build strings object from vocab elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_vocab,vsize);
}
// return a count of the number of tokens for each string when applying the specified delimiter
// Computes, for each string in strs, the number of tokens produced by splitting
// on the given delimiter. Results are written per-string into 'results'
// (count == strs.size() entries). If bdevmem is false, 'results' is host memory
// and a temporary device buffer is used. Always returns 0.
unsigned int NVText::token_count( NVStrings& strs, const char* delimiter, unsigned int* results, bool bdevmem )
{
    int bytes = (int)strlen(delimiter);
    char* d_delimiter = nullptr;
    auto execpol = rmm::exec_policy(0);
    // copy the delimiter to device memory so the kernel lambda can use it
    RMM_ALLOC(&d_delimiter,bytes,0);
    cudaMemcpy(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice);
    unsigned int count = strs.size();
    unsigned int* d_counts = results;
    if( !bdevmem )
        RMM_ALLOC(&d_counts,count*sizeof(unsigned int),0);
    // count how many tokens per string
    rmm::device_vector<custring_view*> strings(count,nullptr);
    custring_view** d_strings = strings.data().get();
    strs.create_custring_index(d_strings);
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_delimiter, bytes, d_counts] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            int tc = 0;  // null and empty strings report zero tokens
            if( dstr )
                tc = dstr->empty() ? 0 : dstr->split_size(d_delimiter,bytes,0,-1);
            d_counts[idx] = tc;
        });
    //
    if( !bdevmem )
    {   // caller gave host memory: copy back and release the temp device buffer
        cudaMemcpy(results,d_counts,count*sizeof(unsigned int),cudaMemcpyDeviceToHost);
        RMM_FREE(d_counts,0);
    }
    RMM_FREE(d_delimiter,0);
    return 0;
}
// return boolean value for each token if found in the provided strings
// Fills 'results' (a count x tcount boolean matrix, row-major by string) with
// whether each token in tkns appears as a substring of each string in strs.
// A null string or null token yields false. If todevice is false, 'results'
// is host memory. Always returns 0.
unsigned int NVText::contains_strings( NVStrings& strs, NVStrings& tkns, bool* results, bool todevice )
{
    unsigned int count = strs.size();
    unsigned int tcount = tkns.size();
    if( results==0 || count==0 || tcount==0 )
        return 0;
    //
    auto execpol = rmm::exec_policy(0);
    bool* d_rtn = results;
    if( !todevice )
        RMM_ALLOC(&d_rtn,tcount*count*sizeof(bool),0);
    // build device-side string indexes for both sets
    rmm::device_vector<custring_view*> strings(count,nullptr);
    rmm::device_vector<custring_view*> tokens(tcount,nullptr);
    custring_view** d_strings = strings.data().get();
    custring_view** d_tokens = tokens.data().get();
    strs.create_custring_index(d_strings);
    tkns.create_custring_index(d_tokens);
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_tokens, tcount, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            for( int jdx=0; jdx < tcount; ++jdx )
            {
                custring_view* dtgt = d_tokens[jdx];
                // -2 sentinel (null string/token) fails the >=0 test => false
                d_rtn[(idx*tcount)+jdx] = ((dstr && dtgt) ? dstr->find(*dtgt) : -2) >=0 ;
            }
        });
    //
    if( !todevice )
    {   // copy result back to host
        cudaMemcpy(results,d_rtn,sizeof(bool)*count*tcount,cudaMemcpyDeviceToHost);
        RMM_FREE(d_rtn,0);
    }
    return 0;
}
// return the number of occurrences of each string within a set of strings
// this will fill in the provided memory as a matrix:
// 'aa' 'bbb' 'c' ...
// "aaaabc" 2 0 1
// "aabbcc" 1 0 2
// "abbbbc" 0 1 1
// ...
// Fills 'results' (a count x tcount matrix, row-major by string) with the
// number of occurrences of each token in tkns within each string of strs.
// Matches are counted by repeated find(), advancing past each match by the
// token's character length (so directly adjacent repeats are all counted).
// Null string or token yields 0. Always returns 0.
unsigned int NVText::strings_counts( NVStrings& strs, NVStrings& tkns, unsigned int* results, bool todevice )
{
    unsigned int count = strs.size();
    unsigned int tcount = tkns.size();
    if( results==0 || count==0 || tcount==0 )
        return 0;
    //
    auto execpol = rmm::exec_policy(0);
    unsigned int* d_rtn = results;
    if( !todevice )
        RMM_ALLOC(&d_rtn,tcount*count*sizeof(unsigned int),0);
    // build device-side string indexes for both sets
    rmm::device_vector<custring_view*> strings(count,nullptr);
    rmm::device_vector<custring_view*> tokens(tcount,nullptr);
    custring_view** d_strings = strings.data().get();
    custring_view** d_tokens = tokens.data().get();
    strs.create_custring_index(d_strings);
    tkns.create_custring_index(d_tokens);
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_tokens, tcount, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            for( int jdx=0; jdx < tcount; ++jdx )
            {
                custring_view* dtgt = d_tokens[jdx];
                int fnd = 0;
                if( dstr && dtgt )
                {
                    int pos = dstr->find(*dtgt);
                    while( pos >= 0 )
                    {   // count this match, then search from just past it
                        pos = dstr->find(*dtgt,pos+dtgt->chars_count());
                        ++fnd;
                    }
                }
                d_rtn[(idx*tcount)+jdx] = fnd;
            }
        });
    //
    if( !todevice )
    {   // copy result back to host
        cudaMemcpy(results,d_rtn,sizeof(unsigned int)*count*tcount,cudaMemcpyDeviceToHost);
        RMM_FREE(d_rtn,0);
    }
    return 0;
}
// return the number of occurrences of each string within a set of strings
// this will fill in the provided memory as a matrix:
// 'aa' 'bbb' 'c' ...
// "aa aa b c" 2 0 1
// "aa bb c c" 1 0 2
// "a bbb ccc" 0 1 0
// ...
// Fills 'results' (a count x tcount matrix, row-major by string) with the
// number of whole-token occurrences of each string in tkns within each string
// of strs. Unlike strings_counts, a match only counts when it is bounded by
// the delimiter (or the start/end of the string) on both sides.
// Null string or token yields 0. Always returns 0.
unsigned int NVText::tokens_counts( NVStrings& strs, NVStrings& tkns, const char* delimiter, unsigned int* results, bool todevice )
{
    unsigned int count = strs.size();
    unsigned int tcount = tkns.size();
    if( results==0 || count==0 || tcount==0 )
        return 0;
    //
    auto execpol = rmm::exec_policy(0);
    unsigned int* d_rtn = results;
    if( !todevice )
        RMM_ALLOC(&d_rtn,tcount*count*sizeof(unsigned int),0);
    // copy the delimiter to device memory so the kernel lambda can use it
    int dellen = (int)strlen(delimiter);
    char* d_delimiter = nullptr;
    RMM_ALLOC(&d_delimiter,dellen,0);
    cudaMemcpy(d_delimiter,delimiter,dellen,cudaMemcpyHostToDevice);
    // build device-side string indexes for both sets
    rmm::device_vector<custring_view*> strings(count,nullptr);
    rmm::device_vector<custring_view*> tokens(tcount,nullptr);
    custring_view** d_strings = strings.data().get();
    custring_view** d_tokens = tokens.data().get();
    strs.create_custring_index(d_strings);
    tkns.create_custring_index(d_tokens);
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_tokens, tcount, d_delimiter, dellen, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            for( int jdx=0; jdx < tcount; ++jdx )
            {
                custring_view* dtgt = d_tokens[jdx];
                int fnd = 0;
                if( dstr && dtgt )
                {
                    int pos = dstr->find(*dtgt);
                    while( pos >= 0 )
                    {
                        int epos = pos + dtgt->chars_count();
                        // only count matches delimited on both sides
                        // (or flush against the start/end of the string)
                        if( ((pos==0) || (dstr->find(d_delimiter,dellen,pos-1)==(pos-1))) &&
                            ((epos>=dstr->chars_count()) || (dstr->find(d_delimiter,dellen,epos)==epos)) )
                            ++fnd;
                        pos = dstr->find(*dtgt,pos+dtgt->chars_count());
                    }
                }
                d_rtn[(idx*tcount)+jdx] = fnd;
            }
        });
    //
    if( !todevice )
    {   // copy result back to host
        cudaMemcpy(results,d_rtn,sizeof(unsigned int)*count*tcount,cudaMemcpyDeviceToHost);
        RMM_FREE(d_rtn,0);
    }
    // fix: d_delimiter was previously leaked (token_count frees its copy; this
    // function did not)
    RMM_FREE(d_delimiter,0);
    return 0;
}
// Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm
// And here: https://en.wikipedia.org/wiki/Levenshtein_distance
// Functor computing the Levenshtein edit distance for one string per thread.
// Operates in one of two modes: every string against a single target (d_tgt),
// or string i against target i (d_tgts). The DP is evaluated along
// anti-diagonals using a rolling buffer of three diagonals (3 * shorter-length
// shorts per string), pre-sized by the caller and located via d_offsets.
struct editdistance_levenshtein_algorithm
{
    custring_view** d_strings; // trying match
    custring_view* d_tgt; // match with this (single-target mode; null otherwise)
    custring_view** d_tgts; // or these (pairwise mode)
    short* d_buffer; // compute buffer
    size_t* d_offsets; // locate sub-buffer
    unsigned int* d_results; // edit-distances
    // single string
    editdistance_levenshtein_algorithm( custring_view** strings, custring_view* tgt, short* buffer, size_t* offsets, unsigned int* results )
    : d_strings(strings), d_tgt(tgt), d_tgts(0), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
    // multiple strings
    editdistance_levenshtein_algorithm( custring_view** strings, custring_view** tgts, short* buffer, size_t* offsets, unsigned int* results )
    : d_strings(strings), d_tgt(0), d_tgts(tgts), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
    // computes and stores the distance for string idx
    __device__ void operator() (unsigned int idx)
    {
        custring_view* dstr = d_strings[idx];
        short* buf = (short*)d_buffer + d_offsets[idx]; // this string's scratch slice
        custring_view* dtgt = d_tgt;
        if( !d_tgt )
            dtgt = d_tgts[idx];
        d_results[idx] = compute_distance(dstr,dtgt,buf);
    }
    // Levenshtein DP over anti-diagonals; buf must hold 3 * min(lenA,lenB) shorts
    __device__ unsigned int compute_distance( custring_view* dstr, custring_view* dtgt, short* buf )
    {
        // degenerate cases: distance to an empty/null string is the other's length
        if( !dstr || dstr->empty() )
            return dtgt ? dtgt->chars_count() : 0;
        if( !dtgt || dtgt->empty() )
            return dstr->chars_count();
        //
        custring_view* strA = dstr;
        custring_view* strB = dtgt;
        int lenA = (int)dstr->chars_count();
        int lenB = (int)dtgt->chars_count();
        // swap so strA is the shorter string; diagonals then have length lenA
        if( lenA > lenB )
        {
            lenB = lenA;
            lenA = dtgt->chars_count();
            strA = dtgt;
            strB = dstr;
        }
        //
        // three rolling diagonals of the DP matrix
        short* line2 = buf;
        short* line1 = line2 + lenA;
        short* line0 = line1 + lenA;
        int range = lenA + lenB - 1; // number of anti-diagonals
        for (int i = 0; i < range; i++)
        {
            // rotate diagonals: line0 becomes current, line1/line2 the previous two
            short* tmp = line2;
            line2 = line1;
            line1 = line0;
            line0 = tmp;
            // x walks along the current anti-diagonal within strA's bounds
            for(int x = (i < lenB ? 0 : i - lenB + 1); (x < lenA) && (x < i+1); x++)
            {
                int y = i - x;
                short u = y > 0 ? line1[x] : x + 1;     // deletion candidate
                short v = x > 0 ? line1[x - 1] : y + 1; // insertion candidate
                short w;                                 // substitution candidate
                if((x > 0) && (y > 0))
                    w = line2[x - 1];
                else if(x > y)
                    w = x;
                else
                    w = y;
                u++; v++;
                Char c1 = strA->at(x);
                Char c2 = strB->at(y);
                if(c1 != c2)
                    w++;
                // cell value is the cheapest of the three edits
                short value = u;
                if(v < value)
                    value = v;
                if(w < value)
                    value = w;
                line0[x] = value;
            }
        }
        // bottom-right DP cell = edit distance
        return (unsigned int)line0[lenA-1];
    }
};
// Computes the Levenshtein edit distance between every string in strs and the
// single host string 'str', writing one distance per string into 'results'.
// Only the 'levenshtein' algorithm is supported; throws otherwise.
// Always returns 0.
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs, const char* str, unsigned int* results, bool bdevmem )
{
    if( algo != levenshtein || str==0 || results==0 )
        throw std::invalid_argument("invalid algorithm");
    unsigned int count = strs.size();
    if( count==0 )
        return 0; // nothing to do
    auto execpol = rmm::exec_policy(0);
    // build a device-side custring_view of the target
    unsigned int len = strlen(str);
    unsigned int alcsz = custring_view::alloc_size(str,len);
    custring_view* d_tgt = nullptr;
    RMM_ALLOC(&d_tgt,alcsz,0);
    custring_view::create_from_host(d_tgt,str,len);
    // setup results vector
    unsigned int* d_rtn = results;
    if( !bdevmem )
        RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
    // get the string pointers
    rmm::device_vector<custring_view*> strings(count,nullptr);
    custring_view** d_strings = strings.data().get();
    strs.create_custring_index(d_strings);
    // calculate the size of the compute-buffer: 3 * min(string length, target length)
    // (three rolling DP diagonals, each as long as the shorter string)
    rmm::device_vector<size_t> sizes(count,0);
    size_t* d_sizes = sizes.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_tgt, d_sizes] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            if( !dstr )
                return;
            int len = dstr->chars_count();
            if( d_tgt->chars_count() < len )
                len = d_tgt->chars_count();
            d_sizes[idx] = len * 3;
        });
    //
    // one pooled buffer for all strings; offsets locate each string's slice
    size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
    rmm::device_vector<short> buffer(bufsize,0);
    short* d_buffer = buffer.data().get();
    rmm::device_vector<size_t> offsets(count,0);
    size_t* d_offsets = offsets.data().get();
    thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
    // compute edit distance
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        editdistance_levenshtein_algorithm(d_strings, d_tgt, d_buffer, d_offsets, d_rtn));
    //
    if( !bdevmem )
    {   // copy result back to host and release the temp device buffer
        cudaMemcpy(results,d_rtn,count*sizeof(unsigned int),cudaMemcpyDeviceToHost);
        RMM_FREE(d_rtn,0);
    }
    RMM_FREE(d_tgt,0);
    return 0;
}
// Computes the pairwise Levenshtein edit distance between strs1[i] and
// strs2[i], writing one distance per pair into 'results'. The two sets must
// be the same size. Only 'levenshtein' is supported; throws otherwise.
// Always returns 0.
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs1, NVStrings& strs2, unsigned int* results, bool bdevmem )
{
    if( algo != levenshtein )
        throw std::invalid_argument("invalid algorithm");
    unsigned int count = strs1.size();
    if( count != strs2.size() )
        throw std::invalid_argument("sizes must match");
    if( count==0 )
        return 0; // nothing to do
    // setup results vector
    auto execpol = rmm::exec_policy(0);
    unsigned int* d_rtn = results;
    if( !bdevmem )
        RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
    // get the string pointers
    rmm::device_vector<custring_view*> strings1(count,nullptr);
    custring_view** d_strings1 = strings1.data().get();
    strs1.create_custring_index(d_strings1);
    rmm::device_vector<custring_view*> strings2(count,nullptr);
    custring_view** d_strings2 = strings2.data().get();
    strs2.create_custring_index(d_strings2);
    // calculate the size of the compute-buffer: 3 * min(length of the two strings)
    // (three rolling DP diagonals, each as long as the shorter string)
    rmm::device_vector<size_t> sizes(count,0);
    size_t* d_sizes = sizes.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings1, d_strings2, d_sizes] __device__(unsigned int idx){
            custring_view* dstr1 = d_strings1[idx];
            custring_view* dstr2 = d_strings2[idx];
            if( !dstr1 || !dstr2 )
                return;
            int len1 = dstr1->chars_count();
            int len = dstr2->chars_count();
            if( len1 < len )
                len = len1;
            d_sizes[idx] = len * 3;
        });
    //
    // one pooled buffer for all pairs; offsets locate each pair's slice
    size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
    rmm::device_vector<short> buffer(bufsize,0);
    short* d_buffer = buffer.data().get();
    rmm::device_vector<size_t> offsets(count,0);
    size_t* d_offsets = offsets.data().get();
    thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
    // compute edit distance
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        editdistance_levenshtein_algorithm(d_strings1, d_strings2, d_buffer, d_offsets, d_rtn));
    //
    if( !bdevmem )
    {   // copy result back to host and release the temp device buffer
        cudaMemcpy(results,d_rtn,count*sizeof(unsigned int),cudaMemcpyDeviceToHost);
        RMM_FREE(d_rtn,0);
    }
    return 0;
}
|
377f70f510c2b552daa8770a265eafe80ff84426.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<sys/time.h>
#include<cuda.h>
#define N 1024
__global__ void add( int *a, int *b, int *c ) {
    // Element-wise integer vector add; expects a single-block launch of N threads.
    const int idx = threadIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}
__global__ void add1( float *a, float *b, float *c ) {
    // Element-wise float vector add; expects a single-block launch.
    const int idx = threadIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}
long getMicroSeconds();
double noofOperations,totalTime,IOPS,GFLOPS,FLOPS,GIOPS,totalTime1,IFLOPS;
int main( void ) {
    // Micro-benchmark: times a 4-byte (scalar float) transfer+add, then a
    // 1KB (N-int) transfer+add, printing rough bandwidth and op-rate figures.
    float a, b, c;                 // host scalars for the 4-byte benchmark
    int a1[N], b1[N], c1[N];       // host arrays for the 1KB benchmark
    float *dev_a, *dev_b, *dev_c;
    int *dev_a1, *dev_b1, *dev_c1;
    double time = 0;
    int size = sizeof(int);
    int fsize = sizeof(float);
    double start, stop, end, start1, end1, time1;
    int i;
    int threadCount = 0;
    hipMalloc( (void**)&dev_a, N * sizeof(float) );
    hipMalloc( (void**)&dev_b, N * sizeof(float) );
    hipMalloc( (void**)&dev_c, N * sizeof(float) );
    a = 2.3;
    b = 4.2;
    // BUG FIX: the timestamp was stored in an unused 'istart' while the
    // bandwidth formula below divided by the uninitialized 'start'.
    start = getMicroSeconds();
    hipMemcpy(dev_a, &a, fsize, hipMemcpyHostToDevice);
    stop = getMicroSeconds();
    time = fsize / (stop - start);
    printf("\n Read bandwidth for 1B%f\t\n", time);
    hipMemcpy(dev_b, &b, fsize, hipMemcpyHostToDevice);
    start = getMicroSeconds();
    hipLaunchKernelGGL(add1, dim3(1), dim3(1), 0, 0, dev_a, dev_b, dev_c);
    threadCount = 1;
    noofOperations = 1;
    stop = getMicroSeconds();
    totalTime = fsize / (stop - start);
    printf("\n Write Bandwidth of 1B %f\n", totalTime);
    FLOPS = noofOperations / totalTime;
    printf("\n FLOPS\t%f", FLOPS);
    // BUG FIX: format string used invalid escape "\%d"
    printf("\tThreadCount\t%d\n", threadCount);
    GFLOPS = FLOPS / (pow(10, 9));
    printf("time taken in gflops: %f\n", GFLOPS);
    // BUG FIX: 'c' is a scalar; the original passed it by value (not a
    // pointer) and copied N floats, overflowing the 4-byte destination.
    // Copy exactly one float into &c (only dev_c[0] was computed anyway).
    hipMemcpy( &c, dev_c, fsize, hipMemcpyDeviceToHost );
    printf("c %f \n", c);
    hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
    //************1 KB byte************************
    // allocate the memory on the GPU
    hipMalloc( (void**)&dev_a1, N * sizeof(int) );
    hipMalloc( (void**)&dev_b1, N * sizeof(int) );
    hipMalloc( (void**)&dev_c1, N * sizeof(int) );
    // fill the arrays 'a' and 'b' on the CPU
    for (i = 0; i < N; i++)
    {
        a1[i] = -i;
        b1[i] = i * i;
    }
    time = 0;
    start = getMicroSeconds();
    hipMemcpy( dev_a1, a1, N * sizeof(int), hipMemcpyHostToDevice );
    hipMemcpy( dev_b1, b1, N * sizeof(int), hipMemcpyHostToDevice );
    end = getMicroSeconds();
    // NOTE(review): 2*size is 8 bytes, not the 2*N*sizeof(int) actually
    // transferred — figures preserved as originally reported; verify intent.
    time = (2 * size) / (end - start);
    printf("\n Read bandwidth for 1KB%f\t\n", time);
    start = getMicroSeconds();
    hipLaunchKernelGGL(add, dim3(1), dim3(N), 0, 0, dev_a1, dev_b1, dev_c1);
    stop = getMicroSeconds();
    start1 = getMicroSeconds();
    hipMemcpy( c1, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost );
    end1 = getMicroSeconds();
    time1 = (end1 - start1);
    totalTime1 = (2 * size) / time1;
    printf("\n Write Bandwidthof 1KB %f\n", totalTime1);
    time = (stop - start);
    threadCount = N;
    noofOperations = 1;
    totalTime = (2 * size) / time;
    IOPS = noofOperations / totalTime;
    printf("\n IOPS\t%f\t", IOPS);
    // BUG FIX: format string used invalid escapes "\T" and "\%d"
    printf("\tThreadCount\t%d\n", threadCount);
    IFLOPS = IOPS / (pow(10, 9));
    printf("time taken in Iflops: %f\n", IFLOPS);
    start1 = getMicroSeconds();
    hipMemcpy( c1, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost );
    end1 = getMicroSeconds();
    time1 = (end1 - start1);
    totalTime1 = (2 * size) / time1;
    printf("\n Write Bandwidth %f\n", totalTime1);
    // display the results
    for (int i = 0; i < N; i++) {
        printf( "%d + %d = %d\n", a1[i], b1[i], c1[i] );
    }
    // free the memory allocated on the GPU
    hipFree( dev_a1 );
    hipFree( dev_b1 );
    hipFree( dev_c1 );
    return 0;
}
long getMicroSeconds(){
    // Wall-clock time in microseconds since the Unix epoch.
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
| 377f70f510c2b552daa8770a265eafe80ff84426.cu | #include<stdio.h>
#include<sys/time.h>
#include<cuda.h>
#define N 1024
__global__ void add( int *a, int *b, int *c ) {
    // Element-wise integer vector add; expects a single-block launch of N threads.
    const int idx = threadIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}
__global__ void add1( float *a, float *b, float *c ) {
    // Element-wise float vector add; expects a single-block launch.
    const int idx = threadIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}
long getMicroSeconds();
double noofOperations,totalTime,IOPS,GFLOPS,FLOPS,GIOPS,totalTime1,IFLOPS;
int main( void ) {
    // Micro-benchmark: times a 4-byte (scalar float) transfer+add, then a
    // 1KB (N-int) transfer+add, printing rough bandwidth and op-rate figures.
    float a, b, c;                 // host scalars for the 4-byte benchmark
    int a1[N], b1[N], c1[N];       // host arrays for the 1KB benchmark
    float *dev_a, *dev_b, *dev_c;
    int *dev_a1, *dev_b1, *dev_c1;
    double time = 0;
    int size = sizeof(int);
    int fsize = sizeof(float);
    double start, stop, end, start1, end1, time1;
    int i;
    int threadCount = 0;
    cudaMalloc( (void**)&dev_a, N * sizeof(float) );
    cudaMalloc( (void**)&dev_b, N * sizeof(float) );
    cudaMalloc( (void**)&dev_c, N * sizeof(float) );
    a = 2.3;
    b = 4.2;
    // BUG FIX: the timestamp was stored in an unused 'istart' while the
    // bandwidth formula below divided by the uninitialized 'start'.
    start = getMicroSeconds();
    cudaMemcpy(dev_a, &a, fsize, cudaMemcpyHostToDevice);
    stop = getMicroSeconds();
    time = fsize / (stop - start);
    printf("\n Read bandwidth for 1B%f\t\n", time);
    cudaMemcpy(dev_b, &b, fsize, cudaMemcpyHostToDevice);
    start = getMicroSeconds();
    add1<<<1,1>>>(dev_a, dev_b, dev_c);
    threadCount = 1;
    noofOperations = 1;
    stop = getMicroSeconds();
    totalTime = fsize / (stop - start);
    printf("\n Write Bandwidth of 1B %f\n", totalTime);
    FLOPS = noofOperations / totalTime;
    printf("\n FLOPS\t%f", FLOPS);
    // BUG FIX: format string used invalid escape "\%d"
    printf("\tThreadCount\t%d\n", threadCount);
    GFLOPS = FLOPS / (pow(10, 9));
    printf("time taken in gflops: %f\n", GFLOPS);
    // BUG FIX: 'c' is a scalar; the original passed it by value (not a
    // pointer) and copied N floats, overflowing the 4-byte destination.
    // Copy exactly one float into &c (only dev_c[0] was computed anyway).
    cudaMemcpy( &c, dev_c, fsize, cudaMemcpyDeviceToHost );
    printf("c %f \n", c);
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    //************1 KB byte************************
    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a1, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b1, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c1, N * sizeof(int) );
    // fill the arrays 'a' and 'b' on the CPU
    for (i = 0; i < N; i++)
    {
        a1[i] = -i;
        b1[i] = i * i;
    }
    time = 0;
    start = getMicroSeconds();
    cudaMemcpy( dev_a1, a1, N * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b1, b1, N * sizeof(int), cudaMemcpyHostToDevice );
    end = getMicroSeconds();
    // NOTE(review): 2*size is 8 bytes, not the 2*N*sizeof(int) actually
    // transferred — figures preserved as originally reported; verify intent.
    time = (2 * size) / (end - start);
    printf("\n Read bandwidth for 1KB%f\t\n", time);
    start = getMicroSeconds();
    add<<<1,N>>>( dev_a1, dev_b1, dev_c1 );
    stop = getMicroSeconds();
    start1 = getMicroSeconds();
    cudaMemcpy( c1, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost );
    end1 = getMicroSeconds();
    time1 = (end1 - start1);
    totalTime1 = (2 * size) / time1;
    printf("\n Write Bandwidthof 1KB %f\n", totalTime1);
    time = (stop - start);
    threadCount = N;
    noofOperations = 1;
    totalTime = (2 * size) / time;
    IOPS = noofOperations / totalTime;
    printf("\n IOPS\t%f\t", IOPS);
    // BUG FIX: format string used invalid escapes "\T" and "\%d"
    printf("\tThreadCount\t%d\n", threadCount);
    IFLOPS = IOPS / (pow(10, 9));
    printf("time taken in Iflops: %f\n", IFLOPS);
    start1 = getMicroSeconds();
    cudaMemcpy( c1, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost );
    end1 = getMicroSeconds();
    time1 = (end1 - start1);
    totalTime1 = (2 * size) / time1;
    printf("\n Write Bandwidth %f\n", totalTime1);
    // display the results
    for (int i = 0; i < N; i++) {
        printf( "%d + %d = %d\n", a1[i], b1[i], c1[i] );
    }
    // free the memory allocated on the GPU
    cudaFree( dev_a1 );
    cudaFree( dev_b1 );
    cudaFree( dev_c1 );
    return 0;
}
long getMicroSeconds(){
    // Wall-clock time in microseconds since the Unix epoch.
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
|
949a13a730d83c9c2bec5d11086d40133c770623.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THH/THHGeneral.h>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in hiprand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state)
* in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the
* register spilling problem.
*/
namespace {
// launch bounds used for kernels utilizing TensorIterator
const uint32_t block_size_bound = 256;
const uint32_t grid_size_bound = 4;
// number of randoms given by distributions like hiprand_uniform4, hiprand_uniform2_double
// used in calculating philox offset.
const uint32_t curand4_engine_calls = 4;
// utility function that calculates proper philox_offset
// for distributions utilizing TensorIterator. For distributions using
// TensorIterator, we are using a grid-stride loop with each
// thread yielding one element per thread. For the edge of the grid-stride
// loop, if the tensor size is large, the unroll loop will kick in and the float4
// from hiprand4 will start getting utilized (for common tensor sizes, we end up
// using rand.x from each thread). Hence, the philox_offset is
// (number of elements per thread * number of engine calls), which makes
// sure that philox offset increment is not less than the number of randoms used
// in each thread.
// Computes the launch configuration for the grid-stride distribution kernels:
// returns (philox counter offset, grid, block). The grid is capped at full
// device occupancy (blocks-per-SM * SM count) so each thread may process
// multiple elements; the counter offset covers the max randoms any one
// thread will draw (see comment above this function).
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
  const uint64_t numel = static_cast<uint64_t>(total_elements);
  const uint32_t block_size = block_size_bound;
  const uint32_t unroll = curand4_engine_calls;
  dim3 dim_block(block_size);
  dim3 grid((numel + block_size - 1) / block_size);
  uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
  // never launch more blocks than the device can keep resident at once
  grid.x = ::min(
      static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
      grid.x);
  //number of times random will be generated per thread, to offset philox counter in thc random state
  uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
                            * curand4_engine_calls;
  return std::make_tuple(counter_offset, grid, dim_block);
}
// grid stride loop kernel for distributions
// Grid-stride kernel: each thread owns an independent philox subsequence and
// draws `unroll_factor` randoms per dist_func call, applying transform_func
// to each in-bounds element. The loop bound is rounded up so every thread
// executes the same number of iterations, keeping the __syncthreads() below
// safe (no divergent exit).
template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
__global__ void distribution_elementwise_grid_stride_kernel(int numel,
                                                            std::pair<uint64_t, uint64_t> seeds,
                                                            const dist_t dist_func,
                                                            const transform_t transform_func) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  hiprandStatePhilox4_32_10_t state;
  // subsequence = global thread id => independent stream per thread
  hiprand_init(
      seeds.first,
      idx,
      seeds.second,
      &state);
  // round numel up to a whole number of (grid * unroll) strides so the trip
  // count is uniform across all threads
  int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
      blockDim.x * gridDim.x * unroll_factor;
  for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
    auto rand = dist_func(&state);
    #pragma unroll
    for (int ii = 0; ii < unroll_factor; ii++) {
      int li = linear_index + blockDim.x * gridDim.x * ii;
      if (li < numel) {
        // (&rand.x)[ii] picks the ii-th lane of the vector random (e.g. float4)
        transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
      }
    }
    __syncthreads();
  }
}
/**
* distribution_nullary_kernel is analogous to gpu_kernel in
* ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses
* TensorIterator to launch a kernel. However, the differences are
* - it launches a grid-stride loop based kernel. The kernel is not
* generic like elementwise_kernel in Loops.cuh and is specialized
* for the distribution kernels here.
* - For big size tensors, we can launch multiple kernels recursively
* (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
* offset calculation is done in this function.
*
* FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
* to have grid-stride loop kernel and then use that to launch our distribution
* kernels? Note that we need a grid-stride loop kernel because, we found by testing
* that it achieves peak effective bandwidth.
*/
// Host launcher for the grid-stride distribution kernel (see the block
// comment above). Acquires the generator lock to reserve a philox offset,
// splits tensors that need 64-bit indexing into 32-bit sub-iterators, and
// dispatches either a fast trivial-1d path or an offset-calculator path.
template<typename scalar_t,
         typename accscalar_t,
         int unroll_factor,
         typename dist_t,
         typename transform_t>
void distribution_nullary_kernel(at::TensorIterator& iter,
                                 at::CUDAGenerator* gen,
                                 const dist_t& dist_func,
                                 const transform_t transform_func) {
  static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
  int64_t numel = iter.numel();
  if (numel == 0) {
    return;
  }
  auto execution_policy = calc_execution_policy(numel);
  auto counter_offset = std::get<0>(execution_policy);
  auto grid = std::get<1>(execution_policy);
  auto block = std::get<2>(execution_policy);
  std::pair<uint64_t, uint64_t> rng_engine_inputs;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
  }
  if (!iter.can_use_32bit_indexing()) {
    // recurse on 32-bit-indexable sub-iterators; each reserves its own offset
    for (auto& sub_iter : iter.with_32bit_indexing()) {
      distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
        gen, dist_func, transform_func);
    }
    return;
  }
  char* out_data = (char*)iter.data_ptr(0);
  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  if (iter.is_trivial_1d()) {
    // contiguous-ish fast path: a single constant stride
    auto strides = iter.get_inner_strides();
    int stride0 = strides[0];
   hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream,
      numel,
      rng_engine_inputs,
      dist_func,
      [=]__device__(int idx, accscalar_t rand) {
        scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
        *out = transform_func(rand);
      }
    );
  } else {
    // general path: per-element byte offsets via an offset calculator
    auto offset_calc = at::native::legacy::make_offset_calculator<1>(iter);
   hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream,
      numel,
      rng_engine_inputs,
      dist_func,
      [=]__device__(int idx, accscalar_t rand) {
        auto offsets = offset_calc.get(idx);
        scalar_t* out = (scalar_t*)&out_data[offsets[0]];
        *out = transform_func(rand);
      }
    );
  }
  AT_CUDA_CHECK(hipGetLastError());
}
// Fills 'ret' with Poisson samples, one per element, using the matching
// element of 'lambda' as the rate. Each thread seeds its own philox state
// from its global thread id.
template <typename scalar_t>
void poisson_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& lambda,
    std::pair<uint64_t, uint64_t> seeds) {
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      lambda,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& lambda) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda));
      });
}
// Fills 'ret' with Gamma(alpha, 1) samples using the shared sample_gamma
// routine (see ATen/native/Distributions.h), driven by per-thread philox
// uniform and normal draws. The result is clamped up to the smallest
// positive normal value so downstream log() never sees zero.
template <typename scalar_t>
void gamma_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& alpha,
    std::pair<uint64_t, uint64_t> seeds) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      alpha,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& alpha) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // adapters exposing the philox state as uniform/normal samplers
        auto uniform_lambda = [&state] __device__ () {
          return hiprand_uniform(&state);
        };
        BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
        auto normal_lambda = [&state] __device__ () {
          return hiprand_normal(&state);
        };
        BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
        auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
        // clamp away exact zeros
        auto min_value = std::numeric_limits<scalar_t>::min();
        ret_val = (min_value > sample) ? min_value : sample;
      });
}
// Computes d(sample)/d(alpha) for standard-gamma samples element-wise:
// ret[i] = standard_gamma_grad_one(self[i], output[i]).
template <typename scalar_t>
void gamma_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& self,
    const at::Tensor& output) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      ret, self, output,
      [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
        ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
      });
}
// Computes the Dirichlet reparameterization gradient element-wise:
// ret[i] = dirichlet_grad_one(x[i], alpha[i], total[i]).
template <typename scalar_t>
void dirichlet_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& x,
    const at::Tensor& alpha,
    const at::Tensor& total) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
      ret, x, alpha, total,
      [] __device__ (scalar_t& ret_val, const scalar_t& x_val, const scalar_t& alpha_val, const scalar_t& total_val) {
        ret_val = dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
      });
}
// Fills 'ret' with Bernoulli samples using the matching element of 'p' as the
// probability. Processes four elements per thread so a single hiprand_uniform4
// call supplies all four draws; probabilities outside [0,1] trap via
// CUDA_KERNEL_ASSERT.
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
    at::Tensor& ret, const at::Tensor& p,
    std::pair<uint64_t, uint64_t> seeds) {
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
      ret, p,
      [seeds] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
          const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // See Note [Register spilling in hiprand call for CUDA < 10]
        float4 rand = hiprand_uniform4(&state);
        // n = how many of the four slots are valid; cases fall through so one
        // rand4 covers all of them
        switch (n) {
          case 4: {
            CUDA_KERNEL_ASSERT(0 <= p4 && p4 <= 1);
            v4 = static_cast<scalar_t>(rand.w <= p4);
            // fallthrough
          }
          case 3: {
            CUDA_KERNEL_ASSERT(0 <= p3 && p3 <= 1);
            v3 = static_cast<scalar_t>(rand.z <= p3);
            // fallthrough
          }
          case 2: {
            CUDA_KERNEL_ASSERT(0 <= p2 && p2 <= 1);
            v2 = static_cast<scalar_t>(rand.y <= p2);
            // fallthrough
          }
          case 1: {
            CUDA_KERNEL_ASSERT(0 <= p1 && p1 <= 1);
            v1 = static_cast<scalar_t>(rand.x <= p1);
          }
        }
      }
    );
}
// Normalizes gamma samples into Dirichlet samples: ret = gamma / sum(gamma)
// along the last dimension, with the result clamped into the open interval
// (min, 1 - eps) so log/logit of the sample stays finite.
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& gamma) {
  // sum over the last dim, broadcast back to the full shape
  auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes());
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum,
  [] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) {
    ret_val = gamma / gamma_sum;
    auto min_value = std::numeric_limits<scalar_t>::min();
    auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
    ret_val = (min_value > ret_val) ? min_value : ret_val;
    ret_val = (max_value < ret_val) ? max_value : ret_val;
  });
}
} // namespace
namespace at { namespace native {
// Samples a Poisson tensor with per-element rates 'lambda'.
// Reserves a philox offset of 20 per thread (presumably an upper bound on
// the curand calls one hiprand_poisson draw can make — TODO confirm).
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  std::pair<uint64_t, uint64_t> rng_engine_inputs;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    rng_engine_inputs = gen->philox_engine_inputs(20);
  }
  Tensor ret = at::empty(lambda.sizes(), lambda.options());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] {
    poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs);
  });
  return ret;
}
// Samples a tensor of standard-gamma variates with concentration `alpha`.
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  std::pair<uint64_t, uint64_t> philox_args;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    philox_args = gen->philox_engine_inputs(10);
  }
  auto out = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, out.scalar_type(), "gamma_cuda", [&] {
    gamma_cuda_kernel<scalar_t>(out, alpha, philox_args);
  });
  return out;
}
// Samples Dirichlet variates by drawing gammas with concentration `alpha`
// and normalizing them over the last dimension.
Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  std::pair<uint64_t, uint64_t> philox_args;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    philox_args = gen->philox_engine_inputs(10);
  }
  auto out = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, out.scalar_type(), "dirichlet", [&] {
    // Intermediate gamma draws, normalized in a second pass.
    Tensor gamma = at::empty(alpha.sizes(), alpha.options());
    gamma_cuda_kernel<scalar_t>(gamma, alpha, philox_args);
    dirichlet_scalar_cuda_kernel<scalar_t>(out, gamma);
  });
  return out;
}
// Gradient of the standard-gamma sampler w.r.t. its concentration parameter.
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
  auto grad = at::empty(self.sizes(), self.options());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "_standard_gamma_grad_cuda", [&] {
    gamma_grad_cuda_kernel<scalar_t>(grad, self, output);
  });
  return grad;
}
// Gradient of the Dirichlet sampler; float/double only.
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
  auto grad = at::empty(x.sizes(), x.options());
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
    dirichlet_grad_cuda_kernel<scalar_t>(grad, x, alpha, total);
  });
  return grad;
}
// In-place elementwise Bernoulli: self[i] ~ Bernoulli(p_[i]).
// `p_` is moved to CUDA if needed and expanded to self's shape.
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen_) {
  NoNamesGuard guard;
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  std::pair<uint64_t, uint64_t> rng_engine_inputs;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    rng_engine_inputs = gen->philox_engine_inputs(10);
  }
  auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
  // Nested dispatch: the outer switch picks the output (self) dtype, the inner
  // one picks the probability dtype; both become kernel template parameters.
  AT_DISPATCH_ALL_TYPES_AND3(
    at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
      using self_t = scalar_t;
      AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] {
        using p_t = scalar_t;
        return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, rng_engine_inputs);
      });
   });
  return self;
}
// Fills `iter`'s output with samples from U[from_, to_).
// Doubles take the uniform2_double path (two randoms per engine call);
// all other floating dtypes take the uniform4 path (four per call).
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_cuda", [&] {
    auto from = static_cast<scalar_t>(from_);
    auto to = static_cast<scalar_t>(to_);
    TORCH_CHECK(from <= to,
      "uniform_ expects to return a [from, to) range, but found from=", from,
      " > to=", to);
    // The width of the interval must be representable in the output dtype.
    TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
      "uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
      ">::max(), but found to=", to, " and from=", from,
      " which result in to-from to exceed the limit");
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto range = static_cast<accscalar_t>(to-from);
    from = static_cast<accscalar_t>(from);
    // define lambda to reverse bounds, multiply 'range' and add 'from_'
    auto uniform_func = [range, from] __device__ (accscalar_t rand) {
      // reverse the bounds of hiprand4 from (0, 1] to [0, 1)
      // Note that this method is from legacy THCTensorRandom and is likely to give
      // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and
      // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
      auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
      return static_cast<scalar_t>(reverse_bound_rand * range + from);
    };
    if (std::is_same<scalar_t, double>::value) {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        uniform_func);
    } else {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        uniform_func);
    }
  });
}
// Fills `iter`'s output with integers in [base, base + range), reduced by
// modulo (`rand % range`). 64-bit dtypes (double, int64) assemble a 64-bit
// random from two 32-bit draws; all other dtypes use a single 32-bit draw.
void random_kernel_cuda(TensorIterator& iter, uint64_t range, int64_t base, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_cuda", [&] {
    if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
      // define lambda to mod with range and add base
      auto random_func = [range, base] __device__ (uint64_t rand) {
        return static_cast<int64_t>(rand % range + base);
      };
      distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) -> ulonglong2 {
          // Pack four 32-bit randoms into two 64-bit values.
          ulonglong2 ret;
          uint4 rand_val = hiprand4(state);
          ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
          ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
          return ret;
        },
        random_func);
    } else {
      auto random_func = [range, base] __device__ (uint32_t rand) {
        return static_cast<int32_t>(rand % static_cast<uint32_t>(range) + static_cast<int32_t>(base));
      };
      distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) {
          return hiprand4(state);
        },
        random_func);
    }
  });
}
// Fills `iter`'s output with N(mean_, std_) samples via the affine transform
// std * z + mean of standard-normal draws.
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto mean = static_cast<accscalar_t>(mean_);
    auto std = static_cast<accscalar_t>(std_);
    // define lambda to multiply std and add mean
    auto normal_func = [mean, std] __device__ (accscalar_t rand) {
      return static_cast<scalar_t>(rand * std + mean);
    };
    if (std::is_same<scalar_t, double>::value) {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal2_double(state); },
        normal_func);
    } else {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal4(state); },
        normal_func);
    }
  });
}
// Fills `iter`'s output with Cauchy(median_, sigma_) samples via the inverse
// CDF: median + sigma * tan(pi * (u - 0.5)) for u ~ U(0, 1].
void cauchy_kernel(TensorIterator& iter, double median_, double sigma_, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto median = static_cast<accscalar_t>(median_);
    auto sigma = static_cast<accscalar_t>(sigma_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for cauchy transformation
      auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(median + sigma *
                ::tan(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        cauchy_func);
    } else {
      // use __tanf fast approximation for peak bandwidth
      auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(median + sigma *
                __tanf(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        cauchy_func);
    }
  });
}
// Fills `iter`'s output with Exponential(lambda_) samples via inverse CDF:
// -log(u) / lambda for u ~ U(0, 1]. lambda == 0 yields all zeros.
void exponential_kernel(TensorIterator& iter, double lambda_, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  // Note that HIP doesn't support std::nextafter in device code.
  auto nextafter_1_0_float = std::nextafter(1.0f, 0.0f);
  auto nextafter_1_0_double = std::nextafter(1.0, 0.0);
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto lambda = static_cast<accscalar_t>(lambda_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for exponential transformation
      auto exponential_func = [lambda, nextafter_1_0_double] __device__ (accscalar_t rand) {
        if (lambda == static_cast<accscalar_t>(0.0)) {
          return static_cast<scalar_t>(0.0);
        }
        accscalar_t sample;
        // hiprand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
        // Hence, squash the 1 to just below 1.
        if(rand == static_cast<accscalar_t>(1.0)) {
          sample = ::log(nextafter_1_0_double);
        } else {
          sample = ::log(rand);
        }
        return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        exponential_func);
    } else {
      // use __logf fast approximation for peak bandwidth
      auto exponential_func = [lambda, nextafter_1_0_float] __device__ (accscalar_t rand) {
        if (lambda == static_cast<accscalar_t>(0.0)) {
          return static_cast<scalar_t>(0.0);
        }
        accscalar_t sample;
        if(rand == static_cast<accscalar_t>(1.0)) {
          sample = __logf(nextafter_1_0_float);
        } else {
          sample = __logf(rand);
        }
        return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        exponential_func);
    }
  });
}
// Fills `iter`'s output with Geometric(p_) samples via inverse CDF:
// ceil(log(u) / log(1 - p)) for u ~ U(0, 1].
void geometric_kernel_cuda(TensorIterator& iter, double p_, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cuda", [&] {
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for geometric transformation
      auto geometric_func = [p_] __device__ (double rand) {
        return static_cast<scalar_t>(::ceil(::log(rand) / ::log(static_cast<double>(1.0)-p_)));
      };
      distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        geometric_func);
    } else {
      // Non-double dtypes compute the transform in float.
      auto p = static_cast<float>(p_);
      auto geometric_func = [p] __device__ (float rand) {
        // use __logf fast approximation for peak bandwidth
        return static_cast<scalar_t>(::ceil(__logf(rand) / __logf(static_cast<float>(1.0)-p)));
      };
      distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        geometric_func);
    }
  });
}
// Fills `iter`'s output with LogNormal(mean_, std_) samples:
// exp(std * z + mean) for standard-normal z.
void log_normal_kernel(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto mean = static_cast<accscalar_t>(mean_);
    auto std = static_cast<accscalar_t>(std_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for log_normal transformation
      auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(::exp(rand * std + mean));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal2_double(state); },
        log_normal_func);
    } else {
      auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
        // use __expf fast approximation for peak bandwidth
        return static_cast<scalar_t>(__expf(rand * std + mean));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal4(state); },
        log_normal_func);
    }
  });
}
// Fills `iter`'s output with Bernoulli(p_) samples: 1 when the uniform draw
// is <= p_, else 0, cast to the output dtype.
void bernoulli_scalar_cuda_kernel(TensorIterator& iter, double p_, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  AT_DISPATCH_ALL_TYPES_AND3(
    at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
      if (std::is_same<scalar_t, double>::value) {
        // define lambda for bernoulli transformation
        auto bernoulli_func = [p_] __device__ (double rand) {
          return static_cast<scalar_t>(rand <= p_);
        };
        distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
          gen,
          [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
          bernoulli_func);
      } else {
        // Non-double dtypes compare in float.
        auto p = static_cast<float>(p_);
        auto bernoulli_func = [p] __device__ (float rand) {
          return static_cast<scalar_t>(rand <= p);
        };
        distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
          gen,
          [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
          bernoulli_func);
      }
    });
}
// In-place fill of `self` with U[from, to) samples.
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
  auto it = TensorIterator::nullary_op(self);
  uniform_kernel_cuda(it, from, to, gen);
  return self;
}
// In-place fill with uniform integers spanning the dtype's representable
// range: [0, 2^mantissa_digits] for floating dtypes (every value exactly
// representable), [0, max + 1) for integral dtypes.
Tensor& random_cuda_(Tensor& self, Generator* gen) {
  auto iter = TensorIterator::nullary_op(self);
  uint64_t range;
  auto iter_scalar_type = iter.dtype();
  if (isFloatingType(iter_scalar_type)) {
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter_scalar_type, "random_cuda_range_calc", [&] {
      // digits == mantissa bits; +1 makes 2^digits itself reachable.
      range = static_cast<uint64_t>((1ULL << std::numeric_limits<scalar_t>::digits) + 1);
    });
  } else {
    AT_DISPATCH_INTEGRAL_TYPES(iter_scalar_type, "random_cuda_range_calc", [&] {
      range = static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1;
    });
  }
  random_kernel_cuda(iter, range, 0, gen);
  return self;
}
// In-place fill with uniform integers in [from, to).
Tensor& clamped_random_cuda_(Tensor& self, int64_t from, int64_t to, Generator* gen) {
  TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
  auto it = TensorIterator::nullary_op(self);
  const uint64_t span = to - from;
  random_kernel_cuda(it, span, from, gen);
  return self;
}
// In-place fill with uniform integers in [0, to).
Tensor& capped_random_cuda_(Tensor& self, int64_t to, Generator* gen) {
  return clamped_random_cuda_(self, /*from=*/0, to, gen);
}
// In-place fill with N(mean, std) samples; std must be strictly positive.
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
  TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
  auto it = TensorIterator::nullary_op(self);
  normal_kernel_cuda(it, mean, std, gen);
  return self;
}
// out-variant with tensor mean and scalar std: sample N(0, std) into
// `output`, then shift elementwise by `mean`.
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
  normal_cuda_(output, /*mean=*/0, std, gen);
  output.add_(mean);
  return output;
}
// out-variant with scalar mean and tensor std: output = std * z + mean,
// z ~ N(0, 1) sampled in place.
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
  normal_cuda_(output, 0, 1, gen);
  auto mean_tensor = at::full({}, mean, output.options());
  // NB: addcmul_out copies the tensor to be added into the output.
  // Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
  // The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
  // The third argument is not a constant reference and hence the samples in output are overwritten.
  // Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
  output.mul_(std).add_(mean_tensor);
  return output;
}
// out-variant with tensor mean and tensor std: output = std * z + mean.
// When mean and std are broadcastable, output takes their broadcast shape;
// a deprecated fallback reshapes std to mean's shape when they merely have
// equal element counts.
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
  bool expandable = are_expandable(mean.sizes(), std.sizes());
  bool empty_output = output.numel() == 0;
  if (expandable) {
    auto shape = at::infer_size(mean.sizes(), std.sizes());
    TORCH_CHECK(
        empty_output || output.sizes().equals(shape),
        "inconsistent tensor, output size (", output.sizes(), ") is not the same as broadcasted mean and std size (", shape, ")");
    if (empty_output) {
      at::native::resize_(output, shape);
    }
  }
  else {
    TORCH_CHECK(
        mean.numel() == std.numel(),
        "inconsistent tensor, std and mean are not broadcastable and have different number of elements, "
        "expected mean ", mean.sizes(), " and std ", std.sizes(), " to have same number of elements)");
    TORCH_CHECK(
        empty_output || output.sizes().equals(mean.sizes()),
        "inconsistent tensor, std and mean are not broadcastable, output size (", output.sizes(), ") is not the same as mean size (", mean.sizes(), ")");
    TORCH_WARN_ONCE(
        "std and mean have the same number of elements, but are not broadcastable. This was previously a "
        "supported mode of operation, but is now deprecated and the support will be removed in a later release. "
        "Note that the current implementation reshapes std to the shape of mean, which may be incur data copies. "
        "Please ensure that std and mean are broadcastable to avoid these issues.");
    if (empty_output) {
      at::native::resize_(output, mean.sizes());
    }
  }
  // Sample z ~ N(0, 1) in place, then apply the affine transform below.
  normal_cuda_(output, 0, 1, gen);
  // NB: addcmul_out copies the tensor to be added into the output.
  // Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
  // The previous function here was addcmul_out(output, mean, output, std, 1);
  // The third argument is not a constant reference and hence the samples in output are overwritten.
  // Consequently, the computation performed is mean + mean * std instead of mean + output * std
  if (!expandable) {
    output.mul_(std.reshape(mean.sizes())).add_(mean);
  }
  else {
    output.mul_(std).add_(mean);
  }
  return output;
}
// Out-of-place normal_ variants: allocate a result tensor and delegate to
// the matching normal_out_cuda overload.
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
  auto result = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  normal_out_cuda(result, mean, std, gen);
  return result;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
  auto result = at::empty_like(std, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  normal_out_cuda(result, mean, std, gen);
  return result;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
  // Start empty; the out-variant resizes to the broadcast shape.
  auto result = at::empty({0}, mean.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  normal_out_cuda(result, mean, std, gen);
  return result;
}
// In-place Bernoulli(p) fill with a scalar probability in [0, 1].
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
  TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  auto it = TensorIterator::nullary_op(self);
  bernoulli_scalar_cuda_kernel(it, p, gen);
  return self;
}
// Register the CUDA implementations with the device-generic dispatch stubs.
REGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);
REGISTER_DISPATCH(exponential_stub, &exponential_kernel);
REGISTER_DISPATCH(geometric_stub, &geometric_kernel_cuda);
REGISTER_DISPATCH(log_normal_stub, &log_normal_kernel);
}} // namespace at::native
| 949a13a730d83c9c2bec5d11086d40133c770623.cu | #include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THC/THCGeneral.h>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in curand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
* in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using curand distributions that utilize curand4 call. curand4 call doesn't have the
* register spilling problem.
*/
namespace {
// launch bounds used for kernels utilizing TensorIterator
const uint32_t block_size_bound = 256;
const uint32_t grid_size_bound = 4;
// number of randoms given by distributions like curand_uniform4, curand_uniform2_double
// used in calculating philox offset.
const uint32_t curand4_engine_calls = 4;
// utility function that calculates proper philox_offset
// for distributions utilizing TensorIterator. For distributions using
// TensorIterator, we are using a grid-stride loop with each
// thread yielding one element per thread. For the edge of the grid-stride
// loop, if the tensor size is large, the unroll loop will kick in and the float4
// from curand4 will start getting utilized (for common tensor sizes, we end up
// using rand.x from each thread). Hence, the philox_offset is
// (number of elements per thread * number of engine calls), which makes
// sure that philox offset increment is not less than the number of randoms used
// in each thread.
// Returns (philox counter offset, grid, block) for a distribution launch.
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
  const uint64_t numel = static_cast<uint64_t>(total_elements);
  const uint32_t block_size = block_size_bound;
  const uint32_t unroll = curand4_engine_calls;
  dim3 dim_block(block_size);
  dim3 grid((numel + block_size - 1) / block_size);  // ceil-div
  // Cap the grid at the number of resident blocks so the kernel's grid-stride
  // loop covers any remainder.
  uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
  grid.x = std::min(
      static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
      grid.x);
  //number of times random will be generated per thread, to offset philox counter in thc random state
  uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
                            * curand4_engine_calls;
  return std::make_tuple(counter_offset, grid, dim_block);
}
// grid stride loop kernel for distributions
// Grid-stride kernel: each iteration draws one curand vector via dist_func
// (yielding unroll_factor values) and feeds up to unroll_factor elements to
// transform_func (which also writes the output).
template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
__global__ void distribution_elementwise_grid_stride_kernel(int numel,
                                                            std::pair<uint64_t, uint64_t> seeds,
                                                            const dist_t dist_func,
                                                            const transform_t transform_func) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  curandStatePhilox4_32_10_t state;
  curand_init(
      seeds.first,
      idx,
      seeds.second,
      &state);
  // Round numel up to a whole number of (grid * unroll) strides so every
  // thread runs the same number of iterations (keeps RNG state advancement
  // uniform and makes the barrier below non-divergent).
  int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
      blockDim.x * gridDim.x * unroll_factor;
  for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
    auto rand = dist_func(&state);
    #pragma unroll
    for (int ii = 0; ii < unroll_factor; ii++) {
      int li = linear_index + blockDim.x * gridDim.x * ii;
      if (li < numel) {
        // (&rand.x)[ii] picks the ii-th lane of the curand vector result.
        transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
      }
    }
    __syncthreads();
  }
}
/**
* distribution_nullary_kernel is analogous to gpu_kernel in
* ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses
* TensorIterator to launch a kernel. However, the differences are
* - it launches a grid-stride loop based kernel. The kernel is not
* generic like elementwise_kernel in Loops.cuh and is specialized
* for the distribution kernels here.
* - For big size tensors, we can launch multiple kernels recursively
* (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
* offset calculation is done in this function.
*
* FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
* to have grid-stride loop kernel and then use that to launch our distribution
* kernels? Note that we need a grid-stride loop kernel because, we found by testing
* that it achieves peak effective bandwidth.
*/
// Launches distribution_elementwise_grid_stride_kernel over `iter`'s output.
// dist_func draws raw randoms from a Philox state; transform_func maps each
// raw value to the output dtype. See the block comment above for rationale.
template<typename scalar_t,
         typename accscalar_t,
         int unroll_factor,
         typename dist_t,
         typename transform_t>
void distribution_nullary_kernel(at::TensorIterator& iter,
                                 at::CUDAGenerator* gen,
                                 const dist_t& dist_func,
                                 const transform_t transform_func) {
  static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
  int64_t numel = iter.numel();
  if (numel == 0) {
    return;
  }
  auto execution_policy = calc_execution_policy(numel);
  auto counter_offset = std::get<0>(execution_policy);
  auto grid = std::get<1>(execution_policy);
  auto block = std::get<2>(execution_policy);
  std::pair<uint64_t, uint64_t> rng_engine_inputs;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
  }
  // Tensors too large for 32-bit indexing are split and processed recursively.
  if (!iter.can_use_32bit_indexing()) {
    for (auto& sub_iter : iter.with_32bit_indexing()) {
      distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
          gen, dist_func, transform_func);
    }
    return;
  }
  char* out_data = (char*)iter.data_ptr(0);
  auto stream = at::cuda::getCurrentCUDAStream();
  if (iter.is_trivial_1d()) {
    // Contiguous-ish fast path: single fixed byte stride.
    auto strides = iter.get_inner_strides();
    int stride0 = strides[0];
    distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
        numel,
        rng_engine_inputs,
        dist_func,
        [=]__device__(int idx, accscalar_t rand) {
          scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
          *out = transform_func(rand);
        }
    );
  } else {
    // General path: per-element byte offsets computed from iter's strides.
    auto offset_calc = at::native::legacy::make_offset_calculator<1>(iter);
    distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
        numel,
        rng_engine_inputs,
        dist_func,
        [=]__device__(int idx, accscalar_t rand) {
          auto offsets = offset_calc.get(idx);
          scalar_t* out = (scalar_t*)&out_data[offsets[0]];
          *out = transform_func(rand);
        }
    );
  }
  // Surface launch-configuration errors (kernel launches don't return them).
  AT_CUDA_CHECK(cudaGetLastError());
}
// Elementwise Poisson sampling: ret[i] ~ Poisson(lambda[i]).
// Each thread seeds its own Philox state with (seed, thread-id, offset).
template <typename scalar_t>
void poisson_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& lambda,
    std::pair<uint64_t, uint64_t> seeds) {
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      lambda,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& lambda) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda));
      });
}
// Elementwise standard-gamma sampling: ret[i] ~ Gamma(alpha[i], 1), drawn by
// sample_gamma using per-thread uniform and normal samplers, then clamped
// away from zero.
template <typename scalar_t>
void gamma_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& alpha,
    std::pair<uint64_t, uint64_t> seeds) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      alpha,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& alpha) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // Wrap the raw curand draws so sample_gamma can pull u ~ U(0,1]
        // and z ~ N(0,1) on demand.
        auto uniform_lambda = [&state] __device__ () {
          return curand_uniform(&state);
        };
        BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
        auto normal_lambda = [&state] __device__ () {
          return curand_normal(&state);
        };
        BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
        auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
        // Clamp to the smallest positive normal so log(sample) stays finite.
        auto min_value = std::numeric_limits<scalar_t>::min();
        ret_val = (min_value > sample) ? min_value : sample;
      });
}
// Elementwise gradient of the standard-gamma sampler w.r.t. concentration:
// ret[i] = d(sample)/d(alpha) evaluated at (self[i], output[i]).
template <typename scalar_t>
void gamma_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& self,
    const at::Tensor& output) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      ret, self, output,
      [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
        ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
      });
}
// Elementwise gradient of the Dirichlet sampler:
// ret[i] = dirichlet_grad_one(x[i], alpha[i], total[i]).
template <typename scalar_t>
void dirichlet_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& x,
    const at::Tensor& alpha,
    const at::Tensor& total) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
      ret, x, alpha, total,
      [] __device__ (scalar_t& ret_val, const scalar_t& x_val, const scalar_t& alpha_val, const scalar_t& total_val) {
        ret_val = dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
      });
}
// Elementwise Bernoulli sampling with per-element probabilities:
// ret[i] = 1 if u <= p[i] else 0. One curand_uniform4 call supplies the four
// uniforms consumed per apply step.
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
    at::Tensor& ret, const at::Tensor& p,
    std::pair<uint64_t, uint64_t> seeds) {
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
      ret, p,
      [seeds] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
          const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // See Note [Register spilling in curand call for CUDA < 10]
        float4 rand = curand_uniform4(&state);
        // `n` is how many of the four slots are valid; the switch deliberately
        // falls through so cases 4..1 handle exactly the first n elements.
        switch (n) {
          case 4: {
            CUDA_KERNEL_ASSERT(0 <= p4 && p4 <= 1);
            v4 = static_cast<scalar_t>(rand.w <= p4);
            // fallthrough
          }
          case 3: {
            CUDA_KERNEL_ASSERT(0 <= p3 && p3 <= 1);
            v3 = static_cast<scalar_t>(rand.z <= p3);
            // fallthrough
          }
          case 2: {
            CUDA_KERNEL_ASSERT(0 <= p2 && p2 <= 1);
            v2 = static_cast<scalar_t>(rand.y <= p2);
            // fallthrough
          }
          case 1: {
            CUDA_KERNEL_ASSERT(0 <= p1 && p1 <= 1);
            v1 = static_cast<scalar_t>(rand.x <= p1);
          }
        }
      }
  );
}
// Normalizes per-element gamma samples into Dirichlet samples:
// ret = gamma / gamma.sum(-1, keepdim), clamped into the open interval (0, 1).
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& gamma) {
  // Sum over the event (last) dimension and broadcast back to ret's shape.
  auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes());
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum,
      [] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) {
        ret_val = gamma / gamma_sum;
        // Clamp: smallest positive normal below, 1 - epsilon above.
        auto min_value = std::numeric_limits<scalar_t>::min();
        auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
        ret_val = (min_value > ret_val) ? min_value : ret_val;
        ret_val = (max_value < ret_val) ? max_value : ret_val;
      });
}
} // namespace
namespace at { namespace native {
// Samples a tensor of Poisson variates, elementwise rates given by `lambda`.
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  std::pair<uint64_t, uint64_t> philox_args;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    philox_args = gen->philox_engine_inputs(20);
  }
  auto out = at::empty(lambda.sizes(), lambda.options());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, out.scalar_type(), "poisson_cuda", [&] {
    poisson_cuda_kernel<scalar_t>(out, lambda, philox_args);
  });
  return out;
}
// Samples a tensor of standard-gamma variates with concentration `alpha`.
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen_) {
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  std::pair<uint64_t, uint64_t> philox_args;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    philox_args = gen->philox_engine_inputs(10);
  }
  auto out = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, out.scalar_type(), "gamma_cuda", [&] {
    gamma_cuda_kernel<scalar_t>(out, alpha, philox_args);
  });
  return out;
}
// Samples a Dirichlet-distributed tensor from concentration tensor `alpha` on
// CUDA, via the standard Gamma-then-normalize construction: draw Gamma(alpha)
// per element, then divide by the sum over the last dimension.
Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] {
// Intermediate Gamma draws; normalized into `ret` by the second kernel.
Tensor gamma = at::empty(alpha.sizes(), alpha.options());
gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs);
dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma);
});
return ret;
}
// Computes the gradient of standard Gamma samples (`output`) w.r.t. the shape
// parameter (`self`), elementwise, for the reparameterized Gamma sampler.
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
Tensor ret = at::empty(self.sizes(), self.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "_standard_gamma_grad_cuda", [&] {
gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
});
return ret;
}
// Computes the gradient of Dirichlet samples `x` w.r.t. the concentration
// `alpha` (with `total` = alpha.sum), elementwise, on CUDA.
// Note: unlike the other entry points here, only float/double are dispatched.
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
Tensor ret = at::empty(x.sizes(), x.options());
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
dirichlet_grad_cuda_kernel<scalar_t>(ret, x, alpha, total);
});
return ret;
}
// In-place Bernoulli sampling where the success probability is itself a tensor:
// self[i] ~ Bernoulli(p_[i]). `p_` is moved to CUDA and expanded to self's shape.
// The nested dispatch covers every (self dtype) x (floating p dtype) combination.
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen_) {
NoNamesGuard guard;
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
// Broadcast p_ against self; expand_inplace returns the expanded view.
auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
using self_t = scalar_t;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] {
// Inner dispatch shadows scalar_t with the probability dtype.
using p_t = scalar_t;
return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, rng_engine_inputs);
});
});
return self;
}
// Fills the iterator's output with samples from the half-open interval
// [from_, to_). curand produces values in (0, 1]; the lambda below flips the
// bounds to [0, 1) before scaling, so `to_` itself is never returned.
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator())
;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_cuda", [&] {
auto from = static_cast<scalar_t>(from_);
auto to = static_cast<scalar_t>(to_);
TORCH_CHECK(from <= to,
"uniform_ expects to return a [from, to) range, but found from=", from,
" > to=", to);
// Guard against (to - from) overflowing the target dtype.
TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
"uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
">::max(), but found to=", to, " and from=", from,
" which result in to-from to exceed the limit");
using accscalar_t = at::acc_type<scalar_t, true>;
auto range = static_cast<accscalar_t>(to-from);
from = static_cast<accscalar_t>(from);
// define lambda to reverse bounds, multiply 'range' and add 'from_'
auto uniform_func = [range, from] __device__ (accscalar_t rand) {
// reverse the bounds of curand4 from (0, 1] to [0, 1)
// Note that this method is from legacy THCTensorRandom and is likely to give
// you more 0-s, since, the probability of gettings 1-s is higher than 0-s and
// by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
return static_cast<scalar_t>(reverse_bound_rand * range + from);
};
// double consumes two 32-bit draws per sample, so halve the engine-call count.
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
uniform_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
uniform_func);
}
});
}
// Fills the iterator's output with integers in [base, base + range) by reducing
// raw Philox draws modulo `range`. For 64-bit-wide result types (double, int64)
// two 32-bit draws are packed into one 64-bit value per sample.
// NOTE(review): `rand % range` introduces modulo bias when `range` is not a
// power of two; this matches the historical behavior of random_.
void random_kernel_cuda(TensorIterator& iter, uint64_t range, int64_t base, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_cuda", [&] {
if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
// define lambda to mod with range and add base
auto random_func = [range, base] __device__ (uint64_t rand) {
return static_cast<int64_t>(rand % range + base);
};
distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
// Combine two 32-bit draws into each 64-bit output word.
ulonglong2 ret;
uint4 rand_val = curand4(state);
ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
return ret;
},
random_func);
} else {
// 32-bit-or-narrower result types consume one 32-bit draw per sample.
auto random_func = [range, base] __device__ (uint32_t rand) {
return static_cast<int32_t>(rand % static_cast<uint32_t>(range) + static_cast<int32_t>(base));
};
distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand4(state);
},
random_func);
}
});
}
// Fills the iterator's output with Normal(mean_, std_) samples by scaling and
// shifting standard-normal curand draws: out = rand * std + mean.
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
// define lambda to multiply std and add mean
auto normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(rand * std + mean);
};
// double consumes two 32-bit draws per sample, so halve the engine-call count.
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
normal_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
normal_func);
}
});
}
// Fills the iterator's output with Cauchy(median_, sigma_) samples via inverse
// transform sampling: median + sigma * tan(pi * (u - 0.5)) for uniform u.
void cauchy_kernel(TensorIterator& iter, double median_, double sigma_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto median = static_cast<accscalar_t>(median_);
auto sigma = static_cast<accscalar_t>(sigma_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for cauchy transformation
auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(median + sigma *
::tan(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
cauchy_func);
} else {
// use __tanf fast approximation for peak bandwidth
auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(median + sigma *
__tanf(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
cauchy_func);
}
});
}
// Fills the iterator's output with Exponential(lambda_) samples via inverse
// transform: -log(u) / lambda for uniform u in (0, 1].
// Since log(1) == 0 and the exponential distribution excludes 0, a draw of
// exactly 1 is squashed to nextafter(1, 0) before taking the log.
void exponential_kernel(TensorIterator& iter, double lambda_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
// Note that HIP doesn't support std::nextafter in device code.
// (Computed on the host once, then captured by the device lambdas.)
auto nextafter_1_0_float = std::nextafter(1.0f, 0.0f);
auto nextafter_1_0_double = std::nextafter(1.0, 0.0);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto lambda = static_cast<accscalar_t>(lambda_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for exponential transformation
auto exponential_func = [lambda, nextafter_1_0_double] __device__ (accscalar_t rand) {
// lambda == 0 degenerates to a constant-zero distribution.
if (lambda == static_cast<accscalar_t>(0.0)) {
return static_cast<scalar_t>(0.0);
}
accscalar_t sample;
// curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
// Hence, squash the 1 to just below 1.
if(rand == static_cast<accscalar_t>(1.0)) {
sample = ::log(nextafter_1_0_double);
} else {
sample = ::log(rand);
}
return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
exponential_func);
} else {
// use __logf fast approximation for peak bandwidth
auto exponential_func = [lambda, nextafter_1_0_float] __device__ (accscalar_t rand) {
if (lambda == static_cast<accscalar_t>(0.0)) {
return static_cast<scalar_t>(0.0);
}
accscalar_t sample;
if(rand == static_cast<accscalar_t>(1.0)) {
sample = __logf(nextafter_1_0_float);
} else {
sample = __logf(rand);
}
return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
exponential_func);
}
});
}
// Fills the iterator's output with Geometric(p_) samples via inverse transform:
// ceil(log(u) / log(1 - p)) for uniform u.
void geometric_kernel_cuda(TensorIterator& iter, double p_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cuda", [&] {
if (std::is_same<scalar_t, double>::value) {
// define lambda for geometric transformation
auto geometric_func = [p_] __device__ (double rand) {
return static_cast<scalar_t>(::ceil(::log(rand) / ::log(static_cast<double>(1.0)-p_)));
};
distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
geometric_func);
} else {
auto p = static_cast<float>(p_);
auto geometric_func = [p] __device__ (float rand) {
// use __logf fast approximation for peak bandwidth
return static_cast<scalar_t>(::ceil(__logf(rand) / __logf(static_cast<float>(1.0)-p)));
};
distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
geometric_func);
}
});
}
// Fills the iterator's output with LogNormal(mean_, std_) samples:
// exp(rand * std + mean) for standard-normal rand.
void log_normal_kernel(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for log_normal transformation
auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(::exp(rand * std + mean));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
log_normal_func);
} else {
auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
// use __expf fast approximation for peak bandwidth
return static_cast<scalar_t>(__expf(rand * std + mean));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
log_normal_func);
}
});
}
// Fills the iterator's output with Bernoulli(p_) samples: 1 when the uniform
// draw is <= p_, else 0, cast to the output dtype.
void bernoulli_scalar_cuda_kernel(TensorIterator& iter, double p_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
if (std::is_same<scalar_t, double>::value) {
// define lambda for bernoulli transformation
auto bernoulli_func = [p_] __device__ (double rand) {
return static_cast<scalar_t>(rand <= p_);
};
distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
bernoulli_func);
} else {
auto p = static_cast<float>(p_);
auto bernoulli_func = [p] __device__ (float rand) {
return static_cast<scalar_t>(rand <= p);
};
distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
bernoulli_func);
}
});
}
// In-place uniform_ on a CUDA tensor: fills `self` with samples from [from, to)
// and returns it for chaining.
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
auto it = TensorIterator::nullary_op(self);
uniform_kernel_cuda(it, from, to, gen);
return self;
}
// In-place random_ with the dtype-dependent default range:
// - floating types: [0, 2^mantissa_digits] (every integer exactly representable)
// - integral types: the full non-negative span [0, max + 1)
Tensor& random_cuda_(Tensor& self, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
uint64_t range;
auto iter_scalar_type = iter.dtype();
if (isFloatingType(iter_scalar_type)) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter_scalar_type, "random_cuda_range_calc", [&] {
// digits = mantissa bits; +1 makes the upper bound inclusive of 2^digits.
range = static_cast<uint64_t>((1ULL << std::numeric_limits<scalar_t>::digits) + 1);
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter_scalar_type, "random_cuda_range_calc", [&] {
range = static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1;
});
}
random_kernel_cuda(iter, range, 0, gen);
return self;
}
// In-place random_ restricted to [from, to): validates the bounds, then samples
// integers with base `from` over a span of (to - from) values.
Tensor& clamped_random_cuda_(Tensor& self, int64_t from, int64_t to, Generator* gen) {
TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
const uint64_t span = to - from;
auto it = TensorIterator::nullary_op(self);
random_kernel_cuda(it, span, from, gen);
return self;
}
// In-place random_ over [0, to): thin convenience wrapper around the clamped form.
Tensor& capped_random_cuda_(Tensor& self, int64_t to, Generator* gen) {
return clamped_random_cuda_(self, 0, to, gen);
}
// In-place normal_ on a CUDA tensor: fills `self` with Normal(mean, std)
// samples. A non-positive std is rejected up front.
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
auto it = TensorIterator::nullary_op(self);
normal_kernel_cuda(it, mean, std, gen);
return self;
}
// normal(mean: Tensor, std: double) -> output: sample N(0, std) into `output`
// and then shift by the mean tensor elementwise.
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
normal_cuda_(output, 0, std, gen);
output.add_(mean);
return output;
}
// normal(mean: double, std: Tensor) -> output: sample N(0, 1) into `output`,
// scale by the std tensor, then shift by the scalar mean.
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
normal_cuda_(output, 0, 1, gen);
auto mean_tensor = at::full({}, mean, output.options());
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
output.mul_(std).add_(mean_tensor);
return output;
}
// normal(mean: Tensor, std: Tensor) -> output. Two shape regimes are accepted:
// - mean and std broadcast together: output takes the broadcast shape;
// - same numel but NOT broadcastable: deprecated legacy mode where std is
//   reshaped to mean's shape (warns once).
// An empty output is resized in place; a non-empty one must already match.
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
bool expandable = are_expandable(mean.sizes(), std.sizes());
bool empty_output = output.numel() == 0;
if (expandable) {
auto shape = at::infer_size(mean.sizes(), std.sizes());
TORCH_CHECK(
empty_output || output.sizes().equals(shape),
"inconsistent tensor, output size (", output.sizes(), ") is not the same as broadcasted mean and std size (", shape, ")");
if (empty_output) {
at::native::resize_(output, shape);
}
}
else {
TORCH_CHECK(
mean.numel() == std.numel(),
"inconsistent tensor, std and mean are not broadcastable and have different number of elements, "
"expected mean ", mean.sizes(), " and std ", std.sizes(), " to have same number of elements)");
TORCH_CHECK(
empty_output || output.sizes().equals(mean.sizes()),
"inconsistent tensor, std and mean are not broadcastable, output size (", output.sizes(), ") is not the same as mean size (", mean.sizes(), ")");
TORCH_WARN_ONCE(
"std and mean have the same number of elements, but are not broadcastable. This was previously a "
"supported mode of operation, but is now deprecated and the support will be removed in a later release. "
"Note that the current implementation reshapes std to the shape of mean, which may be incur data copies. "
"Please ensure that std and mean are broadcastable to avoid these issues.");
if (empty_output) {
at::native::resize_(output, mean.sizes());
}
}
// Sample N(0, 1), then scale/shift elementwise: out = out * std + mean.
normal_cuda_(output, 0, 1, gen);
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean + mean * std instead of mean + output * std
if (!expandable) {
output.mul_(std.reshape(mean.sizes())).add_(mean);
}
else {
output.mul_(std).add_(mean);
}
return output;
}
// Out-of-place normal() factory overloads: allocate a result tensor and
// delegate to the matching normal_out_cuda overload.
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
Tensor ret = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty_like(std, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
// Empty tensor lets normal_out_cuda resize to the broadcast shape.
Tensor ret = at::empty({0}, mean.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
// In-place Bernoulli fill: each element of `self` becomes 1 with probability p,
// 0 otherwise. Rejects probabilities outside [0, 1] before launching.
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
auto it = TensorIterator::nullary_op(self);
bernoulli_scalar_cuda_kernel(it, p, gen);
return self;
}
// Wire the CUDA implementations into the shared CPU/CUDA dispatch stubs.
REGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);
REGISTER_DISPATCH(exponential_stub, &exponential_kernel);
REGISTER_DISPATCH(geometric_stub, &geometric_kernel_cuda);
REGISTER_DISPATCH(log_normal_stub, &log_normal_kernel);
}} // namespace at::native
|
d572daa747408be89d00c1e01af9fa6da9791bf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// filename: ax.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "ax"
{
}
// Elementwise c[i] = alpha * a[0] * b[i] + c[i] (a is used only via its first
// element, i.e. a scalar held in device memory).
// NOTE(review): there is no `i < n` bounds guard, so the launch must use
// exactly one thread per element of b/c — confirm at the call site.
// NOTE(review): the extern "C" block above this kernel is empty, so the kernel
// itself does NOT get C linkage despite the file-header comment.
__global__ void CalpahGaxpGy(const double alpha, const double *a, const double *b, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = alpha*a[0]*b[i]+c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!!
}
// filename: ax.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "ax"
{
}
// Elementwise c[i] = alpha * a[0] * b[i] + c[i] (a is used only via its first
// element, i.e. a scalar held in device memory).
// NOTE(review): there is no `i < n` bounds guard, so the launch must use
// exactly one thread per element of b/c — confirm at the call site.
// NOTE(review): the extern "C" block above this kernel is empty, so the kernel
// itself does NOT get C linkage despite the file-header comment.
__global__ void CalpahGaxpGy(const double alpha, const double *a, const double *b, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = alpha*a[0]*b[i]+c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!!
}
1ee24d2a8f33dd6689c1628d83ebb993e8cb4453.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust\device_vector.h>
#include <thrust\copy.h>
#include <thrust\reduce.h>
#include <thrust\fill.h>
#include <thrust\device_ptr.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string>
#include <iterator>
#include <queue>
#include <ctime>
#include <algorithm>
#include <climits>
#include "hip/hip_runtime_api.h"
#include <windows.h>
#include "kerneli.cuh"
#define F(x) cout<<#x " = "<<x<<endl;
using namespace std;
ofstream _log1("costArrayProvjeraDebugOOO.log");
// Empty stub kept for the C-linkage interface; `niz` is currently unused.
extern "C" void testRed(long *niz)
{
}
// Debug helper: prints a banner followed by the `size` elements of `niz`,
// each terminated by ", " (same trailing delimiter as the ostream_iterator form).
extern "C" void uhvatiNiz(long *niz, long size)
{
cout << "uhvati niz\n";
for (long i = 0; i < size; ++i) {
cout << niz[i] << ", ";
}
}
// Frontier-based parallel BFS over a CSR graph on the GPU.
//   h_V / sizeV : vertex offset array and its length
//   h_E / sizeE : edge (adjacency) array and its length
// Returns the GPU-event-measured elapsed time of the BFS loop, in seconds.
// Side effect: appends the resulting cost (distance) array to the global
// _log1 stream.
// Arrays used on the device (names follow the usual frontier BFS scheme):
//   F = frontier flags, X = visited flags, C = cost/distance per vertex.
// memset with byte 127 fills each long with 0x7F7F7F7F... — a large sentinel
// standing in for "infinity".
// NOTE(review): hip* return codes are intentionally unchecked, matching the
// rest of this benchmark file; add checking if reused outside benchmarks.
extern "C" double paralelniBFS(long *h_V, long *h_E, long sizeV, long sizeE)
{
cout << "paralelniBFS" << endl;
cout << "Alokacija host\n";
long *h_F(NULL), *h_X(NULL), *h_C(NULL);
h_F = (long*)malloc(sizeV*sizeof(long));
h_X = (long*)malloc(sizeV*sizeof(long));
h_C = (long*)malloc(sizeV*sizeof(long));
memset(h_F, 0, sizeV*sizeof(long));
memset(h_X, 0, sizeV*sizeof(long));
memset(h_C, 127, sizeV*sizeof(long)); // byte-fill: each long becomes a huge "infinity" sentinel
// BFS seed: vertex 0 is on the frontier with distance 0.
long pocetniCvor = 0;
h_F[pocetniCvor] = 1;
h_C[pocetniCvor] = 0;
// Device allocations.
cout << "Alokacija device\n";
long *d_E(NULL), *d_V(NULL),
*d_F(NULL), *d_X(NULL), *d_C(NULL);
hipMalloc((void**)&d_E, sizeE*sizeof(long));
hipMalloc((void**)&d_V, sizeV*sizeof(long));
hipMalloc((void**)&d_F, sizeV*sizeof(long));
hipMalloc((void**)&d_X, sizeV*sizeof(long));
hipMalloc((void**)&d_C, sizeV*sizeof(long));
// Host -> device copies.
cout << "Kopiranje na device\n";
hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_F, h_F, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_X, h_X, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
// thrust::reduce over the frontier flags tells us when the frontier is empty.
thrust::device_ptr<long> dev_ptr(d_F);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
cout << "pocinje BFS paralelni\n";
clock_t p1;
p1 = clock();
hipProfilerStart();
hipEventRecord(start);
// One kernel launch per BFS level; loop until the frontier is empty.
while (thrust::reduce(dev_ptr, dev_ptr + sizeV))
{
int threadsPerBlock = 256<sizeV ? 256 : sizeV;
int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
prazno << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_F, d_X, d_C);
hipDeviceSynchronize();
}
hipEventRecord(stop);
hipDeviceSynchronize();
hipProfilerStop();
// Wall-clock (CPU) and event (GPU) timings are both reported.
double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
hipEventSynchronize(stop);
float milliseconds = 0;
double sec;
hipEventElapsedTime(&milliseconds, start, stop);
sec = milliseconds / 1000.0;
cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
cout << endl << ": event: " << sec << " s\n";
cout << "Copy C to host\n";
hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
_log1 << " ; " << endl;
copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
_log1 << endl;
cout << "Oslobadjanje memorije\n";
hipFree(d_E);
hipFree(d_V);
hipFree(d_F);
hipFree(d_X);
hipFree(d_C);
free(h_F);
free(h_X);
free(h_C);
// Fix: destroy the timing events — the original leaked one event pair per call.
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceSynchronize();
return sec;
}
// Variant of paralelniBFS using a small, fixed block size (see `m` below).
// h_V/h_E are the CSR vertex/edge arrays; returns GPU-event elapsed seconds.
// NOTE(review): despite the "_64" name, the block size used is 65 — confirm
// whether 64 was intended (65 is not a multiple of the 32-thread warp size).
// NOTE(review): `start`/`stop` events are created but never destroyed, so each
// call leaks an event pair.
extern "C" double paralelniBFS_64(long *h_V, long *h_E, long sizeV, long sizeE)
{
cout << "paralelniBFS" << endl;
cout << "Alokacija host\n";
long *h_F(NULL), *h_X(NULL), *h_C(NULL);
h_F = (long*)malloc(sizeV*sizeof(long));
h_X = (long*)malloc(sizeV*sizeof(long));
h_C = (long*)malloc(sizeV*sizeof(long));
memset(h_F, 0, sizeV*sizeof(long));
memset(h_X, 0, sizeV*sizeof(long));
// Byte-fill 127 turns each long into a huge "infinity" sentinel.
memset(h_C, 127, sizeV*sizeof(long));
// BFS seed: vertex 0 on the frontier with distance 0.
long pocetniCvor = 0;
h_F[pocetniCvor] = 1;
h_C[pocetniCvor] = 0;
// Device allocations (F = frontier, X = visited, C = cost).
cout << "Alokacija device\n";
long *d_E(NULL), *d_V(NULL),
*d_F(NULL), *d_X(NULL), *d_C(NULL);
hipMalloc((void**)&d_E, sizeE*sizeof(long));
hipMalloc((void**)&d_V, sizeV*sizeof(long));
hipMalloc((void**)&d_F, sizeV*sizeof(long));
hipMalloc((void**)&d_X, sizeV*sizeof(long));
hipMalloc((void**)&d_C, sizeV*sizeof(long));
cout << "Kopiranje na device\n";
hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_F, h_F, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_X, h_X, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
// thrust::reduce over d_F detects the empty frontier (and synchronizes).
thrust::device_ptr<long> dev_ptr(d_F);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
cout << "pocinje BFS paralelni\n";
clock_t p1;
p1 = clock();
hipEventRecord(start);
while (thrust::reduce(dev_ptr, dev_ptr + sizeV))
{
int m = 65; // block size under test (see NOTE above about the "_64" name)
int threadsPerBlock = m<sizeV ? m : sizeV;
int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
prazno << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_F, d_X, d_C);
// No explicit sync here; the thrust::reduce in the loop condition
// presumably synchronizes before reading d_F — TODO confirm.
}
hipEventRecord(stop);
double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
hipEventSynchronize(stop);
float milliseconds = 0;
double sec;
hipEventElapsedTime(&milliseconds, start, stop);
sec = milliseconds / 1000.0;
cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
cout << endl << ": event: " << sec << " s\n";
// Result copy-back is intentionally disabled in this timing variant.
cout << "Oslobadjanje memorije\n";
hipFree(d_E);
hipFree(d_V);
hipFree(d_F);
hipFree(d_X);
hipFree(d_C);
free(h_F);
free(h_X);
free(h_C);
return sec;
}
// BFS variant without explicit frontier arrays: kernel_1 relaxes costs until a
// fixed point, signalled through the shared flag `done` (declared in
// kerneli.cuh — presumably host-visible/managed; TODO confirm).
// Returns GPU-event elapsed seconds; logs the final cost array to _log1.
// NOTE(review): `start`/`stop` events are never destroyed (leak per call), and
// _log1.close() at the end means any later call logs into a closed stream.
extern "C" double paralelniBFS_1(long *h_V, long *h_E, long sizeV, long sizeE)
{
cout << "paralelniBFS" << endl;
cout << "Alokacija host\n";
long *h_C(NULL);
h_C = (long*)malloc(sizeV*sizeof(long));
// Byte-fill 127 turns each long into a huge "infinity" sentinel.
memset(h_C, 127, sizeV*sizeof(long));
// BFS seed: vertex 0 at distance 0 (no frontier array in this variant).
long pocetniCvor = 0;
h_C[pocetniCvor] = 0;
cout << "Alokacija device\n";
long *d_E(NULL), *d_V(NULL),*d_C(NULL);
hipMalloc((void**)&d_E, sizeE*sizeof(long));
hipMalloc((void**)&d_V, sizeV*sizeof(long));
hipMalloc((void**)&d_C, sizeV*sizeof(long));
cout << "Kopiranje na device\n";
hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
// `iteration` is a shared counter from kerneli.cuh.
iteration = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
cout << "pocinje BFS paralelni\n";
clock_t p1;
p1 = clock();
hipProfilerStart();
hipEventRecord(start);
// Iterate until a full pass makes no update (kernel clears `done` on change).
do
{
done = true;
int threadsPerBlock = 256<sizeV ? 256 : sizeV;
int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
kernel_1 << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
hipDeviceSynchronize();
iteration++;
} while (!done);
hipEventRecord(stop);
hipDeviceSynchronize();
hipProfilerStop();
double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
hipEventSynchronize(stop);
float milliseconds = 0;
double sec;
hipEventElapsedTime(&milliseconds, start, stop);
sec = milliseconds / 1000.0;
cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
cout << endl << ": event: " << sec << " s\n";
cout << "Copy C to host\n";
hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
_log1 << " ; "<< endl;
copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
_log1 << endl;
cout << "Oslobadjanje memorije\n";
hipFree(d_E);
hipFree(d_V);
hipFree(d_C);
free(h_C);
cout << "done" << endl;
_log1.close();
return sec;
}
// Shared-memory flavor of the fixed-point BFS (kernel_1_Share from kerneli.cuh).
// Returns GPU-event elapsed seconds.
// BUG(review): the device->host copy of the result (hipMemcpy of d_C) is
// commented out below, yet h_C is still written to _log1 — so the log records
// the INITIAL host array (seed + infinity sentinels), not the BFS result.
// NOTE(review): `start`/`stop` events are never destroyed (leak per call), and
// _log1.close() at the end affects any later logging call.
extern "C" double paralelniBFS_1_Share(long *h_V, long *h_E, long sizeV, long sizeE)
{
cout << "paralelniBFS" << endl;
cout << "Alokacija host\n";
long *h_C(NULL);
h_C = (long*)malloc(sizeV*sizeof(long));
// Byte-fill 127 turns each long into a huge "infinity" sentinel.
memset(h_C, 127, sizeV*sizeof(long));
// BFS seed: vertex 0 at distance 0.
long pocetniCvor = 0;
h_C[pocetniCvor] = 0;
cout << "Alokacija device\n";
long *d_E(NULL), *d_V(NULL), *d_C(NULL);
hipMalloc((void**)&d_E, sizeE*sizeof(long));
hipMalloc((void**)&d_V, sizeV*sizeof(long));
hipMalloc((void**)&d_C, sizeV*sizeof(long));
cout << "Kopiranje na device\n";
hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
// `iteration` / `done` are shared with the kernels via kerneli.cuh.
iteration = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
cout << "pocinje BFS paralelni\n";
clock_t p1;
p1 = clock();
hipProfilerStart();
hipEventRecord(start);
// Iterate until a full pass makes no update (kernel clears `done` on change).
do
{
done = true;
int threadsPerBlock = 256<sizeV ? 256 : sizeV;
int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
kernel_1_Share << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
hipDeviceSynchronize();
iteration++;
} while (!done);
hipEventRecord(stop);
hipDeviceSynchronize();
hipProfilerStop();
double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
hipEventSynchronize(stop);
float milliseconds = 0;
double sec;
hipEventElapsedTime(&milliseconds, start, stop);
sec = milliseconds / 1000.0;
cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
cout << endl << ": event: " << sec << " s\n";
// See BUG note in the header: h_C below still holds the pre-BFS values.
_log1 << " ; "<< endl;
copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
_log1 << endl;
cout << "Oslobadjanje memorije\n";
hipFree(d_E);
hipFree(d_V);
hipFree(d_C);
free(h_C);
cout << "done" << endl;
_log1.close();
return sec;
}
// Atomics-based flavor of the fixed-point BFS (kernel_1_ShareAtomics from
// kerneli.cuh). Convergence is signalled through the shared integer flag
// `doneI` (loop continues while the kernel leaves doneI truthy cleared — note
// the inverted sense vs. the boolean `done` used by the other variants).
// Returns GPU-event elapsed seconds; logs the final cost array to _log1.
// NOTE(review): `start`/`stop` events are never destroyed (leak per call), and
// _log1.close() at the end affects any later logging call.
extern "C" double paralelniBFS_1_ShareAtomics(long *h_V, long *h_E, long sizeV, long sizeE)
{
cout << "paralelniBFS" << endl;
cout << "Alokacija host\n";
long *h_C(NULL);
h_C = (long*)malloc(sizeV*sizeof(long));
// Byte-fill 127 turns each long into a huge "infinity" sentinel.
memset(h_C, 127, sizeV*sizeof(long));
// BFS seed: vertex 0 at distance 0.
long pocetniCvor = 0;
h_C[pocetniCvor] = 0;
cout << "Alokacija device\n";
long *d_E(NULL), *d_V(NULL), *d_C(NULL);
hipMalloc((void**)&d_E, sizeE*sizeof(long));
hipMalloc((void**)&d_V, sizeV*sizeof(long));
hipMalloc((void**)&d_C, sizeV*sizeof(long));
cout << "Kopiranje na device\n";
hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
iteration = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
cout << "pocinje BFS paralelni\n";
clock_t p1;
p1 = clock();
hipEventRecord(start);
do
{
doneI = 1;
int threadsPerBlock = 256<sizeV ? 256 : sizeV;
int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
kernel_1_ShareAtomics<< <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
hipDeviceSynchronize();
iteration++;
} while (!doneI);
hipEventRecord(stop);
double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
hipEventSynchronize(stop);
float milliseconds = 0;
double sec;
hipEventElapsedTime(&milliseconds, start, stop);
sec = milliseconds / 1000.0;
cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
cout << endl << ": event: " << sec << " s\n";
cout << "Copy C to host\n";
hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
_log1 << " ; " << endl;
copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
_log1 << endl;
cout << "Oslobadjanje memorije\n";
hipFree(d_E);
hipFree(d_V);
hipFree(d_C);
free(h_C);
cout << "done" << endl;
_log1.close();
return sec;
}
// Edge-parallel BFS driver: one thread per edge (kernel `edge`), with explicit
// frontier (d_F), visited (d_X) and cost (d_C) arrays. The host loop keeps
// launching until a thrust::reduce over the frontier reports it empty.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with HIP events.
extern "C" double paralelniBFSEdge(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_F(NULL), *h_X(NULL), *h_C(NULL);
    h_F = (long*)malloc(sizeV*sizeof(long));
    h_X = (long*)malloc(sizeV*sizeof(long));
    h_C = (long*)malloc(sizeV*sizeof(long));
    memset(h_F, 0, sizeV*sizeof(long));
    memset(h_X, 0, sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: vertex 0 is on the frontier with cost 0.
    long pocetniCvor = 0;
    h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL),
        *d_F(NULL), *d_X(NULL), *d_C(NULL);
    hipMalloc((void**)&d_E, sizeE*sizeof(long));
    hipMalloc((void**)&d_V, sizeV*sizeof(long));
    hipMalloc((void**)&d_F, sizeV*sizeof(long));
    hipMalloc((void**)&d_X, sizeV*sizeof(long));
    hipMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and BFS state to the device.
    cout << "Kopiranje na device\n";
    hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_F, h_F, sizeV*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_X, h_X, sizeV*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
    // thrust wrapper so the frontier can be summed on the device each level.
    thrust::device_ptr<long> dev_ptr(d_F);
    //bool paralelno = true, seq = false;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    hipEventRecord(start);
    // Loop while any frontier entry is non-zero; thrust::reduce blocks until
    // the preceding kernel finishes, which is why no explicit sync is needed here.
    while (thrust::reduce(dev_ptr, dev_ptr + sizeV))
    {
        //cout << "While petlja\n";
        int threadsPerBlock = 256<sizeE ? 256 : sizeE;
        int blocksPerGrid = (sizeE + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over edges
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        edge << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_F, d_X, d_C);
        //vectorAdd << <blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_C, numElements);
        //break;
        //hipDeviceSynchronize();
    }
    hipEventRecord(stop);
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //hipDeviceSynchronize();
    hipEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    hipEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    hipFree(d_E);
    hipFree(d_V);
    hipFree(d_F);
    hipFree(d_X);
    hipFree(d_C);
    free(h_F);
    free(h_X);
    free(h_C);
    return sec;
}
// Edge-parallel BFS driver using the frontier-less "kernel_1_Edge" variant:
// only the cost array d_C is kept; launch geometry is sized over edges.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with HIP events.
// Relies on globals `done` / `iteration` (presumably device-visible flags from
// kerneli.cuh; the kernel appears to clear `done` while work remains -- TODO
// confirm) and on the shared log stream _log1, which is closed on exit.
extern "C" double paralelniBFS_1_Edge(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    //h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL),*d_C(NULL);
    hipMalloc((void**)&d_E, sizeE*sizeof(long));
    hipMalloc((void**)&d_V, sizeV*sizeof(long));
    hipMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    hipEventRecord(start);
    do
    {
        done = true;                      // kernel resets this while the search is still expanding (TODO confirm)
        int threadsPerBlock = 256<sizeE ? 256 : sizeE;
        int blocksPerGrid = (sizeE + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over edges
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        kernel_1_Edge<< <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        hipDeviceSynchronize();           // host must observe `done` before testing the loop condition
        iteration++;
    } while (!done);
    hipEventRecord(stop);
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //hipDeviceSynchronize();
    hipEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    hipEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    hipFree(d_E);
    hipFree(d_V);
    hipFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
}
// Vertex-parallel BFS driver using the atomics-based kernel variant
// (kernel_1_Atomics). Keeps only the cost array d_C.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with HIP events.
// Relies on globals `doneI` / `iteration` (presumably device-visible flags
// from kerneli.cuh; the kernel appears to reset `doneI` while work remains --
// TODO confirm) and on the shared log stream _log1, which is closed on exit.
extern "C" double paralelniBFS_1_Atomics(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    //h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL), *d_C(NULL);
    hipMalloc((void**)&d_E, sizeE*sizeof(long));
    hipMalloc((void**)&d_V, sizeV*sizeof(long));
    hipMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    hipEventRecord(start);
    do
    {
        //done = true;
        doneI = 1;                        // cleared by the kernel when the frontier is non-empty (TODO confirm)
        int threadsPerBlock = 256<sizeV ? 256 : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        //kernel_1_Share<< <blocksPerGrid,threadsPerBlock> >>(d_V, sizeV, d_E, sizeE, d_C);
        kernel_1_Atomics << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        hipDeviceSynchronize();           // host must observe doneI before testing the loop condition
        iteration++;
    } while (!doneI);
    hipEventRecord(stop);
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //hipDeviceSynchronize();
    hipEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    hipEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    hipFree(d_E);
    hipFree(d_V);
    hipFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
}
// Vertex-parallel BFS driver using the dynamic-parallelism kernel variant
// (kernel_1_Dynamic -- the kernel itself presumably performs child launches;
// not visible from here). Profiler capture is enabled around the loop.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with HIP events.
// Relies on globals `done` / `iteration` (presumably device-visible flags from
// kerneli.cuh -- TODO confirm) and the shared log stream _log1 (closed on exit).
extern "C" double paralelniBFS_1_Dynamic(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    //h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL), *d_C(NULL);
    hipMalloc((void**)&d_E, sizeE*sizeof(long));
    hipMalloc((void**)&d_V, sizeV*sizeof(long));
    hipMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    hipProfilerStart();
    hipEventRecord(start);
    //int *h_Data(NULL), *d_Data(NULL);
    //// h_C = (int*)malloc(sizeV*sizeof(int));
    ////memset(h_C, 127, sizeV*sizeof(int));
    //h_Data = (int*)malloc(256 * sizeof(int));
    //memset(h_Data, 0, 256 * sizeof(int));
    //hipMemcpy(d_Data, h_Data, 256*sizeof(int), hipMemcpyHostToDevice);
    //parent_launch << < 1, 256 >> >(d_Data);
    //hipDeviceSynchronize();
    //free(h_Data);
    //hipFree(d_Data);
    do
    {
        done = true;                      // kernel resets this while the search is still expanding (TODO confirm)
        int threadsPerBlock = 256<sizeV ? 256 : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        kernel_1_Dynamic << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        hipDeviceSynchronize();           // host must observe `done` before testing the loop condition
        iteration++;
    } while (!done);
    hipEventRecord(stop);
    hipDeviceSynchronize();
    hipProfilerStop();
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //hipDeviceSynchronize();
    hipEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    hipEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
    //_log1 << diff << ";" << sec << endl;
    _log1 << " ; "<< endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    hipFree(d_E);
    hipFree(d_V);
    hipFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
}
// Vertex-parallel BFS driver launching the baseline kernel_1 (despite the
// "_Red" name, the visible launch here is kernel_1, identical to
// paralelniBFS_1 plus profiler capture). Keeps only the cost array d_C.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with HIP events.
// Relies on globals `done` / `iteration` (presumably device-visible flags from
// kerneli.cuh -- TODO confirm) and the shared log stream _log1 (closed on exit).
extern "C" double paralelniBFS_Red(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    //h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL), *d_C(NULL);
    hipMalloc((void**)&d_E, sizeE*sizeof(long));
    hipMalloc((void**)&d_V, sizeV*sizeof(long));
    hipMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    hipMemcpy(d_E, h_E, sizeE*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_V, h_V, sizeV*sizeof(long), hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C, sizeV*sizeof(long), hipMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    hipProfilerStart();
    hipEventRecord(start);
    do
    {
        done = true;                      // kernel resets this while the search is still expanding (TODO confirm)
        int threadsPerBlock = 256<sizeV ? 256 : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        kernel_1 << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        hipDeviceSynchronize();           // host must observe `done` before testing the loop condition
        iteration++;
    } while (!done);
    hipEventRecord(stop);
    hipDeviceSynchronize();
    hipProfilerStop();
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //hipDeviceSynchronize();
    hipEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    hipEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    hipMemcpy(h_C, d_C, sizeV*sizeof(long), hipMemcpyDeviceToHost);
    //_log1 << diff << ";" << sec << endl;
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    hipFree(d_E);
    hipFree(d_V);
    hipFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
} | 1ee24d2a8f33dd6689c1628d83ebb993e8cb4453.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust\device_vector.h>
#include <thrust\copy.h>
#include <thrust\reduce.h>
#include <thrust\fill.h>
#include <thrust\device_ptr.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string>
#include <iterator>
#include <queue>
#include <ctime>
#include <algorithm>
#include <climits>
#include "cuda_profiler_api.h"
#include <windows.h>
#include "kerneli.cuh"
#define F(x) cout<<#x " = "<<x<<endl;
using namespace std;
ofstream _log1("costArrayProvjeraDebugOOO.log");
// Empty placeholder exported with C linkage; `niz` is unused.
// Presumably a stub left in place for a reduction test -- TODO confirm or remove.
extern "C" void testRed(long *niz)
{
}
// Debug helper: prints the header line "uhvati niz" followed by the `size`
// elements of `niz`, each suffixed with ", ", to standard output.
extern "C" void uhvatiNiz(long *niz, long size)
{
    std::cout << "uhvati niz\n";
    for (long i = 0; i < size; ++i)
        std::cout << niz[i] << ", ";
}
// Frontier-based vertex-parallel BFS driver (kernel `prazno`): maintains
// frontier (d_F), visited (d_X) and cost (d_C) arrays; the host loop runs
// until thrust::reduce over the frontier reports it empty.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with CUDA events.
// Fixes vs. previous revision: the CUDA events are now destroyed (they leaked
// on every call) and a launch/execution error, if any, is reported once after
// the loop.
extern "C" double paralelniBFS(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_F(NULL), *h_X(NULL), *h_C(NULL);
    h_F = (long*)malloc(sizeV*sizeof(long));
    h_X = (long*)malloc(sizeV*sizeof(long));
    h_C = (long*)malloc(sizeV*sizeof(long));
    memset(h_F, 0, sizeV*sizeof(long));
    memset(h_X, 0, sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: vertex 0 is on the frontier with cost 0.
    long pocetniCvor = 0;
    h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL),
        *d_F(NULL), *d_X(NULL), *d_C(NULL);
    cudaMalloc((void**)&d_E, sizeE*sizeof(long));
    cudaMalloc((void**)&d_V, sizeV*sizeof(long));
    cudaMalloc((void**)&d_F, sizeV*sizeof(long));
    cudaMalloc((void**)&d_X, sizeV*sizeof(long));
    cudaMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and BFS state to the device.
    cout << "Kopiranje na device\n";
    cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_F, h_F, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_X, h_X, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    // thrust wrapper so the frontier can be summed on the device each level.
    thrust::device_ptr<long> dev_ptr(d_F);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    cudaProfilerStart();
    cudaEventRecord(start);
    // Loop while any frontier entry is non-zero.
    while (thrust::reduce(dev_ptr, dev_ptr + sizeV))
    {
        int threadsPerBlock = 256<sizeV ? 256 : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        prazno << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_F, d_X, d_C);
        cudaDeviceSynchronize();
    }
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaProfilerStop();
    // Report any launch/execution error that would otherwise be silent.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "CUDA error: " << cudaGetErrorString(err) << endl;
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    cudaEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    cudaEventDestroy(start);    // events were previously leaked on every call
    cudaEventDestroy(stop);
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    cudaFree(d_E);
    cudaFree(d_V);
    cudaFree(d_F);
    cudaFree(d_X);
    cudaFree(d_C);
    free(h_F);
    free(h_X);
    free(h_C);
    cudaDeviceSynchronize();
    return sec;
}
// Variant of paralelniBFS with a smaller block size for occupancy comparison.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with CUDA events.
// NOTE(review): the block size `m` is 65, not 64 as the function name suggests,
// and 65 is not a multiple of the warp size (32) -- confirm whether this is
// intentional; the value is left unchanged here to preserve measurements.
// Fixes vs. previous revision: the CUDA events are now destroyed (they leaked
// on every call).
extern "C" double paralelniBFS_64(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_F(NULL), *h_X(NULL), *h_C(NULL);
    h_F = (long*)malloc(sizeV*sizeof(long));
    h_X = (long*)malloc(sizeV*sizeof(long));
    h_C = (long*)malloc(sizeV*sizeof(long));
    memset(h_F, 0, sizeV*sizeof(long));
    memset(h_X, 0, sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: vertex 0 is on the frontier with cost 0.
    long pocetniCvor = 0;
    h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL),
        *d_F(NULL), *d_X(NULL), *d_C(NULL);
    cudaMalloc((void**)&d_E, sizeE*sizeof(long));
    cudaMalloc((void**)&d_V, sizeV*sizeof(long));
    cudaMalloc((void**)&d_F, sizeV*sizeof(long));
    cudaMalloc((void**)&d_X, sizeV*sizeof(long));
    cudaMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and BFS state to the device.
    cout << "Kopiranje na device\n";
    cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_F, h_F, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_X, h_X, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    // thrust wrapper so the frontier can be summed on the device each level.
    thrust::device_ptr<long> dev_ptr(d_F);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    cudaEventRecord(start);
    while (thrust::reduce(dev_ptr, dev_ptr + sizeV))
    {
        int m = 65;   // see NOTE(review) in the header comment
        int threadsPerBlock = m<sizeV ? m : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        prazno << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_F, d_X, d_C);
        // No explicit sync: thrust::reduce in the loop condition blocks until
        // the preceding kernel finishes.
    }
    cudaEventRecord(stop);
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    cudaEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    cudaEventDestroy(start);    // events were previously leaked on every call
    cudaEventDestroy(stop);
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Oslobadjanje memorije\n";
    cudaFree(d_E);
    cudaFree(d_V);
    cudaFree(d_F);
    cudaFree(d_X);
    cudaFree(d_C);
    free(h_F);
    free(h_X);
    free(h_C);
    return sec;
}
// Vertex-parallel BFS driver launching the baseline kernel_1; keeps only the
// cost array d_C. Profiler capture is enabled around the loop.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with CUDA events.
// Relies on globals `done` / `iteration` (presumably device-visible flags from
// kerneli.cuh -- TODO confirm) and the shared log stream _log1.
// NOTE(review): _log1.close() disables logging for any subsequently called
// variant; left unchanged to preserve behavior.
// Fixes vs. previous revision: the CUDA events are now destroyed (they leaked
// on every call) and a launch/execution error, if any, is reported after the loop.
extern "C" double paralelniBFS_1(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL),*d_C(NULL);
    cudaMalloc((void**)&d_E, sizeE*sizeof(long));
    cudaMalloc((void**)&d_V, sizeV*sizeof(long));
    cudaMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    cudaProfilerStart();
    cudaEventRecord(start);
    do
    {
        done = true;                      // kernel resets this while the search is still expanding (TODO confirm)
        int threadsPerBlock = 256<sizeV ? 256 : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        kernel_1 << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        cudaDeviceSynchronize();          // host must observe `done` before testing the loop condition
        iteration++;
    } while (!done);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaProfilerStop();
    // Report any launch/execution error that would otherwise be silent.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "CUDA error: " << cudaGetErrorString(err) << endl;
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    cudaEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    cudaEventDestroy(start);    // events were previously leaked on every call
    cudaEventDestroy(stop);
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
    _log1 << " ; "<< endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    cudaFree(d_E);
    cudaFree(d_V);
    cudaFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
}
// Vertex-parallel BFS driver using the shared-memory kernel variant
// (kernel_1_Share); keeps only the cost array d_C.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with CUDA events.
// Relies on globals `done` / `iteration` (presumably device-visible flags from
// kerneli.cuh -- TODO confirm) and the shared log stream _log1 (closed on exit).
// Fixes vs. previous revision: the device-to-host copy of the cost array was
// commented out, so the verification log recorded the untouched host sentinel
// buffer instead of the computed costs -- the copy is restored (matching every
// sibling variant). CUDA events are now also destroyed (they leaked per call).
extern "C" double paralelniBFS_1_Share(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL), *d_C(NULL);
    cudaMalloc((void**)&d_E, sizeE*sizeof(long));
    cudaMalloc((void**)&d_V, sizeV*sizeof(long));
    cudaMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    cudaProfilerStart();
    cudaEventRecord(start);
    do
    {
        done = true;                      // kernel resets this while the search is still expanding (TODO confirm)
        int threadsPerBlock = 256<sizeV ? 256 : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        kernel_1_Share << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        cudaDeviceSynchronize();          // host must observe `done` before testing the loop condition
        iteration++;
    } while (!done);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaProfilerStop();
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    cudaEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    cudaEventDestroy(start);    // events were previously leaked on every call
    cudaEventDestroy(stop);
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    // Restored: without this copy the log below contains only the initial
    // host-side sentinel values, never the computed BFS costs.
    cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
    _log1 << " ; "<< endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    cudaFree(d_E);
    cudaFree(d_V);
    cudaFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
}
// Vertex-parallel BFS driver using the "ShareAtomics" kernel variant
// (kernel_1_ShareAtomics); keeps only the cost array d_C.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with CUDA events.
// Relies on globals `doneI` / `iteration` (presumably device-visible flags
// from kerneli.cuh; the kernel appears to reset `doneI` while work remains --
// TODO confirm) and on the shared log stream _log1, which is closed on exit.
extern "C" double paralelniBFS_1_ShareAtomics(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    //h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL), *d_C(NULL);
    cudaMalloc((void**)&d_E, sizeE*sizeof(long));
    cudaMalloc((void**)&d_V, sizeV*sizeof(long));
    cudaMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    cudaEventRecord(start);
    do
    {
        //done = true;
        doneI = 1;                        // cleared by the kernel when the frontier is non-empty (TODO confirm)
        int threadsPerBlock = 256<sizeV ? 256 : sizeV;
        int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over vertices
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        //kernel_1_Share<< <blocksPerGrid,threadsPerBlock> >>(d_V, sizeV, d_E, sizeE, d_C);
        kernel_1_ShareAtomics<< <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        cudaDeviceSynchronize();          // host must observe doneI before testing the loop condition
        iteration++;
    } while (!doneI);
    cudaEventRecord(stop);
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //cudaDeviceSynchronize();
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    cudaEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    cudaFree(d_E);
    cudaFree(d_V);
    cudaFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
}
// Edge-parallel BFS driver: one thread per edge (kernel `edge`), with explicit
// frontier (d_F), visited (d_X) and cost (d_C) arrays. The host loop keeps
// launching until a thrust::reduce over the frontier reports it empty.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with CUDA events.
extern "C" double paralelniBFSEdge(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_F(NULL), *h_X(NULL), *h_C(NULL);
    h_F = (long*)malloc(sizeV*sizeof(long));
    h_X = (long*)malloc(sizeV*sizeof(long));
    h_C = (long*)malloc(sizeV*sizeof(long));
    memset(h_F, 0, sizeV*sizeof(long));
    memset(h_X, 0, sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: vertex 0 is on the frontier with cost 0.
    long pocetniCvor = 0;
    h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL),
        *d_F(NULL), *d_X(NULL), *d_C(NULL);
    cudaMalloc((void**)&d_E, sizeE*sizeof(long));
    cudaMalloc((void**)&d_V, sizeV*sizeof(long));
    cudaMalloc((void**)&d_F, sizeV*sizeof(long));
    cudaMalloc((void**)&d_X, sizeV*sizeof(long));
    cudaMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and BFS state to the device.
    cout << "Kopiranje na device\n";
    cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_F, h_F, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_X, h_X, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    // thrust wrapper so the frontier can be summed on the device each level.
    thrust::device_ptr<long> dev_ptr(d_F);
    //bool paralelno = true, seq = false;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    cudaEventRecord(start);
    // Loop while any frontier entry is non-zero; thrust::reduce blocks until
    // the preceding kernel finishes, which is why no explicit sync is needed here.
    while (thrust::reduce(dev_ptr, dev_ptr + sizeV))
    {
        //cout << "While petlja\n";
        int threadsPerBlock = 256<sizeE ? 256 : sizeE;
        int blocksPerGrid = (sizeE + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over edges
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        edge << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_F, d_X, d_C);
        //vectorAdd << <blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_C, numElements);
        //break;
        //cudaDeviceSynchronize();
    }
    cudaEventRecord(stop);
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //cudaDeviceSynchronize();
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    cudaEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    cudaFree(d_E);
    cudaFree(d_V);
    cudaFree(d_F);
    cudaFree(d_X);
    cudaFree(d_C);
    free(h_F);
    free(h_X);
    free(h_C);
    return sec;
}
// Edge-parallel BFS driver using the frontier-less "kernel_1_Edge" variant:
// only the cost array d_C is kept; launch geometry is sized over edges.
//   h_V, h_E     : host CSR vertex-offset and edge arrays
//   sizeV, sizeE : element counts of h_V and h_E
// Returns elapsed loop time in seconds measured with CUDA events.
// Relies on globals `done` / `iteration` (presumably device-visible flags from
// kerneli.cuh; the kernel appears to clear `done` while work remains -- TODO
// confirm) and on the shared log stream _log1, which is closed on exit.
extern "C" double paralelniBFS_1_Edge(long *h_V, long *h_E, long sizeV, long sizeE)
{
    cout << "paralelniBFS" << endl;
    cout << "Alokacija host\n";
    long *h_C(NULL);
    h_C = (long*)malloc(sizeV*sizeof(long));
    // Every byte 0x7F: each long reads as a large "infinity" sentinel.
    memset(h_C, 127, sizeV*sizeof(long));
    // Initial BFS state: source vertex 0 gets cost 0.
    long pocetniCvor = 0;
    //h_F[pocetniCvor] = 1;
    h_C[pocetniCvor] = 0;
    // Allocate device buffers.
    cout << "Alokacija device\n";
    long *d_E(NULL), *d_V(NULL),*d_C(NULL);
    cudaMalloc((void**)&d_E, sizeE*sizeof(long));
    cudaMalloc((void**)&d_V, sizeV*sizeof(long));
    cudaMalloc((void**)&d_C, sizeV*sizeof(long));
    // Copy graph and cost array to the device.
    cout << "Kopiranje na device\n";
    cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
    // BFS bookkeeping and timing setup.
    iteration = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cout << "pocinje BFS paralelni\n";
    clock_t p1;
    p1 = clock();
    cudaEventRecord(start);
    do
    {
        done = true;                      // kernel resets this while the search is still expanding (TODO confirm)
        int threadsPerBlock = 256<sizeE ? 256 : sizeE;
        int blocksPerGrid = (sizeE + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div over edges
        //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
        kernel_1_Edge<< <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
        cudaDeviceSynchronize();          // host must observe `done` before testing the loop condition
        iteration++;
    } while (!done);
    cudaEventRecord(stop);
    double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
    //cudaDeviceSynchronize();
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    double sec;
    cudaEventElapsedTime(&milliseconds, start, stop);
    sec = milliseconds / 1000.0;
    //_log1 << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
    cout << endl << ": event: " << sec << " s\n";
    cout << "Copy C to host\n";
    cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
    _log1 << " ; " << endl;
    copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
    _log1 << endl;
    cout << "Oslobadjanje memorije\n";
    cudaFree(d_E);
    cudaFree(d_V);
    cudaFree(d_C);
    free(h_C);
    cout << "done" << endl;
    _log1.close();
    return sec;
}
// Parallel BFS, vertex-parallel variant using an atomics-based termination
// flag: one thread per vertex, repeated until the global integer `doneI`
// (cleared by kernel_1_Atomics when any distance improves) survives a pass.
//   h_V / h_E   : CSR vertex offsets and edge endpoints (host memory)
//   sizeV/sizeE : vertex / edge counts
// Side effects: prints progress, appends the final distance array to _log1
// and closes it. Returns the event-measured kernel time in seconds.
extern "C" double paralelniBFS_1_Atomics(long *h_V, long *h_E, long sizeV, long sizeE)
{
  cout << "paralelniBFS" << endl;
  cout << "Alokacija host\n";
  // Host cost array: memset with 127 gives 0x7F7F... per long, used as "infinity".
  long *h_C(NULL);
  h_C = (long*)malloc(sizeV*sizeof(long));
  memset(h_C, 127, sizeV*sizeof(long));
  // BFS starts from vertex 0 with distance 0.
  long pocetniCvor = 0;
  h_C[pocetniCvor] = 0;
  cout << "Alokacija device\n";
  long *d_E(NULL), *d_V(NULL), *d_C(NULL);
  cudaMalloc((void**)&d_E, sizeE*sizeof(long));
  cudaMalloc((void**)&d_V, sizeV*sizeof(long));
  cudaMalloc((void**)&d_C, sizeV*sizeof(long));
  cout << "Kopiranje na device\n";
  cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
  cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
  cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
  iteration = 0;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaDeviceSynchronize();
  cout << "pocinje BFS paralelni\n";
  clock_t p1 = clock();
  cudaEventRecord(start);
  do
  {
    doneI = 1;  // kernel_1_Atomics zeroes this when any distance improves
    int threadsPerBlock = 256 < sizeV ? 256 : sizeV;
    int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
    kernel_1_Atomics << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
    cudaDeviceSynchronize();
    iteration++;
  } while (!doneI);
  cudaEventRecord(stop);
  double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
  cudaEventSynchronize(stop);
  float milliseconds = 0;
  double sec;
  cudaEventElapsedTime(&milliseconds, start, stop);
  sec = milliseconds / 1000.0;
  cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
  cout << endl << ": event: " << sec << " s\n";
  cout << "Copy C to host\n";
  cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
  _log1 << " ; " << endl;
  copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
  _log1 << endl;
  cout << "Oslobadjanje memorije\n";
  // Fix: the original leaked both timing events.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_E);
  cudaFree(d_V);
  cudaFree(d_C);
  free(h_C);
  cout << "done" << endl;
  _log1.close();
  return sec;
}
// Parallel BFS, dynamic-parallelism variant: one thread per vertex, the
// per-vertex work delegated to kernel_1_Dynamic; iterates until the global
// flag `done` survives a whole pass. The run is bracketed with
// cudaProfilerStart/Stop for profiling.
//   h_V / h_E   : CSR vertex offsets and edge endpoints (host memory)
//   sizeV/sizeE : vertex / edge counts
// Side effects: prints progress, appends the final distance array to _log1
// and closes it. Returns the event-measured kernel time in seconds.
extern "C" double paralelniBFS_1_Dynamic(long *h_V, long *h_E, long sizeV, long sizeE)
{
  cout << "paralelniBFS" << endl;
  cout << "Alokacija host\n";
  // Host cost array: memset with 127 gives 0x7F7F... per long, used as "infinity".
  long *h_C(NULL);
  h_C = (long*)malloc(sizeV*sizeof(long));
  memset(h_C, 127, sizeV*sizeof(long));
  // BFS starts from vertex 0 with distance 0.
  long pocetniCvor = 0;
  h_C[pocetniCvor] = 0;
  cout << "Alokacija device\n";
  long *d_E(NULL), *d_V(NULL), *d_C(NULL);
  cudaMalloc((void**)&d_E, sizeE*sizeof(long));
  cudaMalloc((void**)&d_V, sizeV*sizeof(long));
  cudaMalloc((void**)&d_C, sizeV*sizeof(long));
  cout << "Kopiranje na device\n";
  cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
  cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
  cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
  iteration = 0;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaDeviceSynchronize();
  cout << "pocinje BFS paralelni\n";
  clock_t p1 = clock();
  cudaProfilerStart();
  cudaEventRecord(start);
  do
  {
    done = true;  // kernel_1_Dynamic clears this when any distance improves
    int threadsPerBlock = 256 < sizeV ? 256 : sizeV;
    int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
    kernel_1_Dynamic << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
    cudaDeviceSynchronize();
    iteration++;
  } while (!done);
  cudaEventRecord(stop);
  cudaDeviceSynchronize();
  cudaProfilerStop();
  double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
  cudaEventSynchronize(stop);
  float milliseconds = 0;
  double sec;
  cudaEventElapsedTime(&milliseconds, start, stop);
  sec = milliseconds / 1000.0;
  cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
  cout << endl << ": event: " << sec << " s\n";
  cout << "Copy C to host\n";
  cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
  _log1 << " ; "<< endl;
  copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
  _log1 << endl;
  cout << "Oslobadjanje memorije\n";
  // Fix: the original leaked both timing events.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_E);
  cudaFree(d_V);
  cudaFree(d_C);
  free(h_C);
  cout << "done" << endl;
  _log1.close();
  return sec;
}
// Parallel BFS, baseline vertex-parallel variant (kernel_1) with profiler
// bracketing: one thread per vertex, iterating until the global flag `done`
// survives a whole pass.
//   h_V / h_E   : CSR vertex offsets and edge endpoints (host memory)
//   sizeV/sizeE : vertex / edge counts
// Side effects: prints progress, appends the final distance array to _log1
// and closes it. Returns the event-measured kernel time in seconds.
extern "C" double paralelniBFS_Red(long *h_V, long *h_E, long sizeV, long sizeE)
{
  cout << "paralelniBFS" << endl;
  cout << "Alokacija host\n";
  // Host cost array: memset with 127 gives 0x7F7F... per long, used as "infinity".
  long *h_C(NULL);
  h_C = (long*)malloc(sizeV*sizeof(long));
  memset(h_C, 127, sizeV*sizeof(long));
  // BFS starts from vertex 0 with distance 0.
  long pocetniCvor = 0;
  h_C[pocetniCvor] = 0;
  cout << "Alokacija device\n";
  long *d_E(NULL), *d_V(NULL), *d_C(NULL);
  cudaMalloc((void**)&d_E, sizeE*sizeof(long));
  cudaMalloc((void**)&d_V, sizeV*sizeof(long));
  cudaMalloc((void**)&d_C, sizeV*sizeof(long));
  cout << "Kopiranje na device\n";
  cudaMemcpy(d_E, h_E, sizeE*sizeof(long), cudaMemcpyHostToDevice);
  cudaMemcpy(d_V, h_V, sizeV*sizeof(long), cudaMemcpyHostToDevice);
  cudaMemcpy(d_C, h_C, sizeV*sizeof(long), cudaMemcpyHostToDevice);
  iteration = 0;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaDeviceSynchronize();
  cout << "pocinje BFS paralelni\n";
  clock_t p1 = clock();
  cudaProfilerStart();
  cudaEventRecord(start);
  do
  {
    done = true;  // kernel_1 clears this when any distance improves
    int threadsPerBlock = 256 < sizeV ? 256 : sizeV;
    int blocksPerGrid = (sizeV + threadsPerBlock - 1) / threadsPerBlock;
    kernel_1 << <blocksPerGrid, threadsPerBlock >> >(d_V, sizeV, d_E, sizeE, d_C);
    cudaDeviceSynchronize();
    iteration++;
  } while (!done);
  cudaEventRecord(stop);
  cudaDeviceSynchronize();
  cudaProfilerStop();
  double diff = (double)(clock() - p1) / CLOCKS_PER_SEC;
  cudaEventSynchronize(stop);
  float milliseconds = 0;
  double sec;
  cudaEventElapsedTime(&milliseconds, start, stop);
  sec = milliseconds / 1000.0;
  cout << endl << ": The time taken for paralel Breadth first search: " << diff << endl;
  cout << endl << ": event: " << sec << " s\n";
  cout << "Copy C to host\n";
  cudaMemcpy(h_C, d_C, sizeV*sizeof(long), cudaMemcpyDeviceToHost);
  _log1 << " ; " << endl;
  copy(h_C, h_C + sizeV, ostream_iterator<long>(_log1, " "));
  _log1 << endl;
  cout << "Oslobadjanje memorije\n";
  // Fix: the original leaked both timing events.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_E);
  cudaFree(d_V);
  cudaFree(d_C);
  free(h_C);
  cout << "done" << endl;
  _log1.close();
  return sec;
}
96858bff65cbcbbc41ff61679858550a877c9f2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/features2d/features2d.hpp>
#include "cuda_akaze.h"
#include "cudautils.h"
#include <hip/hip_fp16.h>
#define CONVROW_W 160
#define CONVCOL_W 32
#define CONVCOL_H 40
#define CONVCOL_S 8
#define SCHARR_W 32
#define SCHARR_H 16
#define NLDSTEP_W 32
#define NLDSTEP_H 13
#define ORIENT_S (13 * 16)
#define EXTRACT_S 64
__device__ __constant__ float d_Kernel[21];
__device__ unsigned int d_PointCounter[1];
__device__ unsigned int d_ExtremaIdx[16];
__device__ __constant__ int comp_idx_1[61 * 8];
__device__ __constant__ int comp_idx_2[61 * 8];
hipStream_t copyStream;
//__device__ __constant__ float norm_factors[29];
// Per-launch error check: with "#if 1" CHK expands to nothing (release mode).
// Flip to the "#else" branch to synchronize after every kernel launch and
// print any pending HIP error together with the call site's file and line.
#if 1
#define CHK
#else
#define CHK hipDeviceSynchronize(); \
{ \
hipError_t cuerr = hipGetLastError(); \
if (cuerr) { \
std::cout << "Cuda error " << hipGetErrorString(cuerr) << ". at " << __FILE__ << ":" << __LINE__ << std::endl; \
} \
}
#endif
// Blocks the host until all asynchronous work queued on copyStream (the
// dedicated copy stream created in AllocBuffers) has completed.
void WaitCuda() {
hipStreamSynchronize(copyStream);
}
// Argument pack for the separable-convolution kernels (one struct so each
// launch passes a single parameter).
struct Conv_t {
float *d_Result;  // device output image
float *d_Data;  // device input image
int width;  // image width in pixels
int pitch;  // row stride in elements
int height;  // image height in pixels
};
// Row pass of a separable convolution with clamp-to-edge borders.
// Launch: one block per (CONVROW_W-wide strip, row), with
// CONVROW_W + 2*RADIUS threads so the halo is staged alongside the strip.
// The (2*RADIUS+1)-tap filter is read from constant memory d_Kernel.
template <int RADIUS>
__global__ void ConvRowGPU(struct Conv_t s) {
//__global__ void ConvRowGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVROW_W + 2 * RADIUS];
const int tx = threadIdx.x;
const int minx = blockIdx.x * CONVROW_W;
const int maxx = min(minx + CONVROW_W, s.width);
const int yptr = blockIdx.y * s.pitch;
const int loadPos = minx + tx - RADIUS;
const int writePos = minx + tx;
// Stage one sample per thread; reads outside the row clamp to its ends.
if (loadPos < 0)
data[tx] = s.d_Data[yptr];
else if (loadPos >= s.width)
data[tx] = s.d_Data[yptr + s.width - 1];
else
data[tx] = s.d_Data[yptr + loadPos];
__syncthreads();
// Only the first CONVROW_W threads produce an output sample.
if (writePos < maxx && tx < CONVROW_W) {
float sum = 0.0f;
for (int i = 0; i <= (2 * RADIUS); i++) sum += data[tx + i] * d_Kernel[i];
s.d_Result[yptr + writePos] = sum;
}
}
///////////////////////////////////////////////////////////////////////////////
// Column convolution filter
///////////////////////////////////////////////////////////////////////////////
// Column pass of a separable convolution with clamp-to-edge borders.
// Each block covers a CONVCOL_W x CONVCOL_H output tile; CONVCOL_S thread
// rows cooperatively stream the tile plus a RADIUS-row halo through shared
// memory. Taps come from constant memory d_Kernel.
template <int RADIUS>
__global__ void ConvColGPU(struct Conv_t s) {
//__global__ void ConvColGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVCOL_W * (CONVCOL_H + 2 * RADIUS)];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int miny = blockIdx.y * CONVCOL_H;
const int maxy = min(miny + CONVCOL_H, s.height) - 1;
const int totStart = miny - RADIUS;
const int totEnd = maxy + RADIUS;
const int colStart = blockIdx.x * CONVCOL_W + tx;
const int colEnd = colStart + (s.height - 1) * s.pitch;
const int smemStep = CONVCOL_W * CONVCOL_S;
const int gmemStep = s.pitch * CONVCOL_S;
// Stage the column segment (with halo); rows outside the image clamp to
// the first/last row of the column.
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (totStart + ty) * s.pitch;
for (int y = totStart + ty; y <= totEnd; y += blockDim.y) {
if (y < 0)
data[smemPos] = s.d_Data[colStart];
else if (y >= s.height)
data[smemPos] = s.d_Data[colEnd];
else
data[smemPos] = s.d_Data[gmemPos];
smemPos += smemStep;
gmemPos += gmemStep;
}
}
__syncthreads();
// Convolve vertically and write the tile.
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (miny + ty) * s.pitch;
for (int y = miny + ty; y <= maxy; y += blockDim.y) {
float sum = 0.0f;
for (int i = 0; i <= 2 * RADIUS; i++)
sum += data[smemPos + i * CONVCOL_W] * d_Kernel[i];
s.d_Result[gmemPos] = sum;
smemPos += smemStep;
gmemPos += gmemStep;
}
}
}
// Two-pass separable convolution: rows of `inimg` into `temp`, then columns
// of `temp` into `outimg`, using the (2*RADIUS+1)-tap kernel h_Kernel which
// is first uploaded to constant memory. Returns 0 (timing is disabled).
template <int RADIUS>
double SeparableFilter(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
                       float *h_Kernel) {
  const int width = inimg.width;
  const int pitch = inimg.pitch;
  const int height = inimg.height;
  float *d_DataA = inimg.d_data;
  float *d_DataB = outimg.d_data;
  float *d_Temp = temp.d_data;
  if (d_DataA == NULL || d_DataB == NULL || d_Temp == NULL) {
    printf("SeparableFilter: missing data\n");
    return 0.0;
  }
  // Upload the filter taps to constant memory.
  const unsigned int kernelSize = (2 * RADIUS + 1) * sizeof(float);
  safeCall(hipMemcpyToSymbolAsync(d_Kernel, h_Kernel, kernelSize));
  Conv_t args;
  args.width = width;
  args.pitch = pitch;
  args.height = height;
  // Horizontal pass: inimg -> temp.
  args.d_Data = d_DataA;
  args.d_Result = d_Temp;
  dim3 rowGrid(iDivUp(width, CONVROW_W), height);
  dim3 rowBlock(CONVROW_W + 2 * RADIUS);
  ConvRowGPU<RADIUS> << <rowGrid, rowBlock>>> (args);
  // Vertical pass: temp -> outimg.
  args.d_Data = d_Temp;
  args.d_Result = d_DataB;
  dim3 colGrid(iDivUp(width, CONVCOL_W), iDivUp(height, CONVCOL_H));
  dim3 colBlock(CONVCOL_W, CONVCOL_S);
  ConvColGPU<RADIUS> << <colGrid, colBlock>>> (args);
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("SeparableFilter time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Gaussian low-pass filter: builds a normalized (2*RADIUS+1)-tap Gaussian
// for the given variance and runs the separable filter with it.
template <int RADIUS>
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
               double var) {
  float taps[2 * RADIUS + 1];
  float total = 0.0f;
  for (int j = -RADIUS; j <= RADIUS; j++) {
    const float w = (float)expf(-(double)j * j / 2.0 / var);
    taps[j + RADIUS] = w;
    total += w;
  }
  // Normalize so the taps sum to one.
  for (int j = -RADIUS; j <= RADIUS; j++) taps[j + RADIUS] /= total;
  return SeparableFilter<RADIUS>(inimg, outimg, temp, taps);
}
// Runtime dispatch of the requested kernel size onto the matching
// compile-time radius; sizes above 11 fall back to radius 5 with a warning.
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp, double var,
               int kernsize) {
  if (kernsize <= 5) return LowPass<2>(inimg, outimg, temp, var);
  if (kernsize <= 7) return LowPass<3>(inimg, outimg, temp, var);
  if (kernsize <= 9) return LowPass<4>(inimg, outimg, temp, var);
  if (kernsize > 11)
    std::cerr << "Kernels larger than 11 not implemented" << std::endl;
  return LowPass<5>(inimg, outimg, temp, var);
}
// 3x3 Scharr derivatives (weights 3/10/3): writes d/dx into lxd and d/dy
// into lyd. Blocks are launched with (SCHARR_W+2) x (SCHARR_H+2) threads so
// the 1-pixel apron is staged into shared memory too; out-of-range source
// coordinates are mirrored one pixel inward.
__global__ void Scharr(float *imgd, float *lxd, float *lyd, int width,
int pitch, int height) {
#define BW (SCHARR_W + 2)
__shared__ float buffer[BW * (SCHARR_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * SCHARR_W + tx;
int y = blockIdx.y * SCHARR_H + ty;
// Source coordinates shifted by -1 with mirrored borders.
int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
buffer[ty * BW + tx] = imgd[yp * pitch + xp];
__syncthreads();
// Only the inner SCHARR_W x SCHARR_H threads write output.
if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
float *b = buffer + (ty + 1) * BW + (tx + 1);
float ul = b[-BW - 1];
float ur = b[-BW + 1];
float ll = b[+BW - 1];
float lr = b[+BW + 1];
lxd[y * pitch + x] = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
lyd[y * pitch + x] = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
}
}
// Host wrapper: launches the Scharr kernel with a 1-pixel apron per block.
// Returns 0 (timing is disabled).
double Scharr(CudaImage &img, CudaImage &lx, CudaImage &ly) {
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);
  Scharr << <grid, block>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                             img.pitch, img.height);
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("Scharr time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Conductivity image for nonlinear diffusion: computes the Scharr gradient
// of imgd (same shared-memory staging as the Scharr kernel) and evaluates
// the selected diffusivity at dif2 = |grad|^2 / k^2 (invk = 1/k^2):
//   PM_G1: exp(-dif2); PM_G2: 1/(1+dif2);
//   WEICKERT: 1 - exp(-3.315/dif2^4); otherwise: 1/sqrt(1+dif2).
__global__ void Flow(float *imgd, float *flowd, int width, int pitch,
int height, DIFFUSIVITY_TYPE type, float invk) {
#define BW (SCHARR_W + 2)
__shared__ float buffer[BW * (SCHARR_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * SCHARR_W + tx;
int y = blockIdx.y * SCHARR_H + ty;
// Source coordinates shifted by -1 with mirrored borders.
int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
buffer[ty * BW + tx] = imgd[yp * pitch + xp];
__syncthreads();
if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
float *b = buffer + (ty + 1) * BW + (tx + 1);
float ul = b[-BW - 1];
float ur = b[-BW + 1];
float ll = b[+BW - 1];
float lr = b[+BW + 1];
float lx = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
float ly = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
float dif2 = invk * (lx * lx + ly * ly);
if (type == PM_G1)
flowd[y * pitch + x] = exp(-dif2);
else if (type == PM_G2)
flowd[y * pitch + x] = 1.0f / (1.0f + dif2);
else if (type == WEICKERT)
flowd[y * pitch + x] = 1.0f - exp(-3.315 / (dif2 * dif2 * dif2 * dif2));
else
flowd[y * pitch + x] = 1.0f / sqrt(1.0f + dif2);
}
}
// Host wrapper: evaluates the conductivity image g(|grad L|) for the given
// diffusivity type, passing 1/k^2 to the kernel. Returns 0 (timing disabled).
double Flow(CudaImage &img, CudaImage &flow, DIFFUSIVITY_TYPE type,
            float kcontrast) {
  const float invk = 1.0f / (kcontrast * kcontrast);
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);
  Flow << <grid, block>>> (img.d_data, flow.d_data, img.width, img.pitch,
                           img.height, type, invk);
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("Flow time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Argument pack for the NLDStep kernel.
struct NLDStep_t {
float *imgd;  // evolving image L
float *flod;  // conductivity image g
float *temd;  // output: diffusion increment
int width;
int pitch;  // row stride in elements
int height;
float stepsize;  // already halved by the host wrapper (0.5 * stepsize)
};
//__global__ void NLDStep(float *imgd, float *flod, float *temd, int width, int
// pitch, int height, float stepsize)
// One explicit nonlinear-diffusion step: for each pixel computes the flux
// divergence from the conductivities in flod and writes stepsize * div into
// temd (the image itself is updated separately by NLDUpdate). Blocks stage
// an (NLDSTEP_W+2) x (NLDSTEP_H+2) apron of both images in shared memory;
// out-of-range coordinates clamp to the nearest valid pixel.
__global__ void NLDStep(NLDStep_t s) {
#undef BW
#define BW (NLDSTEP_W + 2)
__shared__ float ibuff[BW * (NLDSTEP_H + 2)];
__shared__ float fbuff[BW * (NLDSTEP_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * NLDSTEP_W + tx;
int y = blockIdx.y * NLDSTEP_H + ty;
// Apron load coordinates (shifted by -1, clamped at the borders).
int xp = (x == 0 ? 0 : (x > s.width ? s.width - 1 : x - 1));
int yp = (y == 0 ? 0 : (y > s.height ? s.height - 1 : y - 1));
ibuff[ty * BW + tx] = s.imgd[yp * s.pitch + xp];
fbuff[ty * BW + tx] = s.flod[yp * s.pitch + xp];
__syncthreads();
if (tx < NLDSTEP_W && ty < NLDSTEP_H && x < s.width && y < s.height) {
float *ib = ibuff + (ty + 1) * BW + (tx + 1);
float *fb = fbuff + (ty + 1) * BW + (tx + 1);
float ib0 = ib[0];
float fb0 = fb[0];
// Flux terms in the four axis directions, weighted by summed conductivity.
float xpos = (fb0 + fb[+1]) * (ib[+1] - ib0);
float xneg = (fb0 + fb[-1]) * (ib0 - ib[-1]);
float ypos = (fb0 + fb[+BW]) * (ib[+BW] - ib0);
float yneg = (fb0 + fb[-BW]) * (ib0 - ib[-BW]);
s.temd[y * s.pitch + x] = s.stepsize * (xpos - xneg + ypos - yneg);
}
}
// Argument pack for the NLDUpdate kernel.
struct NLDUpdate_t {
float *imgd;  // image updated in place
float *temd;  // increment computed by NLDStep
int width;
int pitch;  // row stride in elements
int height;
};
//__global__ void NLDUpdate(float *imgd, float *temd, int width, int pitch, int
// height)
// Adds the diffusion increment in temd onto the image, element-wise.
// Launched with 32x16 blocks, one thread per pixel.
__global__ void NLDUpdate(NLDUpdate_t s) {
  const int col = blockIdx.x * 32 + threadIdx.x;
  const int row = blockIdx.y * 16 + threadIdx.y;
  if (col >= s.width || row >= s.height) return;
  const int idx = row * s.pitch + col;
  s.imgd[idx] += s.temd[idx];
}
// Host wrapper for one nonlinear-diffusion step: computes the increment
// into `temp` (with the step size halved, matching the flux weights), then
// adds it onto `img` in place. Returns 0 (timing is disabled).
double NLDStep(CudaImage &img, CudaImage &flow, CudaImage &temp,
               float stepsize) {
  // Pass 1: diffusion increment into temp.
  NLDStep_t stepArgs;
  stepArgs.imgd = img.d_data;
  stepArgs.flod = flow.d_data;
  stepArgs.temd = temp.d_data;
  stepArgs.width = img.width;
  stepArgs.pitch = img.pitch;
  stepArgs.height = img.height;
  stepArgs.stepsize = 0.5 * stepsize;
  dim3 stepGrid(iDivUp(img.width, NLDSTEP_W), iDivUp(img.height, NLDSTEP_H));
  dim3 stepBlock(NLDSTEP_W + 2, NLDSTEP_H + 2);
  NLDStep << <stepGrid, stepBlock>>> (stepArgs);
  // Pass 2: apply the increment in place.
  NLDUpdate_t updArgs;
  updArgs.imgd = img.d_data;
  updArgs.temd = temp.d_data;
  updArgs.width = img.width;
  updArgs.height = img.height;
  updArgs.pitch = img.pitch;
  dim3 updGrid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 updBlock(32, 16);
  NLDUpdate << <updGrid, updBlock>>> (updArgs);
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("NLDStep time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// General downsample of iimd (iwidth x iheight) to roughly half resolution
// oimd (owidth x oheight), launched with 16x16 blocks and one thread per
// output pixel. When a dimension is exactly halved a 2-tap box sum is used;
// otherwise a 3-tap scheme with weights (f0, owidth|oheight, f2) handles the
// non-integer ratio. Horizontal sums are staged in shared memory, then
// combined vertically; the final division normalizes the accumulated
// weights.
// NOTE(review): threads outside the output return before __syncthreads() -
// a divergent barrier; confirm behavior on the target architecture.
__global__ void HalfSample(float *iimd, float *oimd, int iwidth, int iheight,
                           int ipitch, int owidth, int oheight, int opitch) {
  __shared__ float buffer[16 * 33];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * 16 + tx;
  int y = blockIdx.y * 16 + ty;
  if (x >= owidth || y >= oheight) return;
  float *ptri = iimd + (2 * y) * ipitch + (2 * x);
  if (2 * owidth == iwidth) {
    // Exact 2x horizontally: two-tap sums for two (or three) source rows.
    buffer[ty * 32 + tx] = owidth * (ptri[0] + ptri[1]);
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = owidth * (ptri[0] + ptri[1]);
    if (ty == 15) {
      ptri += ipitch;
      buffer[tx + 32 * 16] = owidth * (ptri[0] + ptri[1]);
    } else if (y * 2 + 3 == iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] = owidth * (ptri[0] + ptri[1]);
    }
  } else {
    // Non-integer horizontal ratio: three-tap weights f0 / owidth / f2.
    float f0 = owidth - x;
    float f2 = 1 + x;
    buffer[ty * 32 + tx] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    if (ty == 15 && 2 * oheight != iheight) {
      ptri += ipitch;
      // Fix: the last tap must read ptri[2]; the original read ptri[1],
      // inconsistent with the three sibling rows of this scheme.
      buffer[tx + 32 * 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    } else if (y * 2 + 3 == iheight && 2 * oheight != iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] =
          f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    }
  }
  __syncthreads();
  // Vertical combine from the shared staging buffer.
  float *buff = buffer + 32 * ty + tx;
  if (2 * oheight == iheight)
    oimd[y * opitch + x] = oheight * (buff[0] + buff[16]) / (iwidth * iheight);
  else {
    float f0 = oheight - y;
    float f2 = 1 + y;
    oimd[y * opitch + x] = (f0 * buff[0] + oheight * buff[16] + f2 * buff[32]) /
                           (iwidth * iheight);
  }
}
// Exact 2x downsample: each output pixel is the mean of the corresponding
// 2x2 input block. Launched with 32x16 blocks, one thread per output pixel.
__global__ void HalfSample2(float *iimd, float *oimd, int ipitch, int owidth,
                            int oheight, int opitch) {
  const int ox = blockIdx.x * 32 + threadIdx.x;
  const int oy = blockIdx.y * 16 + threadIdx.y;
  if (ox >= owidth || oy >= oheight) return;
  const float *src = iimd + (2 * oy) * ipitch + (2 * ox);
  oimd[oy * opitch + ox] =
      0.25f * (src[0] + src[1] + src[ipitch + 0] + src[ipitch + 1]);
}
// Host wrapper: picks the fast 2x2 box average when both dimensions are
// exactly halved, otherwise the general weighted resampler. Returns 0
// (timing is disabled).
double HalfSample(CudaImage &inimg, CudaImage &outimg) {
  const bool exactHalf =
      (inimg.width == 2 * outimg.width) && (inimg.height == 2 * outimg.height);
  if (exactHalf) {
    dim3 grid(iDivUp(outimg.width, 32), iDivUp(outimg.height, 16));
    dim3 block(32, 16);
    HalfSample2 << <grid, block>>> (inimg.d_data, outimg.d_data, inimg.pitch,
                                    outimg.width, outimg.height, outimg.pitch);
  } else {
    dim3 grid(iDivUp(outimg.width, 16), iDivUp(outimg.height, 16));
    dim3 block(16, 16);
    HalfSample << <grid, block>>> (inimg.d_data, outimg.d_data, inimg.width,
                                   inimg.height, inimg.pitch, outimg.width,
                                   outimg.height, outimg.pitch);
  }
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("HalfSample time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Asynchronous device-to-device copy of inimg into outimg (widths in pixels,
// pitches in elements). Returns 0 (timing is disabled).
double Copy(CudaImage &inimg, CudaImage &outimg) {
  double gpuTime = 0;  // timing disabled
  // Fix: the source pitch must come from the source image - the original
  // passed outimg.pitch for both, which reads the wrong rows whenever the
  // two pitches differ.
  safeCall(hipMemcpy2DAsync(outimg.d_data, sizeof(float) * outimg.pitch,
                            inimg.d_data, sizeof(float) * inimg.pitch,
                            sizeof(float) * inimg.width, inimg.height,
                            hipMemcpyDeviceToDevice));
#ifdef VERBOSE
  printf("Copy time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Carves ONE pitched device allocation into: the num x omax scale-space
// images (resolution halved per octave), two keypoint arrays, a descriptor
// buffer, a float scratch buffer, a per-point index buffer and space for the
// CudaImage headers. `size` counts float elements; each section's offset is
// recorded before its length is added. Also rounds maxpts up to a multiple
// of 4, uploads the descriptor comparison indices and creates copyStream.
// Returns the base pointer (to be released with FreeBuffers).
float *AllocBuffers(int width, int height, int num, int omax, int &maxpts,
                    std::vector<CudaImage> &buffers, cv::KeyPoint *&pts,
                    cv::KeyPoint *&ptsbuffer, int *&ptindices, unsigned char *&desc, float *&descbuffer, CudaImage *&ims) {
  maxpts = 4 * ((maxpts+3)/4);
  buffers.resize(omax * num);
  int w = width;
  int h = height;
  int p = iAlignUp(w, 128);
  int size = 0;
  for (int i = 0; i < omax; i++) {
    for (int j = 0; j < num; j++) {
      CudaImage &buf = buffers[i * num + j];
      buf.width = w;
      buf.height = h;
      buf.pitch = p;
      // Stash the element offset in the pointer; rebased after allocation.
      // NOTE(review): the (long) casts assume long is pointer-sized (LP64);
      // on LLP64 platforms this truncates - confirm target platforms.
      buf.d_data = (float *)((long)size);
      size += h * p;
    }
    w /= 2;
    h /= 2;
    p = iAlignUp(w, 128);
  }
  int ptsstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int ptsbufferstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int descstart = size;
  size += sizeof(unsigned char)*maxpts*61/sizeof(float);
  int descbufferstart = size;
  size += sizeof(float)*3*29*maxpts / sizeof(float);
  int indicesstart = size;
  size += 21*21*sizeof(int)*maxpts/sizeof(float);
  int imgstart = size;
  size += sizeof(CudaImage) * (num * omax + sizeof(float) - 1) / sizeof(float);
  float *memory = NULL;
  size_t pitch;
  // NOTE(review): `size` counts float elements, so the printed figure is a
  // quarter of the actual MBytes - confirm whether that is intended.
  std::cout << "allocating " << size/1024./1024. << " Mbytes of gpu memory\n";
  safeCall(hipMallocPitch((void **)&memory, &pitch, (size_t)4096,
                          (size + 4095) / 4096 * sizeof(float)));
  // Rebase the pyramid images onto the real allocation.
  for (int i = 0; i < omax * num; i++) {
    CudaImage &buf = buffers[i];
    buf.d_data = memory + (long)buf.d_data;
  }
  pts = (cv::KeyPoint *)(memory + ptsstart);
  ptsbuffer = (cv::KeyPoint *)(memory + ptsbufferstart);
  desc = (unsigned char *)(memory + descstart);
  descbuffer = (float*)(memory + descbufferstart);
  ptindices = (int*)(memory + indicesstart);
  ims = (CudaImage *)(memory + imgstart);
  InitCompareIndices();
  // Fix: the original line was encoding-corrupted ("©Stream" instead of
  // "&copyStream"), which does not compile.
  hipStreamCreate(&copyStream);
  return memory;
}
void FreeBuffers(float *buffers) { safeCall(hipFree(buffers)); }
__device__ unsigned int d_Maxval[1];
__device__ int d_Histogram[512];
#define CONTRAST_W 64
#define CONTRAST_H 7
#define HISTCONT_W 64
#define HISTCONT_H 8
#define HISTCONT_R 4
// Scharr gradient magnitude of imgd into cond (interior pixels only) while
// reducing the global maximum magnitude into d_Maxval. Floats are compared
// through atomicMax on their bit patterns, which preserves ordering for the
// non-negative magnitudes produced here.
__global__ void MaxContrast(float *imgd, float *cond, int width, int pitch,
int height) {
#define WID (CONTRAST_W + 2)
__shared__ float buffer[WID * (CONTRAST_H + 2)];
__shared__ unsigned int maxval[32];
int tx = threadIdx.x;
int ty = threadIdx.y;
// 0.0f converts to 0u; maxval holds float bit patterns as unsigned ints.
if (tx < 32 && !ty) maxval[tx] = 0.0f;
__syncthreads();
int x = blockIdx.x * CONTRAST_W + tx;
int y = blockIdx.y * CONTRAST_H + ty;
// NOTE(review): this early return skips the __syncthreads() below for
// out-of-range threads (divergent barrier) - confirm on target hardware.
if (x >= width || y >= height) return;
float *b = buffer + ty * WID + tx;
b[0] = imgd[y * pitch + x];
__syncthreads();
if (tx < CONTRAST_W && ty < CONTRAST_H && x < width - 2 && y < height - 2) {
float dx = 3.0f * (b[0] - b[2] + b[2 * WID] - b[2 * WID + 2]) +
10.0f * (b[WID] - b[WID + 2]);
float dy = 3.0f * (b[0] + b[2] - b[2 * WID] - b[2 * WID + 2]) +
10.0f * (b[1] - b[2 * WID + 1]);
float grad = sqrt(dx * dx + dy * dy);
cond[(y + 1) * pitch + (x + 1)] = grad;
unsigned int *gradi = (unsigned int *)&grad;
atomicMax(maxval + (tx & 31), *gradi);
}
__syncthreads();
// Fold the per-lane partial maxima into the global symbol.
if (tx < 32 && !ty) atomicMax(d_Maxval, maxval[tx]);
}
// Histograms the gradient magnitudes in cond (interior pixels only) into
// d_Histogram, using nbins bins and imaxval = 1/max as the scale. Each block
// builds a shared-memory histogram (each thread covering HISTCONT_R rows
// spaced HISTCONT_H apart) and merges it into the global one at the end.
__global__ void HistContrast(float *cond, int width, int pitch, int height,
float imaxval, int nbins) {
__shared__ int hist[512];
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = ty * HISTCONT_W + tx;
if (i < nbins) hist[i] = 0;
__syncthreads();
int x = blockIdx.x * HISTCONT_W + tx;
int y = blockIdx.y * HISTCONT_H * HISTCONT_R + ty;
if (x > 0 && x < width - 1) {
for (int i = 0; i < HISTCONT_R; i++) {
if (y > 0 && y < height - 1) {
// Clamp the bin index so values at the maximum land in the last bin.
int idx = min((int)(nbins * cond[y * pitch + x] * imaxval), nbins - 1);
atomicAdd(hist + idx, 1);
}
y += HISTCONT_H;
}
}
__syncthreads();
// Merge the block-local histogram into the global one.
if (i < nbins && hist[i] > 0) atomicAdd(d_Histogram + i, hist[i]);
}
// Estimates the contrast factor k as the perc-percentile of the gradient
// magnitude distribution: blur the image, find the maximum gradient, build
// an nbins histogram of magnitudes, then walk it until the requested
// fraction of interior pixels is covered. Falls back to 0.03f when the
// histogram never reaches the threshold. Result is written to `contrast`.
double ContrastPercentile(CudaImage &img, CudaImage &temp, CudaImage &blur,
float perc, int nbins, float &contrast) {
// TimerGPU timer0(0);
LowPass(img, blur, temp, 1.0f, 5);
// Reset the device-side maximum before the reduction.
float h_Maxval = 0.0f;
safeCall(hipMemcpyToSymbolAsync(d_Maxval, &h_Maxval, sizeof(float)));
dim3 blocks1(iDivUp(img.width, CONTRAST_W), iDivUp(img.height, CONTRAST_H));
dim3 threads1(CONTRAST_W + 2, CONTRAST_H + 2);
MaxContrast << <blocks1, threads1>>>
(blur.d_data, temp.d_data, blur.width, blur.pitch, blur.height);
// checkMsg("MaxContrast() execution failed\n");
// safeCall(hipDeviceSynchronize());
safeCall(hipMemcpyFromSymbolAsync(&h_Maxval, d_Maxval, sizeof(float)));
if (nbins > 512) {
printf(
"Warning: Largest number of possible bins in ContrastPercentile() is "
"512\n");
nbins = 512;
}
int h_Histogram[512];
memset(h_Histogram, 0, nbins * sizeof(int));
safeCall(
hipMemcpyToSymbolAsync(d_Histogram, h_Histogram, nbins * sizeof(int)));
dim3 blocks2(iDivUp(temp.width, HISTCONT_W),
iDivUp(temp.height, HISTCONT_H * HISTCONT_R));
dim3 threads2(HISTCONT_W, HISTCONT_H);
HistContrast << <blocks2, threads2>>> (temp.d_data, temp.width, temp.pitch,
temp.height, 1.0f / h_Maxval, nbins);
safeCall(
hipMemcpyFromSymbolAsync(h_Histogram, d_Histogram, nbins * sizeof(int)));
// Walk the histogram until perc of the interior pixels is covered.
int npoints = (temp.width - 2) * (temp.height - 2);
int nthreshold = (int)(npoints * perc);
int k = 0, nelements = 0;
for (k = 0; nelements < nthreshold && k < nbins; k++)
nelements += h_Histogram[k];
contrast = (nelements < nthreshold ? 0.03f : h_Maxval * ((float)k / nbins));
double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
printf("ContrastPercentile time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
// First derivatives at scale `step`: lxd = d/dx and lyd = d/dy of imd, using
// samples offset by +-step with weights fac1 (diagonal) and fac2 (axial).
// Out-of-range sample coordinates are mirrored back into the image.
// Launched with 32x16 blocks, one thread per pixel.
__global__ void Derivate(float *imd, float *lxd, float *lyd, int width,
int pitch, int height, int step, float fac1,
float fac2) {
int x = blockIdx.x * 32 + threadIdx.x;
int y = blockIdx.y * 16 + threadIdx.y;
if (x >= width || y >= height) return;
// Mirrored neighbor coordinates at distance `step`.
int xl = (x < step ? step - x : x - step);
int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
int yl = (y < step ? step - y : y - step);
int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
float ul = imd[yl * pitch + xl];
float ur = imd[yl * pitch + xh];
float ll = imd[yh * pitch + xl];
float lr = imd[yh * pitch + xh];
float cl = imd[y * pitch + xl];
float cr = imd[y * pitch + xh];
lxd[y * pitch + x] = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
float uc = imd[yl * pitch + x];
float lc = imd[yh * pitch + x];
lyd[y * pitch + x] = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
}
// Determinant of the Hessian from the first-derivative images: differentiates
// lxd to get Lxx and Lxy, and lyd to get Lyy (same mirrored +-step scheme as
// Derivate), then writes Lxx*Lyy - Lxy^2 into detd.
__global__ void HessianDeterminant(float *lxd, float *lyd, float *detd,
int width, int pitch, int height, int step,
float fac1, float fac2) {
int x = blockIdx.x * 32 + threadIdx.x;
int y = blockIdx.y * 16 + threadIdx.y;
if (x >= width || y >= height) return;
// Mirrored neighbor coordinates at distance `step`.
int xl = (x < step ? step - x : x - step);
int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
int yl = (y < step ? step - y : y - step);
int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
float ul = lxd[yl * pitch + xl];
float ur = lxd[yl * pitch + xh];
float ll = lxd[yh * pitch + xl];
float lr = lxd[yh * pitch + xh];
float cl = lxd[y * pitch + xl];
float cr = lxd[y * pitch + xh];
// Lxx and Lxy from the x-derivative image.
float lxx = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
float uc = lxd[yl * pitch + x];
float lc = lxd[yh * pitch + x];
float lyx = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
// Lyy from the y-derivative image.
ul = lyd[yl * pitch + xl];
ur = lyd[yl * pitch + xh];
ll = lyd[yh * pitch + xl];
lr = lyd[yh * pitch + xh];
uc = lyd[yl * pitch + x];
lc = lyd[yh * pitch + x];
float lyy = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
detd[y * pitch + x] = lxx * lyy - lyx * lyx;
}
// Host wrapper: computes Lx/Ly from the image, then overwrites the image
// buffer with det(H) = Lxx*Lyy - Lxy^2. The derivative weights use w = 10/3
// as in the KAZE scheme. Returns 0 (timing is disabled).
double HessianDeterminant(CudaImage &img, CudaImage &lx, CudaImage &ly,
                          int step) {
  float w = 10.0 / 3.0;
  float fac1 = 1.0 / (2.0 * (w + 2.0));  // diagonal weight
  float fac2 = w * fac1;                 // axial weight
  dim3 grid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 block(32, 16);
  // First derivatives into lx / ly ...
  Derivate << <grid, block>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                               img.pitch, img.height, step, fac1, fac2);
  // ... then det(H) written back over the image.
  HessianDeterminant << <grid, block>>> (lx.d_data, ly.d_data, img.d_data,
                                         img.width, img.pitch, img.height,
                                         step, fac1, fac2);
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("HessianDeterminant time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Detects determinant-of-Hessian maxima: a pixel is a candidate when its
// response exceeds dthreshold and all eight direct neighbours. A 2x2 Newton
// step yields the sub-pixel offset (dst0, dst1); candidates whose offset
// leaves [-1, 1] are kept but flagged "weak" through a negative size.
// Accepted points are appended to pts with an atomic counter; when the
// buffer is full the reserved slot is released again.
// NOTE(review): imp and imn (adjacent scales) are unused here - confirm
// whether a cross-scale comparison was intended.
__global__ void FindExtrema(float *imd, float *imp, float *imn, int maxx,
int pitch, int maxy, float border, float dthreshold,
int scale, int octave, float size,
cv::KeyPoint *pts, int maxpts) {
int x = blockIdx.x * 32 + threadIdx.x;
int y = blockIdx.y * 16 + threadIdx.y;
// Reject pixels whose border-sized neighbourhood leaves the image.
int left_x = (int)(x - border + 0.5f) - 1;
int right_x = (int)(x + border + 0.5f) + 1;
int up_y = (int)(y - border + 0.5f) - 1;
int down_y = (int)(y + border + 0.5f) + 1;
if (left_x < 0 || right_x >= maxx || up_y < 0 || down_y >= maxy) return;
int p = y * pitch + x;
float v = imd[p];
// Strict 8-neighbour maximum above the detector threshold.
if (v > dthreshold && v > imd[p - pitch - 1] && v > imd[p + pitch + 1] &&
v > imd[p + pitch - 1] && v > imd[p - pitch + 1] && v > imd[p - 1] &&
v > imd[p + 1] && v > imd[p + pitch] && v > imd[p - pitch]) {
// 2x2 Newton refinement: solve H * d = -g for the sub-pixel offset.
float dx = 0.5f * (imd[p + 1] - imd[p - 1]);
float dy = 0.5f * (imd[p + pitch] - imd[p - pitch]);
float dxx = imd[p + 1] + imd[p - 1] - 2.0f * v;
float dyy = imd[p + pitch] + imd[p - pitch] - 2.0f * v;
float dxy = 0.25f * (imd[p + pitch + 1] + imd[p - pitch - 1] -
imd[p + pitch - 1] - imd[p - pitch + 1]);
float det = dxx * dyy - dxy * dxy;
float idet = (det != 0.0f ? 1.0f / det : 0.0f);
float dst0 = idet * (dxy * dy - dyy * dx);
float dst1 = idet * (dxy * dx - dxx * dy);
bool weak = true;
if (dst0 >= -1.0f && dst0 <= 1.0f && dst1 >= -1.0f && dst1 <= 1.0f) {
weak = 0;
}
// Reserve an output slot; release it again if the buffer is full.
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
if (idx < maxpts) {
cv::KeyPoint &point = pts[idx];
point.response = v;
// Weak points are marked with a negative size.
point.size = (weak ? -1 : 1) * 2.0 * size;
// Pack octave and |x-offset| into the octave field (sign from dst0).
float octsub = (dst0 < 0 ? -1 : 1) * (octave + fabs(dst0));
*(float *)(&point.octave) = (weak ? octave : octsub);
point.class_id = scale;
int ratio = (1 << octave);
point.pt.x = ratio * (x);
point.pt.y = ratio * (y);
// The y-offset is stashed in the angle field for later refinement.
point.angle = dst1;
} else {
atomicAdd(d_PointCounter,-1);
}
}
}
// Snapshot the running keypoint count as the end index for this scale, so
// later kernels can locate each scale's range in the point array.
// Launched with a single thread (<<<1, 1>>>).
__global__ void CopyIdxArray(int scale) {
  d_ExtremaIdx[scale] = d_PointCounter[0];
}
// Host wrapper: detect extrema for one scale and record the cumulative
// keypoint count for that scale in d_ExtremaIdx.
// Returns the measured GPU time (currently always 0; the timer is disabled).
double FindExtrema(CudaImage &img, CudaImage &imgp, CudaImage &imgn,
                   float border, float dthreshold, int scale, int octave,
                   float size, cv::KeyPoint *pts, int maxpts) {
  // One 32x16 thread tile per image region.
  const dim3 threads(32, 16);
  const dim3 blocks(iDivUp(img.width, 32), iDivUp(img.height, 16));
  FindExtrema << <blocks, threads>>>
      (img.d_data, imgp.d_data, imgn.d_data, img.width, img.pitch, img.height,
       border, dthreshold, scale, octave, size, pts, maxpts);
  // Record where this scale's keypoints end in the global array.
  CopyIdxArray << <1, 1>>> (scale);
  CHK
  double gpuTime = 0;
#ifdef VERBOSE
  printf("FindExtrema time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Reset the device-side keypoint counter to zero before a new detection run.
void ClearPoints() {
  const int zero = 0;
  safeCall(hipMemcpyToSymbolAsync(d_PointCounter, &zero, sizeof(int)));
}
// One compare-exchange step of a bitonic network on plain ints: after the
// call, pts[shmidx + sortdir] >= pts[shmidx + (offset - sortdir)].
__forceinline__ __device__ void atomicSort(int *pts, int shmidx, int offset,
                                           int sortdir) {
  const int a = shmidx + sortdir;
  const int b = shmidx + (offset - sortdir);
  if (pts[a] < pts[b]) {
    int tmp = pts[a];
    pts[a] = pts[b];
    pts[b] = tmp;
  }
}
// Ordering predicate for keypoints, row-major by (pt.y, pt.x).
// Entries with pt.x == 0 act as padding: a zero-x j compares as "after"
// everything (returns true), while a zero-x i against a nonzero j returns
// false, so padding sinks to the end of a bitonic sort.
__forceinline__ __device__ bool atomicCompare(const cv::KeyPoint &i,
                                              const cv::KeyPoint &j) {
  if (i.pt.x * j.pt.x == 0) {
    return j.pt.x == 0;
  }
  return (i.pt.y < j.pt.y) || (i.pt.y == j.pt.y && i.pt.x < j.pt.x);
}
// Compact sort record used by the bitonic sorts: the original keypoint
// index plus its (x, y) position truncated to shorts, so compare-exchange
// passes move a few bytes instead of whole cv::KeyPoint structs.
template <typename T>
struct sortstruct_t {
  T idx;    // index into the source keypoint array; -1 marks padding
  short x;  // keypoint column, truncated; 0 marks padding
  short y;  // keypoint row, truncated
};
// Ordering predicate for packed sort records, row-major by (y, x).
// Mirrors the cv::KeyPoint overload: x == 0 marks padding, and a zero-x j
// compares as "after" everything so padding sinks to the end.
template <typename T>
__forceinline__ __device__ bool atomicCompare(const sortstruct_t<T> &i,
                                              const sortstruct_t<T> &j) {
  if (i.x * j.x == 0) {
    return j.x == 0;
  }
  return (i.y < j.y) || (i.y == j.y && i.x < j.x);
}
// One compare-exchange step of a bitonic network on packed sort records:
// swaps the pair when atomicCompare says they are out of order.
template <typename T>
__forceinline__ __device__ void atomicSort(sortstruct_t<T> *pts, int shmidx,
                                           int offset, int sortdir) {
  sortstruct_t<T> &a = pts[(shmidx + sortdir)];
  sortstruct_t<T> &b = pts[(shmidx + (offset - sortdir))];
  if (atomicCompare(a, b)) {
    sortstruct_t<T> tmp = a;
    a = b;
    b = tmp;
  }
}
#define BitonicSortThreads 1024
// Sort one scale's keypoints by (y, x) using a shared-memory bitonic sort.
// One block per scale (blockIdx.x == scale), BitonicSortThreads threads.
// Sort keys are packed sortstruct_t<short> records; slots beyond the
// scale's count are padded with (idx=-1, x=0, y=0), which atomicCompare
// sinks to the end. Sorted keypoints are gathered into newpts.
// Capacity: at most 8192 keypoints per scale (the shared array size);
// callers must use bitonicSort_global beyond that.
template <class T>
__global__ void bitonicSort(const T *pts, T *newpts) {
  int scale = blockIdx.x;
  __shared__ struct sortstruct_t<short> shm[8192];
  // [first, last) is this scale's slice of the keypoint array.
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Load keys into shared memory, padding the tail.
  for (int i = threadIdx.x; i < 8192;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Standard bitonic network over 8192 elements: i is the merge width,
  // j the compare stride; each thread handles 4 compare-exchange pairs.
  for (int i=1; i<8192; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<4096; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Gather the full keypoints into sorted order via the permuted indices.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < 8192; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
// Fallback bitonic sort for scales with more than 8192 keypoints: identical
// to bitonicSort, but the sort records live in a caller-allocated global
// buffer (_shm, one _sz-element segment per block) instead of shared memory.
// The padded size nkpts_ceil is the next power of two >= nkpts; _sz must be
// at least that large. One block per scale, so __syncthreads() is a valid
// barrier for each segment.
template <class T>
__global__ void bitonicSort_global(const T *pts, T *newpts, sortstruct_t<int>* _shm, int _sz) {
  int scale = blockIdx.x;
  //__shared__ struct sortstruct_t shm[8192];
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Round the element count up to a power of two for the bitonic network.
  int nkpts_ceil = 1;
  while (nkpts_ceil < nkpts) nkpts_ceil *= 2;
  // This block's private segment of the global scratch buffer.
  sortstruct_t<int> *shm = &(_shm[_sz*blockIdx.x]);
  for (int i = threadIdx.x; i < nkpts_ceil;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      // Padding records sink to the end under atomicCompare.
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Bitonic network: i is the merge width, j the compare stride.
  for (int i=1; i<nkpts_ceil; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<nkpts_ceil/2; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Gather full keypoints into sorted order via the permuted indices.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < nkpts_ceil; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
#define FindNeighborsThreads 32
// For each keypoint (one block per keypoint), collect the indices of
// earlier keypoints — on the same scale and on the previous scale — that lie
// within half its size. Results go to kptindices: slot
// [blockIdx.x * width + 0] holds 1 + neighbor count, slots 1.. hold the
// neighbor indices (unordered, filled via atomics).
// Relies on keypoints being sorted by (y, x) within each scale so the
// backward scan can stop once the row distance exceeds size/2.
__global__ void FindNeighbors(cv::KeyPoint *pts, int *kptindices, int width) {
  __shared__ int gidx[1];
  // which scale?
  int scale = pts[blockIdx.x].class_id;
  int cmpIdx = scale < 1 ? 0 : d_ExtremaIdx[scale - 1];
  float size = pts[blockIdx.x].size;
  // Slot 0 of the output row stores the count; data starts at slot 1.
  gidx[0] = 1;
  __syncthreads();
  // One keypoint per block.
  cv::KeyPoint &kpt = pts[blockIdx.x];
  // Key point to compare. Only compare with smaller than current
  // Iterate backwards instead and break as soon as possible!
  //for (int i = cmpIdx + threadIdx.x; i < blockIdx.x; i += FindNeighborsThreads) {
  for (int i=blockIdx.x-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
    cv::KeyPoint &kpt_cmp = pts[i];
    // Sorted order lets us stop once rows are too far apart.
    if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
    //if (fabs(kpt.pt.y-kpt_cmp.pt.y) > size*.5f) continue;
    float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                 (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
    if (dist < size * size * 0.25) {
      int idx = atomicAdd(gidx, 1);
      kptindices[blockIdx.x * width + idx] = i;
    }
  }
  // Also scan the previous scale's keypoints, if any.
  if (scale > 0) {
    int startidx = d_ExtremaIdx[scale-1];
    cmpIdx = scale < 2 ? 0 : d_ExtremaIdx[scale - 2];
    for (int i=startidx-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
      cv::KeyPoint &kpt_cmp = pts[i];
      if (kpt_cmp.pt.y-kpt.pt.y > size*.5f) continue;
      if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
      float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                   (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
      if (dist < size * size * 0.25) {
        int idx = atomicAdd(gidx, 1);
        kptindices[blockIdx.x * width + idx] = i;
      }
    }
  }
  __syncthreads();
  // Publish the final slot count (neighbors + 1) in slot 0.
  if (threadIdx.x == 0) {
    kptindices[blockIdx.x * width] = gidx[0];
  }
}
// TODO Intermediate storage of memberarray and minneighbor
#define FilterExtremaThreads 1024
// Iteratively resolve overlapping keypoints (single block launch).
// Each keypoint's neighbor list comes from FindNeighbors (kptindices).
// memberarray encodes per-keypoint state:
//   -1  not yet processed
//   -2  processed but replaced by a stronger neighbor
//   >=0 kept; the value is the index of the surviving keypoint
// The loop repeats until every keypoint is processed: points with no
// processed neighbors are added outright; points whose neighbors are all
// processed compete on response with their smallest kept neighbor; points
// with an unprocessed neighbor wait for a later iteration.
// minneighbor and shouldAdd are caller-allocated scratch (nump entries).
// newkpts is unused here; the kernel only fills memberarray.
__global__ void FilterExtrema_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                     int *kptindices, int width,
                                     int *memberarray,
                                     int *minneighbor,
                                     char *shouldAdd) {
  // -1 means not processed
  // -2 means added but replaced
  // >=0 means added
  __shared__ bool shouldBreak[1];
  int nump = d_PointCounter[0];
  // Initially all points are unprocessed
  for (int i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    memberarray[i] = -1;
  }
  if (threadIdx.x == 0) {
    shouldBreak[0] = true;
  }
  __syncthreads();
  // Loop until there are no more points to process
  // (the 10000 cap is a safety bound against non-termination).
  for (int xx=0; xx<10000; ++xx) {
    //while (true) {
    // Outer loop to handle more than 8*1024 points
    // Start by restoring memberarray
    // Make sure to add appropriate offset to indices
    //    for (int offset=0; offset<nump; offset += 8*1024) {
    //      memberarray[i] = storedmemberarray[i+offset];
    //for (int offset=0; offset<nump; offset += 8*1024) {
    // Mark all points for addition and no minimum neighbor
    //int maxi = nump-offset >= 8*1024 ? 8*1024 : nump-offset;
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      minneighbor[i] = nump+1;
      shouldAdd[i] = true;
    }
    __syncthreads();
    // Look through all points. If there are points that have not been processed,
    // disable breaking and check if it has no processed neighbors (add), has all processed
    // neighbors (compare with neighbors) or has some unprocessed neighbor (wait)
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      int neighborsSize = kptindices[i * width] - 1;
      int *neighbors = &(kptindices[i * width + 1]);
      // Only do if we didn't process the point before
      if (memberarray[i] == -1) {
        // If we process at least one point we shouldn't break
        // No need to sync. Only want to know if at least one thread wants to
        // continue
        shouldBreak[0] = false;
        // Sort neighbors according to the order of currently added points
        // (often very few)
        // If the neighbor has been replaced, stick it to the back
        // If any neighbor has not been processed, break;
        bool shouldProcess = true;
        for (int k = 0; k < neighborsSize; ++k) {
          // If the point has one or more unprocessed neighbors, skip
          if (memberarray[neighbors[k]] == -1) {
            shouldProcess = false;
            shouldAdd[i] = false;
            break;
          }
          // If it has a neighbor that is in the list, we don't add, but process
          if (memberarray[neighbors[k]] >= 0) {
            shouldAdd[i] = false;
          }
        }
        // We should process and potentially replace the neighbor
        if (shouldProcess && !shouldAdd[i]) {
          // Find the smallest neighbor. Often only one or two, so no ned for fancy algorithm
          // (simple selection-style in-place sort of the neighbor list).
          for (int k = 0; k < neighborsSize; ++k) {
            for (int j = k + 1; j < neighborsSize; ++j) {
              if (memberarray[neighbors[k]] == -2 ||
                  (memberarray[neighbors[j]] != -2 &&
                   memberarray[neighbors[j]] < memberarray[neighbors[k]])) {
                int t = neighbors[k];
                neighbors[k] = neighbors[j];
                neighbors[j] = t;
              }
            }
          }
          // Pick the first neighbor
          // We need to make sure, in case more than one point has this
          // neighbor,
          // That the point with lowest memberarrayindex processes it first
          // Here minneighbor[i] is the target and i the neighbor
          int nidx = neighbors[0];
          minneighbor[nidx] = min(minneighbor[nidx], (int)i);
        }
      }
    }
    __syncthreads();
    // Check which points we can add
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (memberarray[i] == -1) {
        if (shouldAdd[i]) {
          memberarray[i] = i;
        }
      }
    }
    __syncthreads();
    // Look at the neighbors. If the response is higher, replace
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (minneighbor[i] != nump+1) {
        if (memberarray[minneighbor[i]] == -1) {
          if (!shouldAdd[minneighbor[i]]) {
            const cv::KeyPoint &p0 = kpts[minneighbor[i]];
            const cv::KeyPoint &p1 = kpts[i];
            if (p0.response > p1.response) {
              memberarray[minneighbor[i]] = i;
              memberarray[i] = -2;
            } else {
              memberarray[minneighbor[i]] = -2;
            }
          }
        }
      }
    }
    __syncthreads();
    // End outer loop
    //for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    //      storedmemberarray[i+offset] = memberarray[i];
    //    }
    //    __syncthreads();
    //}
    // Are we done?
    if (shouldBreak[0]) break;
    if (threadIdx.x == 0) {
      shouldBreak[0] = true;
    }
    __syncthreads();
  }
  __syncthreads();
}
// Compact and order the surviving keypoints after FilterExtrema_kernel
// (single block, 1024 threads). Processes memberarray in 2048-entry tiles:
// each tile maps dropped (-1/-2) and weak (size < 0) entries to the
// sentinel nump+1, bitonic-sorts the tile, then appends the valid entries
// to newkpts with their subpixel offsets (packed in octave/angle by
// FindExtrema) decoded back into pt.x/pt.y. Finally publishes the surviving
// count to d_PointCounter.
//
// NOTE(review): curridx[0] is written by thread 0 before the loop with no
// __syncthreads() in between, and the tail-count branches update it
// non-atomically from several threads; this appears to rely on only one
// thread satisfying each condition — worth verifying under racecheck.
__global__ void sortFiltered_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                    int *memberarray) {
  __shared__ int minneighbor[2048];
  __shared__ int curridx[1];
  int nump = d_PointCounter[0];
  if (threadIdx.x == 0) {
    curridx[0] = 0;
  }
  // Sort array
  // upper = nump rounded up to a multiple of 2048.
  const int upper = (nump + 2047) & (0xfffff800);
  for (int i = threadIdx.x; i < upper; i += 2 * FilterExtremaThreads) {
    // Map each slot to its surviving keypoint index, or the sentinel
    // nump+1 for dropped / weak / out-of-range entries.
    minneighbor[threadIdx.x] =
        i >= nump ? nump+1 : (memberarray[i] < 0 ? nump+1 : (kpts[memberarray[i]].size < 0 ? nump+1 : memberarray[i]));
    minneighbor[threadIdx.x + 1024] =
        i + 1024 >= nump ? nump+1
                         : (memberarray[i + 1024] < 0 ? nump+1 : (kpts[memberarray[i+1024]].size < 0 ? nump+1 : memberarray[i+1024]));
    __syncthreads();
    // Sort and store keypoints
#pragma unroll 1
    for (int k = 1; k < 2048; k <<= 1) {
      int sortdir = (threadIdx.x & k) > 0 ? 0 : 1;
#pragma unroll 1
      for (int j = k; j > 0; j >>= 1) {
        int mask = 0x0fffffff * j;
        int tidx = ((threadIdx.x & mask) << 1) + (threadIdx.x & ~mask);
        atomicSort(minneighbor, tidx, j, j * sortdir);
        __syncthreads();
      }
    }
    __syncthreads();
#pragma unroll 1
    for (int k = threadIdx.x; k < 2048; k += 1024) {
      if (minneighbor[k] < nump) {
        // Restore subpixel component
        cv::KeyPoint &okpt = kpts[minneighbor[k]];
        // Decode octave + fractional x-offset packed by FindExtrema.
        float octsub = fabs(*(float*)(&kpts[minneighbor[k]].octave));
        int octave = (int)octsub;
        float subp = (*(float*)(&kpts[minneighbor[k]].octave) < 0 ? -1 : 1) * (octsub - octave);
        float ratio = 1 << octave;
        cv::KeyPoint &tkpt = newkpts[k + curridx[0]];
        // angle held the y subpixel offset; fold both offsets back in.
        tkpt.pt.y = ratio * ((int)(0.5f+okpt.pt.y / ratio) + okpt.angle);
        tkpt.pt.x = ratio * ((int)(0.5f+okpt.pt.x / ratio) + subp);
        // newkpts[k + curridx[0] + threadIdx.x].angle = 0;  // This will be set elsewhere
        tkpt.class_id = okpt.class_id;
        tkpt.octave = octave;
        tkpt.response = okpt.response;
        tkpt.size = okpt.size;
      }
    }
    __syncthreads();
    // How many did we add?
    // (the sort put all valid entries first, so find the first sentinel)
    if (minneighbor[2047] < nump) {
      curridx[0] += 2048;
    } else {
      if (minneighbor[1024] < nump) {
        if (threadIdx.x < 1023 && minneighbor[1024 + threadIdx.x] < nump &&
            minneighbor[1024 + threadIdx.x + 1] == nump+1) {
          curridx[0] += 1024 + threadIdx.x + 1;
        }
      } else {
        if (minneighbor[threadIdx.x] < nump &&
            minneighbor[threadIdx.x + 1] == nump+1) {
          curridx[0] += threadIdx.x + 1;
        }
      }
      __syncthreads();
    }
  }
  __syncthreads();
  // Publish the compacted keypoint count.
  if (threadIdx.x == 0) {
    d_PointCounter[0] = curridx[0];
  }
}
// Host driver for keypoint de-duplication:
// 1) per-scale bitonic sort of the raw detections (shared-memory path for
//    up to 8k points per scale, global-memory fallback otherwise),
// 2) neighbor discovery, 3) overlap resolution keeping the strongest
// response, 4) compaction into pts. On return, nump holds the final count.
void FilterExtrema(cv::KeyPoint *pts, cv::KeyPoint *newpts, int* kptindices, int& nump) {
  // Current total and per-scale cumulative end indices from the device.
  hipMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int));
  unsigned int scaleEnds[16];
  hipMemcpyFromSymbol(scaleEnds, d_ExtremaIdx, 16 * sizeof(unsigned int));
  // Largest keypoint count on any single scale decides the sort path.
  int maxPerScale = scaleEnds[0];
  for (int s = 1; s < 16; ++s) {
    maxPerScale = max(maxPerScale, scaleEnds[s] - scaleEnds[s - 1]);
  }
  // Row stride of the neighbor-index table (count slot + neighbors).
  int width = ceil(21) * ceil(21);
  dim3 blocks(16, 1, 1);
  dim3 threads(BitonicSortThreads, 1, 1);
  if (maxPerScale <= 8 * 1024) {
    bitonicSort << <blocks, threads>>> (pts, newpts);
  } else {
    // Global-memory fallback needs a power-of-two scratch segment per scale.
    int paddedCount = 1;
    while (paddedCount < nump) paddedCount <<= 1;
    std::cout << "numpceil: " << paddedCount << std::endl;
    sortstruct_t<int>* scratch;
    hipMalloc((void**)&scratch, paddedCount * 16 * sizeof(sortstruct_t<int>));
    bitonicSort_global << <blocks, threads>>> (pts, newpts, scratch, paddedCount);
    hipFree(scratch);
  }
  CHK
  // Collect, for every keypoint, the earlier points that overlap it.
  hipStreamSynchronize(copyStream);
  blocks.x = nump;
  threads.x = FindNeighborsThreads;
  FindNeighbors << <blocks, threads>>> (newpts, kptindices, width);
  CHK
  // Resolve overlaps (strongest response wins), then compact back into pts.
  blocks.x = 1;
  threads.x = FilterExtremaThreads;
  int *memberBuf, *minNeighborBuf;
  hipMalloc((void**)&memberBuf, nump * sizeof(int));
  hipMalloc((void**)&minNeighborBuf, nump * sizeof(int));
  char* shouldAddBuf;
  hipMalloc((void**)&shouldAddBuf, nump);
  FilterExtrema_kernel << <blocks, threads>>> (newpts, pts, kptindices, width,
                                               memberBuf, minNeighborBuf,
                                               shouldAddBuf);
  threads.x = 1024;
  sortFiltered_kernel << <blocks, threads>>> (newpts, pts, memberBuf);
  CHK
  hipFree(memberBuf);
  hipFree(minNeighborBuf);
  hipFree(shouldAddBuf);
  // Read back the compacted count written by sortFiltered_kernel.
  hipMemcpyFromSymbolAsync(&nump, d_PointCounter, sizeof(int));
}
// Download numPts keypoints from device memory into h_pts (asynchronously,
// on copyStream — the caller must synchronize before reading h_pts).
// Returns the number of points requested.
// Fix: the original took &h_pts[0] unconditionally, which is undefined
// behavior on an empty vector, and cast the destination to float* for no
// reason; use data() and skip the copy when there is nothing to fetch.
int GetPoints(std::vector<cv::KeyPoint> &h_pts, cv::KeyPoint *d_pts, int numPts) {
  h_pts.resize(numPts);
  if (numPts > 0) {
    safeCall(hipMemcpyAsync(h_pts.data(), d_pts,
                            sizeof(cv::KeyPoint) * numPts,
                            hipMemcpyDeviceToHost, copyStream));
  }
  return numPts;
}
// Download numPts packed 61-byte descriptors into a freshly allocated host
// matrix. The copy is asynchronous on copyStream; the caller must
// synchronize before reading h_desc.
void GetDescriptors(cv::Mat &h_desc, cv::Mat &d_desc, int numPts) {
  const size_t nBytes = (size_t)numPts * 61;
  h_desc = cv::Mat(numPts, 61, CV_8U);
  hipMemcpyAsync(h_desc.data, d_desc.data, nBytes, hipMemcpyDeviceToHost,
                 copyStream);
}
// Accumulate the M-LDB descriptor sums for one keypoint per block
// (EXTRACT_S threads). For the keypoint's rotated, scale-normalized patch,
// three channels (intensity, rotated dx, rotated dy) are summed over three
// overlaid grids (2x2, 3x3, 4x4 = 4+9+16 = 29 cells), giving 3*29 values
// written to _vals for BuildDescriptor to binarize.
// Shared layout: 30 cell-slots x 3 channels interleaved per thread
// (stride 3*30 per thread); only 29 cells are used, slot 30 is padding.
__global__ void ExtractDescriptors(cv::KeyPoint *d_pts, CudaImage *d_imgs,
                                   float *_vals, int size2, int size3,
                                   int size4) {
  __shared__ float acc_vals[3 * 30 * EXTRACT_S];
  // Channel base pointers; only used for the zero-initialization below —
  // the accumulation itself indexes acc_vals with the interleaved layout.
  float *acc_vals_im = &acc_vals[0];
  float *acc_vals_dx = &acc_vals[30 * EXTRACT_S];
  float *acc_vals_dy = &acc_vals[2 * 30 * EXTRACT_S];
  int p = blockIdx.x;
  float *vals = &_vals[p * 3 * 29];
  // Map the keypoint back to its octave's image resolution.
  float iratio = 1.0f / (1 << d_pts[p].octave);
  int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
  float xf = d_pts[p].pt.x * iratio;
  float yf = d_pts[p].pt.y * iratio;
  float ang = d_pts[p].angle;
  float co = cos(ang);
  float si = sin(ang);
  int tx = threadIdx.x;
  int lev = d_pts[p].class_id;
  // Per-level image planes: 0 = smoothed image, 2/3 = x/y derivatives.
  float *imd = d_imgs[4 * lev + 0].d_data;
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int winsize = max(3 * size3, 4 * size4);
  for (int i = 0; i < 30; ++i) {
    acc_vals_im[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dx[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dy[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Each thread walks a strided subset of the winsize x winsize window,
  // sampling the rotated patch and accumulating into its private slots.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    // Rotate the sample offset by the keypoint angle and scale it.
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float im = imd[pos];
    float dx = dxd[pos];
    float dy = dyd[pos];
    // Rotate the gradient into the keypoint frame.
    float rx = -dx * si + dy * co;
    float ry = dx * co + dy * si;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // Add 2x2
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx] += im;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // Add 3x3
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // Add 4x4
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 2] += ry;
    }
  }
  __syncthreads();
  // Reduce stuff: sum each of the 90 (value, cell) slots across threads —
  // pairwise add across the two half-warps, then a warp shuffle reduction.
  // NOTE(review): uses the legacy mask-less __shfl_down; on CUDA Volta+ the
  // *_sync variants are required — presumably this targets HIP/older CC.
  float acc_reg;
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    for (int d = 0; d < 90; d += 30) {
      if (tx_d < 32) {
        acc_reg = acc_vals[3 * 30 * tx_d + offset + d] +
                  acc_vals[3 * 30 * (tx_d + 32) + offset + d];
        acc_reg += __shfl_down(acc_reg, 1);
        acc_reg += __shfl_down(acc_reg, 2);
        acc_reg += __shfl_down(acc_reg, 4);
        acc_reg += __shfl_down(acc_reg, 8);
        acc_reg += __shfl_down(acc_reg, 16);
      }
      if (tx_d == 0) {
        acc_vals[offset + d] = acc_reg;
      }
    }
  }
  __syncthreads();
  // Have 29*3 values to store
  // They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
  if (tx < 29) {
    vals[tx] = acc_vals[tx];
    vals[29 + tx] = acc_vals[29 + tx];
    vals[2 * 29 + tx] = acc_vals[2 * 29 + tx];
  }
}
// Variant of ExtractDescriptors that processes the three channels
// (intensity, rotated dx, rotated dy) in three sequential passes over the
// patch, using a 30-slot-per-thread shared accumulator and a shared-memory
// tree reduction instead of warp shuffles.
//
// NOTE(review): several apparent defects in this variant, left as-is here:
//  * All three passes write their reduced results to final_vals[3 * offset],
//    so the DX and DY passes overwrite the IM results, while the final
//    store reads final_vals[tx], final_vals[29 + tx], final_vals[58 + tx]
//    as if channels were stored in contiguous 29-blocks. Confirm whether
//    this kernel is actually used before relying on its output.
//  * There is no __syncthreads() between each tree reduction and the
//    re-zeroing of acc_vals for the next pass, nor between reduction steps
//    beyond warp width — a potential shared-memory race.
__global__ void ExtractDescriptors_serial(cv::KeyPoint *d_pts,
                                          CudaImage *d_imgs, float *_vals,
                                          int size2, int size3, int size4) {
  __shared__ float acc_vals[30 * EXTRACT_S];
  __shared__ float final_vals[3 * 30];
  int p = blockIdx.x;
  float *vals = &_vals[p * 3 * 29];
  // Map the keypoint back to its octave's image resolution.
  float iratio = 1.0f / (1 << d_pts[p].octave);
  int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
  float xf = d_pts[p].pt.x * iratio;
  float yf = d_pts[p].pt.y * iratio;
  float ang = d_pts[p].angle;
  float co = cos(ang);
  float si = sin(ang);
  int tx = threadIdx.x;
  int lev = d_pts[p].class_id;
  float *imd = d_imgs[4 * lev + 0].d_data;
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int winsize = max(3 * size3, 4 * size4);
  // IM
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Accumulate intensity over the 2x2 / 3x3 / 4x4 grids.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float im = imd[pos];
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += im;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += im;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += im;
    }
  }
  __syncthreads();
  // Reduce stuff
  #pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  // DX
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Accumulate the rotated x-gradient over the same grids.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float dx = dxd[pos];
    float dy = dyd[pos];
    float rx = -dx * si + dy * co;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += rx;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += rx;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += rx;
    }
  }
  __syncthreads();
  // Reduce stuff
  #pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  // DY
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Accumulate the rotated y-gradient over the same grids.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float dx = dxd[pos];
    float dy = dyd[pos];
    float ry = dx * co + dy * si;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += ry;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += ry;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += ry;
    }
  }
  __syncthreads();
  // Reduce stuff
  #pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  __syncthreads();
  // Have 29*3 values to store
  // They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
  if (tx < 29) {
    vals[tx] = final_vals[tx];
    vals[29 + tx] = final_vals[29 + tx];
    vals[2 * 29 + tx] = final_vals[2 * 29 + tx];
  }
}
// Binarize one keypoint's 3*29 accumulated channel sums into a 61-byte
// M-LDB descriptor. One block per keypoint; threads 0..60 each emit one
// byte (the last byte carries only 6 comparison bits). The comparison
// index pairs live in the constant arrays comp_idx_1 / comp_idx_2.
__global__ void BuildDescriptor(float *_valsim, unsigned char *_desc) {
  const int p = blockIdx.x;
  const size_t byteIdx = threadIdx.x;
  if (byteIdx >= 61) return;
  const float *valsim = &_valsim[3 * 29 * p];
  unsigned char *desc = &_desc[61 * p];
  const int nbits = (byteIdx == 60 ? 6 : 8);
  unsigned char packed = 0;
#pragma unroll
  for (int i = 0; i < nbits; ++i) {
    const int a = comp_idx_1[byteIdx * 8 + i];
    const int b = comp_idx_2[byteIdx * 8 + i];
    if (valsim[a] > valsim[b]) packed |= 1 << i;
  }
  desc[byteIdx] = packed;
}
// Host driver for descriptor extraction: accumulate per-keypoint channel
// sums into vals_d, then binarize them into 61-byte descriptors in desc_d.
// Returns the measured GPU time (currently always 0; the timer is disabled).
double ExtractDescriptors(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs,
                          unsigned char *desc_d, float* vals_d, int patsize, int numPts) {
  // Cell sizes of the overlaid 2x2, 3x3 and 4x4 sampling grids.
  const int size2 = patsize;
  const int size3 = ceil(2.0f * patsize / 3.0f);
  const int size4 = ceil(0.5f * patsize);
  dim3 blocks(numPts);
  dim3 threads(EXTRACT_S);
  ExtractDescriptors << <blocks, threads>>>(d_pts, d_imgs, vals_d, size2, size3, size4);
  CHK;
  // Descriptors are OR-assembled bytewise, so start from zero.
  hipMemsetAsync(desc_d, 0, numPts * 61);
  BuildDescriptor << <blocks, 64>>> (vals_d, desc_d);
  CHK;
  double gpuTime = 0;
#ifdef VERBOSE
  printf("ExtractDescriptors time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
#define NTHREADS_MATCH 32
// Brute-force Hamming matcher: one block per query descriptor. Each of the
// 32 threads scans a strided subset of the train descriptors, tracking its
// best and second-best scores (descriptors are read as 8x 64-bit words and
// compared with popcount on XOR, so rows must be 64-byte pitched and
// zero-padded). A shared-memory tree reduction then merges the per-thread
// candidates, and thread 0 writes the two results to matches[2p], [2p+1].
//
// NOTE(review): the reduction loop has no __syncthreads()/__syncwarp()
// between steps and the shared arrays are not volatile — with exactly one
// warp this relies on legacy implicit warp synchrony, which is not
// guaranteed on CUDA Volta+; confirm the target architecture.
__global__ void MatchDescriptors(unsigned char *d1, unsigned char *d2,
                                 int pitch, int nkpts_2, cv::DMatch *matches) {
  int p = blockIdx.x;
  int x = threadIdx.x;
  __shared__ int idxBest[NTHREADS_MATCH];
  __shared__ int idxSecondBest[NTHREADS_MATCH];
  __shared__ int scoreBest[NTHREADS_MATCH];
  __shared__ int scoreSecondBest[NTHREADS_MATCH];
  // 512 exceeds the maximum possible Hamming distance (61 bytes).
  idxBest[x] = 0;
  idxSecondBest[x] = 0;
  scoreBest[x] = 512;
  scoreSecondBest[x] = 512;
  __syncthreads();
  // curent version fixed with popc, still not convinced
  unsigned long long *d1i = (unsigned long long *)(d1 + pitch * p);
  for (int i = 0; i < nkpts_2; i += NTHREADS_MATCH) {
    unsigned long long *d2i = (unsigned long long *)(d2 + pitch * (x + i));
    if (i + x < nkpts_2) {
      // Check d1[p] with d2[i]
      int score = 0;
#pragma unroll
      for (int j = 0; j < 8; ++j) {
        score += __popcll(d1i[j] ^ d2i[j]);
      }
      if (score < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = score;
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = i + x;
      } else if (score < scoreSecondBest[x]) {
        scoreSecondBest[x] = score;
        idxSecondBest[x] = i + x;
      }
    }
  }
  //  for( int i=16; i>=1; i/=2) {
  //      int tBest = __shfl_down(scoreBest,i);
  //      int tIdx = __shfl_down(idxBest,i);
  //      if(tBest < scoreBest) {
  //          scoreSecondBest = scoreBest;
  //          idxSecondBest = idxBest;
  //          scoreBest = tBest;
  //          idxBest = tIdx;
  //      }
  //      tBest = __shfl_down(scoreSecondBest,i);
  //      tIdx = __shfl_down(idxSecondBest,i);
  //      if(tBest < scoreSecondBest) {
  //          scoreSecondBest = tBest;
  //          idxSecondBest = tIdx;
  //      }
  //  }
  __syncthreads();
  // Tree-merge the per-thread (best, second best) pairs down to lane 0.
  for (int i = NTHREADS_MATCH / 2; i >= 1; i /= 2) {
    if (x < i) {
      if (scoreBest[x + i] < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = idxBest[x + i];
      } else if (scoreBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x + i];
      }
      if (scoreSecondBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreSecondBest[x + i];
        idxSecondBest[x] = idxSecondBest[x + i];
      }
    }
  }
  //   if(i>16) __syncthreads();
  //   if(x<i) {
  //       if( scoreBest[x+i] < scoreSecondBest[x] ) {
  //           scoreSecondBest[x] = scoreBest[x+i];
  //           idxSecondBest[x] = idxBest[x+i];
  //       } else if (scoreSecondBest[x+i] < scoreSecondBest[x] ) {
  //           scoreSecondBest[x] = scoreSecondBest[x+i];
  //           idxSecondBest[x] = idxSecondBest[x+i];
  //       }
  //   }
  //   if(i>16) __syncthreads();
  //}
  /*for (int i = 1; i <= NTHREADS_MATCH; ++i) {
    if (scoreBest[i] < scoreBest[0]) {
      scoreSecondBest[0] = scoreBest[0];
      scoreBest[0] = scoreBest[i];
      idxSecondBest[0] = idxBest[0];
      idxBest[0] = idxBest[i];
    } else if( scoreBest[i] < scoreSecondBest[0] ) {
      scoreSecondBest[0] = scoreBest[i];
      idxSecondBest[0] = idxBest[i];
    }
    if(scoreSecondBest[i] < scoreSecondBest[0]) {
      scoreSecondBest[0] = scoreSecondBest[i];
      idxSecondBest[0] = idxSecondBest[i];
    }
  }*/
  //   if(x==0) {
  //       matches[2*p].queryIdx = p;
  //       matches[2*p].trainIdx = idxBest;
  //       matches[2*p].distance = scoreBest;
  //       matches[2*p+1].queryIdx = p;
  //       matches[2*p+1].trainIdx = idxSecondBest;
  //       matches[2*p+1].distance = scoreSecondBest;
  //   }
  if (x == 0) {
    matches[2 * p].queryIdx = p;
    matches[2 * p].trainIdx = idxBest[x];
    matches[2 * p].distance = scoreBest[x];
    matches[2 * p + 1].queryIdx = p;
    matches[2 * p + 1].trainIdx = idxSecondBest[x];
    matches[2 * p + 1].distance = scoreSecondBest[x];
  }
}
// Match query descriptors against train descriptors using caller-provided
// device buffers (descq_d/desct_d already uploaded with the given pitch)
// and scratch match buffers. Appends one 2-NN pair per query to dmatches.
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches,
                      size_t pitch,
                      unsigned char* descq_d, unsigned char* desct_d, cv::DMatch* dmatches_d, cv::DMatch* dmatches_h) {
  // One block per query descriptor; the kernel emits (best, second best).
  dim3 block(desc_query.rows);
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
  hipMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
            hipMemcpyDeviceToHost);
  // Repack the flat pairs into knn-style output.
  for (int i = 0; i < desc_query.rows; ++i) {
    std::vector<cv::DMatch> knnPair;
    knnPair.push_back(dmatches_h[2 * i]);
    knnPair.push_back(dmatches_h[2 * i + 1]);
    dmatches.push_back(knnPair);
  }
}
// Self-contained brute-force 2-NN matcher: allocates pitched device copies of
// both descriptor sets, launches one block per query descriptor, and returns
// the best and second-best match per query in dmatches.
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches) {
  size_t pitch1, pitch2;
  unsigned char *descq_d;
  // Rows are allocated 64 bytes wide and zero-filled before the copy, so any
  // padding bytes compare equal on both sides.
  hipMallocPitch(&descq_d, &pitch1, 64, desc_query.rows);
  hipMemset2D(descq_d, pitch1, 0, 64, desc_query.rows);
  hipMemcpy2D(descq_d, pitch1, desc_query.data, desc_query.cols,
              desc_query.cols, desc_query.rows, hipMemcpyHostToDevice);
  unsigned char *desct_d;
  hipMallocPitch(&desct_d, &pitch2, 64, desc_train.rows);
  hipMemset2D(desct_d, pitch2, 0, 64, desc_train.rows);
  hipMemcpy2D(desct_d, pitch2, desc_train.data, desc_train.cols,
              desc_train.cols, desc_train.rows, hipMemcpyHostToDevice);
  dim3 block(desc_query.rows);
  cv::DMatch *dmatches_d;
  hipMalloc(&dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch));
  // NOTE(review): only pitch1 is passed to the kernel, so this assumes
  // pitch1 == pitch2 (both allocations request 64-byte rows) -- confirm.
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch1, desc_train.rows, dmatches_d);
  cv::DMatch *dmatches_h = new cv::DMatch[2 * desc_query.rows];
  hipMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
            hipMemcpyDeviceToHost);
  // Repack flat [best, second-best] pairs into one 2-element vector per query.
  for (int i = 0; i < desc_query.rows; ++i) {
    std::vector<cv::DMatch> tdmatch;
    //std::cout << dmatches_h[2*i].trainIdx << " - " << dmatches_h[2*i].queryIdx << std::endl;
    tdmatch.push_back(dmatches_h[2 * i]);
    tdmatch.push_back(dmatches_h[2 * i + 1]);
    dmatches.push_back(tdmatch);
  }
  hipFree(descq_d);
  hipFree(desct_d);
  hipFree(dmatches_d);
  delete[] dmatches_h;
}
// Builds the channel-pair index tables used by the MLDB descriptor comparison
// and uploads them to device constant memory (comp_idx_1 / comp_idx_2).
// Entry k holds the two sample indices whose values are compared to produce
// descriptor bit k; indices are 3*sample + channel (0, 1, 2).
void InitCompareIndices() {
  int comp_idx_1_h[61 * 8];
  int comp_idx_2_h[61 * 8];
  int cntr = 0;
  // Emit every pair (j, i) with jBeg <= j < i < iEnd for one channel offset.
  auto emitPairs = [&](int jBeg, int jEnd, int iEnd, int off) {
    for (int j = jBeg; j < jEnd; ++j) {
      for (int i = j + 1; i < iEnd; ++i) {
        comp_idx_1_h[cntr] = 3 * j + off;
        comp_idx_2_h[cntr] = 3 * i + off;
        cntr++;
      }
    }
  };
  // 2x2 grid: samples 0..3, all three channels.
  for (int off = 0; off < 3; ++off) emitPairs(0, 4, 4, off);
  // 3x3 grid: samples 4..12.
  for (int off = 0; off < 3; ++off) emitPairs(4, 12, 13, off);
  // 4x4 grid: samples 13..28.
  for (int off = 0; off < 3; ++off) emitPairs(13, 28, 29, off);
  hipMemcpyToSymbol(comp_idx_1, comp_idx_1_h, 8 * 61 * sizeof(int));
  hipMemcpyToSymbol(comp_idx_2, comp_idx_2_h, 8 * 61 * sizeof(int));
}
// One block per keypoint: accumulate a 42-bin orientation histogram from the
// precomputed derivative images (d_imgs[4*lev+2] = dx, d_imgs[4*lev+3] = dy)
// over a radius-6 disc around the point, smooth it with a circular 7-bin
// window, and write the dominant angle to d_pts[p].angle in [0, 2*pi).
__global__ void FindOrientation(cv::KeyPoint *d_pts, CudaImage *d_imgs) {
  __shared__ float resx[42], resy[42];   // raw histogram (x/y components)
  __shared__ float re8x[42], re8y[42];   // 7-bin window sums
  int p = blockIdx.x;
  int tx = threadIdx.x;
  if (tx < 42) resx[tx] = resy[tx] = 0.0f;
  __syncthreads();
  int lev = d_pts[p].class_id;           // pyramid level index
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int octave = d_pts[p].octave;
  // Sampling stride and keypoint position, in this octave's coordinates.
  int step = (int)(0.5f * d_pts[p].size + 0.5f) >> octave;
  int x = (int)(d_pts[p].pt.x + 0.5f) >> octave;
  int y = (int)(d_pts[p].pt.y + 0.5f) >> octave;
  // Thread tx covers grid offset (i, j); only the r^2 < 36 disc contributes.
  int i = (tx & 15) - 6;
  int j = (tx / 16) - 6;
  int r2 = i * i + j * j;
  if (r2 < 36) {
    float gweight = exp(-r2 / (2.5f * 2.5f * 2.0f));  // Gaussian weighting
    int pos = (y + step * j) * pitch + (x + step * i);
    float dx = gweight * dxd[pos];
    float dy = gweight * dyd[pos];
    float angle = atan2(dy, dx);
    // Map angle in (-pi, pi] to histogram bin 0..41.
    int a = max(min((int)(angle * (21 / CV_PI)) + 21, 41), 0);
    atomicAdd(resx + a, dx);
    atomicAdd(resy + a, dy);
  }
  __syncthreads();
  if (tx < 42) {
    // Circular sliding-window sum over 7 consecutive bins.
    re8x[tx] = resx[tx];
    re8y[tx] = resy[tx];
    for (int k = tx + 1; k < tx + 7; k++) {
      re8x[tx] += resx[k < 42 ? k : k - 42];
      re8y[tx] += resy[k < 42 ? k : k - 42];
    }
  }
  __syncthreads();
  if (tx == 0) {
    // Pick the window with the largest squared gradient magnitude.
    float maxr = 0.0f;
    int maxk = 0;
    for (int k = 0; k < 42; k++) {
      float r = re8x[k] * re8x[k] + re8y[k] * re8y[k];
      if (r > maxr) {
        maxr = r;
        maxk = k;
      }
    }
    float angle = atan2(re8y[maxk], re8x[maxk]);
    d_pts[p].angle = (angle < 0.0f ? angle + 2.0f * CV_PI : angle);
    // printf("XXX %.2f %.2f %.2f\n", d_pts[p].pt.x, d_pts[p].pt.y,
    // d_pts[p].angle/CV_PI*180.0f);
  }
}
// Upload the per-level image descriptors to the device, then assign one
// ORIENT_S-thread block per keypoint to compute its dominant orientation.
// Returns 0 (GPU timing is currently disabled).
double FindOrientation(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs, int numPts) {
  safeCall(hipMemcpyAsync(d_imgs, (float *)&h_imgs[0],
                          sizeof(CudaImage) * h_imgs.size(),
                          hipMemcpyHostToDevice));
  hipStreamSynchronize(0);  // wait for the metadata upload
  FindOrientation << <dim3(numPts), dim3(ORIENT_S)>>> (d_pts, d_imgs);
  CHK
  double gpuTime = 0;
#ifdef VERBOSE
  printf("FindOrientation time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
| 96858bff65cbcbbc41ff61679858550a877c9f2a.cu | #include <opencv2/features2d/features2d.hpp>
#include "cuda_akaze.h"
#include "cudautils.h"
#include <cuda_fp16.h>
#define CONVROW_W 160
#define CONVCOL_W 32
#define CONVCOL_H 40
#define CONVCOL_S 8
#define SCHARR_W 32
#define SCHARR_H 16
#define NLDSTEP_W 32
#define NLDSTEP_H 13
#define ORIENT_S (13 * 16)
#define EXTRACT_S 64
__device__ __constant__ float d_Kernel[21];
__device__ unsigned int d_PointCounter[1];
__device__ unsigned int d_ExtremaIdx[16];
__device__ __constant__ int comp_idx_1[61 * 8];
__device__ __constant__ int comp_idx_2[61 * 8];
cudaStream_t copyStream;
//__device__ __constant__ float norm_factors[29];
#if 1
#define CHK
#else
#define CHK cudaDeviceSynchronize(); \
{ \
cudaError_t cuerr = cudaGetLastError(); \
if (cuerr) { \
std::cout << "Cuda error " << cudaGetErrorString(cuerr) << ". at " << __FILE__ << ":" << __LINE__ << std::endl; \
} \
}
#endif
// Block the host until all work queued on the dedicated copy stream is done.
void WaitCuda() { cudaStreamSynchronize(copyStream); }
// Parameter bundle for the separable convolution kernels.
struct Conv_t {
  float *d_Result;  // device output image
  float *d_Data;    // device input image
  int width;        // image width in pixels
  int pitch;        // row pitch in elements (floats), per the indexing below
  int height;       // image height in pixels
};
// Horizontal pass of the separable convolution: applies the (2*RADIUS+1)-tap
// filter in constant memory (d_Kernel) along rows. Each block stages
// CONVROW_W + 2*RADIUS input samples in shared memory with replicated
// (clamped) borders, then CONVROW_W threads each produce one output pixel.
template <int RADIUS>
__global__ void ConvRowGPU(struct Conv_t s) {
  //__global__ void ConvRowGPU(float *d_Result, float *d_Data, int width, int
  //pitch, int height) {
  __shared__ float data[CONVROW_W + 2 * RADIUS];
  const int tx = threadIdx.x;
  const int minx = blockIdx.x * CONVROW_W;
  const int maxx = min(minx + CONVROW_W, s.width);
  const int yptr = blockIdx.y * s.pitch;
  const int loadPos = minx + tx - RADIUS;
  const int writePos = minx + tx;
  // Clamp out-of-range loads to the row's first/last pixel.
  if (loadPos < 0)
    data[tx] = s.d_Data[yptr];
  else if (loadPos >= s.width)
    data[tx] = s.d_Data[yptr + s.width - 1];
  else
    data[tx] = s.d_Data[yptr + loadPos];
  __syncthreads();
  if (writePos < maxx && tx < CONVROW_W) {
    float sum = 0.0f;
    for (int i = 0; i <= (2 * RADIUS); i++) sum += data[tx + i] * d_Kernel[i];
    s.d_Result[yptr + writePos] = sum;
  }
}
///////////////////////////////////////////////////////////////////////////////
// Column convolution filter
///////////////////////////////////////////////////////////////////////////////
// Vertical pass of the separable convolution: applies the (2*RADIUS+1)-tap
// filter in constant memory (d_Kernel) along columns. Each block stages a
// CONVCOL_W-wide, (CONVCOL_H + 2*RADIUS)-tall tile in shared memory with
// clamped top/bottom borders, loaded/written in strides of CONVCOL_S rows.
template <int RADIUS>
__global__ void ConvColGPU(struct Conv_t s) {
  //__global__ void ConvColGPU(float *d_Result, float *d_Data, int width, int
  //pitch, int height) {
  __shared__ float data[CONVCOL_W * (CONVCOL_H + 2 * RADIUS)];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int miny = blockIdx.y * CONVCOL_H;
  const int maxy = min(miny + CONVCOL_H, s.height) - 1;
  const int totStart = miny - RADIUS;
  const int totEnd = maxy + RADIUS;
  const int colStart = blockIdx.x * CONVCOL_W + tx;
  const int colEnd = colStart + (s.height - 1) * s.pitch;
  const int smemStep = CONVCOL_W * CONVCOL_S;
  const int gmemStep = s.pitch * CONVCOL_S;
  // Stage the tile, clamping rows outside [0, height) to the edge pixels.
  if (colStart < s.width) {
    int smemPos = ty * CONVCOL_W + tx;
    int gmemPos = colStart + (totStart + ty) * s.pitch;
    for (int y = totStart + ty; y <= totEnd; y += blockDim.y) {
      if (y < 0)
        data[smemPos] = s.d_Data[colStart];
      else if (y >= s.height)
        data[smemPos] = s.d_Data[colEnd];
      else
        data[smemPos] = s.d_Data[gmemPos];
      smemPos += smemStep;
      gmemPos += gmemStep;
    }
  }
  __syncthreads();
  // Convolve down each staged column.
  if (colStart < s.width) {
    int smemPos = ty * CONVCOL_W + tx;
    int gmemPos = colStart + (miny + ty) * s.pitch;
    for (int y = miny + ty; y <= maxy; y += blockDim.y) {
      float sum = 0.0f;
      for (int i = 0; i <= 2 * RADIUS; i++)
        sum += data[smemPos + i * CONVCOL_W] * d_Kernel[i];
      s.d_Result[gmemPos] = sum;
      smemPos += smemStep;
      gmemPos += gmemStep;
    }
  }
}
// Run a separable 2-D convolution: upload the 1-D kernel h_Kernel to constant
// memory, convolve rows of inimg into temp, then columns of temp into outimg.
// Returns 0 (GPU timing is currently disabled).
template <int RADIUS>
double SeparableFilter(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
                       float *h_Kernel) {
  int width = inimg.width;
  int pitch = inimg.pitch;
  int height = inimg.height;
  float *d_DataA = inimg.d_data;
  float *d_DataB = outimg.d_data;
  float *d_Temp = temp.d_data;
  if (d_DataA == NULL || d_DataB == NULL || d_Temp == NULL) {
    printf("SeparableFilter: missing data\n");
    return 0.0;
  }
  // TimerGPU timer0(0);
  const unsigned int kernelSize = (2 * RADIUS + 1) * sizeof(float);
  safeCall(cudaMemcpyToSymbolAsync(d_Kernel, h_Kernel, kernelSize));
  // Row pass: inimg -> temp.
  dim3 blockGridRows(iDivUp(width, CONVROW_W), height);
  dim3 threadBlockRows(CONVROW_W + 2 * RADIUS);
  struct Conv_t s;
  s.d_Result = d_Temp;
  s.d_Data = d_DataA;
  s.width = width;
  s.pitch = pitch;
  s.height = height;
  ConvRowGPU<RADIUS> << <blockGridRows, threadBlockRows>>> (s);
  // checkMsg("ConvRowGPU() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  // Column pass: temp -> outimg.
  dim3 blockGridColumns(iDivUp(width, CONVCOL_W), iDivUp(height, CONVCOL_H));
  dim3 threadBlockColumns(CONVCOL_W, CONVCOL_S);
  s.d_Result = d_DataB;
  s.d_Data = d_Temp;
  ConvColGPU<RADIUS> << <blockGridColumns, threadBlockColumns>>> (s);
  // checkMsg("ConvColGPU() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("SeparableFilter time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Gaussian low-pass filter of half-width RADIUS and variance var: build the
// normalized 1-D kernel on the host and apply it as a separable row+column
// convolution. Returns the (disabled) timing value from SeparableFilter.
template <int RADIUS>
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
               double var) {
  float coeffs[2 * RADIUS + 1];
  float total = 0.0f;
  for (int tap = -RADIUS; tap <= RADIUS; tap++) {
    coeffs[tap + RADIUS] = (float)expf(-(double)tap * tap / 2.0 / var);
    total += coeffs[tap + RADIUS];
  }
  // Normalize so the taps sum to 1.
  for (float &c : coeffs) c /= total;
  return SeparableFilter<RADIUS>(inimg, outimg, temp, coeffs);
}
// Dispatch to the fixed-radius LowPass instantiation matching kernsize
// (kernel widths 5/7/9/11 map to radii 2/3/4/5; anything larger falls back
// to radius 5 with a warning).
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp, double var,
               int kernsize) {
  if (kernsize <= 5) return LowPass<2>(inimg, outimg, temp, var);
  if (kernsize <= 7) return LowPass<3>(inimg, outimg, temp, var);
  if (kernsize <= 9) return LowPass<4>(inimg, outimg, temp, var);
  if (kernsize > 11)
    std::cerr << "Kernels larger than 11 not implemented" << std::endl;
  return LowPass<5>(inimg, outimg, temp, var);
}
// Scharr 3x3 derivative kernel: writes x and y derivatives of imgd into lxd
// and lyd using weights (3, 10, 3). Each (SCHARR_W+2)x(SCHARR_H+2) block
// stages a haloed tile in shared memory with mirrored borders, and the inner
// SCHARR_W x SCHARR_H threads each produce one output pixel.
__global__ void Scharr(float *imgd, float *lxd, float *lyd, int width,
                       int pitch, int height) {
#define BW (SCHARR_W + 2)
  __shared__ float buffer[BW * (SCHARR_H + 2)];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * SCHARR_W + tx;
  int y = blockIdx.y * SCHARR_H + ty;
  // Mirror out-of-range halo reads back inside the image.
  int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
  int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
  buffer[ty * BW + tx] = imgd[yp * pitch + xp];
  __syncthreads();
  if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
    float *b = buffer + (ty + 1) * BW + (tx + 1);
    float ul = b[-BW - 1];  // upper-left neighbor
    float ur = b[-BW + 1];  // upper-right
    float ll = b[+BW - 1];  // lower-left
    float lr = b[+BW + 1];  // lower-right
    lxd[y * pitch + x] = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
    lyd[y * pitch + x] = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
  }
}
// Launch the Scharr derivative kernel: each (SCHARR_W+2)x(SCHARR_H+2) thread
// block produces one SCHARR_W x SCHARR_H tile of x/y derivatives.
// Returns 0 (GPU timing is currently disabled).
double Scharr(CudaImage &img, CudaImage &lx, CudaImage &ly) {
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);
  Scharr << <grid, block>>>
      (img.d_data, lx.d_data, ly.d_data, img.width, img.pitch, img.height);
  double gpuTime = 0;
#ifdef VERBOSE
  printf("Scharr time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Conductivity (diffusivity) kernel for nonlinear diffusion: computes Scharr
// gradients of imgd in-kernel, then evaluates the selected conductivity
// function g(|grad|^2 / k^2) into flowd. invk is 1/k^2.
__global__ void Flow(float *imgd, float *flowd, int width, int pitch,
                     int height, DIFFUSIVITY_TYPE type, float invk) {
#define BW (SCHARR_W + 2)
  __shared__ float buffer[BW * (SCHARR_H + 2)];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * SCHARR_W + tx;
  int y = blockIdx.y * SCHARR_H + ty;
  // Mirror out-of-range halo reads back inside the image.
  int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
  int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
  buffer[ty * BW + tx] = imgd[yp * pitch + xp];
  __syncthreads();
  if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
    float *b = buffer + (ty + 1) * BW + (tx + 1);
    float ul = b[-BW - 1];
    float ur = b[-BW + 1];
    float ll = b[+BW - 1];
    float lr = b[+BW + 1];
    // Scharr x/y derivatives with weights (3, 10, 3).
    float lx = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
    float ly = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
    float dif2 = invk * (lx * lx + ly * ly);
    // Perona-Malik g1/g2, Weickert, or Charbonnier conductivity.
    if (type == PM_G1)
      flowd[y * pitch + x] = exp(-dif2);
    else if (type == PM_G2)
      flowd[y * pitch + x] = 1.0f / (1.0f + dif2);
    else if (type == WEICKERT)
      flowd[y * pitch + x] = 1.0f - exp(-3.315 / (dif2 * dif2 * dif2 * dif2));
    else
      flowd[y * pitch + x] = 1.0f / sqrt(1.0f + dif2);
  }
}
// Compute the conductivity image for one diffusion step. The kernel derives
// the gradients internally, so only the source image and the contrast factor
// kcontrast are needed. Returns 0 (GPU timing is currently disabled).
double Flow(CudaImage &img, CudaImage &flow, DIFFUSIVITY_TYPE type,
            float kcontrast) {
  const float invk2 = 1.0f / (kcontrast * kcontrast);
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);
  Flow << <grid, block>>> (img.d_data, flow.d_data, img.width, img.pitch,
                           img.height, type, invk2);
  double gpuTime = 0;
#ifdef VERBOSE
  printf("Flow time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Parameter bundle for the NLDStep diffusion kernel.
struct NLDStep_t {
  float *imgd;     // evolution image (input)
  float *flod;     // conductivity image
  float *temd;     // output: scaled divergence term for this step
  int width;
  int pitch;       // row pitch in elements (floats)
  int height;
  float stepsize;  // host passes 0.5 * the diffusion step size
};
//__global__ void NLDStep(float *imgd, float *flod, float *temd, int width, int
// pitch, int height, float stepsize)
// One explicit nonlinear-diffusion step: computes the flux divergence
// sum over the 4-neighborhood, weighted by averaged conductivities, and
// writes stepsize * divergence into temd (applied later by NLDUpdate).
// Each block stages haloed tiles of both the image and conductivity in
// shared memory; borders are clamped to the nearest valid pixel.
__global__ void NLDStep(NLDStep_t s) {
#undef BW
#define BW (NLDSTEP_W + 2)
  __shared__ float ibuff[BW * (NLDSTEP_H + 2)];  // image tile with halo
  __shared__ float fbuff[BW * (NLDSTEP_H + 2)];  // conductivity tile with halo
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * NLDSTEP_W + tx;
  int y = blockIdx.y * NLDSTEP_H + ty;
  // Clamp out-of-range halo reads to the image border.
  int xp = (x == 0 ? 0 : (x > s.width ? s.width - 1 : x - 1));
  int yp = (y == 0 ? 0 : (y > s.height ? s.height - 1 : y - 1));
  ibuff[ty * BW + tx] = s.imgd[yp * s.pitch + xp];
  fbuff[ty * BW + tx] = s.flod[yp * s.pitch + xp];
  __syncthreads();
  if (tx < NLDSTEP_W && ty < NLDSTEP_H && x < s.width && y < s.height) {
    float *ib = ibuff + (ty + 1) * BW + (tx + 1);
    float *fb = fbuff + (ty + 1) * BW + (tx + 1);
    float ib0 = ib[0];
    float fb0 = fb[0];
    // Flux through each face: (g_center + g_neighbor) * intensity difference.
    float xpos = (fb0 + fb[+1]) * (ib[+1] - ib0);
    float xneg = (fb0 + fb[-1]) * (ib0 - ib[-1]);
    float ypos = (fb0 + fb[+BW]) * (ib[+BW] - ib0);
    float yneg = (fb0 + fb[-BW]) * (ib0 - ib[-BW]);
    s.temd[y * s.pitch + x] = s.stepsize * (xpos - xneg + ypos - yneg);
  }
}
// Parameter bundle for the NLDUpdate kernel.
struct NLDUpdate_t {
  float *imgd;  // evolution image, updated in place
  float *temd;  // precomputed step image (from NLDStep) added to imgd
  int width;
  int pitch;    // row pitch in elements (floats)
  int height;
};
//__global__ void NLDUpdate(float *imgd, float *temd, int width, int pitch, int
// height)
// Forward-Euler update: add the precomputed step image to the evolution
// image in place. One thread per pixel, launched on 32x16 blocks.
__global__ void NLDUpdate(NLDUpdate_t s) {
  const int col = blockIdx.x * 32 + threadIdx.x;
  const int row = blockIdx.y * 16 + threadIdx.y;
  if (col >= s.width || row >= s.height) return;
  const int idx = row * s.pitch + col;
  s.imgd[idx] += s.temd[idx];
}
// Perform one explicit diffusion step of size stepsize on img, using the
// conductivity image flow and temp as scratch: first NLDStep writes the
// scaled divergence into temp, then NLDUpdate adds it to img in place.
// Returns 0 (GPU timing is currently disabled).
double NLDStep(CudaImage &img, CudaImage &flow, CudaImage &temp,
               float stepsize) {
  // TimerGPU timer0(0);
  dim3 blocks0(iDivUp(img.width, NLDSTEP_W), iDivUp(img.height, NLDSTEP_H));
  dim3 threads0(NLDSTEP_W + 2, NLDSTEP_H + 2);
  NLDStep_t s;
  s.imgd = img.d_data;
  s.flod = flow.d_data;
  s.temd = temp.d_data;
  s.width = img.width;
  s.pitch = img.pitch;
  s.height = img.height;
  // The kernel's flux terms sum two conductivities per face, so the step is
  // pre-halved here.
  s.stepsize = 0.5 * stepsize;
  // NLDStep<<<blocks0, threads0>>>(img.d_data, flow.d_data, temp.d_data,
  // img.width, img.pitch, img.height, 0.5f*stepsize);
  NLDStep << <blocks0, threads0>>> (s);
  // checkMsg("NLDStep() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  dim3 blocks1(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 threads1(32, 16);
  NLDUpdate_t su;
  su.imgd = img.d_data;
  su.temd = temp.d_data;
  su.width = img.width;
  su.height = img.height;
  su.pitch = img.pitch;
  // NLDUpdate<<<blocks1, threads1>>>(img.d_data, temp.d_data, img.width,
  // img.pitch, img.height);
  NLDUpdate << <blocks1, threads1>>> (su);
  // checkMsg("NLDUpdate() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  double gpuTime = 0; // = timer0.read();
#ifdef VERBOSE
  printf("NLDStep time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Weighted-area downsample for an exact-2x or general size ratio. Each thread
// produces one output pixel: a 16x16 block stages horizontally reduced rows
// in shared memory (plus one extra halo row handled by ty==15 / the image's
// last row), then reduces vertically. The final division by iwidth*iheight
// normalizes the accumulated weights.
// NOTE(review): threads outside the output image return before the
// __syncthreads() below; this is only safe if whole warps exit together --
// confirm the block/image geometry guarantees this.
__global__ void HalfSample(float *iimd, float *oimd, int iwidth, int iheight,
                           int ipitch, int owidth, int oheight, int opitch) {
  __shared__ float buffer[16 * 33];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * 16 + tx;
  int y = blockIdx.y * 16 + ty;
  if (x >= owidth || y >= oheight) return;
  float *ptri = iimd + (2 * y) * ipitch + (2 * x);
  if (2 * owidth == iwidth) {
    // Exact 2x horizontally: two-tap sums, weight owidth per tap.
    buffer[ty * 32 + tx] = owidth * (ptri[0] + ptri[1]);
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = owidth * (ptri[0] + ptri[1]);
    if (ty == 15) {
      ptri += ipitch;
      buffer[tx + 32 * 16] = owidth * (ptri[0] + ptri[1]);
    } else if (y * 2 + 3 == iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] = owidth * (ptri[0] + ptri[1]);
    }
  } else {
    // General ratio: three-tap horizontal weights f0 / owidth / f2.
    float f0 = owidth - x;
    float f2 = 1 + x;
    buffer[ty * 32 + tx] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    if (ty == 15 && 2 * oheight != iheight) {
      ptri += ipitch;
      // BUGFIX: the last tap read ptri[1] instead of ptri[2]; every other
      // row in this branch uses f2 * ptri[2], so this was a typo that broke
      // the 3-tap pattern for the block's halo row.
      buffer[tx + 32 * 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    } else if (y * 2 + 3 == iheight && 2 * oheight != iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] =
          f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    }
  }
  __syncthreads();
  // Vertical reduction over the staged rows.
  float *buff = buffer + 32 * ty + tx;
  if (2 * oheight == iheight)
    oimd[y * opitch + x] = oheight * (buff[0] + buff[16]) / (iwidth * iheight);
  else {
    float f0 = oheight - y;
    float f2 = 1 + y;
    oimd[y * opitch + x] = (f0 * buff[0] + oheight * buff[16] + f2 * buff[32]) /
                           (iwidth * iheight);
  }
}
// Exact 2x downsample: each output pixel is the mean of its 2x2 source block.
// One thread per output pixel, launched on 32x16 blocks.
__global__ void HalfSample2(float *iimd, float *oimd, int ipitch, int owidth,
                            int oheight, int opitch) {
  const int ox = blockIdx.x * 32 + threadIdx.x;
  const int oy = blockIdx.y * 16 + threadIdx.y;
  if (ox < owidth && oy < oheight) {
    const float *src = iimd + (2 * oy) * ipitch + (2 * ox);
    oimd[oy * opitch + ox] =
        0.25f * (src[0] + src[1] + src[ipitch + 0] + src[ipitch + 1]);
  }
}
// Downsample inimg into outimg: use the fast 2x2 box kernel when both
// dimensions shrink by exactly 2x, otherwise the general weighted-area
// kernel. Returns 0 (GPU timing is currently disabled).
double HalfSample(CudaImage &inimg, CudaImage &outimg) {
  const bool exactHalf =
      inimg.width == 2 * outimg.width && inimg.height == 2 * outimg.height;
  if (exactHalf) {
    dim3 grid(iDivUp(outimg.width, 32), iDivUp(outimg.height, 16));
    dim3 block(32, 16);
    HalfSample2 << <grid, block>>> (inimg.d_data, outimg.d_data, inimg.pitch,
                                    outimg.width, outimg.height, outimg.pitch);
  } else {
    dim3 grid(iDivUp(outimg.width, 16), iDivUp(outimg.height, 16));
    dim3 block(16, 16);
    HalfSample << <grid, block>>> (inimg.d_data, outimg.d_data, inimg.width,
                                   inimg.height, inimg.pitch, outimg.width,
                                   outimg.height, outimg.pitch);
  }
  double gpuTime = 0;
#ifdef VERBOSE
  printf("HalfSample time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Asynchronous device-to-device copy of inimg's pixels into outimg.
// Returns 0 (GPU timing is currently disabled).
double Copy(CudaImage &inimg, CudaImage &outimg) {
  double gpuTime = 0;
  // BUGFIX: the source pitch was passed as outimg.pitch; use the source
  // image's own pitch so the copy stays correct when the pitches differ.
  safeCall(cudaMemcpy2DAsync(outimg.d_data, sizeof(float) * outimg.pitch,
                             inimg.d_data, sizeof(float) * inimg.pitch,
                             sizeof(float) * inimg.width, inimg.height,
                             cudaMemcpyDeviceToDevice));
#ifdef VERBOSE
  printf("Copy time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Allocate one pitched GPU memory slab that backs the whole scale-space
// pyramid (num images per octave, omax octaves), the keypoint arrays, the
// descriptor buffers, and the per-level image structs. Offsets are first
// accumulated in float units, then rebased onto the real allocation.
// Returns the slab base pointer (free with FreeBuffers); out-params receive
// pointers into the slab.
float *AllocBuffers(int width, int height, int num, int omax, int &maxpts,
                    std::vector<CudaImage> &buffers, cv::KeyPoint *&pts,
                    cv::KeyPoint *&ptsbuffer, int *&ptindices, unsigned char *&desc, float *&descbuffer, CudaImage *&ims) {
  maxpts = 4 * ((maxpts+3)/4);  // round up to a multiple of 4
  buffers.resize(omax * num);
  int w = width;
  int h = height;
  int p = iAlignUp(w, 128);  // rows aligned to 128 floats
  int size = 0;
  // First pass: record each image's offset (in floats) in its d_data field.
  for (int i = 0; i < omax; i++) {
    for (int j = 0; j < num; j++) {
      CudaImage &buf = buffers[i * num + j];
      buf.width = w;
      buf.height = h;
      buf.pitch = p;
      // NOTE(review): offsets are smuggled through the pointer via (long)
      // casts -- verify long is 64-bit on the target platform (not Win64).
      buf.d_data = (float *)((long)size);
      size += h * p;
    }
    w /= 2;   // each octave halves the resolution
    h /= 2;
    p = iAlignUp(w, 128);
  }
  // Tail sections: keypoints, sort buffer, descriptors, descriptor scratch,
  // per-point indices, and the device copy of the CudaImage array.
  int ptsstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int ptsbufferstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int descstart = size;
  size += sizeof(unsigned char)*maxpts*61/sizeof(float);
  int descbufferstart = size;
  size += sizeof(float)*3*29*maxpts / sizeof(float);
  int indicesstart = size;
  size += 21*21*sizeof(int)*maxpts/sizeof(float);
  int imgstart = size;
  size += sizeof(CudaImage) * (num * omax + sizeof(float) - 1) / sizeof(float);
  float *memory = NULL;
  size_t pitch;
  std::cout << "allocating " << size/1024./1024. << " Mbytes of gpu memory\n";
  safeCall(cudaMallocPitch((void **)&memory, &pitch, (size_t)4096,
                           (size + 4095) / 4096 * sizeof(float)));
  // Second pass: rebase the recorded offsets onto the real allocation.
  for (int i = 0; i < omax * num; i++) {
    CudaImage &buf = buffers[i];
    buf.d_data = memory + (long)buf.d_data;
  }
  pts = (cv::KeyPoint *)(memory + ptsstart);
  ptsbuffer = (cv::KeyPoint *)(memory + ptsbufferstart);
  desc = (unsigned char *)(memory + descstart);
  descbuffer = (float*)(memory + descbufferstart);
  ptindices = (int*)(memory + indicesstart);
  ims = (CudaImage *)(memory + imgstart);
  InitCompareIndices();
  cudaStreamCreate(&copyStream);
  return memory;
}
// Release the single slab allocated by AllocBuffers.
void FreeBuffers(float *buffers) {
  safeCall(cudaFree(buffers));
}
__device__ unsigned int d_Maxval[1];
__device__ int d_Histogram[512];
#define CONTRAST_W 64
#define CONTRAST_H 7
#define HISTCONT_W 64
#define HISTCONT_H 8
#define HISTCONT_R 4
// Compute the Scharr gradient magnitude of imgd into cond (shifted by one
// pixel) and track the global maximum in d_Maxval. The atomicMax trick
// compares float bit patterns as unsigned ints, which orders correctly here
// because the magnitudes are non-negative.
__global__ void MaxContrast(float *imgd, float *cond, int width, int pitch,
                            int height) {
#define WID (CONTRAST_W + 2)
  __shared__ float buffer[WID * (CONTRAST_H + 2)];
  __shared__ unsigned int maxval[32];  // per-lane partial maxima (float bits)
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  if (tx < 32 && !ty) maxval[tx] = 0.0f;
  __syncthreads();
  int x = blockIdx.x * CONTRAST_W + tx;
  int y = blockIdx.y * CONTRAST_H + ty;
  if (x >= width || y >= height) return;
  float *b = buffer + ty * WID + tx;
  b[0] = imgd[y * pitch + x];
  __syncthreads();
  if (tx < CONTRAST_W && ty < CONTRAST_H && x < width - 2 && y < height - 2) {
    // Scharr x/y derivatives over the staged 3x3 neighborhood.
    float dx = 3.0f * (b[0] - b[2] + b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[WID] - b[WID + 2]);
    float dy = 3.0f * (b[0] + b[2] - b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[1] - b[2 * WID + 1]);
    float grad = sqrt(dx * dx + dy * dy);
    cond[(y + 1) * pitch + (x + 1)] = grad;
    unsigned int *gradi = (unsigned int *)&grad;
    atomicMax(maxval + (tx & 31), *gradi);
  }
  __syncthreads();
  if (tx < 32 && !ty) atomicMax(d_Maxval, maxval[tx]);
}
// Build an nbins-bin histogram of the gradient magnitudes in cond (interior
// pixels only), normalized by imaxval = 1/max. Each block accumulates into a
// shared histogram first, then flushes non-zero bins to d_Histogram; each
// thread covers HISTCONT_R rows spaced HISTCONT_H apart.
__global__ void HistContrast(float *cond, int width, int pitch, int height,
                             float imaxval, int nbins) {
  __shared__ int hist[512];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int i = ty * HISTCONT_W + tx;  // flat thread index = bin index
  if (i < nbins) hist[i] = 0;
  __syncthreads();
  int x = blockIdx.x * HISTCONT_W + tx;
  int y = blockIdx.y * HISTCONT_H * HISTCONT_R + ty;
  if (x > 0 && x < width - 1) {
    for (int i = 0; i < HISTCONT_R; i++) {
      if (y > 0 && y < height - 1) {
        int idx = min((int)(nbins * cond[y * pitch + x] * imaxval), nbins - 1);
        atomicAdd(hist + idx, 1);
      }
      y += HISTCONT_H;
    }
  }
  __syncthreads();
  if (i < nbins && hist[i] > 0) atomicAdd(d_Histogram + i, hist[i]);
}
// Estimate the contrast factor k as the perc-percentile of the gradient
// magnitude of a lightly blurred copy of img: blur, find max gradient,
// histogram the magnitudes, then walk the histogram to the percentile.
// temp/blur are scratch images; the result is written to contrast.
// Returns 0 (GPU timing is currently disabled).
double ContrastPercentile(CudaImage &img, CudaImage &temp, CudaImage &blur,
                          float perc, int nbins, float &contrast) {
  // TimerGPU timer0(0);
  LowPass(img, blur, temp, 1.0f, 5);
  // d_Maxval is declared unsigned int but carries float bits (see the
  // atomicMax punning in MaxContrast), hence the float copies here.
  float h_Maxval = 0.0f;
  safeCall(cudaMemcpyToSymbolAsync(d_Maxval, &h_Maxval, sizeof(float)));
  dim3 blocks1(iDivUp(img.width, CONTRAST_W), iDivUp(img.height, CONTRAST_H));
  dim3 threads1(CONTRAST_W + 2, CONTRAST_H + 2);
  MaxContrast << <blocks1, threads1>>>
      (blur.d_data, temp.d_data, blur.width, blur.pitch, blur.height);
  // checkMsg("MaxContrast() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  safeCall(cudaMemcpyFromSymbolAsync(&h_Maxval, d_Maxval, sizeof(float)));
  if (nbins > 512) {
    printf(
        "Warning: Largest number of possible bins in ContrastPercentile() is "
        "512\n");
    nbins = 512;
  }
  int h_Histogram[512];
  memset(h_Histogram, 0, nbins * sizeof(int));
  safeCall(
      cudaMemcpyToSymbolAsync(d_Histogram, h_Histogram, nbins * sizeof(int)));
  dim3 blocks2(iDivUp(temp.width, HISTCONT_W),
               iDivUp(temp.height, HISTCONT_H * HISTCONT_R));
  dim3 threads2(HISTCONT_W, HISTCONT_H);
  HistContrast << <blocks2, threads2>>> (temp.d_data, temp.width, temp.pitch,
                                         temp.height, 1.0f / h_Maxval, nbins);
  safeCall(
      cudaMemcpyFromSymbolAsync(h_Histogram, d_Histogram, nbins * sizeof(int)));
  // Walk the histogram until perc of the interior pixels are below bin k.
  int npoints = (temp.width - 2) * (temp.height - 2);
  int nthreshold = (int)(npoints * perc);
  int k = 0, nelements = 0;
  for (k = 0; nelements < nthreshold && k < nbins; k++)
    nelements += h_Histogram[k];
  // Fall back to 0.03 if the percentile was never reached.
  contrast = (nelements < nthreshold ? 0.03f : h_Maxval * ((float)k / nbins));
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("ContrastPercentile time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// First-order derivative kernel with sampling distance `step`: weighted
// central differences (corner weight fac1, edge weight fac2) of imd into
// lxd (x) and lyd (y). Out-of-range samples are mirrored about the border.
__global__ void Derivate(float *imd, float *lxd, float *lyd, int width,
                         int pitch, int height, int step, float fac1,
                         float fac2) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirrored sample coordinates at distance `step`.
  int xl = (x < step ? step - x : x - step);
  int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  int yl = (y < step ? step - y : y - step);
  int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  float ul = imd[yl * pitch + xl];
  float ur = imd[yl * pitch + xh];
  float ll = imd[yh * pitch + xl];
  float lr = imd[yh * pitch + xh];
  float cl = imd[y * pitch + xl];
  float cr = imd[y * pitch + xh];
  lxd[y * pitch + x] = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
  float uc = imd[yl * pitch + x];
  float lc = imd[yh * pitch + x];
  lyd[y * pitch + x] = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
}
// Second stage of the Hessian computation: differentiates the first-order
// derivative images lxd/lyd (same mirrored, `step`-spaced scheme as
// Derivate) to obtain lxx, lxy, lyy, and writes the Hessian determinant
// lxx*lyy - lxy^2 into detd.
__global__ void HessianDeterminant(float *lxd, float *lyd, float *detd,
                                   int width, int pitch, int height, int step,
                                   float fac1, float fac2) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirrored sample coordinates at distance `step`.
  int xl = (x < step ? step - x : x - step);
  int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  int yl = (y < step ? step - y : y - step);
  int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  // d/dx and d/dy of lxd give lxx and lyx.
  float ul = lxd[yl * pitch + xl];
  float ur = lxd[yl * pitch + xh];
  float ll = lxd[yh * pitch + xl];
  float lr = lxd[yh * pitch + xh];
  float cl = lxd[y * pitch + xl];
  float cr = lxd[y * pitch + xh];
  float lxx = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
  float uc = lxd[yl * pitch + x];
  float lc = lxd[yh * pitch + x];
  float lyx = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  // d/dy of lyd gives lyy.
  ul = lyd[yl * pitch + xl];
  ur = lyd[yl * pitch + xh];
  ll = lyd[yh * pitch + xl];
  lr = lyd[yh * pitch + xh];
  uc = lyd[yl * pitch + x];
  lc = lyd[yh * pitch + x];
  float lyy = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  detd[y * pitch + x] = lxx * lyy - lyx * lyx;
}
// Two-pass Hessian determinant: first-order derivatives of img into lx/ly,
// then the determinant kernel writes the result back into img. The weights
// follow the 10/3 central-difference scheme shared with Derivate.
// Returns 0 (GPU timing is currently disabled).
double HessianDeterminant(CudaImage &img, CudaImage &lx, CudaImage &ly,
                          int step) {
  float w = 10.0 / 3.0;
  float fac1 = 1.0 / (2.0 * (w + 2.0));  // corner weight
  float fac2 = w * fac1;                 // edge weight
  dim3 grid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 tb(32, 16);
  Derivate << <grid, tb>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                            img.pitch, img.height, step, fac1, fac2);
  HessianDeterminant << <grid, tb>>> (lx.d_data, ly.d_data, img.d_data,
                                      img.width, img.pitch, img.height,
                                      step, fac1, fac2);
  double gpuTime = 0;
#ifdef VERBOSE
  printf("HessianDeterminant time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Detect local maxima of the Hessian response imd above dthreshold within
// the 8-neighborhood, refine them to subpixel precision via a 2x2 Newton
// step, and append them to pts via the global counter d_PointCounter.
// Encoding quirks: a point whose subpixel offset falls outside [-1,1] is
// marked "weak" by a negative size; the refined octave fraction is stored by
// punning a float into the int octave field; the y-offset is stashed in
// angle. NOTE(review): parameters imp and imn are unused in this body.
__global__ void FindExtrema(float *imd, float *imp, float *imn, int maxx,
                            int pitch, int maxy, float border, float dthreshold,
                            int scale, int octave, float size,
                            cv::KeyPoint *pts, int maxpts) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  // Reject points whose descriptor support would leave the image.
  int left_x = (int)(x - border + 0.5f) - 1;
  int right_x = (int)(x + border + 0.5f) + 1;
  int up_y = (int)(y - border + 0.5f) - 1;
  int down_y = (int)(y + border + 0.5f) + 1;
  if (left_x < 0 || right_x >= maxx || up_y < 0 || down_y >= maxy) return;
  int p = y * pitch + x;
  float v = imd[p];
  // Strict 8-neighborhood maximum above the detector threshold.
  if (v > dthreshold && v > imd[p - pitch - 1] && v > imd[p + pitch + 1] &&
      v > imd[p + pitch - 1] && v > imd[p - pitch + 1] && v > imd[p - 1] &&
      v > imd[p + 1] && v > imd[p + pitch] && v > imd[p - pitch]) {
    // 2x2 Newton refinement from first/second spatial derivatives.
    float dx = 0.5f * (imd[p + 1] - imd[p - 1]);
    float dy = 0.5f * (imd[p + pitch] - imd[p - pitch]);
    float dxx = imd[p + 1] + imd[p - 1] - 2.0f * v;
    float dyy = imd[p + pitch] + imd[p - pitch] - 2.0f * v;
    float dxy = 0.25f * (imd[p + pitch + 1] + imd[p - pitch - 1] -
                         imd[p + pitch - 1] - imd[p - pitch + 1]);
    float det = dxx * dyy - dxy * dxy;
    float idet = (det != 0.0f ? 1.0f / det : 0.0f);
    float dst0 = idet * (dxy * dy - dyy * dx);
    float dst1 = idet * (dxy * dx - dxx * dy);
    bool weak = true;
    if (dst0 >= -1.0f && dst0 <= 1.0f && dst1 >= -1.0f && dst1 <= 1.0f) {
      weak = 0;
    }
    // Reserve a slot; roll back if the point buffer is full.
    unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
    if (idx < maxpts) {
      cv::KeyPoint &point = pts[idx];
      point.response = v;
      point.size = (weak ? -1 : 1) * 2.0 * size;
      float octsub = (dst0 < 0 ? -1 : 1) * (octave + fabs(dst0));
      *(float *)(&point.octave) = (weak ? octave : octsub);
      point.class_id = scale;
      int ratio = (1 << octave);
      point.pt.x = ratio * (x);
      point.pt.y = ratio * (y);
      point.angle = dst1;
    } else {
      atomicAdd(d_PointCounter,-1);
    }
  }
}
// Snapshot the running keypoint counter as the exclusive end index of the
// keypoints belonging to this scale level.
__global__ void CopyIdxArray(int scale) {
  d_ExtremaIdx[scale] = d_PointCounter[0];
}
// Detect scale-space extrema of the Hessian response for one level, then
// record the updated global point count as this scale's end index.
// Returns 0 (GPU timing is currently disabled).
double FindExtrema(CudaImage &img, CudaImage &imgp, CudaImage &imgn,
                   float border, float dthreshold, int scale, int octave,
                   float size, cv::KeyPoint *pts, int maxpts) {
  dim3 grid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 block(32, 16);
  FindExtrema << <grid, block>>>
      (img.d_data, imgp.d_data, imgn.d_data, img.width, img.pitch, img.height,
       border, dthreshold, scale, octave, size, pts, maxpts);
  CopyIdxArray << <1, 1>>> (scale);
  CHK
  double gpuTime = 0;
#ifdef VERBOSE
  printf("FindExtrema time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Reset the device-side keypoint counter before a new detection pass.
void ClearPoints() {
  int zero = 0;
  safeCall(cudaMemcpyToSymbolAsync(d_PointCounter, &zero, sizeof(int)));
}
// Bitonic compare-and-swap on plain int keys: orders the pair at shmidx and
// shmidx+offset descending (larger value first); sortdir offsets the first
// index to flip the direction. Despite the name, this performs no atomic
// operations -- each thread touches a disjoint pair.
__forceinline__ __device__ void atomicSort(int *pts, int shmidx, int offset,
                                           int sortdir) {
  int &p0 = pts[shmidx + sortdir];
  int &p1 = pts[shmidx + (offset - sortdir)];
  if (p0 < p1) {
    int t = p0;
    p0 = p1;
    p1 = t;
  }
}
// Ordering predicate for keypoint sorting: row-major (y, then x) order.
// The product test fires when either x coordinate is 0; it then returns
// true iff j.pt.x == 0 -- presumably treating zeroed entries as padding
// that must sort consistently (TODO confirm intent against the sorter).
__forceinline__ __device__ bool atomicCompare(const cv::KeyPoint &i,
                                              const cv::KeyPoint &j) {
  float t = i.pt.x * j.pt.x;
  if (t == 0) {
    if (j.pt.x != 0) {
      return false;
    } else {
      return true;
    }
  }
  if (i.pt.y < j.pt.y) return true;
  if (i.pt.y == j.pt.y && i.pt.x < j.pt.x) return true;
  return false;
}
// Compact sort record: original array index plus quantized (x, y) keys,
// kept small so thousands fit in shared memory during the bitonic sort.
template <typename T>
struct sortstruct_t {
  T idx;    // index into the source keypoint array (-1 for padding)
  short x;  // truncated pt.x, used as secondary sort key
  short y;  // truncated pt.y, used as primary sort key
};
// Ordering predicate on sort records, mirroring the cv::KeyPoint overload:
// row-major (y, then x). The product test fires when either x is 0 and then
// returns true iff j.x == 0 -- presumably so zeroed padding records sort
// consistently (TODO confirm intent against bitonicSort's padding).
template <typename T>
__forceinline__ __device__ bool atomicCompare(const sortstruct_t<T> &i,
                                              const sortstruct_t<T> &j) {
  int t = i.x * j.x;
  if (t == 0) {
    if (j.x != 0) {
      return false;
    } else {
      return true;
    }
  }
  if (i.y < j.y) return true;
  if (i.y == j.y && i.x < j.x) return true;
  return false;
}
// Bitonic compare-and-swap on sort records: swaps the records at shmidx and
// shmidx+offset when atomicCompare says the first should sort after the
// second; sortdir offsets the first index to flip the direction. No atomics
// are involved -- each thread handles a disjoint pair.
template <typename T>
__forceinline__ __device__ void atomicSort(sortstruct_t<T> *pts, int shmidx,
                                           int offset, int sortdir) {
  sortstruct_t<T> &p0 = pts[(shmidx + sortdir)];
  sortstruct_t<T> &p1 = pts[(shmidx + (offset - sortdir))];
  if (atomicCompare(p0, p1)) {
    // Field-wise swap of the two records.
    int idx = p0.idx;
    short ptx = p0.x;
    short pty = p0.y;
    p0.idx = p1.idx;
    p0.x = p1.x;
    p0.y = p1.y;
    p1.idx = idx;
    p1.x = ptx;
    p1.y = pty;
  }
}
#define BitonicSortThreads 1024
template <class T>
__global__ void bitonicSort(const T *pts, T *newpts) {
int scale = blockIdx.x;
__shared__ struct sortstruct_t<short> shm[8192];
int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
int last = d_ExtremaIdx[scale];
int nkpts = last - first;
const cv::KeyPoint *tmpg = &pts[first];
for (int i = threadIdx.x; i < 8192;
i += BitonicSortThreads) {
if (i < nkpts) {
shm[i].idx = i;
shm[i].y = (short)tmpg[i].pt.y;
shm[i].x = (short)tmpg[i].pt.x;
} else {
shm[i].idx = -1;
shm[i].y = 0;
shm[i].x = 0;
}
}
__syncthreads();
for (int i=1; i<8192; i <<= 1) {
for (int j=i; j>0; j >>= 1) {
int tx = threadIdx.x;
int mask = 0x0fffffff * j;
for (int idx=0; idx<4096; idx+=BitonicSortThreads) {
int sortdir = (tx & i) > 0 ? 0 : 1;
int tidx = ((tx & mask) << 1) + (tx & ~mask);
atomicSort(shm, tidx, j, j*sortdir);
tx += BitonicSortThreads;
__syncthreads();
}
}
}
cv::KeyPoint *tmpnewg = &newpts[first];
for (int i = 0; i < 8192; i += BitonicSortThreads) {
if (i + threadIdx.x < nkpts) {
tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
tmpnewg[i + threadIdx.x].response =
tmpg[shm[i + threadIdx.x].idx].response;
tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
}
}
}
// Global-memory variant of bitonicSort for scales with more than 8192
// keypoints: the sort records live in caller-provided scratch (_shm, one
// slice of _sz records per block). The record count is rounded up to the
// next power of two; padding entries sort to the back.
// NOTE(review): unlike the shared-memory version, the compare-exchange loop
// operates on global memory with only __syncthreads() between stages --
// correct for a single block per slice, which is how it is launched.
template <class T>
__global__ void bitonicSort_global(const T *pts, T *newpts, sortstruct_t<int>* _shm, int _sz) {
int scale = blockIdx.x;
//__shared__ struct sortstruct_t shm[8192];
int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
int last = d_ExtremaIdx[scale];
int nkpts = last - first;
const cv::KeyPoint *tmpg = &pts[first];
// Round the element count up to a power of two for the bitonic network.
int nkpts_ceil = 1;
while (nkpts_ceil < nkpts) nkpts_ceil *= 2;
// Per-block slice of the scratch buffer.
sortstruct_t<int> *shm = &(_shm[_sz*blockIdx.x]);
for (int i = threadIdx.x; i < nkpts_ceil;
i += BitonicSortThreads) {
if (i < nkpts) {
shm[i].idx = i;
shm[i].y = (short)tmpg[i].pt.y;
shm[i].x = (short)tmpg[i].pt.x;
} else {
shm[i].idx = -1;
shm[i].y = 0;
shm[i].x = 0;
}
}
__syncthreads();
// Bitonic network over nkpts_ceil records (see bitonicSort above).
for (int i=1; i<nkpts_ceil; i <<= 1) {
for (int j=i; j>0; j >>= 1) {
int tx = threadIdx.x;
int mask = 0x0fffffff * j;
for (int idx=0; idx<nkpts_ceil/2; idx+=BitonicSortThreads) {
int sortdir = (tx & i) > 0 ? 0 : 1;
int tidx = ((tx & mask) << 1) + (tx & ~mask);
atomicSort(shm, tidx, j, j*sortdir);
tx += BitonicSortThreads;
__syncthreads();
}
}
}
// Scatter whole keypoints to their sorted slots.
cv::KeyPoint *tmpnewg = &newpts[first];
for (int i = 0; i < nkpts_ceil; i += BitonicSortThreads) {
if (i + threadIdx.x < nkpts) {
tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
tmpnewg[i + threadIdx.x].response =
tmpg[shm[i + threadIdx.x].idx].response;
tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
}
}
}
#define FindNeighborsThreads 32
// For each keypoint, collect the indices of earlier keypoints (same scale
// and the previous scale) that lie within half its size. Output row format:
// kptindices[b * width] = 1 + neighbor count, followed by neighbor indices.
// Launch: one block per keypoint, FindNeighborsThreads threads per block.
// Relies on pts being sorted by (y, x) within each scale so the backward
// scan can break early on the y distance.
__global__ void FindNeighbors(cv::KeyPoint *pts, int *kptindices, int width) {
__shared__ int gidx[1];
// which scale?
int scale = pts[blockIdx.x].class_id;
int cmpIdx = scale < 1 ? 0 : d_ExtremaIdx[scale - 1];
float size = pts[blockIdx.x].size;
// Slot 0 of the row stores the count, so indices start at 1.
gidx[0] = 1;
__syncthreads();
// One keypoint per block.
cv::KeyPoint &kpt = pts[blockIdx.x];
// Key point to compare. Only compare with smaller than current
// Iterate backwards instead and break as soon as possible!
//for (int i = cmpIdx + threadIdx.x; i < blockIdx.x; i += FindNeighborsThreads) {
for (int i=blockIdx.x-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
cv::KeyPoint &kpt_cmp = pts[i];
// Sorted by y: once an earlier point is too far above, all the rest are.
if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
//if (fabs(kpt.pt.y-kpt_cmp.pt.y) > size*.5f) continue;
float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
(kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
if (dist < size * size * 0.25) {
int idx = atomicAdd(gidx, 1);
kptindices[blockIdx.x * width + idx] = i;
}
}
// Also scan the previous scale's range for overlapping points.
if (scale > 0) {
int startidx = d_ExtremaIdx[scale-1];
cmpIdx = scale < 2 ? 0 : d_ExtremaIdx[scale - 2];
for (int i=startidx-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
cv::KeyPoint &kpt_cmp = pts[i];
if (kpt_cmp.pt.y-kpt.pt.y > size*.5f) continue;
if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
(kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
if (dist < size * size * 0.25) {
int idx = atomicAdd(gidx, 1);
kptindices[blockIdx.x * width + idx] = i;
}
}
}
__syncthreads();
// Publish the final slot count (1 + number of neighbors found).
if (threadIdx.x == 0) {
kptindices[blockIdx.x * width] = gidx[0];
}
}
// TODO Intermediate storage of memberarray and minneighbor
#define FilterExtremaThreads 1024
// Non-maximal suppression among spatially overlapping keypoints.
// Single-block kernel (<<<1, FilterExtremaThreads>>>). Each point i has a
// neighbor list in kptindices (row stride 'width', slot 0 = 1 + count).
// memberarray[i] encodes the point's state; the kernel iterates until every
// point has been resolved (or the 10000-iteration safety cap is hit).
__global__ void FilterExtrema_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
int *kptindices, int width,
int *memberarray,
int *minneighbor,
char *shouldAdd) {
// -1 means not processed
// -2 means added but replaced
// >=0 means added
__shared__ bool shouldBreak[1];
int nump = d_PointCounter[0];
// Initially all points are unprocessed
for (int i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
memberarray[i] = -1;
}
if (threadIdx.x == 0) {
shouldBreak[0] = true;
}
__syncthreads();
// Loop until there are no more points to process
for (int xx=0; xx<10000; ++xx) {
//while (true) {
// Outer loop to handle more than 8*1024 points
// Start by restoring memberarray
// Make sure to add appropriate offset to indices
// for (int offset=0; offset<nump; offset += 8*1024) {
// memberarray[i] = storedmemberarray[i+offset];
//for (int offset=0; offset<nump; offset += 8*1024) {
// Mark all points for addition and no minimum neighbor
//int maxi = nump-offset >= 8*1024 ? 8*1024 : nump-offset;
// Reset per-iteration scratch: nump+1 = "no candidate" sentinel.
for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
minneighbor[i] = nump+1;
shouldAdd[i] = true;
}
__syncthreads();
// Look through all points. If there are points that have not been processed,
// disable breaking and check if it has no processed neighbors (add), has all processed
// neighbors (compare with neighbors) or has some unprocessed neighbor (wait)
for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
int neighborsSize = kptindices[i * width] - 1;
int *neighbors = &(kptindices[i * width + 1]);
// Only do if we didn't process the point before
if (memberarray[i] == -1) {
// If we process at least one point we shouldn't break
// No need to sync. Only want to know if at least one thread wants to
// continue
shouldBreak[0] = false;
// Sort neighbors according to the order of currently added points
// (often very few)
// If the neighbor has been replaced, stick it to the back
// If any neighbor has not been processed, break;
bool shouldProcess = true;
for (int k = 0; k < neighborsSize; ++k) {
// If the point has one or more unprocessed neighbors, skip
if (memberarray[neighbors[k]] == -1) {
shouldProcess = false;
shouldAdd[i] = false;
break;
}
// If it has a neighbor that is in the list, we don't add, but process
if (memberarray[neighbors[k]] >= 0) {
shouldAdd[i] = false;
}
}
// We should process and potentially replace the neighbor
if (shouldProcess && !shouldAdd[i]) {
// Find the smallest neighbor. Often only one or two, so no ned for fancy algorithm
// (in-place selection sort over the neighbor list; -2 entries to the back).
for (int k = 0; k < neighborsSize; ++k) {
for (int j = k + 1; j < neighborsSize; ++j) {
if (memberarray[neighbors[k]] == -2 ||
(memberarray[neighbors[j]] != -2 &&
memberarray[neighbors[j]] < memberarray[neighbors[k]])) {
int t = neighbors[k];
neighbors[k] = neighbors[j];
neighbors[j] = t;
}
}
}
// Pick the first neighbor
// We need to make sure, in case more than one point has this
// neighbor,
// That the point with lowest memberarrayindex processes it first
// Here minneighbor[i] is the target and i the neighbor
int nidx = neighbors[0];
minneighbor[nidx] = min(minneighbor[nidx], (int)i);
}
}
}
__syncthreads();
// Check which points we can add
for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
if (memberarray[i] == -1) {
if (shouldAdd[i]) {
memberarray[i] = i;
}
}
}
__syncthreads();
// Look at the neighbors. If the response is higher, replace
for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
if (minneighbor[i] != nump+1) {
if (memberarray[minneighbor[i]] == -1) {
if (!shouldAdd[minneighbor[i]]) {
const cv::KeyPoint &p0 = kpts[minneighbor[i]];
const cv::KeyPoint &p1 = kpts[i];
if (p0.response > p1.response) {
memberarray[minneighbor[i]] = i;
memberarray[i] = -2;
} else {
memberarray[minneighbor[i]] = -2;
}
}
}
}
}
__syncthreads();
// End outer loop
//for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
// storedmemberarray[i+offset] = memberarray[i];
// }
// __syncthreads();
//}
// Are we done?
// shouldBreak is uniform across the block here (set before this point,
// barrier above), so this branch does not diverge.
if (shouldBreak[0]) break;
if (threadIdx.x == 0) {
shouldBreak[0] = true;
}
__syncthreads();
}
__syncthreads();
}
// Compacts the surviving keypoints (memberarray[i] >= 0, size >= 0) to the
// front of newkpts in sorted order, 2048 entries per pass, and writes the
// final count back to d_PointCounter. Single-block kernel, 1024 threads.
__global__ void sortFiltered_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
int *memberarray) {
__shared__ int minneighbor[2048];
__shared__ int curridx[1];
int nump = d_PointCounter[0];
if (threadIdx.x == 0) {
curridx[0] = 0;
}
// Sort array
// Round nump up to a multiple of 2048 so every pass fills the window.
const int upper = (nump + 2047) & (0xfffff800);
for (int i = threadIdx.x; i < upper; i += 2 * FilterExtremaThreads) {
// Load surviving indices; rejected/out-of-range slots get the nump+1
// sentinel, which sorts to the back.
minneighbor[threadIdx.x] =
i >= nump ? nump+1 : (memberarray[i] < 0 ? nump+1 : (kpts[memberarray[i]].size < 0 ? nump+1 : memberarray[i]));
minneighbor[threadIdx.x + 1024] =
i + 1024 >= nump ? nump+1
: (memberarray[i + 1024] < 0 ? nump+1 : (kpts[memberarray[i+1024]].size < 0 ? nump+1 : memberarray[i+1024]));
__syncthreads();
// Sort and store keypoints
// Bitonic sort of the 2048-entry window.
#pragma unroll 1
for (int k = 1; k < 2048; k <<= 1) {
int sortdir = (threadIdx.x & k) > 0 ? 0 : 1;
#pragma unroll 1
for (int j = k; j > 0; j >>= 1) {
int mask = 0x0fffffff * j;
int tidx = ((threadIdx.x & mask) << 1) + (threadIdx.x & ~mask);
atomicSort(minneighbor, tidx, j, j * sortdir);
__syncthreads();
}
}
__syncthreads();
#pragma unroll 1
for (int k = threadIdx.x; k < 2048; k += 1024) {
if (minneighbor[k] < nump) {
// Restore subpixel component
// The octave field carries octave + packed subpixel x offset; the
// angle field temporarily carries the subpixel y offset.
cv::KeyPoint &okpt = kpts[minneighbor[k]];
float octsub = fabs(*(float*)(&kpts[minneighbor[k]].octave));
int octave = (int)octsub;
float subp = (*(float*)(&kpts[minneighbor[k]].octave) < 0 ? -1 : 1) * (octsub - octave);
float ratio = 1 << octave;
cv::KeyPoint &tkpt = newkpts[k + curridx[0]];
tkpt.pt.y = ratio * ((int)(0.5f+okpt.pt.y / ratio) + okpt.angle);
tkpt.pt.x = ratio * ((int)(0.5f+okpt.pt.x / ratio) + subp);
// newkpts[k + curridx[0] + threadIdx.x].angle = 0; // This will be set elsewhere
tkpt.class_id = okpt.class_id;
tkpt.octave = octave;
tkpt.response = okpt.response;
tkpt.size = okpt.size;
}
}
__syncthreads();
// How many did we add?
// Branch conditions below read shared values, so they are uniform
// across the block.
// NOTE(review): 'curridx[0] += 2048' is a plain read-modify-write
// executed by every thread -- a cross-warp race unless all warps observe
// the same old value. It only triggers when a full window of 2048
// survivors is consumed; confirm intended behavior for nump > 2048.
if (minneighbor[2047] < nump) {
curridx[0] += 2048;
} else {
if (minneighbor[1024] < nump) {
// Exactly one thread (at the boundary) performs the update.
if (threadIdx.x < 1023 && minneighbor[1024 + threadIdx.x] < nump &&
minneighbor[1024 + threadIdx.x + 1] == nump+1) {
curridx[0] += 1024 + threadIdx.x + 1;
}
} else {
if (minneighbor[threadIdx.x] < nump &&
minneighbor[threadIdx.x + 1] == nump+1) {
curridx[0] += threadIdx.x + 1;
}
}
// NOTE(review): this barrier sits on only one side of a (uniform)
// branch, and the taken '+= 2048' path starts the next loop iteration
// (which rewrites minneighbor) without a barrier -- review if windows
// ever wrap.
__syncthreads();
}
}
__syncthreads();
// Publish the compacted count.
if (threadIdx.x == 0) {
d_PointCounter[0] = curridx[0];
}
}
// Host driver for keypoint filtering: sorts detected extrema per scale,
// finds overlapping neighbors, suppresses non-maxima and compacts the
// survivors. On return, 'nump' holds the number of filtered keypoints.
// Fix: the final counter read-back was cudaMemcpyFromSymbolAsync with no
// synchronization, so the caller could observe a stale 'nump'; it is now a
// blocking copy. Allocation/copy calls are also routed through safeCall.
void FilterExtrema(cv::KeyPoint *pts, cv::KeyPoint *newpts, int* kptindices, int& nump) {
// Raw extrema count accumulated on the device.
safeCall(cudaMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int)));
// Per-scale cumulative end indices; the largest per-scale population
// decides whether the shared-memory sort fits.
unsigned int extremaidx_h[16];
safeCall(cudaMemcpyFromSymbol(extremaidx_h,d_ExtremaIdx,16*sizeof(unsigned int)));
int maxnump = extremaidx_h[0];
for (int i=1; i<16; ++i) {
maxnump = max(maxnump,extremaidx_h[i]-extremaidx_h[i-1]);
}
// Neighbor-list row stride (count slot + up to width-1 indices).
int width = ceil(21) * ceil(21);
// Sort the list of points per scale by (y, x).
dim3 blocks(16, 1, 1);
dim3 threads(BitonicSortThreads, 1, 1);
if (maxnump <= 8*1024) {
bitonicSort << <blocks, threads>>> (pts, newpts);
} else {
// Too many points for shared memory: global-memory variant with a
// scratch buffer sized to the next power of two per scale.
int nump_ceil = 1;
while (nump_ceil < nump) nump_ceil <<= 1;
std::cout << "numpceil: " << nump_ceil << std::endl;
sortstruct_t<int>* sortstruct;
safeCall(cudaMalloc((void**)&sortstruct, nump_ceil*16*sizeof(sortstruct_t<int>)));
bitonicSort_global << <blocks, threads>>> (pts, newpts, sortstruct,nump_ceil);
safeCall(cudaFree(sortstruct));
}
CHK
// Find all neighbors of each sorted keypoint.
cudaStreamSynchronize(copyStream);
blocks.x = nump;
threads.x = FindNeighborsThreads;
FindNeighbors << <blocks, threads>>> (newpts, kptindices, width);
CHK
// Filter extrema: suppress overlapping non-maxima, then compact/sort.
blocks.x = 1;
threads.x = FilterExtremaThreads;
int *buffer1, *buffer2;
safeCall(cudaMalloc((void**)&buffer1, nump*sizeof(int)));
safeCall(cudaMalloc((void**)&buffer2, nump*sizeof(int)));
char* buffer3;
safeCall(cudaMalloc((void**)&buffer3, nump));
FilterExtrema_kernel << <blocks, threads>>> (newpts, pts, kptindices, width,
buffer1, buffer2, buffer3);
threads.x = 1024;
sortFiltered_kernel << <blocks, threads>>> (newpts, pts, buffer1);
CHK
safeCall(cudaFree(buffer1));
safeCall(cudaFree(buffer2));
safeCall(cudaFree(buffer3));
// Blocking read-back: the caller uses 'nump' immediately after return.
safeCall(cudaMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int)));
}
// Copies numPts keypoints from device memory into h_pts (resized to fit)
// and returns numPts. The copy is asynchronous on copyStream; the caller
// must synchronize that stream before reading h_pts.
// Fix: taking &h_pts[0] on an empty vector is undefined behavior when
// numPts == 0; use data() and skip the copy for an empty result.
int GetPoints(std::vector<cv::KeyPoint> &h_pts, cv::KeyPoint *d_pts, int numPts) {
h_pts.resize(numPts);
if (numPts > 0) {
safeCall(cudaMemcpyAsync(h_pts.data(), d_pts,
sizeof(cv::KeyPoint) * numPts,
cudaMemcpyDeviceToHost, copyStream));
}
return numPts;
}
// Allocates a numPts x 61 byte matrix and schedules an async copy of the
// packed descriptors from the device. The copy runs on copyStream; the
// caller must synchronize that stream before reading h_desc.
// Fix: skip the copy entirely when numPts <= 0 (a zero/negative byte count
// with possibly-null Mat data pointers).
void GetDescriptors(cv::Mat &h_desc, cv::Mat &d_desc, int numPts) {
h_desc = cv::Mat(numPts, 61, CV_8U);
if (numPts > 0) {
cudaMemcpyAsync(h_desc.data, d_desc.data, numPts*61, cudaMemcpyDeviceToHost, copyStream);
}
}
// Accumulates, for one keypoint per block, the im/dx/dy sums over 2x2, 3x3
// and 4x4 sub-grids of a rotated patch. Output layout per keypoint:
// _vals[3*cell + component], cells 0..3 (2x2), 4..12 (3x3), 13..28 (4x4),
// components {im, rotated dx, rotated dy}. blockDim.x == EXTRACT_S.
// NOTE(review): uses legacy __shfl_down (no _sync variant); removed on
// Volta+ toolchains -- porting requires __shfl_down_sync with a mask that
// matches the tx_d < 32 participant set.
__global__ void ExtractDescriptors(cv::KeyPoint *d_pts, CudaImage *d_imgs,
float *_vals, int size2, int size3,
int size4) {
// Per-thread partial sums: 30 cells x 3 components per thread.
__shared__ float acc_vals[3 * 30 * EXTRACT_S];
float *acc_vals_im = &acc_vals[0];
float *acc_vals_dx = &acc_vals[30 * EXTRACT_S];
float *acc_vals_dy = &acc_vals[2 * 30 * EXTRACT_S];
int p = blockIdx.x;
float *vals = &_vals[p * 3 * 29];
// Map the keypoint back to its pyramid level's coordinates.
float iratio = 1.0f / (1 << d_pts[p].octave);
int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
float xf = d_pts[p].pt.x * iratio;
float yf = d_pts[p].pt.y * iratio;
float ang = d_pts[p].angle;
float co = cos(ang);
float si = sin(ang);
int tx = threadIdx.x;
int lev = d_pts[p].class_id;
float *imd = d_imgs[4 * lev + 0].d_data;
float *dxd = d_imgs[4 * lev + 2].d_data;
float *dyd = d_imgs[4 * lev + 3].d_data;
int pitch = d_imgs[4 * lev + 0].pitch;
int winsize = max(3 * size3, 4 * size4);
// Zero the whole accumulator (the three pointers tile the full array).
for (int i = 0; i < 30; ++i) {
acc_vals_im[i * EXTRACT_S + tx] = 0.f;
acc_vals_dx[i * EXTRACT_S + tx] = 0.f;
acc_vals_dy[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
// Sample the rotated patch; each sample contributes to up to three grids.
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
// Rotate the (k, l) patch offset by the keypoint angle.
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float im = imd[pos];
float dx = dxd[pos];
float dy = dyd[pos];
// Rotate the gradient into the keypoint frame.
float rx = -dx * si + dy * co;
float ry = dx * co + dy * si;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// Add 2x2
acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx] += im;
acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 1] += rx;
acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 2] += ry;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// Add 3x3
acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx] += im;
acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 1] += rx;
acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 2] += ry;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// Add 4x4
acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx] += im;
acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 1] += rx;
acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 2] += ry;
}
}
__syncthreads();
// Reduce stuff
// Two warps cooperate: lanes tx < 32 reduce even value indices, lanes
// 32..63 the odd ones; a shuffle tree finishes each 32-way sum.
float acc_reg;
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
for (int d = 0; d < 90; d += 30) {
if (tx_d < 32) {
acc_reg = acc_vals[3 * 30 * tx_d + offset + d] +
acc_vals[3 * 30 * (tx_d + 32) + offset + d];
acc_reg += __shfl_down(acc_reg, 1);
acc_reg += __shfl_down(acc_reg, 2);
acc_reg += __shfl_down(acc_reg, 4);
acc_reg += __shfl_down(acc_reg, 8);
acc_reg += __shfl_down(acc_reg, 16);
}
if (tx_d == 0) {
acc_vals[offset + d] = acc_reg;
}
}
}
__syncthreads();
// Have 29*3 values to store
// They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
if (tx < 29) {
vals[tx] = acc_vals[tx];
vals[29 + tx] = acc_vals[29 + tx];
vals[2 * 29 + tx] = acc_vals[2 * 29 + tx];
}
}
// Three-pass (im, dx, dy) variant of ExtractDescriptors using a third of
// the shared memory. Output layout per keypoint matches the parallel
// kernel: _vals[3*cell + component] with cells 0..28.
// Fixes: (1) the DX and DY passes wrote their reduced sums to
// final_vals[3*offset] -- the IM slots -- leaving the +1/+2 slots read by
// the epilogue uninitialized; they now write 3*offset+1 and 3*offset+2.
// (2) a __syncthreads() now separates each reduction from the following
// pass's re-initialization of acc_vals (previously a race).
// NOTE(review): the shared-memory reduction tree still has no barrier
// between levels and relies on legacy warp-synchronous execution within
// each 32-lane group -- confirm target architecture before reuse.
__global__ void ExtractDescriptors_serial(cv::KeyPoint *d_pts,
CudaImage *d_imgs, float *_vals,
int size2, int size3, int size4) {
__shared__ float acc_vals[30 * EXTRACT_S];
__shared__ float final_vals[3 * 30];
int p = blockIdx.x;
float *vals = &_vals[p * 3 * 29];
// Map the keypoint back to its pyramid level's coordinates.
float iratio = 1.0f / (1 << d_pts[p].octave);
int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
float xf = d_pts[p].pt.x * iratio;
float yf = d_pts[p].pt.y * iratio;
float ang = d_pts[p].angle;
float co = cos(ang);
float si = sin(ang);
int tx = threadIdx.x;
int lev = d_pts[p].class_id;
float *imd = d_imgs[4 * lev + 0].d_data;
float *dxd = d_imgs[4 * lev + 2].d_data;
float *dyd = d_imgs[4 * lev + 3].d_data;
int pitch = d_imgs[4 * lev + 0].pitch;
int winsize = max(3 * size3, 4 * size4);
// IM
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float im = imd[pos];
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += im;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += im;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += im;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
// IM sums land at component slot 0.
final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
// The IM reduction must complete before acc_vals is reused.
__syncthreads();
// DX
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float dx = dxd[pos];
float dy = dyd[pos];
// Gradient x-component rotated into the keypoint frame.
float rx = -dx * si + dy * co;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += rx;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += rx;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += rx;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
// Fixed: DX sums land at component slot 1 (was 3*offset).
final_vals[3 * offset + 1] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
// The DX reduction must complete before acc_vals is reused.
__syncthreads();
// DY
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float dx = dxd[pos];
float dy = dyd[pos];
// Gradient y-component rotated into the keypoint frame.
float ry = dx * co + dy * si;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += ry;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += ry;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += ry;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
// Fixed: DY sums land at component slot 2 (was 3*offset).
final_vals[3 * offset + 2] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
__syncthreads();
// Have 29*3 values to store
if (tx < 29) {
vals[tx] = final_vals[tx];
vals[29 + tx] = final_vals[29 + tx];
vals[2 * 29 + tx] = final_vals[2 * 29 + tx];
}
}
// Packs the binary descriptor: one block per keypoint, threads 0..60 each
// build one output byte by comparing pairs of accumulated patch values.
// The pair indices live in constant memory (comp_idx_1 / comp_idx_2).
__global__ void BuildDescriptor(float *_valsim, unsigned char *_desc) {
  int kpt = blockIdx.x;
  size_t byteIdx = threadIdx.x;
  if (byteIdx < 61) {
    const float *vals = &_valsim[3 * 29 * kpt];
    // The last byte packs only 6 comparison bits; all others pack 8.
    int nbits = (byteIdx == 60 ? 6 : 8);
    unsigned char packed = 0;
    for (int bit = 0; bit < nbits; ++bit) {
      int a = comp_idx_1[byteIdx * 8 + bit];
      int b = comp_idx_2[byteIdx * 8 + bit];
      if (vals[a] > vals[b]) packed |= (1 << bit);
    }
    _desc[61 * kpt + byteIdx] = packed;
  }
}
// Host driver: accumulates per-keypoint patch statistics, then packs them
// into 61-byte binary descriptors. Returns the (currently unmeasured) GPU
// time. Launch config: one block per keypoint.
double ExtractDescriptors(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs,
                          unsigned char *desc_d, float* vals_d, int patsize, int numPts) {
  // Cell sizes of the 2x2 / 3x3 / 4x4 accumulation grids.
  const int size2 = patsize;
  const int size3 = ceil(2.0f * patsize / 3.0f);
  const int size4 = ceil(0.5f * patsize);
  dim3 blocks(numPts);
  dim3 threads(EXTRACT_S);
  ExtractDescriptors << <blocks, threads>>>(d_pts, d_imgs, vals_d, size2, size3, size4);
  CHK;
  // Clear the output buffer, then pack the comparison bits.
  cudaMemsetAsync(desc_d, 0, numPts * 61);
  BuildDescriptor << <blocks, 64>>> (vals_d, desc_d);
  CHK;
  double gpuTime = 0; // timing currently disabled
#ifdef VERBOSE
  printf("ExtractDescriptors time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
#define NTHREADS_MATCH 32
// Brute-force Hamming matcher: one block per query descriptor p; the block's
// NTHREADS_MATCH threads scan all training descriptors and keep the best and
// second-best scores, then tree-reduce across the block. Writes two DMatch
// entries (best, second best) per query.
// Descriptors are 61 bytes zero-padded to 'pitch' (>= 64) so they can be
// XOR-popcounted as 8 x 64-bit words.
// NOTE(review): the reduction loop below has no __syncthreads() between
// levels; with NTHREADS_MATCH == 32 this relies on legacy warp-synchronous
// execution -- review before raising NTHREADS_MATCH or targeting
// independent-thread-scheduling architectures.
__global__ void MatchDescriptors(unsigned char *d1, unsigned char *d2,
int pitch, int nkpts_2, cv::DMatch *matches) {
int p = blockIdx.x;
int x = threadIdx.x;
__shared__ int idxBest[NTHREADS_MATCH];
__shared__ int idxSecondBest[NTHREADS_MATCH];
__shared__ int scoreBest[NTHREADS_MATCH];
__shared__ int scoreSecondBest[NTHREADS_MATCH];
// 512 > maximum possible Hamming distance (488 bits), i.e. "no match yet".
idxBest[x] = 0;
idxSecondBest[x] = 0;
scoreBest[x] = 512;
scoreSecondBest[x] = 512;
__syncthreads();
// curent version fixed with popc, still not convinced
unsigned long long *d1i = (unsigned long long *)(d1 + pitch * p);
// Strided scan: thread x handles training descriptors x, x+32, x+64, ...
for (int i = 0; i < nkpts_2; i += NTHREADS_MATCH) {
unsigned long long *d2i = (unsigned long long *)(d2 + pitch * (x + i));
if (i + x < nkpts_2) {
// Check d1[p] with d2[i]
int score = 0;
#pragma unroll
for (int j = 0; j < 8; ++j) {
score += __popcll(d1i[j] ^ d2i[j]);
}
if (score < scoreBest[x]) {
scoreSecondBest[x] = scoreBest[x];
scoreBest[x] = score;
idxSecondBest[x] = idxBest[x];
idxBest[x] = i + x;
} else if (score < scoreSecondBest[x]) {
scoreSecondBest[x] = score;
idxSecondBest[x] = i + x;
}
}
}
// for( int i=16; i>=1; i/=2) {
// int tBest = __shfl_down(scoreBest,i);
// int tIdx = __shfl_down(idxBest,i);
// if(tBest < scoreBest) {
// scoreSecondBest = scoreBest;
// idxSecondBest = idxBest;
// scoreBest = tBest;
// idxBest = tIdx;
// }
// tBest = __shfl_down(scoreSecondBest,i);
// tIdx = __shfl_down(idxSecondBest,i);
// if(tBest < scoreSecondBest) {
// scoreSecondBest = tBest;
// idxSecondBest = tIdx;
// }
// }
__syncthreads();
// Pairwise tree reduction of (best, second best) across the block.
for (int i = NTHREADS_MATCH / 2; i >= 1; i /= 2) {
if (x < i) {
if (scoreBest[x + i] < scoreBest[x]) {
scoreSecondBest[x] = scoreBest[x];
scoreBest[x] = scoreBest[x + i];
idxSecondBest[x] = idxBest[x];
idxBest[x] = idxBest[x + i];
} else if (scoreBest[x + i] < scoreSecondBest[x]) {
scoreSecondBest[x] = scoreBest[x + i];
idxSecondBest[x] = idxBest[x + i];
}
if (scoreSecondBest[x + i] < scoreSecondBest[x]) {
scoreSecondBest[x] = scoreSecondBest[x + i];
idxSecondBest[x] = idxSecondBest[x + i];
}
}
}
// if(i>16) __syncthreads();
// if(x<i) {
// if( scoreBest[x+i] < scoreSecondBest[x] ) {
// scoreSecondBest[x] = scoreBest[x+i];
// idxSecondBest[x] = idxBest[x+i];
// } else if (scoreSecondBest[x+i] < scoreSecondBest[x] ) {
// scoreSecondBest[x] = scoreSecondBest[x+i];
// idxSecondBest[x] = idxSecondBest[x+i];
// }
// }
// if(i>16) __syncthreads();
//}
/*for (int i = 1; i <= NTHREADS_MATCH; ++i) {
if (scoreBest[i] < scoreBest[0]) {
scoreSecondBest[0] = scoreBest[0];
scoreBest[0] = scoreBest[i];
idxSecondBest[0] = idxBest[0];
idxBest[0] = idxBest[i];
} else if( scoreBest[i] < scoreSecondBest[0] ) {
scoreSecondBest[0] = scoreBest[i];
idxSecondBest[0] = idxBest[i];
}
if(scoreSecondBest[i] < scoreSecondBest[0]) {
scoreSecondBest[0] = scoreSecondBest[i];
idxSecondBest[0] = idxSecondBest[i];
}
}*/
// if(x==0) {
// matches[2*p].queryIdx = p;
// matches[2*p].trainIdx = idxBest;
// matches[2*p].distance = scoreBest;
// matches[2*p+1].queryIdx = p;
// matches[2*p+1].trainIdx = idxSecondBest;
// matches[2*p+1].distance = scoreSecondBest;
// }
// Thread 0 holds the block-wide winners after the reduction.
if (x == 0) {
matches[2 * p].queryIdx = p;
matches[2 * p].trainIdx = idxBest[x];
matches[2 * p].distance = scoreBest[x];
matches[2 * p + 1].queryIdx = p;
matches[2 * p + 1].trainIdx = idxSecondBest[x];
matches[2 * p + 1].distance = scoreSecondBest[x];
}
}
// Host wrapper over the matching kernel using caller-provided device/host
// buffers. Produces knn-style output: dmatches[q] = {best, second best}.
// Both device descriptor arrays must share the same row pitch.
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches,
                      size_t pitch,
                      unsigned char* descq_d, unsigned char* desct_d, cv::DMatch* dmatches_d, cv::DMatch* dmatches_h) {
  const int nquery = desc_query.rows;
  // One block per query descriptor.
  dim3 block(nquery);
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
  // Blocking copy of the (best, second best) pairs.
  cudaMemcpy(dmatches_h, dmatches_d, nquery * 2 * sizeof(cv::DMatch),
             cudaMemcpyDeviceToHost);
  // Repackage into vectors of two matches per query.
  for (int q = 0; q < nquery; ++q) {
    std::vector<cv::DMatch> pair;
    pair.push_back(dmatches_h[2 * q]);
    pair.push_back(dmatches_h[2 * q + 1]);
    dmatches.push_back(pair);
  }
}
// Self-contained host matcher: uploads both descriptor sets into pitched
// device buffers, runs the matching kernel and returns knn-style pairs
// (best, second best) per query descriptor.
// Fix: the kernel indexes BOTH descriptor arrays with a single pitch, but
// the two cudaMallocPitch calls return independent pitches; they are equal
// in practice (same width, same device) but a mismatch would silently
// corrupt the match scores, so it is now detected and reported.
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
std::vector<std::vector<cv::DMatch> > &dmatches) {
size_t pitch1, pitch2;
unsigned char *descq_d;
cudaMallocPitch(&descq_d, &pitch1, 64, desc_query.rows);
cudaMemset2D(descq_d, pitch1, 0, 64, desc_query.rows);
cudaMemcpy2D(descq_d, pitch1, desc_query.data, desc_query.cols,
desc_query.cols, desc_query.rows, cudaMemcpyHostToDevice);
unsigned char *desct_d;
cudaMallocPitch(&desct_d, &pitch2, 64, desc_train.rows);
cudaMemset2D(desct_d, pitch2, 0, 64, desc_train.rows);
cudaMemcpy2D(desct_d, pitch2, desc_train.data, desc_train.cols,
desc_train.cols, desc_train.rows, cudaMemcpyHostToDevice);
if (pitch1 != pitch2) {
// Should never happen for identical widths on one device; the kernel
// would read the training set at the wrong stride.
printf("MatchDescriptors: descriptor pitch mismatch (%d vs %d)\n",
(int)pitch1, (int)pitch2);
}
dim3 block(desc_query.rows);
cv::DMatch *dmatches_d;
cudaMalloc(&dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch));
MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch1, desc_train.rows, dmatches_d);
cv::DMatch *dmatches_h = new cv::DMatch[2 * desc_query.rows];
// Blocking copy; also synchronizes with the kernel above.
cudaMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
cudaMemcpyDeviceToHost);
for (int i = 0; i < desc_query.rows; ++i) {
std::vector<cv::DMatch> tdmatch;
tdmatch.push_back(dmatches_h[2 * i]);
tdmatch.push_back(dmatches_h[2 * i + 1]);
dmatches.push_back(tdmatch);
}
cudaFree(descq_d);
cudaFree(desct_d);
cudaFree(dmatches_d);
delete[] dmatches_h;
}
// Fills the constant-memory tables comp_idx_1/comp_idx_2 with the 486 value
// index pairs compared by BuildDescriptor. Values are laid out interleaved
// as vals[3*cell + component], with cells 0..3 (2x2 grid), 4..12 (3x3) and
// 13..28 (4x4), and components {im, dx, dy}. For each grid, every unordered
// cell pair (j < i) is emitted once per component, in component order --
// identical emission order to the original unrolled loops.
void InitCompareIndices() {
  int comp_idx_1_h[61 * 8];
  int comp_idx_2_h[61 * 8];
  int cntr = 0;
  // Cell index ranges of the three grids: [first, last).
  const int firstCell[3] = {0, 4, 13};
  const int lastCell[3]  = {4, 13, 29};
  for (int g = 0; g < 3; ++g) {
    for (int comp = 0; comp < 3; ++comp) {
      for (int j = firstCell[g]; j < lastCell[g] - 1; ++j) {
        for (int i = j + 1; i < lastCell[g]; ++i) {
          comp_idx_1_h[cntr] = 3 * j + comp;
          comp_idx_2_h[cntr] = 3 * i + comp;
          cntr++;
        }
      }
    }
  }
  cudaMemcpyToSymbol(comp_idx_1, comp_idx_1_h, 8 * 61 * sizeof(int));
  cudaMemcpyToSymbol(comp_idx_2, comp_idx_2_h, 8 * 61 * sizeof(int));
}
// Assigns a dominant orientation to each keypoint: accumulates Gaussian-
// weighted gradients in a 13x13 neighborhood into 42 angular bins, sums each
// bin with its 6 successors (a sliding ~pi/3 window), and picks the window
// with the largest response. One block per keypoint; requires blockDim.x
// (ORIENT_S) >= 169 samples handled per thread grid below and >= 42 for the
// bin passes.
__global__ void FindOrientation(cv::KeyPoint *d_pts, CudaImage *d_imgs) {
__shared__ float resx[42], resy[42];
__shared__ float re8x[42], re8y[42];
int p = blockIdx.x;
int tx = threadIdx.x;
if (tx < 42) resx[tx] = resy[tx] = 0.0f;
__syncthreads();
int lev = d_pts[p].class_id;
float *dxd = d_imgs[4 * lev + 2].d_data;
float *dyd = d_imgs[4 * lev + 3].d_data;
int pitch = d_imgs[4 * lev + 0].pitch;
int octave = d_pts[p].octave;
// Sample step and center in the level's (downscaled) coordinates.
int step = (int)(0.5f * d_pts[p].size + 0.5f) >> octave;
int x = (int)(d_pts[p].pt.x + 0.5f) >> octave;
int y = (int)(d_pts[p].pt.y + 0.5f) >> octave;
// Thread (tx) covers one sample of the 13x13 offset grid (-6..6 squared).
int i = (tx & 15) - 6;
int j = (tx / 16) - 6;
int r2 = i * i + j * j;
if (r2 < 36) {
// Gaussian weight over the circular neighborhood (sigma = 2.5).
float gweight = exp(-r2 / (2.5f * 2.5f * 2.0f));
int pos = (y + step * j) * pitch + (x + step * i);
float dx = gweight * dxd[pos];
float dy = gweight * dyd[pos];
float angle = atan2(dy, dx);
// Bin index in [0, 41].
int a = max(min((int)(angle * (21 / CV_PI)) + 21, 41), 0);
atomicAdd(resx + a, dx);
atomicAdd(resy + a, dy);
}
__syncthreads();
// Sliding-window sum over 7 consecutive bins (wrapping).
if (tx < 42) {
re8x[tx] = resx[tx];
re8y[tx] = resy[tx];
for (int k = tx + 1; k < tx + 7; k++) {
re8x[tx] += resx[k < 42 ? k : k - 42];
re8y[tx] += resy[k < 42 ? k : k - 42];
}
}
__syncthreads();
// Thread 0 selects the strongest window and stores its angle in [0, 2*pi).
if (tx == 0) {
float maxr = 0.0f;
int maxk = 0;
for (int k = 0; k < 42; k++) {
float r = re8x[k] * re8x[k] + re8y[k] * re8y[k];
if (r > maxr) {
maxr = r;
maxk = k;
}
}
float angle = atan2(re8y[maxk], re8x[maxk]);
d_pts[p].angle = (angle < 0.0f ? angle + 2.0f * CV_PI : angle);
// printf("XXX %.2f %.2f %.2f\n", d_pts[p].pt.x, d_pts[p].pt.y,
// d_pts[p].angle/CV_PI*180.0f);
}
}
// Host-side launcher: uploads the pyramid image descriptors to the device
// and runs the orientation kernel with one block per keypoint and ORIENT_S
// threads per block.  Timing is disabled, so the returned time is always 0.
double FindOrientation(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs, int numPts) {
	size_t imgBytes = sizeof(CudaImage) * h_imgs.size();
	safeCall(cudaMemcpyAsync(d_imgs, (float *)&h_imgs[0], imgBytes,
		cudaMemcpyHostToDevice));
	cudaStreamSynchronize(0);	// ensure the async upload has completed
	FindOrientation << <dim3(numPts), dim3(ORIENT_S)>>> (d_pts, d_imgs);
	CHK
	double elapsedMs = 0;	// timing disabled
#ifdef VERBOSE
	printf("FindOrientation time = %.2f ms\n", elapsedMs);
#endif
	return elapsedMs;
}
|
a5ad4d1d0fbd29c926d745aa92ca5dae53bbf5f1.hip | // !!! This is a file automatically generated by hipify!!!
// Fast Block Distributed CUDA Implementation of the Hungarian Algorithm
//
// Annex to the paper:
// Paulo A. C. Lopes, Satyendra Singh Yadav, Aleksandar Ilic, Sarat Kumar Patra ,
// "Fast Block Distributed CUDA Implementation of the Hungarian Algorithm",
// Journal Parallel Distributed Computing
//
// Hungarian algorithm:
// (This algorithm was modified to result in an efficient GPU implementation, see paper)
//
// Initialize the slack matrix with the cost matrix, and then work with the slack matrix.
//
// STEP 1: Subtract the row minimum from each row. Subtract the column minimum from each column.
//
// STEP 2: Find a zero of the slack matrix. If there are no starred zeros in its column or row star the zero.
// Repeat for each zero.
//
// STEP 3: Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum.
//
// STEP 4: Find a non-covered zero and prime it. If there is no starred zero in the row containing this primed zero,
// Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero.
// Continue in this manner until there are no uncovered zeros left.
// Save the smallest uncovered value and Go to Step 6.
//
// STEP 5: Construct a series of alternating primed and starred zeros as follows:
// Let Z0 represent the uncovered primed zero found in Step 4.
// Let Z1 denote the starred zero in the column of Z0(if any).
// Let Z2 denote the primed zero in the row of Z1(there will always be one).
// Continue until the series terminates at a primed zero that has no starred zero in its column.
// Un-star each starred zero of the series, star each primed zero of the series,
// erase all primes and uncover every row in the matrix. Return to Step 3.
//
// STEP 6: Add the minimum uncovered value to every element of each covered row,
// and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered rows.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <assert.h>
#include <chrono>
#include <fstream>
// Uncomment to use chars as the data type, otherwise use int
// #define CHAR_DATA_TYPE
// Uncomment to use a 4x4 predefined matrix for testing
// #define USE_TEST_MATRIX
// Comment to use managed variables instead of dynamic parallelism; usefull for debugging
// #define DYNAMIC
#define klog2(n) ((n<8)?2:((n<16)?3:((n<32)?4:((n<64)?5:((n<128)?6:((n<256)?7:((n<512)?8:((n<1024)?9:((n<2048)?10:((n<4096)?11:((n<8192)?12:((n<16384)?13:0))))))))))))
#define _n_ 4096
#ifndef DYNAMIC
#define MANAGED __managed__
#define dh_checkCuda checkCuda
#define dh_get_globaltime get_globaltime
#define dh_get_timer_period get_timer_period
#else
#define dh_checkCuda d_checkCuda
#define dh_get_globaltime d_get_globaltime
#define dh_get_timer_period d_get_timer_period
#define MANAGED
#endif
#define kmin(x,y) ((x<y)?x:y)
#define kmax(x,y) ((x>y)?x:y)
#ifndef USE_TEST_MATRIX
#ifdef _n_
// These values are meant to be changed by scripts
const int n = _n_; // size of the cost/pay matrix
//const int range = _range_; // defines the range of the random matrix.
const int range = n;
const int user_n = n;
const int n_tests = 100;
#else
// User inputs: These values should be changed by the user
const int user_n = 1000; // This is the size of the cost matrix as supplied by the user
const int n = 1<<(klog2(user_n)+1); // The size of the cost/pay matrix used in the algorithm that is increased to a power of two
const int range = n; // defines the range of the random matrix.
const int n_tests = 10; // defines the number of tests performed
#endif
// End of user inputs
const int log2_n = klog2(n); // log2(n)
const int n_threads = kmin(n,64); // Number of threads used in small kernels grid size (typically grid size equal to n)
// Used in steps 3ini, 3, 4ini, 4a, 4b, 5a and 5b (64)
const int n_threads_reduction = kmin(n, 256); // Number of threads used in the redution kernels in step 1 and 6 (256)
const int n_blocks_reduction = kmin(n, 256); // Number of blocks used in the redution kernels in step 1 and 6 (256)
const int n_threads_full = kmin(n, 512); // Number of threads used the largest grids sizes (typically grid size equal to n*n)
// Used in steps 2 and 6 (512)
const int seed = 45345; // Initialization for the random number generator
#else
const int n = 4;
const int log2_n = 2;
const int n_threads = 2;
const int n_threads_reduction = 2;
const int n_blocks_reduction = 2;
const int n_threads_full = 2;
#endif
const int n_blocks = n / n_threads; // Number of blocks used in small kernels grid size (typically grid size equal to n)
const int n_blocks_full = n * n / n_threads_full; // Number of blocks used the largest gris sizes (typically grid size equal to n*n)
const int row_mask = (1 << log2_n) - 1; // Used to extract the row from tha matrix position index (matrices are column wise)
const int nrows = n, ncols = n; // The matrix is square so the number of rows and columns is equal to n
const int max_threads_per_block = 1024; // The maximum number of threads per block
const int columns_per_block_step_4 = 512; // Number of columns per block in step 4
const int n_blocks_step_4 = kmax(n / columns_per_block_step_4, 1); // Number of blocks in step 4 and 2
const int data_block_size = columns_per_block_step_4 * n; // The size of a data block. Note that this can be bigger than the matrix size.
const int log2_data_block_size = log2_n + klog2(columns_per_block_step_4); // log2 of the size of a data block. Note that klog2 cannot handle very large sizes
// For the selection of the data type used
#ifndef CHAR_DATA_TYPE
//typedef int data;
typedef float data;
#define MAX_DATA INT_MAX
#define MIN_DATA INT_MIN
#else
typedef unsigned char data;
#define MAX_DATA 255
#define MIN_DATA 0
#endif
// Host Variables
// Some host variables start with h_ to distinguish them from the corresponding device variables
// Device variables have no prefix.
#ifndef USE_TEST_MATRIX
data h_cost[ncols][nrows];
#else
data h_cost[n][n] = { { 1, 2, 3, 4 }, { 2, 4, 6, 8 }, { 3, 6, 9, 12 }, { 4, 8, 12, 16 } };
#endif
int h_column_of_star_at_row[nrows];
int h_zeros_vector_size;
int h_n_matches;
bool h_found;
bool h_goto_5;
// Device Variables
// All matrices are stored column-major; a linear index i maps to
// row = i & row_mask, column = i >> log2_n.
__device__ data slack[nrows*ncols]; // The slack matrix
__device__ data min_in_rows[nrows]; // Minimum in rows
__device__ data min_in_cols[ncols]; // Minimum in columns
__device__ int zeros[nrows*ncols]; // A vector with the position of the zeros in the slack matrix (compacted per data block)
__device__ int zeros_size_b[n_blocks_step_4]; // The number of zeros in block i
__device__ int row_of_star_at_column[ncols]; // A vector that given the column j gives the row of the star at that column (or -1, no star)
__device__ int column_of_star_at_row[nrows]; // A vector that given the row i gives the column of the star at that row (or -1, no star)
__device__ int cover_row[nrows]; // A vector that given the row i indicates if it is covered (1- covered, 0- uncovered)
__device__ int cover_column[ncols]; // A vector that given the column j indicates if it is covered (1- covered, 0- uncovered)
__device__ int column_of_prime_at_row[nrows]; // A vector that given the row i gives the column of the prime at that row (or -1, no prime)
__device__ int row_of_green_at_column[ncols]; // A vector that given the column j gives the row of the green at that column (or -1, no green)
__device__ data max_in_mat_row[nrows]; // Used in step 1 to stores the maximum in rows
__device__ data min_in_mat_col[ncols]; // Used in step 1 to stores the minimums in columns
__device__ data d_min_in_mat_vect[n_blocks_reduction]; // Used in step 6 to stores the intermediate results from the first reduction kernel
__device__ data d_min_in_mat; // Used in step 6 to store the minimum
MANAGED __device__ int zeros_size; // The number of zeros
MANAGED __device__ int n_matches; // Used in step 3 to count the number of matches found
MANAGED __device__ bool goto_5; // After step 4, goto step 5?
MANAGED __device__ bool repeat_kernel; // Needs to repeat the step 2 and step 4 kernel?
#if defined(DEBUG) || defined(_DEBUG)
MANAGED __device__ int n_covered_rows; // Used in debug mode to check for the number of covered rows
MANAGED __device__ int n_covered_columns; // Used in debug mode to check for the number of covered columns
#endif
__shared__ extern data sdata[]; // Dynamically sized shared memory used by the reduction kernels (size given at launch)
// -------------------------------------------------------------------------------------
// Device code
// -------------------------------------------------------------------------------------
#if defined(DEBUG) || defined(_DEBUG)
// Debug-only kernel: tallies the currently covered rows and columns into the
// managed counters so the host can assert that the algorithm makes progress.
__global__ void convergence_check() {
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (cover_row[idx]) atomicAdd((int*)&n_covered_rows, 1);
	if (cover_column[idx]) atomicAdd((int*)&n_covered_columns, 1);
}
#endif
// Convenience device-side wrapper for checking CUDA/HIP runtime API results.
// Prints and asserts on failure in debug builds; a no-op pass-through in
// release builds.
inline __device__ hipError_t d_checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (hipSuccess != result) {
		printf("CUDA Runtime Error: %s\n", hipGetErrorString(result));
		assert(hipSuccess == result);
	}
#endif
	return result;
};
// Resets the cover flags and the star bookkeeping for all rows and columns.
// Expects the grid to span max(nrows, ncols) threads.
__global__ void init()
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < nrows) {
		column_of_star_at_row[idx] = -1;	// no star in this row yet
		cover_row[idx] = 0;
	}
	if (idx < ncols) {
		row_of_star_at_column[idx] = -1;	// no star in this column yet
		cover_column[idx] = 0;
	}
}
// STEP 1.
// a) Subtracting the row by the minimum in each row
const int n_rows_per_block = n / n_blocks_reduction;	// rows reduced by each block
// Final warp stage of the row-minimum tree reduction.  sdata is volatile and
// no barriers are used: this is the classic pre-Volta warp-synchronous idiom
// (the caller restricts execution to tid < 32).  The n_rows_per_block guards
// stop the tree one level before partials of different rows would be mixed.
__device__ void min_in_rows_warp_reduce(volatile data* sdata, int tid) {
	if (n_threads_reduction >= 64 && n_rows_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
	if (n_threads_reduction >= 32 && n_rows_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
	if (n_threads_reduction >= 16 && n_rows_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
	if (n_threads_reduction >= 8 && n_rows_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
	if (n_threads_reduction >= 4 && n_rows_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
	if (n_threads_reduction >= 2 && n_rows_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
// Computes min_in_rows[]: one block handles n_rows_per_block consecutive
// rows.  tid % n_rows_per_block selects the row within the group and
// tid / n_rows_per_block the starting column; each thread then strides over
// the column-major slack matrix by the full grid size.
__global__ void calc_min_in_rows()
{
	__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread.
	unsigned int tid = threadIdx.x;
	unsigned int bid = blockIdx.x;
	// One gets the line and column from the blockID and threadID.
	unsigned int l = bid * n_rows_per_block + tid % n_rows_per_block;
	unsigned int c = tid / n_rows_per_block;
	unsigned int i = c * nrows + l;	// column-major linear index
	const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
	data thread_min = MAX_DATA;
	while (i < n * n) {
		thread_min = min(thread_min, slack[i]);
		i += gridSize; // go to the next piece of the matrix...
		// gridSize = 2^k * n, so that each thread always processes the same line or column
	}
	sdata[tid] = thread_min;
	__syncthreads();
	// Shared-memory tree reduction; the n_rows_per_block guards stop it one
	// level before different rows would be folded together.
	if (n_threads_reduction >= 1024 && n_rows_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (n_threads_reduction >= 512 && n_rows_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (n_threads_reduction >= 256 && n_rows_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (n_threads_reduction >= 128 && n_rows_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_in_rows_warp_reduce(sdata, tid);
	// After the reduction the first n_rows_per_block slots hold the minima.
	if (tid < n_rows_per_block) min_in_rows[bid*n_rows_per_block + tid] = sdata[tid];
}
// a) Subtracting the column by the minimum in each column
const int n_cols_per_block = n / n_blocks_reduction;	// columns reduced by each block
// Final warp stage of the column-minimum reduction; same warp-synchronous
// volatile idiom as min_in_rows_warp_reduce (caller restricts to tid < 32).
__device__ void min_in_cols_warp_reduce(volatile data* sdata, int tid) {
	if (n_threads_reduction >= 64 && n_cols_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
	if (n_threads_reduction >= 32 && n_cols_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
	if (n_threads_reduction >= 16 && n_cols_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
	if (n_threads_reduction >= 8 && n_cols_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
	if (n_threads_reduction >= 4 && n_cols_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
	if (n_threads_reduction >= 2 && n_cols_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
// Computes min_in_cols[]: one block handles n_cols_per_block consecutive
// columns; each thread keeps its column fixed and strides down the rows by
// gridSize / n.
__global__ void calc_min_in_cols()
{
	__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread
	unsigned int tid = threadIdx.x;
	unsigned int bid = blockIdx.x;
	// One gets the line and column from the blockID and threadID.
	unsigned int c = bid * n_cols_per_block + tid % n_cols_per_block;
	unsigned int l = tid / n_cols_per_block;
	const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
	data thread_min = MAX_DATA;
	while (l < n) {
		unsigned int i = c * nrows + l;	// column-major linear index
		thread_min = min(thread_min, slack[i]);
		l += gridSize / n; // go to the next piece of the matrix...
		// gridSize = 2^k * n, so that each thread always processes the same line or column
	}
	sdata[tid] = thread_min;
	__syncthreads();
	// Shared-memory tree reduction, stopped before mixing different columns.
	if (n_threads_reduction >= 1024 && n_cols_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (n_threads_reduction >= 512 && n_cols_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (n_threads_reduction >= 256 && n_cols_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (n_threads_reduction >= 128 && n_cols_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_in_cols_warp_reduce(sdata, tid);
	if (tid < n_cols_per_block) min_in_cols[bid*n_cols_per_block + tid] = sdata[tid];
}
// Step 1a: subtracts each row's minimum from every entry of that row.
// The grid spans exactly n*n threads, one per matrix element.
__global__ void step_1_row_sub()
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	slack[idx] -= min_in_rows[idx & row_mask];
}
// Step 1b: subtracts each column's minimum from every entry of that column,
// and clears the zero-list counters so compress_matrix can rebuild them.
__global__ void step_1_col_sub()
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	slack[idx] -= min_in_cols[idx >> log2_n];
	// Thread 0 resets the global zero count; the first n_blocks_step_4
	// threads reset the per-data-block counts.
	if (idx == 0) zeros_size = 0;
	if (idx < n_blocks_step_4) zeros_size_b[idx] = 0;
}
// Compress matrix: builds, per data block, a compact list of the linear
// indices of the zero entries of slack, and counts them globally.
__global__ void compress_matrix(){
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (slack[idx] != 0) return;
	atomicAdd(&zeros_size, 1);
	int blk = idx >> log2_data_block_size;		// which data block this zero belongs to
	int base = idx & ~(data_block_size - 1);	// first index of that data block
	int slot = atomicAdd(zeros_size_b + blk, 1);
	zeros[base + slot] = idx;
}
// STEP 2
// Find a zero of slack. If there are no starred zeros in its
// column or row star the zero. Repeat for each zero.
// The zeros are split through blocks of data so we run step 2 with several thread blocks and rerun the kernel if repeat was set to true.
__global__ void step_2()
{
	int i = threadIdx.x;
	int b = blockIdx.x;
	__shared__ bool repeat;			// another pass over this block's zeros is needed
	__shared__ bool s_repeat_kernel;	// the whole kernel must be relaunched
	if (i == 0) s_repeat_kernel = false;
	do {
		__syncthreads();
		if (i == 0) repeat = false;
		__syncthreads();
		// Walk this block's share of the compacted zero list.
		for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
		{
			int z = zeros[(b << log2_data_block_size) + j];
			int l = z & row_mask;	// row of the zero
			int c = z >> log2_n;	// column of the zero
			if (cover_row[l] == 0 && cover_column[c] == 0) {
				// thread tries to acquire the row
				if (!atomicExch((int *)&(cover_row[l]), 1)){
					// only one thread gets the row; now try the column
					if (!atomicExch((int *)&(cover_column[c]), 1)){
						// only one thread gets the column: star this zero
						row_of_star_at_column[c] = l;
						column_of_star_at_row[l] = c;
					}
					else {
						// column already taken: release the row and retry
						cover_row[l] = 0;
						repeat = true;
						s_repeat_kernel = true;
					}
				}
			}
		}
		__syncthreads();
	} while (repeat);
	if (s_repeat_kernel) repeat_kernel = true;
}
// STEP 3
// Uncovers all rows and columns and resets the match counter before the
// columns are re-covered by step_3.
__global__ void step_3ini()
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx == 0) n_matches = 0;
	cover_row[idx] = 0;
	cover_column[idx] = 0;
}
// Covers every column containing a starred zero and counts the stars.
// When all columns end up covered the matching is maximum.
__global__ void step_3()
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (row_of_star_at_column[idx] < 0) return;	// no star in this column
	cover_column[idx] = 1;
	atomicAdd((int*)&n_matches, 1);
}
// STEP 4
// Find a noncovered zero and prime it. If there is no starred
// zero in the row containing this primed zero, go to Step 5.
// Otherwise, cover this row and uncover the column containing
// the starred zero. Continue in this manner until there are no
// uncovered zeros left. Save the smallest uncovered value and
// Go to Step 6.

// Clears the prime/green bookkeeping used by steps 4 and 5.
__global__ void step_4_init()
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	row_of_green_at_column[idx] = -1;
	column_of_prime_at_row[idx] = -1;
}
// Primes uncovered zeros; covers their rows and uncovers the columns of the
// corresponding starred zeros.  One block per data block of the compacted
// zero list; loops until no thread changes the cover state, and raises
// goto_5 when a primed zero without a star in its row is found.
__global__ void step_4() {
	__shared__ bool s_found;		// some zero was primed in this pass
	__shared__ bool s_goto_5;		// an augmenting-path root was found
	__shared__ bool s_repeat_kernel;	// the kernel must be relaunched
	// Volatile views so that cover updates made by other threads are
	// re-read on every loop iteration.
	volatile int *v_cover_row = cover_row;
	volatile int *v_cover_column = cover_column;
	int i = threadIdx.x;
	int b = blockIdx.x;
	// int limit; my__syncthreads_init(limit);
	if (i == 0) {
		s_repeat_kernel = false;
		s_goto_5 = false;
	}
	do {
		__syncthreads();
		if (i == 0) s_found = false;
		__syncthreads();
		for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
		{
			int z = zeros[(b << log2_data_block_size) + j];
			int l = z & row_mask;	// row of this zero
			int c = z >> log2_n;	// column of this zero
			int c1 = column_of_star_at_row[l];	// starred zero in the same row, if any
			// Bounded re-check loop.  NOTE(review): this loop variable 'n'
			// shadows the file-scope matrix size n; it appears to be a
			// fixed retry count of 10 -- confirm intent.
			for (int n = 0; n < 10; n++) {
				if (!v_cover_column[c] && !v_cover_row[l]) {
					s_found = true; s_repeat_kernel = true;
					column_of_prime_at_row[l] = c;
					if (c1 >= 0) {
						v_cover_row[l] = 1;
						__threadfence();	// publish the row cover before uncovering the column
						v_cover_column[c1] = 0;
					}
					else {
						s_goto_5 = true;	// primed zero with no star in its row
					}
				}
			} // for(int n
		} // for(int j
		__syncthreads();
	} while (s_found && !s_goto_5);
	if (i == 0 && s_repeat_kernel) repeat_kernel = true;
	if (i == 0 && s_goto_5) goto_5 = true;
}
/* STEP 5:
Construct a series of alternating primed and starred zeros as
follows:
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0(if any).
Let Z2 denote the primed zero in the row of Z1(there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred
zero of the series, star each primed zero of the series, erase
all primes and uncover every line in the matrix. Return to Step 3.*/
// Eliminates joining paths
// Each thread whose row holds a primed-but-unstarred zero walks the
// alternating prime/star chain, recording the visited rows in
// row_of_green_at_column so that step_5b can flip the stars along the path.
__global__ void step_5a()
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	int r_Z0, c_Z0;
	c_Z0 = column_of_prime_at_row[i];
	// Path root: a primed zero in a row without a star.
	if (c_Z0 >= 0 && column_of_star_at_row[i] < 0) {
		row_of_green_at_column[c_Z0] = i;
		while ((r_Z0 = row_of_star_at_column[c_Z0]) >= 0) {
			c_Z0 = column_of_prime_at_row[r_Z0];
			row_of_green_at_column[c_Z0] = r_Z0;
		}
	}
}
// Applies the alternating paths
// Starting from a green column that has no star, re-stars the zeros along
// the chain recorded by step_5a: primes become stars, old stars are removed.
__global__ void step_5b()
{
	int j = blockDim.x * blockIdx.x + threadIdx.x;
	int r_Z0, c_Z0, c_Z2;
	r_Z0 = row_of_green_at_column[j];
	if (r_Z0 >= 0 && row_of_star_at_column[j] < 0) {	// tail of an augmenting path
		c_Z2 = column_of_star_at_row[r_Z0];
		column_of_star_at_row[r_Z0] = j;
		row_of_star_at_column[j] = r_Z0;
		while (c_Z2 >= 0) {
			r_Z0 = row_of_green_at_column[c_Z2]; // row of Z2
			c_Z0 = c_Z2; // col of Z2
			c_Z2 = column_of_star_at_row[r_Z0]; // col of Z4
			// star Z2
			column_of_star_at_row[r_Z0] = c_Z0;
			row_of_star_at_column[c_Z0] = r_Z0;
		}
	}
}
// STEP 6
// Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines.
// Warp-synchronous tail of the minimum tree reduction (classic pre-Volta
// idiom: volatile shared memory, no barriers, caller restricts to tid < 32).
template <unsigned int blockSize>
__device__ void min_warp_reduce(volatile data* sdata, int tid) {
	if (blockSize >= 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
	if (blockSize >= 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
	if (blockSize >= 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
	if (blockSize >= 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
	if (blockSize >= 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
	if (blockSize >= 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
template <unsigned int blockSize> // blockSize is the size of a block of threads
// First reduction pass: grid-stride minimum over all slack entries whose row
// AND column are uncovered (covered entries are treated as MAX_DATA).  Uses
// the externally-declared dynamic shared array sdata; each block writes its
// partial minimum to g_odata[blockIdx.x].
// NOTE(review): g_idata[i + blockSize] is read unguarded, so n is assumed to
// be a multiple of the power-of-two grid stride -- true for the constants
// used here; confirm if the configuration changes.
__device__ void min_reduce1(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockSize * 2) + tid;
	unsigned int gridSize = blockSize * 2 * gridDim.x;
	sdata[tid] = MAX_DATA;
	while (i < n) {
		// Each iteration folds in two elements, masking covered ones.
		int i1 = i;
		int i2 = i + blockSize;
		int l1 = i1 & row_mask;
		int c1 = i1 >> log2_n;
		data g1;
		if (cover_row[l1] == 1 || cover_column[c1] == 1) g1 = MAX_DATA;
		else g1 = g_idata[i1];
		int l2 = i2 & row_mask;
		int c2 = i2 >> log2_n;
		data g2;
		if (cover_row[l2] == 1 || cover_column[c2] == 1) g2 = MAX_DATA;
		else g2 = g_idata[i2];
		sdata[tid] = min(sdata[tid], min(g1, g2));
		i += gridSize;
	}
	__syncthreads();
	// Standard shared-memory tree reduction followed by the warp tail.
	if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <unsigned int blockSize>
// Second reduction pass: folds 2*blockSize partial minima into g_odata[0].
// Intended to run as a single block; the parameter n is unused here (each
// thread reads exactly the pair g_idata[i] and g_idata[i + blockSize]).
__device__ void min_reduce2(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockSize * 2) + tid;
	sdata[tid] = min(g_idata[i], g_idata[i + blockSize]);
	__syncthreads();
	if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// STEP 6: adds the minimum uncovered value (d_min_in_mat) to every doubly
// covered entry and subtracts it from every doubly uncovered entry, leaving
// stars, primes and covers untouched.  Also resets the zero-list counters so
// compress_matrix can rebuild them.
__global__ void step_6_add_sub()
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	int row_cov = cover_row[idx & row_mask];
	int col_cov = cover_column[idx >> log2_n];
	if (row_cov == 1 && col_cov == 1)
		slack[idx] += d_min_in_mat;
	else if (row_cov == 0 && col_cov == 0)
		slack[idx] -= d_min_in_mat;
	// Reset the zero bookkeeping for the next compress_matrix pass.
	if (idx == 0) zeros_size = 0;
	if (idx < n_blocks_step_4) zeros_size_b[idx] = 0;
}
// Reduction pass 1: per-block minima of the uncovered slack entries.
// Requires n_threads_reduction elements of dynamic shared memory (sdata).
__global__ void min_reduce_kernel1() {
	min_reduce1<n_threads_reduction>(slack, d_min_in_mat_vect, nrows*ncols);
}
// Reduction pass 2: folds the per-block minima into d_min_in_mat.
// Launched with a single block of n_blocks_reduction / 2 threads.
__global__ void min_reduce_kernel2() {
	min_reduce2<n_threads_reduction / 2>(d_min_in_mat_vect, &d_min_in_mat, n_blocks_reduction);
}
// Reads the device-wide nanosecond timer via the PTX %globaltimer register.
__device__ inline long long int d_get_globaltime(void) {
	long long int ret;
	asm volatile ("mov.u64 %0, %%globaltimer;" : "=l"(ret));
	return ret;
}
// Returns the period in miliseconds
// (one %globaltimer tick is 1 ns = 1.0e-6 ms).
__device__ inline double d_get_timer_period(void) {
	return 1.0e-6;
}
// -------------------------------------------------------------------------------------
// Host code
// -------------------------------------------------------------------------------------
// Convenience host wrapper for checking CUDA/HIP runtime API results.
// In debug builds it prints the error string and asserts; in release builds
// it is a transparent pass-through.
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (hipSuccess != result) {
		printf("CUDA Runtime Error: %s\n", hipGetErrorString(result));
		assert(hipSuccess == result);
	}
#endif
	return result;
};
typedef std::chrono::high_resolution_clock::rep hr_clock_rep;

// Current tick count of the high-resolution clock since its epoch.
inline hr_clock_rep get_globaltime(void) {
	return std::chrono::high_resolution_clock::now().time_since_epoch().count();
}
// Returns the period in miliseconds
// (duration of one high_resolution_clock tick).
inline double get_timer_period(void) {
	using period = std::chrono::high_resolution_clock::period;
	return 1000.0 * period::num / period::den;
}
// Per-kernel timing bookkeeping: declare_kernel(k) creates an accumulated
// time and a run counter for kernel k; call_kernel / call_kernel_s launch k,
// wait for completion and update both; kernel_stats(k) prints the totals.
#define declare_kernel(k) \
	hr_clock_rep k##_time = 0; \
	int k##_runs = 0
#define call_kernel(k, n_blocks, n_threads) call_kernel_s(k, n_blocks, n_threads, 0ll)
#define call_kernel_s(k, n_blocks, n_threads, shared) \
{ \
	timer_start = dh_get_globaltime(); \
	k << < n_blocks, n_threads, shared>> > (); \
	dh_checkCuda(hipDeviceSynchronize()); \
	timer_stop = dh_get_globaltime(); \
	k##_time += timer_stop - timer_start; \
	k##_runs++; \
}
// BUG FIX: the two commented-out debug lines below previously ended with
// backslashes.  A backslash at the end of a '//' comment splices the next
// source line into the comment during preprocessing (translation phase 2),
// which silently swallowed the kernel_stats() definition that follows and
// broke every kernel_stats(...) use site.  The stray backslashes are removed.
// printf("Finished kernel " #k "(%d,%d,%lld)\n", n_blocks, n_threads, shared);
// fflush(0);
#define kernel_stats(k) \
	printf(#k "\t %g \t %d\n", dh_get_timer_period() * k##_time, k##_runs)
// Hungarian_Algorithm
// Driver that sequences the step 1-6 kernels until every column is matched,
// then prints per-kernel timing statistics.  Compiled as a host function,
// or as a single-thread kernel when DYNAMIC (dynamic parallelism) is set.
#ifndef DYNAMIC
void Hungarian_Algorithm()
#else
__global__ void Hungarian_Algorithm()
#endif
{
	hr_clock_rep timer_start, timer_stop;
	hr_clock_rep total_time_start, total_time_stop;
#if defined(DEBUG) || defined(_DEBUG)
	int last_n_covered_rows = 0, last_n_matches = 0;
#endif
	// Timing accumulators for every kernel.  Note step_5c is declared and
	// reported below but never launched in this version.
	declare_kernel(init);
	declare_kernel(calc_min_in_rows); declare_kernel(step_1_row_sub);
	declare_kernel(calc_min_in_cols); declare_kernel(step_1_col_sub);
	declare_kernel(compress_matrix);
	declare_kernel(step_2);
	declare_kernel(step_3ini); declare_kernel(step_3);
	declare_kernel(step_4_init); declare_kernel(step_4);
	declare_kernel(min_reduce_kernel1); declare_kernel(min_reduce_kernel2); declare_kernel(step_6_add_sub);
	declare_kernel(step_5a); declare_kernel(step_5b); declare_kernel(step_5c);
	total_time_start = dh_get_globaltime();
	// Initialization
	call_kernel(init, n_blocks, n_threads);
	// Step 1 kernels
	call_kernel(calc_min_in_rows, n_blocks_reduction, n_threads_reduction);
	call_kernel(step_1_row_sub, n_blocks_full, n_threads_full);
	call_kernel(calc_min_in_cols, n_blocks_reduction, n_threads_reduction);
	call_kernel(step_1_col_sub, n_blocks_full, n_threads_full);
	// compress_matrix
	call_kernel(compress_matrix, n_blocks_full, n_threads_full);
	// Step 2 kernels: rerun while any block asked for another pass.
	do {
		repeat_kernel = false; dh_checkCuda(hipDeviceSynchronize());
		call_kernel(step_2, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
		// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
	} while (repeat_kernel);
	while (1) { // repeat steps 3 to 6
		// Step 3 kernels
		call_kernel(step_3ini, n_blocks, n_threads);
		call_kernel(step_3, n_blocks, n_threads);
		if (n_matches >= ncols) break; // It's done
		//step 4_kernels
		call_kernel(step_4_init, n_blocks, n_threads);
		while (1) // repeat step 4 and 6
		{
#if defined(DEBUG) || defined(_DEBUG)
			// At each iteraton either the number of matched or covered rows has to increase.
			// If we went to step 5 the number of matches increases.
			// If we went to step 6 the number of covered rows increases.
			n_covered_rows = 0; n_covered_columns = 0;
			dh_checkCuda(hipDeviceSynchronize());
			convergence_check << < n_blocks, n_threads >> > ();
			dh_checkCuda(hipDeviceSynchronize());
			assert(n_matches>last_n_matches || n_covered_rows>last_n_covered_rows);
			assert(n_matches == n_covered_columns + n_covered_rows);
			last_n_matches = n_matches;
			last_n_covered_rows = n_covered_rows;
#endif
			do { // step 4 loop
				goto_5 = false; repeat_kernel = false;
				dh_checkCuda(hipDeviceSynchronize());
				call_kernel(step_4, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
				// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
			} while (repeat_kernel && !goto_5);
			if (goto_5) break;
			//step 6_kernel
			// NOTE(review): dynamic shared memory is sized with sizeof(int);
			// this matches sizeof(data) only while data is a 4-byte type.
			call_kernel_s(min_reduce_kernel1, n_blocks_reduction, n_threads_reduction, n_threads_reduction*sizeof(int));
			call_kernel_s(min_reduce_kernel2, 1, n_blocks_reduction / 2, (n_blocks_reduction / 2) * sizeof(int));
			call_kernel(step_6_add_sub, n_blocks_full, n_threads_full);
			//compress_matrix
			call_kernel(compress_matrix, n_blocks_full, n_threads_full);
		} // repeat step 4 and 6
		// Step 5: augment along the alternating prime/star paths.
		call_kernel(step_5a, n_blocks, n_threads);
		call_kernel(step_5b, n_blocks, n_threads);
	} // repeat steps 3 to 6
	total_time_stop = dh_get_globaltime();
	printf("kernel \t time (ms) \t runs\n");
	kernel_stats(init);
	kernel_stats(calc_min_in_rows); kernel_stats(step_1_row_sub);
	kernel_stats(calc_min_in_cols); kernel_stats(step_1_col_sub);
	kernel_stats(compress_matrix);
	kernel_stats(step_2);
	kernel_stats(step_3ini); kernel_stats(step_3);
	kernel_stats(step_4_init); kernel_stats(step_4);
	kernel_stats(min_reduce_kernel1); kernel_stats(min_reduce_kernel2); kernel_stats(step_6_add_sub);
	kernel_stats(step_5a); kernel_stats(step_5b); kernel_stats(step_5c);
	printf("Total time(ms) \t %g\n", dh_get_timer_period() * (total_time_stop - total_time_start));
}
// Used to make sure some constants are properly set.
// Prints a message, waits for a key press and aborts when the invariant
// does not hold; does nothing otherwise.
void check(bool val, const char *str){
	if (val) return;
	printf("Check failed: %s!\n", str);
	getchar();
	exit(-1);
}
// Entry point: validates the compile-time launch configuration, reads the
// cost matrix from the file named by argv[1], runs the GPU Hungarian
// algorithm, and writes the assignment to gpu-solution.txt.
//
// Fixes vs. original:
//  - `time(&current_time)` / `ctime(&current_time)` had been corrupted to
//    `time(¤t_time)` (mojibake of the "&curren" HTML entity), which
//    does not compile.
//  - argv[1] is now only read after checking argc.
int main(int argc, char* argv[])
{
	//printf("Starting cuda \n");
	// Constant checks:
	//printf("log2_m %i" ,1 << log2_n);
	check(n == (1 << log2_n), "Incorrect log2_n!");
	check(n_threads*n_blocks == n, "n_threads*n_blocks != n\n");
	// step 1
	check(n_blocks_reduction <= n, "Step 1: Should have several lines per block!");
	check(n % n_blocks_reduction == 0, "Step 1: Number of lines per block should be integer!");
	check((n_blocks_reduction*n_threads_reduction) % n == 0, "Step 1: The grid size must be a multiple of the line size!");
	check(n_threads_reduction*n_blocks_reduction <= n*n, "Step 1: The grid size is bigger than the matrix size!");
	// step 6
	check(n_threads_full*n_blocks_full <= n*n, "Step 6: The grid size is bigger than the matrix size!");
	check(columns_per_block_step_4*n == (1 << log2_data_block_size), "Columns per block of step 4 is not a power of two!");
	// The input file with the cost matrix is mandatory.
	if (argc < 2) {
		printf("Usage: %s <cost-matrix-file>\n", argv[0]);
		return -1;
	}
	//printf("Running. See out.txt for output.\n");
	// Open text file
	//FILE *file = freopen("out.txt", "w", stdout);
	//if (file == NULL)
	//{
	//	perror("Error opening the output file!\n");
	//	getchar();
	//	exit(1);
	//};
	// Prints the current time
	time_t current_time;
	time(&current_time);
	printf("%s\n", ctime(&current_time));
	//fflush(file);
	//#ifndef USE_TEST_MATRIX
	std::string inpath(argv[1]);
	std::ifstream infile(inpath);
	data i;
	printf("Populating cost .. %i, %i\n", nrows, ncols);
	// The matrix is stored column-wise: h_cost[column][row].
	for (int r = 0; r < nrows; r++) {
		for (int c = 0; c < ncols; c++) {
			infile >> i;
			h_cost[c][r] = (float)i;
			//if(i < 1) {
			//	printf("%i, %i, %f\n", r, c, i);
			//}
		}
	}
	printf("Populated cost\n");
	/*
	std::default_random_engine generator(seed);
	std::uniform_int_distribution<int> distribution(0, range-1);
	for (int test = 0; test < n_tests; test++) {
	printf("\n\n\n\ntest %d\n", test);
	fflush(file);
	for (int c = 0; c < ncols; c++)
	for (int r = 0; r < nrows; r++) {
	if (c < user_n && r < user_n)
	h_cost[c][r] = distribution(generator);
	else {
	if (c == r) h_cost[c][r] = 0;
	else h_cost[c][r] = MAX_DATA;
	}
	}
	#endif
	*/
	// Copy vectors from host memory to device memory
	hipMemcpyToSymbol(slack, h_cost, sizeof(data)*nrows*ncols); // symbol refers to the device memory hence "To" means from Host to Device
	// Invoke kernels
	time_t start_time = clock();
	hipDeviceSetLimit(hipLimitPrintfFifoSize, 1024 *1024 * 1024);
#ifndef DYNAMIC
	Hungarian_Algorithm();
#else
	Hungarian_Algorithm << <1, 1 >> > ();
#endif
	checkCuda(hipDeviceSynchronize());
	time_t stop_time = clock();
	//fflush(file);
	// Copy assignments from Device to Host and calculate the total Cost
	hipMemcpyFromSymbol(h_column_of_star_at_row, column_of_star_at_row, nrows * sizeof(int));
	long long int total_cost = 0;
	std::fstream solution;
	solution.open("gpu-solution.txt", std::fstream::out);
	for (int r = 0; r < nrows; r++) {
		int c = h_column_of_star_at_row[r];
		if (c >= 0) total_cost += h_cost[c][r];
		solution << r << " " << c << std::endl;
	}
	solution.close();
	printf("Total cost is \t %lld \n", total_cost);
	printf("Low resolution time is \t %f \n", 1000.0*(double)(stop_time - start_time) / CLOCKS_PER_SEC);
	//#ifndef USE_TEST_MATRIX
	//} // for (int test
	//#endif
	//fclose(file);
}
| a5ad4d1d0fbd29c926d745aa92ca5dae53bbf5f1.cu | // Fast Block Distributed CUDA Implementation of the Hungarian Algorithm
//
// Annex to the paper:
// Paulo A. C. Lopes, Satyendra Singh Yadav, Aleksandar Ilic, Sarat Kumar Patra ,
// "Fast Block Distributed CUDA Implementation of the Hungarian Algorithm",
// Journal Parallel Distributed Computing
//
// Hungarian algorithm:
// (This algorithm was modified to result in an efficient GPU implementation, see paper)
//
// Initialize the slack matrix with the cost matrix, and then work with the slack matrix.
//
// STEP 1: Subtract the row minimum from each row. Subtract the column minimum from each column.
//
// STEP 2: Find a zero of the slack matrix. If there are no starred zeros in its column or row star the zero.
// Repeat for each zero.
//
// STEP 3: Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum.
//
// STEP 4: Find a non-covered zero and prime it. If there is no starred zero in the row containing this primed zero,
// Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero.
// Continue in this manner until there are no uncovered zeros left.
// Save the smallest uncovered value and Go to Step 6.
//
// STEP 5: Construct a series of alternating primed and starred zeros as follows:
// Let Z0 represent the uncovered primed zero found in Step 4.
// Let Z1 denote the starred zero in the column of Z0(if any).
// Let Z2 denote the primed zero in the row of Z1(there will always be one).
// Continue until the series terminates at a primed zero that has no starred zero in its column.
// Un-star each starred zero of the series, star each primed zero of the series,
// erase all primes and uncover every row in the matrix. Return to Step 3.
//
// STEP 6: Add the minimum uncovered value to every element of each covered row,
// and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered rows.
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <assert.h>
#include <chrono>
#include <fstream>
// Uncomment to use chars as the data type, otherwise use int
// #define CHAR_DATA_TYPE
// Uncomment to use a 4x4 predefined matrix for testing
// #define USE_TEST_MATRIX
// Comment to use managed variables instead of dynamic parallelism; usefull for debugging
// #define DYNAMIC
#define klog2(n) ((n<8)?2:((n<16)?3:((n<32)?4:((n<64)?5:((n<128)?6:((n<256)?7:((n<512)?8:((n<1024)?9:((n<2048)?10:((n<4096)?11:((n<8192)?12:((n<16384)?13:0))))))))))))
#define _n_ 4096
#ifndef DYNAMIC
#define MANAGED __managed__
#define dh_checkCuda checkCuda
#define dh_get_globaltime get_globaltime
#define dh_get_timer_period get_timer_period
#else
#define dh_checkCuda d_checkCuda
#define dh_get_globaltime d_get_globaltime
#define dh_get_timer_period d_get_timer_period
#define MANAGED
#endif
#define kmin(x,y) ((x<y)?x:y)
#define kmax(x,y) ((x>y)?x:y)
#ifndef USE_TEST_MATRIX
#ifdef _n_
// These values are meant to be changed by scripts
const int n = _n_; // size of the cost/pay matrix
//const int range = _range_; // defines the range of the random matrix.
const int range = n;
const int user_n = n;
const int n_tests = 100;
#else
// User inputs: These values should be changed by the user
const int user_n = 1000; // This is the size of the cost matrix as supplied by the user
const int n = 1<<(klog2(user_n)+1); // The size of the cost/pay matrix used in the algorithm that is increased to a power of two
const int range = n; // defines the range of the random matrix.
const int n_tests = 10; // defines the number of tests performed
#endif
// End of user inputs
const int log2_n = klog2(n); // log2(n)
const int n_threads = kmin(n,64); // Number of threads used in small kernels grid size (typically grid size equal to n)
// Used in steps 3ini, 3, 4ini, 4a, 4b, 5a and 5b (64)
const int n_threads_reduction = kmin(n, 256); // Number of threads used in the redution kernels in step 1 and 6 (256)
const int n_blocks_reduction = kmin(n, 256); // Number of blocks used in the redution kernels in step 1 and 6 (256)
const int n_threads_full = kmin(n, 512); // Number of threads used the largest grids sizes (typically grid size equal to n*n)
// Used in steps 2 and 6 (512)
const int seed = 45345; // Initialization for the random number generator
#else
const int n = 4;
const int log2_n = 2;
const int n_threads = 2;
const int n_threads_reduction = 2;
const int n_blocks_reduction = 2;
const int n_threads_full = 2;
#endif
const int n_blocks = n / n_threads; // Number of blocks used in small kernels grid size (typically grid size equal to n)
const int n_blocks_full = n * n / n_threads_full; // Number of blocks used the largest gris sizes (typically grid size equal to n*n)
const int row_mask = (1 << log2_n) - 1; // Used to extract the row from tha matrix position index (matrices are column wise)
const int nrows = n, ncols = n; // The matrix is square so the number of rows and columns is equal to n
const int max_threads_per_block = 1024; // The maximum number of threads per block
const int columns_per_block_step_4 = 512; // Number of columns per block in step 4
const int n_blocks_step_4 = kmax(n / columns_per_block_step_4, 1); // Number of blocks in step 4 and 2
const int data_block_size = columns_per_block_step_4 * n; // The size of a data block. Note that this can be bigger than the matrix size.
const int log2_data_block_size = log2_n + klog2(columns_per_block_step_4); // log2 of the size of a data block. Note that klog2 cannot handle very large sizes
// For the selection of the data type used
#ifndef CHAR_DATA_TYPE
//typedef int data;
typedef float data;
#define MAX_DATA INT_MAX
#define MIN_DATA INT_MIN
#else
typedef unsigned char data;
#define MAX_DATA 255
#define MIN_DATA 0
#endif
// Host Variables
// Some host variables start with h_ to distinguish them from the corresponding device variables
// Device variables have no prefix.
#ifndef USE_TEST_MATRIX
data h_cost[ncols][nrows];
#else
data h_cost[n][n] = { { 1, 2, 3, 4 }, { 2, 4, 6, 8 }, { 3, 6, 9, 12 }, { 4, 8, 12, 16 } };
#endif
int h_column_of_star_at_row[nrows];
int h_zeros_vector_size;
int h_n_matches;
bool h_found;
bool h_goto_5;
// Device Variables
__device__ data slack[nrows*ncols]; // The slack matrix
__device__ data min_in_rows[nrows]; // Minimum in rows
__device__ data min_in_cols[ncols]; // Minimum in columns
__device__ int zeros[nrows*ncols]; // A vector with the position of the zeros in the slack matrix
__device__ int zeros_size_b[n_blocks_step_4]; // The number of zeros in block i
__device__ int row_of_star_at_column[ncols]; // A vector that given the column j gives the row of the star at that column (or -1, no star)
__device__ int column_of_star_at_row[nrows]; // A vector that given the row i gives the column of the star at that row (or -1, no star)
__device__ int cover_row[nrows]; // A vector that given the row i indicates if it is covered (1- covered, 0- uncovered)
__device__ int cover_column[ncols]; // A vector that given the column j indicates if it is covered (1- covered, 0- uncovered)
__device__ int column_of_prime_at_row[nrows]; // A vector that given the row i gives the column of the prime at that row (or -1, no prime)
__device__ int row_of_green_at_column[ncols]; // A vector that given the row j gives the column of the green at that row (or -1, no green)
__device__ data max_in_mat_row[nrows]; // Used in step 1 to stores the maximum in rows
__device__ data min_in_mat_col[ncols]; // Used in step 1 to stores the minimums in columns
__device__ data d_min_in_mat_vect[n_blocks_reduction]; // Used in step 6 to stores the intermediate results from the first reduction kernel
__device__ data d_min_in_mat; // Used in step 6 to store the minimum
MANAGED __device__ int zeros_size; // The number fo zeros
MANAGED __device__ int n_matches; // Used in step 3 to count the number of matches found
MANAGED __device__ bool goto_5; // After step 4, goto step 5?
MANAGED __device__ bool repeat_kernel; // Needs to repeat the step 2 and step 4 kernel?
#if defined(DEBUG) || defined(_DEBUG)
MANAGED __device__ int n_covered_rows; // Used in debug mode to check for the number of covered rows
MANAGED __device__ int n_covered_columns; // Used in debug mode to check for the number of covered columns
#endif
__shared__ extern data sdata[]; // For access to shared memory
// -------------------------------------------------------------------------------------
// Device code
// -------------------------------------------------------------------------------------
#if defined(DEBUG) || defined(_DEBUG)
// Debug-only kernel: counts currently covered rows and columns into the
// managed counters n_covered_rows / n_covered_columns (reset by the caller
// before launch). Launched with grid*block == n, one thread per row/column.
__global__ void convergence_check() {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (cover_column[i]) atomicAdd((int*)&n_covered_columns, 1);
	if (cover_row[i]) atomicAdd((int*)&n_covered_rows, 1);
}
#endif
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Device-side CUDA error check: in debug builds, prints and asserts on any
// non-success result; in release builds it compiles to a pass-through.
// Always returns the result unchanged so it can wrap calls inline.
inline __device__ cudaError_t d_checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (result != cudaSuccess) {
		printf("CUDA Runtime Error: %s\n",
			cudaGetErrorString(result));
		assert(result == cudaSuccess);
	}
#endif
	return result;
};
// Resets the per-row / per-column state used by steps 2..6: clears every
// cover flag and marks every row and column as containing no starred zero.
// Launched with n_blocks x n_threads so the grid covers exactly n entries.
__global__ void init()
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid < nrows) {
		column_of_star_at_row[tid] = -1;
		cover_row[tid] = 0;
	}
	if (tid < ncols) {
		row_of_star_at_column[tid] = -1;
		cover_column[tid] = 0;
	}
}
// STEP 1.
// a) Subtracting the row by the minimum in each row
const int n_rows_per_block = n / n_blocks_reduction;
// Final intra-warp stage of the row-minimum reduction (tid < 32). Each step
// is compiled in only while the remaining stride is still larger than the
// number of rows handled per block, so values belonging to different rows
// are never mixed. The volatile pointer forces re-reads from shared memory;
// this relies on warp-synchronous execution without __syncwarp
// (pre-Volta idiom — NOTE(review): confirm target architecture).
__device__ void min_in_rows_warp_reduce(volatile data* sdata, int tid) {
	if (n_threads_reduction >= 64 && n_rows_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
	if (n_threads_reduction >= 32 && n_rows_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
	if (n_threads_reduction >= 16 && n_rows_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
	if (n_threads_reduction >= 8 && n_rows_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
	if (n_threads_reduction >= 4 && n_rows_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
	if (n_threads_reduction >= 2 && n_rows_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
// STEP 1 (rows): computes min_in_rows[l] = min over columns of slack[c][l].
// Each block reduces n_rows_per_block consecutive rows; within a block,
// threads are interleaved so that thread tid always visits the same row
// (tid % n_rows_per_block), which lets the shared-memory tree reduction
// stop as soon as the stride would cross row boundaries.
__global__ void calc_min_in_rows()
{
	__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread.
	unsigned int tid = threadIdx.x;
	unsigned int bid = blockIdx.x;
	// One gets the line and column from the blockID and threadID.
	unsigned int l = bid * n_rows_per_block + tid % n_rows_per_block;
	unsigned int c = tid / n_rows_per_block;
	unsigned int i = c * nrows + l;
	const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
	data thread_min = MAX_DATA;
	// Grid-stride accumulation over the column-major matrix.
	while (i < n * n) {
		thread_min = min(thread_min, slack[i]);
		i += gridSize; // go to the next piece of the matrix...
		// gridSize = 2^k * n, so that each thread always processes the same line or column
	}
	sdata[tid] = thread_min;
	__syncthreads();
	// Tree reduction in shared memory; a stage runs only while the stride
	// still stays inside a single row's group of threads.
	if (n_threads_reduction >= 1024 && n_rows_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (n_threads_reduction >= 512 && n_rows_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (n_threads_reduction >= 256 && n_rows_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (n_threads_reduction >= 128 && n_rows_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_in_rows_warp_reduce(sdata, tid);
	// The first n_rows_per_block slots now hold one minimum per row.
	if (tid < n_rows_per_block) min_in_rows[bid*n_rows_per_block + tid] = sdata[tid];
}
// a) Subtracting the column by the minimum in each column
const int n_cols_per_block = n / n_blocks_reduction;
// Final intra-warp stage of the column-minimum reduction (tid < 32).
// Mirrors min_in_rows_warp_reduce: stages stop once the stride would mix
// values from different columns. Volatile + warp-synchronous idiom
// (NOTE(review): no __syncwarp; confirm target architecture).
__device__ void min_in_cols_warp_reduce(volatile data* sdata, int tid) {
	if (n_threads_reduction >= 64 && n_cols_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
	if (n_threads_reduction >= 32 && n_cols_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
	if (n_threads_reduction >= 16 && n_cols_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
	if (n_threads_reduction >= 8 && n_cols_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
	if (n_threads_reduction >= 4 && n_cols_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
	if (n_threads_reduction >= 2 && n_cols_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
// STEP 1 (columns): computes min_in_cols[c] = min over rows of slack[c][l].
// Each block reduces n_cols_per_block consecutive columns; thread tid always
// visits the same column (tid % n_cols_per_block), stepping through rows in
// strides of gridSize / n.
__global__ void calc_min_in_cols()
{
	__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread
	unsigned int tid = threadIdx.x;
	unsigned int bid = blockIdx.x;
	// One gets the line and column from the blockID and threadID.
	unsigned int c = bid * n_cols_per_block + tid % n_cols_per_block;
	unsigned int l = tid / n_cols_per_block;
	const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
	data thread_min = MAX_DATA;
	// Walk down the column in row strides of gridSize / n.
	while (l < n) {
		unsigned int i = c * nrows + l;
		thread_min = min(thread_min, slack[i]);
		l += gridSize / n; // go to the next piece of the matrix...
		// gridSize = 2^k * n, so that each thread always processes the same line or column
	}
	sdata[tid] = thread_min;
	__syncthreads();
	// Tree reduction; stages run only while the stride stays within the
	// group of threads working on a single column.
	if (n_threads_reduction >= 1024 && n_cols_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (n_threads_reduction >= 512 && n_cols_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (n_threads_reduction >= 256 && n_cols_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (n_threads_reduction >= 128 && n_cols_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_in_cols_warp_reduce(sdata, tid);
	if (tid < n_cols_per_block) min_in_cols[bid*n_cols_per_block + tid] = sdata[tid];
}
// STEP 1a: subtracts from every slack entry the minimum of its row.
// One thread per matrix element (full n*n grid); the matrix is stored
// column-wise, so the row index is the low log2_n bits of the linear index.
__global__ void step_1_row_sub()
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	const int row = idx & row_mask;
	slack[idx] -= min_in_rows[row];
}
// STEP 1b: subtracts from every slack entry the minimum of its column, and
// resets the zero counters so compress_matrix can rebuild the zero list.
// One thread per matrix element (full n*n grid).
__global__ void step_1_col_sub()
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	const int col = idx >> log2_n;
	slack[idx] -= min_in_cols[col];
	if (idx == 0) zeros_size = 0;
	if (idx < n_blocks_step_4) zeros_size_b[idx] = 0;
}
// Compress matrix
// Builds a compacted, per-data-block list of the positions of all zeros in
// the slack matrix. Block b's zeros are appended (in arbitrary order, via
// atomic counters) starting at zeros[b << log2_data_block_size], with their
// count kept in zeros_size_b[b]; zeros_size totals them. The counters must
// be zeroed beforehand (done by step_1_col_sub and step_6_add_sub).
// Launched on the full n*n grid, one thread per element.
__global__ void compress_matrix(){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (slack[i] == 0) {
		atomicAdd(&zeros_size, 1);
		int b = i >> log2_data_block_size;
		int i0 = i & ~(data_block_size - 1); // == b << log2_data_block_size
		int j = atomicAdd(zeros_size_b + b, 1);
		zeros[i0 + j] = i;
	}
}
// STEP 2
// Find a zero of slack. If there are no starred zeros in its
// column or row star the zero. Repeat for each zero.
// The zeros are split through blocks of data so we run step 2 with several thread blocks and rerun the kernel if repeat was set to true.
// STEP 2: greedily stars zeros. For each zero whose row and column are both
// uncovered, a thread tries to claim the row and then the column with
// atomicExch (old value 0 means it won the claim). If the row is claimed
// but the column is lost to another thread, the row is released and the
// block retries, since a different zero in that row may still be starrable.
// Each block scans its own data block of the compacted zero list; if any
// thread had to back off, repeat_kernel asks the host to relaunch the
// whole kernel (claims can span blocks).
__global__ void step_2()
{
	int i = threadIdx.x;
	int b = blockIdx.x;
	__shared__ bool repeat;           // block-wide: retry the scan loop
	__shared__ bool s_repeat_kernel;  // block-wide: ask host for a relaunch
	if (i == 0) s_repeat_kernel = false;
	do {
		__syncthreads();
		if (i == 0) repeat = false;
		__syncthreads();
		for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
		{
			// Decode the linear index of the zero into (row l, column c).
			int z = zeros[(b << log2_data_block_size) + j];
			int l = z & row_mask;
			int c = z >> log2_n;
			if (cover_row[l] == 0 && cover_column[c] == 0) {
				// thread trys to get the line
				if (!atomicExch((int *)&(cover_row[l]), 1)){
					// only one thread gets the line
					if (!atomicExch((int *)&(cover_column[c]), 1)){
						// only one thread gets the column
						row_of_star_at_column[c] = l;
						column_of_star_at_row[l] = c;
					}
					else {
						// Lost the column: release the row and retry.
						cover_row[l] = 0;
						repeat = true;
						s_repeat_kernel = true;
					}
				}
			}
		}
		__syncthreads();
	} while (repeat);
	if (s_repeat_kernel) repeat_kernel = true;
}
// STEP 3
// uncover all the rows and columns before going to step 3
// Prepares step 3: uncovers every row and column and resets the global
// match counter. Launched with grid*block == n, one thread per line.
__global__ void step_3ini()
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid == 0) n_matches = 0;
	cover_column[tid] = 0;
	cover_row[tid] = 0;
}
// Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum
// STEP 3: covers every column containing a starred zero and counts the
// stars into n_matches; when n_matches reaches ncols the assignment is
// maximum and the host loop terminates. One thread per column.
__global__ void step_3()
{
	const int col = blockDim.x * blockIdx.x + threadIdx.x;
	if (row_of_star_at_column[col] < 0) return;
	cover_column[col] = 1;
	atomicAdd((int*)&n_matches, 1);
}
// STEP 4
// Find a noncovered zero and prime it. If there is no starred
// zero in the row containing this primed zero, go to Step 5.
// Otherwise, cover this row and uncover the column containing
// the starred zero. Continue in this manner until there are no
// uncovered zeros left. Save the smallest uncovered value and
// Go to Step 6.
// Prepares step 4: clears all primed zeros and the "green" back-pointers
// that step 5 uses to walk the alternating path. One thread per line.
__global__ void step_4_init()
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	row_of_green_at_column[tid] = -1;
	column_of_prime_at_row[tid] = -1;
}
// STEP 4: repeatedly primes uncovered zeros. If the primed zero's row has
// a starred zero, the row is covered and the star's column uncovered
// (exposing new zeros); otherwise goto_5 signals the host to run step 5.
// Cover flags are read through volatile pointers and published with
// __threadfence so other threads observe cover/uncover in order. The host
// relaunches the kernel while repeat_kernel is set and goto_5 is not.
__global__ void step_4() {
	__shared__ bool s_found;          // block-wide: some zero was primed this pass
	__shared__ bool s_goto_5;         // block-wide: an augmenting path was exposed
	__shared__ bool s_repeat_kernel;  // block-wide: ask host for a relaunch
	volatile int *v_cover_row = cover_row;
	volatile int *v_cover_column = cover_column;
	int i = threadIdx.x;
	int b = blockIdx.x;
	// int limit; my__syncthreads_init(limit);
	if (i == 0) {
		s_repeat_kernel = false;
		s_goto_5 = false;
	}
	do {
		__syncthreads();
		if (i == 0) s_found = false;
		__syncthreads();
		for (int j = i; j < zeros_size_b[b]; j += blockDim.x)
		{
			// Decode the linear index of the zero into (row l, column c).
			int z = zeros[(b << log2_data_block_size) + j];
			int l = z & row_mask;
			int c = z >> log2_n;
			int c1 = column_of_star_at_row[l];
			// NOTE(review): this loop variable shadows the global constant n;
			// it re-tests the same zero 10 times, presumably to let cover
			// updates from other threads propagate — confirm intent.
			for (int n = 0; n < 10; n++) {
				if (!v_cover_column[c] && !v_cover_row[l]) {
					s_found = true; s_repeat_kernel = true;
					column_of_prime_at_row[l] = c;
					if (c1 >= 0) {
						v_cover_row[l] = 1;
						// Make the cover visible before uncovering the star's column.
						__threadfence();
						v_cover_column[c1] = 0;
					}
					else {
						s_goto_5 = true;
					}
				}
			} // for(int n
		} // for(int j
		__syncthreads();
	} while (s_found && !s_goto_5);
	if (i == 0 && s_repeat_kernel) repeat_kernel = true;
	if (i == 0 && s_goto_5) goto_5 = true;
}
/* STEP 5:
Construct a series of alternating primed and starred zeros as
follows:
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0(if any).
Let Z2 denote the primed zero in the row of Z1(there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred
zero of the series, star each primed zero of the series, erase
all primes and uncover every line in the matrix. Return to Step 3.*/
// Eliminates joining paths
// STEP 5a: for each primed zero with no star in its row (the path start Z0),
// walks the alternating path prime -> star-in-column -> prime-in-row ...
// and records it by marking each visited prime "green" in its column
// (row_of_green_at_column). Paths starting at different rows are disjoint,
// so threads do not interfere. One thread per row.
__global__ void step_5a()
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	int r_Z0, c_Z0;
	c_Z0 = column_of_prime_at_row[i];
	if (c_Z0 >= 0 && column_of_star_at_row[i] < 0) {
		row_of_green_at_column[c_Z0] = i;
		// Follow star(column) -> prime(row) links until a column with no star.
		while ((r_Z0 = row_of_star_at_column[c_Z0]) >= 0) {
			c_Z0 = column_of_prime_at_row[r_Z0];
			row_of_green_at_column[c_Z0] = r_Z0;
		}
	}
}
// Applies the alternating paths
// STEP 5b: applies the alternating path recorded by step_5a. Starting from
// each column j that has a green zero but no star (the path end), it stars
// every green (previously primed) zero along the path and un-stars the old
// stars by overwriting the star maps. One thread per column; a thread is
// active only for path-end columns, so paths are processed disjointly.
__global__ void step_5b()
{
	int j = blockDim.x * blockIdx.x + threadIdx.x;
	int r_Z0, c_Z0, c_Z2;
	r_Z0 = row_of_green_at_column[j];
	if (r_Z0 >= 0 && row_of_star_at_column[j] < 0) {
		// Star the first green zero; remember the column of the star it replaces.
		c_Z2 = column_of_star_at_row[r_Z0];
		column_of_star_at_row[r_Z0] = j;
		row_of_star_at_column[j] = r_Z0;
		while (c_Z2 >= 0) {
			r_Z0 = row_of_green_at_column[c_Z2]; // row of Z2
			c_Z0 = c_Z2; // col of Z2
			c_Z2 = column_of_star_at_row[r_Z0]; // col of Z4
			// star Z2
			column_of_star_at_row[r_Z0] = c_Z0;
			row_of_star_at_column[c_Z0] = r_Z0;
		}
	}
}
// STEP 6
// Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines.
// Generic final intra-warp min-reduction stage (tid < 32) for step 6.
// Volatile pointer forces shared-memory re-reads; relies on warp-synchronous
// execution without __syncwarp (pre-Volta idiom — NOTE(review): confirm
// target architecture).
template <unsigned int blockSize>
__device__ void min_warp_reduce(volatile data* sdata, int tid) {
	if (blockSize >= 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
	if (blockSize >= 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
	if (blockSize >= 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
	if (blockSize >= 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
	if (blockSize >= 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
	if (blockSize >= 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
// First-stage reduction for step 6: finds each block's minimum over the
// UNCOVERED entries of g_idata (covered rows/columns are masked with
// MAX_DATA) and writes one partial minimum per block to g_odata.
// Uses the dynamically-sized extern shared array sdata; each thread first
// folds two elements per grid stride, then a shared-memory tree reduction
// finishes the block. Assumes n is a multiple of 2*blockSize*gridDim.x so
// that i + blockSize never runs past the matrix (holds for the power-of-two
// sizes configured above).
template <unsigned int blockSize> // blockSize is the size of a block of threads
__device__ void min_reduce1(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockSize * 2) + tid;
	unsigned int gridSize = blockSize * 2 * gridDim.x;
	sdata[tid] = MAX_DATA;
	while (i < n) {
		int i1 = i;
		int i2 = i + blockSize;
		// Mask element i1 if its row or column is covered.
		int l1 = i1 & row_mask;
		int c1 = i1 >> log2_n;
		data g1;
		if (cover_row[l1] == 1 || cover_column[c1] == 1) g1 = MAX_DATA;
		else g1 = g_idata[i1];
		// Mask element i2 likewise.
		int l2 = i2 & row_mask;
		int c2 = i2 >> log2_n;
		data g2;
		if (cover_row[l2] == 1 || cover_column[c2] == 1) g2 = MAX_DATA;
		else g2 = g_idata[i2];
		sdata[tid] = min(sdata[tid], min(g1, g2));
		i += gridSize;
	}
	__syncthreads();
	if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Second-stage reduction for step 6: reduces the n partial minimums left by
// min_reduce1 to a single value. Launched with one block of blockSize
// threads where 2*blockSize == n, so every input element is read exactly
// once in the initial pairwise min.
template <unsigned int blockSize>
__device__ void min_reduce2(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockSize * 2) + tid;
	sdata[tid] = min(g_idata[i], g_idata[i + blockSize]);
	__syncthreads();
	if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
	if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
	if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
	if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
	if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// STEP 6: adds the smallest uncovered value (d_min_in_mat, found by the
// min_reduce kernels) to every doubly-covered element and subtracts it from
// every fully uncovered element, leaving stars, primes and covers intact.
// Also resets the zero counters so compress_matrix can rebuild the zero
// list. One thread per matrix element (full n*n grid).
__global__ void step_6_add_sub()
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	const int row_covered = cover_row[idx & row_mask];
	const int col_covered = cover_column[idx >> log2_n];
	if (row_covered && col_covered)
		slack[idx] += d_min_in_mat;
	else if (!row_covered && !col_covered)
		slack[idx] -= d_min_in_mat;
	if (idx == 0) zeros_size = 0;
	if (idx < n_blocks_step_4) zeros_size_b[idx] = 0;
}
// Step 6 launcher: first-stage minimum over the uncovered slack entries,
// one partial result per block into d_min_in_mat_vect. Needs
// n_threads_reduction * sizeof(data) bytes of dynamic shared memory.
__global__ void min_reduce_kernel1() {
	min_reduce1<n_threads_reduction>(slack, d_min_in_mat_vect, nrows*ncols);
}
// Step 6 launcher: folds the per-block partial minimums into d_min_in_mat.
// Launched with a single block of n_blocks_reduction / 2 threads.
__global__ void min_reduce_kernel2() {
	min_reduce2<n_threads_reduction / 2>(d_min_in_mat_vect, &d_min_in_mat, n_blocks_reduction);
}
// Reads the PTX %globaltimer special register: a device-wide wall-clock
// counter in nanoseconds (hence d_get_timer_period's 1.0e-6 ms period).
__device__ inline long long int d_get_globaltime(void) {
	long long int ret;
	asm volatile ("mov.u64 %0, %%globaltimer;" : "=l"(ret));
	return ret;
}
// Returns the period of d_get_globaltime's ticks in milliseconds
// (%globaltimer counts nanoseconds: 1 ns = 1.0e-6 ms).
__device__ inline double d_get_timer_period(void) {
	return 1.0e-6;
}
// -------------------------------------------------------------------------------------
// Host code
// -------------------------------------------------------------------------------------
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Host-side CUDA error check (mirror of d_checkCuda): in debug builds,
// prints and asserts on any non-success result; in release builds it is a
// pass-through. Always returns the result so it can wrap calls inline.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (result != cudaSuccess) {
		printf("CUDA Runtime Error: %s\n",
			cudaGetErrorString(result));
		assert(result == cudaSuccess);
	}
#endif
	return result;
};
// Host-side counterpart of d_get_globaltime: raw tick count of the
// high-resolution clock. Convert differences to milliseconds by
// multiplying with get_timer_period().
typedef std::chrono::high_resolution_clock::rep hr_clock_rep;
inline hr_clock_rep get_globaltime(void) {
	const auto now = std::chrono::high_resolution_clock::now();
	return now.time_since_epoch().count();
}
// Returns the tick period of the high-resolution clock in milliseconds.
inline double get_timer_period(void) {
	typedef std::chrono::high_resolution_clock::period period;
	return 1000.0 * period::num / period::den;
}
// Per-kernel profiling counters: declares <k>_time (accumulated clock
// ticks) and <k>_runs (number of launches) for kernel k.
#define declare_kernel(k) \
	hr_clock_rep k##_time = 0; \
	int k##_runs = 0
// Launches kernel k with no dynamic shared memory.
#define call_kernel(k, n_blocks, n_threads) call_kernel_s(k, n_blocks, n_threads, 0ll)
// Launches kernel k with the given dynamic shared memory size,
// synchronizes, and accumulates the elapsed time into k##_time.
#define call_kernel_s(k, n_blocks, n_threads, shared) \
{ \
	timer_start = dh_get_globaltime(); \
	k << < n_blocks, n_threads, shared>> > (); \
	dh_checkCuda(cudaDeviceSynchronize()); \
	timer_stop = dh_get_globaltime(); \
	k##_time += timer_stop - timer_start; \
	k##_runs++; \
}
// Debug trace (re-enable inside call_kernel_s if needed). The trailing
// backslashes that used to end these comment lines were removed: line
// splicing happens before comment processing, so "// ... \" swallowed the
// following line — including the kernel_stats #define below.
// printf("Finished kernel " #k "(%d,%d,%lld)\n", n_blocks, n_threads, shared);
// fflush(0);
// Prints the accumulated time (ms) and launch count of kernel k.
#define kernel_stats(k) \
	printf(#k "\t %g \t %d\n", dh_get_timer_period() * k##_time, k##_runs)
// Hungarian_Algorithm
// Top-level driver of the Hungarian algorithm. Compiled either as a host
// function (launch configuration decided here, kernels launched from the
// host) or, with DYNAMIC defined, as a single-thread kernel that uses
// dynamic parallelism. Orchestrates steps 1-6 and prints per-kernel
// timing statistics at the end.
#ifndef DYNAMIC
void Hungarian_Algorithm()
#else
__global__ void Hungarian_Algorithm()
#endif
{
	hr_clock_rep timer_start, timer_stop;
	hr_clock_rep total_time_start, total_time_stop;
#if defined(DEBUG) || defined(_DEBUG)
	int last_n_covered_rows = 0, last_n_matches = 0;
#endif
	// Profiling counters for every kernel (used by call_kernel/kernel_stats).
	declare_kernel(init);
	declare_kernel(calc_min_in_rows); declare_kernel(step_1_row_sub);
	declare_kernel(calc_min_in_cols); declare_kernel(step_1_col_sub);
	declare_kernel(compress_matrix);
	declare_kernel(step_2);
	declare_kernel(step_3ini); declare_kernel(step_3);
	declare_kernel(step_4_init); declare_kernel(step_4);
	declare_kernel(min_reduce_kernel1); declare_kernel(min_reduce_kernel2); declare_kernel(step_6_add_sub);
	declare_kernel(step_5a); declare_kernel(step_5b); declare_kernel(step_5c);
	total_time_start = dh_get_globaltime();
	// Initialization
	call_kernel(init, n_blocks, n_threads);
	// Step 1 kernels
	call_kernel(calc_min_in_rows, n_blocks_reduction, n_threads_reduction);
	call_kernel(step_1_row_sub, n_blocks_full, n_threads_full);
	call_kernel(calc_min_in_cols, n_blocks_reduction, n_threads_reduction);
	call_kernel(step_1_col_sub, n_blocks_full, n_threads_full);
	// compress_matrix
	call_kernel(compress_matrix, n_blocks_full, n_threads_full);
	// Step 2 kernels: relaunch while threads had to back off a claimed row.
	do {
		repeat_kernel = false; dh_checkCuda(cudaDeviceSynchronize());
		call_kernel(step_2, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
		// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
	} while (repeat_kernel);
	while (1) { // repeat steps 3 to 6
		// Step 3 kernels
		call_kernel(step_3ini, n_blocks, n_threads);
		call_kernel(step_3, n_blocks, n_threads);
		if (n_matches >= ncols) break; // It's done
		//step 4_kernels
		call_kernel(step_4_init, n_blocks, n_threads);
		while (1) // repeat step 4 and 6
		{
#if defined(DEBUG) || defined(_DEBUG)
			// At each iteration either the number of matched or covered rows has to increase.
			// If we went to step 5 the number of matches increases.
			// If we went to step 6 the number of covered rows increases.
			n_covered_rows = 0; n_covered_columns = 0;
			dh_checkCuda(cudaDeviceSynchronize());
			convergence_check << < n_blocks, n_threads >> > ();
			dh_checkCuda(cudaDeviceSynchronize());
			assert(n_matches>last_n_matches || n_covered_rows>last_n_covered_rows);
			assert(n_matches == n_covered_columns + n_covered_rows);
			last_n_matches = n_matches;
			last_n_covered_rows = n_covered_rows;
#endif
			do { // step 4 loop: relaunch until no zero was primed or step 5 is due
				goto_5 = false; repeat_kernel = false;
				dh_checkCuda(cudaDeviceSynchronize());
				call_kernel(step_4, n_blocks_step_4, (n_blocks_step_4 > 1 || zeros_size > max_threads_per_block) ? max_threads_per_block : zeros_size);
				// If we have more than one block it means that we have 512 lines per block so 1024 threads should be adequate.
			} while (repeat_kernel && !goto_5);
			if (goto_5) break;
			//step 6_kernel: find the minimum uncovered slack, then shift the matrix
			call_kernel_s(min_reduce_kernel1, n_blocks_reduction, n_threads_reduction, n_threads_reduction*sizeof(int));
			call_kernel_s(min_reduce_kernel2, 1, n_blocks_reduction / 2, (n_blocks_reduction / 2) * sizeof(int));
			call_kernel(step_6_add_sub, n_blocks_full, n_threads_full);
			//compress_matrix
			call_kernel(compress_matrix, n_blocks_full, n_threads_full);
		} // repeat step 4 and 6
		call_kernel(step_5a, n_blocks, n_threads);
		call_kernel(step_5b, n_blocks, n_threads);
	} // repeat steps 3 to 6
	total_time_stop = dh_get_globaltime();
	printf("kernel \t time (ms) \t runs\n");
	kernel_stats(init);
	kernel_stats(calc_min_in_rows); kernel_stats(step_1_row_sub);
	kernel_stats(calc_min_in_cols); kernel_stats(step_1_col_sub);
	kernel_stats(compress_matrix);
	kernel_stats(step_2);
	kernel_stats(step_3ini); kernel_stats(step_3);
	kernel_stats(step_4_init); kernel_stats(step_4);
	kernel_stats(min_reduce_kernel1); kernel_stats(min_reduce_kernel2); kernel_stats(step_6_add_sub);
	kernel_stats(step_5a); kernel_stats(step_5b); kernel_stats(step_5c);
	printf("Total time(ms) \t %g\n", dh_get_timer_period() * (total_time_stop - total_time_start));
}
// Used to make sure some constants are properly set
// Aborts the program when a compile-time configuration check fails.
// Prints the failure description, waits for a keypress (so a console
// window stays visible), and exits with status -1. No-op when val holds.
void check(bool val, const char *str){
	if (val) return;
	printf("Check failed: %s!\n", str);
	getchar();
	exit(-1);
}
int main(int argc, char* argv[])
{
//printf("Starting cuda \n");
// Constant checks:
//printf("log2_m %i" ,1 << log2_n);
check(n == (1 << log2_n), "Incorrect log2_n!");
check(n_threads*n_blocks == n, "n_threads*n_blocks != n\n");
// step 1
check(n_blocks_reduction <= n, "Step 1: Should have several lines per block!");
check(n % n_blocks_reduction == 0, "Step 1: Number of lines per block should be integer!");
check((n_blocks_reduction*n_threads_reduction) % n == 0, "Step 1: The grid size must be a multiple of the line size!");
check(n_threads_reduction*n_blocks_reduction <= n*n, "Step 1: The grid size is bigger than the matrix size!");
// step 6
check(n_threads_full*n_blocks_full <= n*n, "Step 6: The grid size is bigger than the matrix size!");
check(columns_per_block_step_4*n == (1 << log2_data_block_size), "Columns per block of step 4 is not a power of two!");
//printf("Running. See out.txt for output.\n");
// Open text file
//FILE *file = freopen("out.txt", "w", stdout);
//if (file == NULL)
//{
// perror("Error opening the output file!\n");
// getchar();
// exit(1);
//};
// Prints the current time
time_t current_time;
time(¤t_time);
printf("%s\n", ctime(¤t_time));
//fflush(file);
//#ifndef USE_TEST_MATRIX
std::string inpath(argv[1]);
std::ifstream infile(inpath);
data i;
printf("Populating cost .. %i, %i\n", nrows, ncols);
for (int r = 0; r < nrows; r++) {
for (int c = 0; c < ncols; c++) {
infile >> i;
h_cost[c][r] = (float)i;
//if(i < 1) {
// printf("%i, %i, %f\n", r, c, i);
//}
}
}
printf("Populated cost\n");
/*
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(0, range-1);
for (int test = 0; test < n_tests; test++) {
printf("\n\n\n\ntest %d\n", test);
fflush(file);
for (int c = 0; c < ncols; c++)
for (int r = 0; r < nrows; r++) {
if (c < user_n && r < user_n)
h_cost[c][r] = distribution(generator);
else {
if (c == r) h_cost[c][r] = 0;
else h_cost[c][r] = MAX_DATA;
}
}
#endif
*/
// Copy vectors from host memory to device memory
cudaMemcpyToSymbol(slack, h_cost, sizeof(data)*nrows*ncols); // symbol refers to the device memory hence "To" means from Host to Device
// Invoke kernels
time_t start_time = clock();
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 1024 *1024 * 1024);
#ifndef DYNAMIC
Hungarian_Algorithm();
#else
Hungarian_Algorithm << <1, 1 >> > ();
#endif
checkCuda(cudaDeviceSynchronize());
time_t stop_time = clock();
//fflush(file);
// Copy assignments from Device to Host and calculate the total Cost
cudaMemcpyFromSymbol(h_column_of_star_at_row, column_of_star_at_row, nrows * sizeof(int));
long long int total_cost = 0;
std::fstream solution;
solution.open("gpu-solution.txt", std::fstream::out);
for (int r = 0; r < nrows; r++) {
int c = h_column_of_star_at_row[r];
if (c >= 0) total_cost += h_cost[c][r];
solution << r << " " << c << std::endl;
}
solution.close();
printf("Total cost is \t %lld \n", total_cost);
printf("Low resolution time is \t %f \n", 1000.0*(double)(stop_time - start_time) / CLOCKS_PER_SEC);
//#ifndef USE_TEST_MATRIX
//} // for (int test
//#endif
//fclose(file);
}
|
2c5938834b72fa94f22d5204c8b07498fcb0f9ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star2d1r-256-10-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 9
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.1873f * A[t%2][i-1][j]
+ 0.1876f * A[t%2][i][j-1]
+ 0.2500f * A[t%2][i][j]
+ 0.1877f * A[t%2][i][j+1]
+ 0.1874f * A[t%2][i+1][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 2c5938834b72fa94f22d5204c8b07498fcb0f9ca.cu | #include <assert.h>
#include <stdio.h>
#include "star2d1r-256-10-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 9
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.1873f * A[t%2][i-1][j]
+ 0.1876f * A[t%2][i][j-1]
+ 0.2500f * A[t%2][i][j]
+ 0.1877f * A[t%2][i][j+1]
+ 0.1874f * A[t%2][i+1][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
70ac26d47403d43a4fde3e472618024e69f4d780.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
using namespace std;
/*
* Display the dimensionality of a thread block and grid from the host and
* device.
*/
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
printf("blockIdx:(%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
printf("blockDim:(%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("gridDim:(%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv) {
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
hipLaunchKernelGGL(( checkIndex), dim3(grid), dim3(block), 0, 0, );
// reset device before you leave
// CHECK(hipDeviceReset());
hipDeviceReset();
cout << "aaa" << endl;
return (0);
}
| 70ac26d47403d43a4fde3e472618024e69f4d780.cu | #include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>
using namespace std;
/*
* Display the dimensionality of a thread block and grid from the host and
* device.
*/
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
printf("blockIdx:(%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
printf("blockDim:(%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("gridDim:(%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv) {
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
checkIndex<<<grid, block>>>();
// reset device before you leave
// CHECK(cudaDeviceReset());
cudaDeviceReset();
cout << "aaa" << endl;
return (0);
}
|
408178b49548d8325f69b002720ee9eb81f9b623.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define BLOCK_SIZE 128
#define BLOCK_SIZE_F 128.0
__global__
void sumRedKernel(float *A, int n){
__shared__ float partialSum[BLOCK_SIZE*2];
int i = (threadIdx.x + blockDim.x * blockIdx.x)*2;
unsigned int t = threadIdx.x * 2;
partialSum[t] = A[i];
partialSum[t+1] = A[i+1];
t = threadIdx.x;
for(unsigned int stride = blockDim.x; stride > 0; stride /= 2){
__syncthreads();
if(t < stride && (t+stride) < n) partialSum[t] += partialSum[t+stride];
}
__syncthreads();
if(threadIdx.x == 0){
A[blockIdx.x] = partialSum[0];
}
}
void sumRed(float* A, int n){
int size = n*sizeof(float);
float *d_A;
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumRedKernel), dim3(ceil(n/(BLOCK_SIZE_F*2))), dim3(BLOCK_SIZE), 0, 0, d_A,n);
hipMemcpy(A,d_A,size,hipMemcpyDeviceToHost);
hipFree(d_A);
}
int main(){
int n,i;
float *h_A;
scanf("%d", &n);
h_A = (float*) malloc(n*sizeof(float));
for(i = 0; i < n; i++){
//scanf("%f", &h_A[i]);
h_A[i] = 1;
}
while(n > 1){
sumRed(h_A,n);
n = ceil(n/(BLOCK_SIZE_F*2));
}
printf("%f", h_A[0]);
printf("\n");
return 0;
}
| 408178b49548d8325f69b002720ee9eb81f9b623.cu | #include <stdio.h>
#define BLOCK_SIZE 128
#define BLOCK_SIZE_F 128.0
__global__
void sumRedKernel(float *A, int n){
__shared__ float partialSum[BLOCK_SIZE*2];
int i = (threadIdx.x + blockDim.x * blockIdx.x)*2;
unsigned int t = threadIdx.x * 2;
partialSum[t] = A[i];
partialSum[t+1] = A[i+1];
t = threadIdx.x;
for(unsigned int stride = blockDim.x; stride > 0; stride /= 2){
__syncthreads();
if(t < stride && (t+stride) < n) partialSum[t] += partialSum[t+stride];
}
__syncthreads();
if(threadIdx.x == 0){
A[blockIdx.x] = partialSum[0];
}
}
void sumRed(float* A, int n){
int size = n*sizeof(float);
float *d_A;
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
sumRedKernel<<<ceil(n/(BLOCK_SIZE_F*2)), BLOCK_SIZE>>>(d_A,n);
cudaMemcpy(A,d_A,size,cudaMemcpyDeviceToHost);
cudaFree(d_A);
}
int main(){
int n,i;
float *h_A;
scanf("%d", &n);
h_A = (float*) malloc(n*sizeof(float));
for(i = 0; i < n; i++){
//scanf("%f", &h_A[i]);
h_A[i] = 1;
}
while(n > 1){
sumRed(h_A,n);
n = ceil(n/(BLOCK_SIZE_F*2));
}
printf("%f", h_A[0]);
printf("\n");
return 0;
}
|
9632f5234588dc9c3cf8f6b212270953eac92120.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//#include "hip/hip_runtime.h"
// for using cublas
#include <rocblas.h>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <complex>
#include <assert.h>
#include <algorithm>
#include <hip/hip_complex.h>
#include "util_type.h"
#include "util.cuh"
#include "util_func.h"
#include "memory_ops.h"
int get_num_device(){
int n_gpu;
hipGetDeviceCount(&n_gpu);
return n_gpu;
}
void set_device(unsigned int device_num){
hipSetDevice(device_num);
}
int get_current_device() {
int curr_dev_num;
hipGetDevice(&curr_dev_num);
return curr_dev_num;
}
inline __device__ double __shfl_down_double(double var, unsigned int srcLane, int width = 32) {
int2 a = *reinterpret_cast<int2*>(&var);
a.x = __shfl_down_sync(a.x, srcLane, width);
a.y = __shfl_down_sync(a.y, srcLane, width);
return *reinterpret_cast<double*>(&a);
}
inline __device__ int warpReduceSum(int val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2)
val += __shfl_down_sync(0xffffffff, val, offset);
// val += __shfl_down(val, offset);
return val;
}
// __device__ int __popcll ( unsigned long long int x )
inline __device__ int popcount64(ITYPE b) {
return __popcll(b);
}
//__device__ int __popc ( unsigned int x )
inline __device__ int popcount32(unsigned int b) {
return __popc(b);
}
// Sums the N ints in `in` into *out: each thread accumulates a strided
// slice, the warp combines partials via shuffle, and one lane per warp
// issues a single atomicAdd. NOTE(review): *out must be zeroed before
// launch — confirm with callers.
__global__ void deviceReduceWarpAtomicKernel(int *in, int* out, ITYPE N) {
    int sum = int(0);
    // Grid-stride loop: correct for any grid size relative to N.
    for (ITYPE i = blockIdx.x * blockDim.x + threadIdx.x;
        i < N;
        i += blockDim.x * gridDim.x) {
        sum += in[i];
    }
    sum = warpReduceSum(sum);
    if ((threadIdx.x & (warpSize - 1)) == 0)
        atomicAdd(out, sum);
}
// Writes the computational basis state |comp_basis>: zeroes every amplitude
// below dim, then sets amplitude comp_basis to 1. Assumes the launch covers
// all `dim` entries and comp_basis < dim.
__global__ void set_computational_basis_gpu(ITYPE comp_basis, GTYPE* state, ITYPE dim){
    ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < dim) {
        state[idx] = make_cuDoubleComplex(0.0, 0.0);
    }
    if(idx==comp_basis) state[comp_basis] = make_cuDoubleComplex(1.0, 0.0);
}
// Host wrapper: initializes the device state vector to |comp_basis> on the
// given stream/device, then synchronizes and checks for launch errors.
__host__ void set_computational_basis_host(ITYPE comp_basis, void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    unsigned int block = dim <= 1024 ? dim : 1024;
    // NOTE(review): integer division assumes dim is a multiple of block
    // (true when dim is a power of two) — otherwise the tail of the state
    // vector would be left uncovered. Confirm callers only pass 2^k dims.
    unsigned int grid = dim / block;
    set_computational_basis_gpu << <grid, block,0 , *cuda_stream >> > (comp_basis, state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    // no-op: reassigns the local parameter only
    state = reinterpret_cast<void*>(state_gpu);
}
// copy state_gpu to state_gpu_copy
// Device-to-device copy of a dim-element state vector; blocks until the
// async copy on `stream` has completed.
void copy_quantum_state_from_device_to_device(void* state_gpu_copy, const void* state_gpu, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    const GTYPE* psi_gpu = reinterpret_cast<const GTYPE*>(state_gpu);
    GTYPE* psi_gpu_copy = reinterpret_cast<GTYPE*>(state_gpu_copy);
    checkCudaErrors(hipMemcpyAsync(psi_gpu_copy, psi_gpu, dim * sizeof(GTYPE), hipMemcpyDeviceToDevice, *cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // no-ops: these reassign local parameters only
    state_gpu = reinterpret_cast<const void*>(psi_gpu);
    state_gpu_copy = reinterpret_cast<void*>(psi_gpu_copy);
}
// copy cppstate to state_gpu_copy
// Host-to-device upload of a dim-element state vector; blocks until the
// async copy on `stream` has completed.
void copy_quantum_state_from_host_to_device(void* state_gpu_copy, const void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* psi_gpu_copy = reinterpret_cast<GTYPE*>(state_gpu_copy);
    checkCudaErrors(hipMemcpyAsync(psi_gpu_copy, state, dim * sizeof(GTYPE), hipMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // no-op: reassigns the local parameter only
    state_gpu_copy = reinterpret_cast<void*>(psi_gpu_copy);
}
// this function will be removed in the future version
// Deprecated alias for copy_quantum_state_from_host_to_device.
void copy_quantum_state_from_cppstate_host(void* state_gpu_copy, const CPPCTYPE* cppstate, ITYPE dim, void* stream, UINT device_number) {
    copy_quantum_state_from_host_to_device(state_gpu_copy, cppstate, dim, stream, device_number);
}
// Device-to-host download of a dim-element state vector; blocks until the
// async copy on `stream` has completed, so the host buffer is safe to read.
void copy_quantum_state_from_device_to_host(void* state_cpu_copy, const void* state_gpu_original, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    const GTYPE* psi_gpu = reinterpret_cast<const GTYPE*>(state_gpu_original);
    checkCudaErrors(hipMemcpyAsync(state_cpu_copy, psi_gpu, dim * sizeof(GTYPE), hipMemcpyDeviceToHost, *cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // no-op: reassigns the local parameter only
    state_gpu_original = reinterpret_cast<const void*>(psi_gpu);
}
// copy state_gpu to psi_cpu_copy
// this function is same as copy_quantum_state_from_device_to_host
void get_quantum_state_host(void* state_gpu, void* psi_cpu_copy, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* psi_gpu = reinterpret_cast<GTYPE*>(state_gpu);
    psi_cpu_copy = reinterpret_cast<CPPCTYPE*>(psi_cpu_copy);
    checkCudaErrors(hipMemcpyAsync(psi_cpu_copy, psi_gpu, dim * sizeof(CPPCTYPE), hipMemcpyDeviceToHost, *cuda_stream), __FILE__, __LINE__);
    // Bug fix: the async copy must complete before the caller reads
    // psi_cpu_copy; every sibling copy helper in this file synchronizes here,
    // but this one returned immediately.
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    state_gpu = reinterpret_cast<void*>(psi_gpu);
}
// Debug helper: downloads the device state vector and prints every amplitude
// as "<index> : <re>+i<im>". Synchronizes the whole device first.
void print_quantum_state_host(void* state, ITYPE dim, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    CPPCTYPE* state_cpu = (CPPCTYPE*)malloc(sizeof(CPPCTYPE) * dim);
    if (state_cpu == NULL) return;  // bug fix: malloc result was dereferenced unchecked
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hipMemcpy(state_cpu, state_gpu, dim * sizeof(CPPCTYPE), hipMemcpyDeviceToHost));
    // Bug fix: the loop index was a 32-bit int, which overflows for
    // dim >= 2^31; use ITYPE to match the dimension type.
    for (ITYPE i = 0; i < dim; ++i) {
        std::cout << i << " : " << state_cpu[i].real() << "+i" << state_cpu[i].imag() << '\n';
    }
    std::cout << '\n';
    free(state_cpu);
    // removed: `state = reinterpret_cast<void*>(state);` (self-assign no-op)
}
// Inserts a 0 bit at position qubit_index of basis_index, shifting every
// higher-order bit up by one place (standard basis-index expansion).
ITYPE insert_zero_to_basis_index_gsim(ITYPE basis_index, unsigned int qubit_index){
    ITYPE upper = (basis_index >> qubit_index) << (qubit_index + 1);
    ITYPE lower = basis_index & ((1ULL << qubit_index) - 1);
    return upper | lower;  // bit ranges are disjoint, so | equals the original +
}
// Builds the bit-flip (X/Y) and phase-flip (Y/Z) masks for a Pauli string
// given as parallel (qubit index, operator ID) lists; IDs: 0=I, 1=X, 2=Y, 3=Z.
// Also counts Y factors (each contributes a global i phase) and records the
// last bit-flipped qubit as the pivot. Outputs are written through pointers.
void get_Pauli_masks_partial_list_gsim(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
    ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
    (*bit_flip_mask)=0;
    (*phase_flip_mask)=0;
    (*global_phase_90rot_count)=0;
    (*pivot_qubit_index)=0;
    for(UINT cursor=0;cursor < target_qubit_index_count; ++cursor){
        UINT target_qubit_index = target_qubit_index_list[cursor];
        switch(Pauli_operator_type_list[cursor]){
        case 0: // I
            break;
        case 1: // X
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 2: // Y
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            (*global_phase_90rot_count) ++;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 3: // Z
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            break;
        default:
            fprintf(stderr,"Invalid Pauli operator ID called");
            assert(0);
        }
    }
}
// Same as get_Pauli_masks_partial_list_gsim, but the operator list covers
// every qubit 0..count-1 in order, so the qubit index is the list position.
// IDs: 0=I, 1=X, 2=Y, 3=Z.
void get_Pauli_masks_whole_list_gsim(const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
    ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
    (*bit_flip_mask)=0;
    (*phase_flip_mask)=0;
    (*global_phase_90rot_count)=0;
    (*pivot_qubit_index)=0;
    for(UINT target_qubit_index=0; target_qubit_index < target_qubit_index_count; ++target_qubit_index){
        switch(Pauli_operator_type_list[target_qubit_index]){
        case 0: // I
            break;
        case 1: // X
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 2: // Y
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            (*global_phase_90rot_count) ++;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 3: // Z
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            break;
        default:
            fprintf(stderr,"Invalid Pauli operator ID called");
            assert(0);
        }
    }
}
// For a gate acting on `qubit_index_count` qubits, returns a heap-allocated
// table of 2^count entries; entry k is the full-width basis mask obtained by
// scattering the bits of k onto the listed qubit positions. Caller frees.
ITYPE* create_matrix_mask_list_gsim(const UINT* qubit_index_list, UINT qubit_index_count){
    const ITYPE matrix_dim = 1ULL << qubit_index_count;
    ITYPE* mask_list = (ITYPE*)calloc((size_t)matrix_dim, sizeof(ITYPE));
    for (ITYPE entry = 0; entry < matrix_dim; ++entry) {
        ITYPE mask = 0;
        for (UINT bit = 0; bit < qubit_index_count; ++bit) {
            if ((entry >> bit) & 1) {
                mask |= 1ULL << qubit_index_list[bit];
            }
        }
        mask_list[entry] = mask;
    }
    return mask_list;
}
// Packs control-qubit values into a basis mask: for each k, bit
// qubit_index_list[k] is set iff value_list[k] is 1 (values are 0/1).
ITYPE create_control_mask_gsim(const UINT* qubit_index_list, const UINT* value_list, UINT size) {
    ITYPE mask = 0;
    for (UINT k = 0; k < size; ++k) {
        mask ^= ((ITYPE)value_list[k]) << qubit_index_list[k];
    }
    return mask;
}
// Returns a heap-allocated, ascending copy of `array`. Caller frees.
UINT* create_sorted_ui_list_gsim(const UINT* array, size_t size){
    UINT* sorted = (UINT*)calloc(size, sizeof(UINT));
    std::copy(array, array + size, sorted);
    std::sort(sorted, sorted + size);
    return sorted;
}
// Returns a heap-allocated ascending copy of `array` with `value` appended
// before sorting (size+1 elements). Caller frees.
UINT* create_sorted_ui_list_value_gsim(const UINT* array, size_t size, UINT value){
    UINT* merged = (UINT*)calloc(size + 1, sizeof(UINT));
    std::copy(array, array + size, merged);
    merged[size] = value;
    std::sort(merged, merged + size + 1);
    return merged;
}
// Returns a heap-allocated ascending merge of the two arrays
// (size1+size2 elements). Caller frees.
UINT* create_sorted_ui_list_list_gsim(const UINT* array1, size_t size1, const UINT* array2, size_t size2){
    UINT* merged = (UINT*)calloc(size1 + size2, sizeof(UINT));
    std::copy(array1, array1 + size1, merged);
    std::copy(array2, array2 + size2, merged + size1);
    std::sort(merged, merged + size1 + size2);
    return merged;
}
// C=alpha*A*B+beta*C
// in this wrapper, we assume beta is always zero!
// Computes C = alpha*A*B + beta*C for n x n complex matrices via hipBLAS.
// Returns 0 on success, EXIT_FAILURE on any allocation/BLAS error.
// NOTE(review): early error returns leak whatever device buffers were
// already allocated; tolerable for a one-shot helper, but worth confirming.
int cublas_zgemm_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_B, CPPCTYPE beta, CPPCTYPE *h_C){
    ITYPE n2 = n*n;
    hipblasStatus_t status;
    hipblasHandle_t handle;
    GTYPE *d_A;
    GTYPE *d_B;
    GTYPE *d_C;
    GTYPE d_alpha = make_cuDoubleComplex(alpha.real(), alpha.imag());
    GTYPE d_beta = make_cuDoubleComplex(beta.real(), beta.imag());
    /* Initialize CUBLAS */
    status = hipblasCreate(&handle);
    if (status != HIPBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    /* Allocate device memory for the matrices */
    if (hipMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (hipMalloc(reinterpret_cast<void **>(&d_B), n2 * sizeof(d_B[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
        return EXIT_FAILURE;
    }
    if (hipMalloc(reinterpret_cast<void **>(&d_C), n2 * sizeof(d_C[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
        return EXIT_FAILURE;
    }
    /* Initialize the device matrices with the host matrices */
    status = hipblasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write A)\n");
        return EXIT_FAILURE;
    }
    status = hipblasSetMatrix(n, n, sizeof(h_B[0]), h_B, n, d_B, n);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write B)\n");
        return EXIT_FAILURE;
    }
    status = hipblasSetMatrix(n, n, sizeof(h_C[0]), h_C, n, d_C, n);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write C)\n");
        return EXIT_FAILURE;
    }
    /* Performs operation using cublas; both operands are passed transposed
       (presumably because the host data is row-major while BLAS is
       column-major — confirm with callers) */
    status = hipblasZgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, n, n, n, &d_alpha, d_A,
        n, d_B, n, &d_beta, d_C, n);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Allocate host memory for reading back the result from device memory */
    CPPCTYPE* tmp_h_C = reinterpret_cast<CPPCTYPE *>(malloc(n2 * sizeof(h_C[0])));
    if (tmp_h_C == 0) {
        fprintf(stderr, "!!!! host memory allocation error (C)\n");
        return EXIT_FAILURE;
    }
    /* Read the result back. Bug fix: check the status BEFORE copying into
       h_C (the old code published possibly-garbage data first). */
    status = hipblasGetMatrix(n, n, sizeof(GTYPE), d_C, n, tmp_h_C, n);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (read C)\n");
        free(tmp_h_C);  // bug fix: staging buffer was leaked
        return EXIT_FAILURE;
    }
    memcpy(h_C, tmp_h_C, sizeof(h_C[0])*n2);
    free(tmp_h_C);  // bug fix: staging buffer was leaked on success too
    if (hipFree(d_A) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (hipFree(d_B) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (B)\n");
        return EXIT_FAILURE;
    }
    if (hipFree(d_C) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (C)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = hipblasDestroy(handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
// C=alpha*A*x+beta*y
// in this wrapper, we assume beta is always zero!
// Computes y = alpha*A*x + beta*y for an n x n complex matrix via hipBLAS.
// Returns 0 on success, EXIT_FAILURE on any allocation/BLAS error.
// NOTE(review): early error returns leak device buffers already allocated.
int cublas_zgemv_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_x, CPPCTYPE beta, CPPCTYPE *h_y){
    ITYPE n2 = n*n;
    hipblasStatus_t status;
    hipblasHandle_t handle;
    GTYPE *d_A;
    GTYPE *d_x;
    GTYPE *d_y;
    GTYPE d_alpha = make_cuDoubleComplex(alpha.real(), alpha.imag());
    GTYPE d_beta = make_cuDoubleComplex(beta.real(), beta.imag());
    /* Initialize CUBLAS */
    // bug fix: removed stray debug output printf("simpleCUBLAS test running..\n")
    status = hipblasCreate(&handle);
    if (status != HIPBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    /* Allocate device memory for the operands */
    if (hipMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (hipMalloc(reinterpret_cast<void **>(&d_x), n * sizeof(d_x[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate x)\n");
        return EXIT_FAILURE;
    }
    if (hipMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
        return EXIT_FAILURE;
    }
    /* Upload the operands */
    status = hipblasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write A)\n");
        return EXIT_FAILURE;
    }
    status = hipblasSetVector(n, sizeof(h_x[0]), h_x, 1, d_x, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write x)\n");
        return EXIT_FAILURE;
    }
    status = hipblasSetVector(n, sizeof(h_y[0]), h_y, 1, d_y, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write y)\n");  // bug fix: message said "write C"
        return EXIT_FAILURE;
    }
    /* y = alpha*A^T*x + beta*y */
    status = hipblasZgemv(handle, HIPBLAS_OP_T, n, n, &d_alpha, d_A, n,
        d_x, 1, &d_beta, d_y, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Read the result back via a host staging buffer */
    CPPCTYPE* tmp_h_y = reinterpret_cast<CPPCTYPE *>(malloc(n * sizeof(h_y[0])));
    if (tmp_h_y == 0) {
        fprintf(stderr, "!!!! host memory allocation error (y)\n");
        return EXIT_FAILURE;
    }
    status = hipblasGetVector(n, sizeof(GTYPE), d_y, 1, tmp_h_y, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        // bug fix: check BEFORE publishing into h_y, and release the staging buffer
        fprintf(stderr, "!!!! device access error (read y)\n");
        free(tmp_h_y);
        return EXIT_FAILURE;
    }
    memcpy(h_y, tmp_h_y, sizeof(h_y[0])*n);
    free(tmp_h_y);  // bug fix: was leaked
    if (hipFree(d_A) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (hipFree(d_x) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (x)\n");
        return EXIT_FAILURE;
    }
    if (hipFree(d_y) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (y)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = hipblasDestroy(handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
// we assume state has already allocated at device
// Applies the n x n host matrix to the device-resident state vector:
// d_state <- h_matrix^T * d_state (via a temporary device vector).
// Returns 0 on success, EXIT_FAILURE on any allocation/BLAS error.
int cublas_zgemv_wrapper(ITYPE n, const CPPCTYPE *h_matrix, GTYPE *d_state){
    ITYPE n2 = n*n;
    hipblasStatus_t status;
    hipblasHandle_t handle;
    GTYPE *d_matrix;
    GTYPE *d_y; // this will include the answer of the state.
    GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
    GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
    /* Initialize CUBLAS */
    status = hipblasCreate(&handle);
    if (status != HIPBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    /* Allocate device memory for the matrix and the result vector */
    if (hipMalloc(reinterpret_cast<void **>(&d_matrix), n2 * sizeof(d_matrix[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (hipMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
        return EXIT_FAILURE;
    }
    /* Initialize the device matrix with the host matrix */
    status = hipblasSetMatrix(n, n, sizeof(h_matrix[0]), h_matrix, n, d_matrix, n);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write A)\n");
        return EXIT_FAILURE;
    }
    /* y = A^T * state */
    status = hipblasZgemv(handle, HIPBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
        d_state, 1, &d_beta, d_y, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Bug fix: the old code discarded hipMemcpy's return value and then
       re-checked the stale status from the gemv call above. */
    if (hipMemcpy(d_state, d_y, n * sizeof(GTYPE), hipMemcpyDeviceToDevice) != hipSuccess) {
        fprintf(stderr, "!!!! device access error (copy y to state)\n");
        return EXIT_FAILURE;
    }
    if (hipFree(d_matrix) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (hipFree(d_y) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (y)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = hipblasDestroy(handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
// we assume state and matrix has already allocated at device
// d_state <- d_matrix^T * d_state, both operands already device-resident.
// Returns 0 on success, EXIT_FAILURE on any allocation/BLAS error.
int cublas_zgemv_wrapper(ITYPE n, const GTYPE *d_matrix, GTYPE *d_state){
    hipblasStatus_t status;
    hipblasHandle_t handle;
    GTYPE *d_y; // this will include the answer of the state.
    GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
    GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
    /* Initialize CUBLAS */
    status = hipblasCreate(&handle);
    if (status != HIPBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    if (hipMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != hipSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
        return EXIT_FAILURE;
    }
    /* y = A^T * state */
    status = hipblasZgemv(handle, HIPBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
        d_state, 1, &d_beta, d_y, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Bug fix: the old code discarded hipMemcpy's return value and then
       re-checked the stale status from the gemv call above. */
    if (hipMemcpy(d_state, d_y, n * sizeof(GTYPE), hipMemcpyDeviceToDevice) != hipSuccess) {
        fprintf(stderr, "!!!! device access error (copy y to state)\n");
        return EXIT_FAILURE;
    }
    if (hipFree(d_y) != hipSuccess) {
        fprintf(stderr, "!!!! memory free error (y)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = hipblasDestroy(handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
| 9632f5234588dc9c3cf8f6b212270953eac92120.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include "cuda.h"
// for using cublas
#include <cublas_v2.h>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <complex>
#include <assert.h>
#include <algorithm>
#include <cuComplex.h>
#include "util_type.h"
#include "util.cuh"
#include "util_func.h"
#include "memory_ops.h"
// Returns the number of CUDA-capable devices visible to the runtime.
int get_num_device(){
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    return device_count;
}
// Makes the given device current for subsequent runtime calls on this host
// thread. NOTE(review): the cudaSetDevice return code is ignored, so an
// invalid device number fails silently.
void set_device(unsigned int device_num){
    cudaSetDevice(device_num);
}
// Queries which device is currently active on this host thread.
int get_current_device() {
    int active_device = 0;
    cudaGetDevice(&active_device);
    return active_device;
}
// Shuffles a double down the warp by moving its two 32-bit halves.
// Bug fix: __shfl_down_sync takes the participation mask as its FIRST
// argument; the old code passed (a.x, srcLane, width), i.e. the data word
// was used as the mask and the lane delta as the data. Pass an explicit
// full-warp mask and the data word in the right slots.
inline __device__ double __shfl_down_double(double var, unsigned int srcLane, int width = 32) {
    int2 a = *reinterpret_cast<int2*>(&var);
    a.x = __shfl_down_sync(0xffffffff, a.x, srcLane, width);
    a.y = __shfl_down_sync(0xffffffff, a.y, srcLane, width);
    return *reinterpret_cast<double*>(&a);
}
// Sums `val` across the 32 lanes of the calling warp via shuffle-down;
// lane 0 ends up holding the full total (other lanes hold partials).
// Assumes all 32 lanes participate (full 0xffffffff mask).
inline __device__ int warpReduceSum(int val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);
    // val += __shfl_down(val, offset);
    return val;
}
// __device__ int __popcll ( unsigned long long int x )
// Number of set bits in a 64-bit basis index.
inline __device__ int popcount64(ITYPE b) {
    return __popcll(b);
}
//__device__ int __popc ( unsigned int x )
// Number of set bits in a 32-bit word.
inline __device__ int popcount32(unsigned int b) {
    return __popc(b);
}
// Sums the N ints in `in` into *out: each thread accumulates a strided
// slice, the warp combines partials via shuffle, and one lane per warp
// issues a single atomicAdd. NOTE(review): *out must be zeroed before
// launch — confirm with callers.
__global__ void deviceReduceWarpAtomicKernel(int *in, int* out, ITYPE N) {
    int sum = int(0);
    // Grid-stride loop: correct for any grid size relative to N.
    for (ITYPE i = blockIdx.x * blockDim.x + threadIdx.x;
        i < N;
        i += blockDim.x * gridDim.x) {
        sum += in[i];
    }
    sum = warpReduceSum(sum);
    if ((threadIdx.x & (warpSize - 1)) == 0)
        atomicAdd(out, sum);
}
// Writes the computational basis state |comp_basis>: zeroes every amplitude
// below dim, then sets amplitude comp_basis to 1. Assumes the launch covers
// all `dim` entries and comp_basis < dim.
__global__ void set_computational_basis_gpu(ITYPE comp_basis, GTYPE* state, ITYPE dim){
    ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < dim) {
        state[idx] = make_cuDoubleComplex(0.0, 0.0);
    }
    if(idx==comp_basis) state[comp_basis] = make_cuDoubleComplex(1.0, 0.0);
}
// Host wrapper: initializes the device state vector to |comp_basis> on the
// given stream/device, then synchronizes and checks for launch errors.
__host__ void set_computational_basis_host(ITYPE comp_basis, void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice(device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    unsigned int block = dim <= 1024 ? dim : 1024;
    // NOTE(review): integer division assumes dim is a multiple of block
    // (true when dim is a power of two) — otherwise the tail of the state
    // vector would be left uncovered. Confirm callers only pass 2^k dims.
    unsigned int grid = dim / block;
    set_computational_basis_gpu << <grid, block,0 , *cuda_stream >> > (comp_basis, state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    // no-op: reassigns the local parameter only
    state = reinterpret_cast<void*>(state_gpu);
}
// copy state_gpu to state_gpu_copy
// Device-to-device copy of a dim-element state vector; blocks until the
// async copy on `stream` has completed.
void copy_quantum_state_from_device_to_device(void* state_gpu_copy, const void* state_gpu, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice(device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    const GTYPE* psi_gpu = reinterpret_cast<const GTYPE*>(state_gpu);
    GTYPE* psi_gpu_copy = reinterpret_cast<GTYPE*>(state_gpu_copy);
    checkCudaErrors(cudaMemcpyAsync(psi_gpu_copy, psi_gpu, dim * sizeof(GTYPE), cudaMemcpyDeviceToDevice, *cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // no-ops: these reassign local parameters only
    state_gpu = reinterpret_cast<const void*>(psi_gpu);
    state_gpu_copy = reinterpret_cast<void*>(psi_gpu_copy);
}
// copy cppstate to state_gpu_copy
// Host-to-device upload of a dim-element state vector; blocks until the
// async copy on `stream` has completed.
void copy_quantum_state_from_host_to_device(void* state_gpu_copy, const void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice(device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* psi_gpu_copy = reinterpret_cast<GTYPE*>(state_gpu_copy);
    checkCudaErrors(cudaMemcpyAsync(psi_gpu_copy, state, dim * sizeof(GTYPE), cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // no-op: reassigns the local parameter only
    state_gpu_copy = reinterpret_cast<void*>(psi_gpu_copy);
}
// this function will be removed in the future version
// Deprecated alias for copy_quantum_state_from_host_to_device.
void copy_quantum_state_from_cppstate_host(void* state_gpu_copy, const CPPCTYPE* cppstate, ITYPE dim, void* stream, UINT device_number) {
    copy_quantum_state_from_host_to_device(state_gpu_copy, cppstate, dim, stream, device_number);
}
// Device-to-host download of a dim-element state vector; blocks until the
// async copy on `stream` has completed, so the host buffer is safe to read.
void copy_quantum_state_from_device_to_host(void* state_cpu_copy, const void* state_gpu_original, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice(device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    const GTYPE* psi_gpu = reinterpret_cast<const GTYPE*>(state_gpu_original);
    checkCudaErrors(cudaMemcpyAsync(state_cpu_copy, psi_gpu, dim * sizeof(GTYPE), cudaMemcpyDeviceToHost, *cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // no-op: reassigns the local parameter only
    state_gpu_original = reinterpret_cast<const void*>(psi_gpu);
}
// copy state_gpu to psi_cpu_copy
// this function is same as copy_quantum_state_from_device_to_host
void get_quantum_state_host(void* state_gpu, void* psi_cpu_copy, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice(device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* psi_gpu = reinterpret_cast<GTYPE*>(state_gpu);
    psi_cpu_copy = reinterpret_cast<CPPCTYPE*>(psi_cpu_copy);
    checkCudaErrors(cudaMemcpyAsync(psi_cpu_copy, psi_gpu, dim * sizeof(CPPCTYPE), cudaMemcpyDeviceToHost, *cuda_stream), __FILE__, __LINE__);
    // Bug fix: the async copy must complete before the caller reads
    // psi_cpu_copy; every sibling copy helper in this file synchronizes here,
    // but this one returned immediately.
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    state_gpu = reinterpret_cast<void*>(psi_gpu);
}
// Debug helper: downloads the device state vector and prints every amplitude
// as "<index> : <re>+i<im>". Synchronizes the whole device first.
void print_quantum_state_host(void* state, ITYPE dim, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice(device_number);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    CPPCTYPE* state_cpu = (CPPCTYPE*)malloc(sizeof(CPPCTYPE) * dim);
    if (state_cpu == NULL) return;  // bug fix: malloc result was dereferenced unchecked
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaMemcpy(state_cpu, state_gpu, dim * sizeof(CPPCTYPE), cudaMemcpyDeviceToHost));
    // Bug fix: the loop index was a 32-bit int, which overflows for
    // dim >= 2^31; use ITYPE to match the dimension type.
    for (ITYPE i = 0; i < dim; ++i) {
        std::cout << i << " : " << state_cpu[i].real() << "+i" << state_cpu[i].imag() << '\n';
    }
    std::cout << '\n';
    free(state_cpu);
    // removed: `state = reinterpret_cast<void*>(state);` (self-assign no-op)
}
// Inserts a 0 bit at position qubit_index of basis_index, shifting every
// higher-order bit up by one place (standard basis-index expansion).
ITYPE insert_zero_to_basis_index_gsim(ITYPE basis_index, unsigned int qubit_index){
    ITYPE upper = (basis_index >> qubit_index) << (qubit_index + 1);
    ITYPE lower = basis_index & ((1ULL << qubit_index) - 1);
    return upper | lower;  // bit ranges are disjoint, so | equals the original +
}
// Builds the bit-flip (X/Y) and phase-flip (Y/Z) masks for a Pauli string
// given as parallel (qubit index, operator ID) lists; IDs: 0=I, 1=X, 2=Y, 3=Z.
// Also counts Y factors (each contributes a global i phase) and records the
// last bit-flipped qubit as the pivot. Outputs are written through pointers.
void get_Pauli_masks_partial_list_gsim(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
    ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
    (*bit_flip_mask)=0;
    (*phase_flip_mask)=0;
    (*global_phase_90rot_count)=0;
    (*pivot_qubit_index)=0;
    for(UINT cursor=0;cursor < target_qubit_index_count; ++cursor){
        UINT target_qubit_index = target_qubit_index_list[cursor];
        switch(Pauli_operator_type_list[cursor]){
        case 0: // I
            break;
        case 1: // X
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 2: // Y
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            (*global_phase_90rot_count) ++;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 3: // Z
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            break;
        default:
            fprintf(stderr,"Invalid Pauli operator ID called");
            assert(0);
        }
    }
}
// Same as get_Pauli_masks_partial_list_gsim, but the operator list covers
// every qubit 0..count-1 in order, so the qubit index is the list position.
// IDs: 0=I, 1=X, 2=Y, 3=Z.
void get_Pauli_masks_whole_list_gsim(const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
    ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
    (*bit_flip_mask)=0;
    (*phase_flip_mask)=0;
    (*global_phase_90rot_count)=0;
    (*pivot_qubit_index)=0;
    for(UINT target_qubit_index=0; target_qubit_index < target_qubit_index_count; ++target_qubit_index){
        switch(Pauli_operator_type_list[target_qubit_index]){
        case 0: // I
            break;
        case 1: // X
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 2: // Y
            (*bit_flip_mask) ^= 1ULL << target_qubit_index;
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            (*global_phase_90rot_count) ++;
            (*pivot_qubit_index) = target_qubit_index;
            break;
        case 3: // Z
            (*phase_flip_mask) ^= 1ULL << target_qubit_index;
            break;
        default:
            fprintf(stderr,"Invalid Pauli operator ID called");
            assert(0);
        }
    }
}
// For a gate acting on `qubit_index_count` qubits, returns a heap-allocated
// table of 2^count entries; entry k is the full-width basis mask obtained by
// scattering the bits of k onto the listed qubit positions. Caller frees.
ITYPE* create_matrix_mask_list_gsim(const UINT* qubit_index_list, UINT qubit_index_count){
    const ITYPE matrix_dim = 1ULL << qubit_index_count;
    ITYPE* mask_list = (ITYPE*)calloc((size_t)matrix_dim, sizeof(ITYPE));
    for (ITYPE entry = 0; entry < matrix_dim; ++entry) {
        ITYPE mask = 0;
        for (UINT bit = 0; bit < qubit_index_count; ++bit) {
            if ((entry >> bit) & 1) {
                mask |= 1ULL << qubit_index_list[bit];
            }
        }
        mask_list[entry] = mask;
    }
    return mask_list;
}
// Packs control-qubit values into a basis mask: for each k, bit
// qubit_index_list[k] is set iff value_list[k] is 1 (values are 0/1).
ITYPE create_control_mask_gsim(const UINT* qubit_index_list, const UINT* value_list, UINT size) {
    ITYPE mask = 0;
    for (UINT k = 0; k < size; ++k) {
        mask ^= ((ITYPE)value_list[k]) << qubit_index_list[k];
    }
    return mask;
}
// Returns a heap-allocated, ascending copy of `array`. Caller frees.
UINT* create_sorted_ui_list_gsim(const UINT* array, size_t size){
    UINT* sorted = (UINT*)calloc(size, sizeof(UINT));
    std::copy(array, array + size, sorted);
    std::sort(sorted, sorted + size);
    return sorted;
}
// Returns a heap-allocated ascending copy of `array` with `value` appended
// before sorting (size+1 elements). Caller frees.
UINT* create_sorted_ui_list_value_gsim(const UINT* array, size_t size, UINT value){
    UINT* merged = (UINT*)calloc(size + 1, sizeof(UINT));
    std::copy(array, array + size, merged);
    merged[size] = value;
    std::sort(merged, merged + size + 1);
    return merged;
}
// Returns a heap-allocated ascending merge of the two arrays
// (size1+size2 elements). Caller frees.
UINT* create_sorted_ui_list_list_gsim(const UINT* array1, size_t size1, const UINT* array2, size_t size2){
    UINT* merged = (UINT*)calloc(size1 + size2, sizeof(UINT));
    std::copy(array1, array1 + size1, merged);
    std::copy(array2, array2 + size2, merged + size1);
    std::sort(merged, merged + size1 + size2);
    return merged;
}
// C=alpha*A*B+beta*C
// in this wrapper, we assume beta is always zero!
// Computes C = alpha*A*B + beta*C for n x n complex matrices via cuBLAS.
// Returns 0 on success, EXIT_FAILURE on any allocation/BLAS error.
// NOTE(review): early error returns leak whatever device buffers were
// already allocated; tolerable for a one-shot helper, but worth confirming.
int cublas_zgemm_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_B, CPPCTYPE beta, CPPCTYPE *h_C){
    ITYPE n2 = n*n;
    cublasStatus_t status;
    cublasHandle_t handle;
    GTYPE *d_A;
    GTYPE *d_B;
    GTYPE *d_C;
    GTYPE d_alpha = make_cuDoubleComplex(alpha.real(), alpha.imag());
    GTYPE d_beta = make_cuDoubleComplex(beta.real(), beta.imag());
    /* Initialize CUBLAS */
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    /* Allocate device memory for the matrices */
    if (cudaMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc(reinterpret_cast<void **>(&d_B), n2 * sizeof(d_B[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc(reinterpret_cast<void **>(&d_C), n2 * sizeof(d_C[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
        return EXIT_FAILURE;
    }
    /* Initialize the device matrices with the host matrices */
    status = cublasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write A)\n");
        return EXIT_FAILURE;
    }
    status = cublasSetMatrix(n, n, sizeof(h_B[0]), h_B, n, d_B, n);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write B)\n");
        return EXIT_FAILURE;
    }
    status = cublasSetMatrix(n, n, sizeof(h_C[0]), h_C, n, d_C, n);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write C)\n");
        return EXIT_FAILURE;
    }
    /* Performs operation using cublas; both operands are passed transposed
       (presumably because the host data is row-major while cuBLAS is
       column-major — confirm with callers) */
    status = cublasZgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, n, n, n, &d_alpha, d_A,
        n, d_B, n, &d_beta, d_C, n);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Allocate host memory for reading back the result from device memory */
    CPPCTYPE* tmp_h_C = reinterpret_cast<CPPCTYPE *>(malloc(n2 * sizeof(h_C[0])));
    if (tmp_h_C == 0) {
        fprintf(stderr, "!!!! host memory allocation error (C)\n");
        return EXIT_FAILURE;
    }
    /* Read the result back. Bug fix: check the status BEFORE copying into
       h_C (the old code published possibly-garbage data first). */
    status = cublasGetMatrix(n, n, sizeof(GTYPE), d_C, n, tmp_h_C, n);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (read C)\n");
        free(tmp_h_C);  // bug fix: staging buffer was leaked
        return EXIT_FAILURE;
    }
    memcpy(h_C, tmp_h_C, sizeof(h_C[0])*n2);
    free(tmp_h_C);  // bug fix: staging buffer was leaked on success too
    if (cudaFree(d_A) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_B) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (B)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_C) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (C)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
// C=alpha*A*x+beta*y
// in this wrapper, we assume beta is always zero!
// Computes y = alpha * A * x + beta * y with cuBLAS zgemv.
// A is an n x n host matrix, x and y are length-n host vectors; the result
// is written back into h_y. CUBLAS_OP_T is used, presumably to compensate
// for a row-major host layout (cuBLAS is column-major) -- TODO confirm
// against callers.
// Returns 0 on success, EXIT_FAILURE on any CUDA/cuBLAS error.
// NOTE(review): like the sibling wrappers, device buffers are not released
// on early error paths.
int cublas_zgemv_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_x, CPPCTYPE beta, CPPCTYPE *h_y){
    ITYPE n2 = n*n;
    cublasStatus_t status;
    cublasHandle_t handle;
    GTYPE *d_A;
    GTYPE *d_x;
    GTYPE *d_y;
    GTYPE d_alpha=make_cuDoubleComplex(alpha.real(), alpha.imag());
    GTYPE d_beta=make_cuDoubleComplex(beta.real(), beta.imag());
    /* Initialize CUBLAS */
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    /* Allocate device memory for the matrix and vectors */
    if (cudaMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc(reinterpret_cast<void **>(&d_x), n * sizeof(d_x[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate x)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
        return EXIT_FAILURE;
    }
    /* Initialize the device matrix/vectors with the host data */
    status = cublasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write A)\n");
        return EXIT_FAILURE;
    }
    status = cublasSetVector(n, sizeof(h_x[0]), h_x, 1, d_x, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write x)\n");
        return EXIT_FAILURE;
    }
    status = cublasSetVector(n, sizeof(h_y[0]), h_y, 1, d_y, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write y)\n");
        return EXIT_FAILURE;
    }
    /* Performs operation using cublas */
    status = cublasZgemv(handle, CUBLAS_OP_T, n, n, &d_alpha, d_A, n,
        d_x, 1, &d_beta, d_y, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Allocate host memory for reading back the result from device memory */
    CPPCTYPE* tmp_h_y = reinterpret_cast<CPPCTYPE *>(malloc(n * sizeof(h_y[0])));
    if (tmp_h_y == 0) {
        fprintf(stderr, "!!!! host memory allocation error (y)\n");
        return EXIT_FAILURE;
    }
    /* Read the result back. Check the transfer BEFORE using the buffer:
       the original memcpy'd first and only then looked at the status. */
    status = cublasGetVector(n, sizeof(GTYPE), d_y, 1, tmp_h_y, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (read y)\n");
        free(tmp_h_y);
        return EXIT_FAILURE;
    }
    memcpy(h_y, tmp_h_y, sizeof(h_y[0])*n);
    free(tmp_h_y);  // was leaked in the original
    if (cudaFree(d_A) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_x) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (x)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_y) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (y)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
// we assume state has already allocated at device
// Applies the n x n host matrix h_matrix to the device-resident state
// vector in place: d_state <- h_matrix * d_state (alpha = 1, beta = 0).
// d_state is assumed to already be allocated on the device.
// CUBLAS_OP_T is used, presumably to compensate for a row-major host
// layout -- TODO confirm against callers.
// Returns 0 on success, EXIT_FAILURE on any CUDA/cuBLAS error.
int cublas_zgemv_wrapper(ITYPE n, const CPPCTYPE *h_matrix, GTYPE *d_state){
    ITYPE n2 = n*n;
    cublasStatus_t status;
    cublasHandle_t handle;
    GTYPE *d_matrix;
    GTYPE *d_y; // receives the product before it is copied back over d_state
    GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
    GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
    /* Initialize CUBLAS */
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    /* Allocate device memory for the matrix and the result vector */
    if (cudaMalloc(reinterpret_cast<void **>(&d_matrix), n2 * sizeof(d_matrix[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
        return EXIT_FAILURE;
    }
    /* Initialize the device matrix with the host matrix */
    status = cublasSetMatrix(n, n, sizeof(h_matrix[0]), h_matrix, n, d_matrix, n);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! device access error (write A)\n");
        return EXIT_FAILURE;
    }
    /* Performs operation using cublas */
    status = cublasZgemv(handle, CUBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
        d_state, 1, &d_beta, d_y, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Copy the result back over the state. The original re-tested the stale
       cuBLAS status here; check the cudaMemcpy result instead. */
    if (cudaMemcpy(d_state, d_y, n * sizeof(GTYPE), cudaMemcpyDeviceToDevice) != cudaSuccess) {
        fprintf(stderr, "!!!! device access error (copy y)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_matrix) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (A)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_y) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (y)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
// we assume state and matrix has already allocated at device
// Applies the device-resident n x n matrix to the device-resident state
// vector in place: d_state <- d_matrix * d_state (alpha = 1, beta = 0).
// Both the matrix and the state are assumed to already live on the device.
// Returns 0 on success, EXIT_FAILURE on any CUDA/cuBLAS error.
int cublas_zgemv_wrapper(ITYPE n, const GTYPE *d_matrix, GTYPE *d_state){
    cublasStatus_t status;
    cublasHandle_t handle;
    GTYPE *d_y; // receives the product before it is copied back over d_state
    GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
    GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
    /* Initialize CUBLAS */
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS){
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != cudaSuccess) {
        fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
        return EXIT_FAILURE;
    }
    /* Performs operation using cublas */
    status = cublasZgemv(handle, CUBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
        d_state, 1, &d_beta, d_y, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! kernel execution error.\n");
        return EXIT_FAILURE;
    }
    /* Copy the result back over the state. The original re-tested the stale
       cuBLAS status here; check the cudaMemcpy result instead. */
    if (cudaMemcpy(d_state, d_y, n * sizeof(GTYPE), cudaMemcpyDeviceToDevice) != cudaSuccess) {
        fprintf(stderr, "!!!! device access error (copy y)\n");
        return EXIT_FAILURE;
    }
    if (cudaFree(d_y) != cudaSuccess) {
        fprintf(stderr, "!!!! memory free error (y)\n");
        return EXIT_FAILURE;
    }
    /* Shutdown */
    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return EXIT_FAILURE;
    }
    return 0;
}
|
b42557bd2b1c93126fa4e87906804bbb43bb8718.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_constants.h>
#include "BC.h"
/**
* Calculates the next finite difference step given a
* grid point and step lengths.
*
* @param curr Pointer to the grid point that should be updated.
* @param width Number of grid points in the x dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
* @returns Grid value of next timestep.
*/
template<int order>
__device__
float Stencil(const float* curr, int width, float xcfl, float ycfl) {
    // `order` is a template parameter, so the switch is resolved at compile
    // time and dead branches are eliminated.
    switch(order) {
        case 2:
            // Classic 3-point central difference in each dimension.
            return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) +
                   ycfl * (curr[width] + curr[-width] - 2.f * curr[0]);
        case 4:
            // 5-point 4th-order central-difference weights; the 1/12
            // denominator is assumed to be folded into xcfl/ycfl by the
            // caller -- TODO confirm.
            return curr[0] + xcfl * (-curr[2] + 16.f * curr[1] - 30.f * curr[0]
                                     + 16.f * curr[-1] - curr[-2])
                           + ycfl * (- curr[2 * width] + 16.f * curr[width]
                                     - 30.f * curr[0] + 16.f * curr[-width]
                                     - curr[-2 * width]);
        case 8:
            // 9-point 8th-order central-difference weights; denominator
            // likewise assumed folded into xcfl/ycfl -- TODO confirm.
            return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3]
                                     - 1008.f * curr[2] + 8064.f * curr[1]
                                     - 14350.f * curr[0] + 8064.f * curr[-1]
                                     - 1008.f * curr[-2] + 128.f * curr[-3]
                                     - 9.f * curr[-4])
                           + ycfl * (-9.f * curr[4 * width]
                                     + 128.f * curr[3 * width]
                                     - 1008.f * curr[2 * width]
                                     + 8064.f * curr[width]
                                     - 14350.f * curr[0]
                                     + 8064.f * curr[-width]
                                     - 1008.f * curr[-2 * width]
                                     + 128.f * curr[-3 * width]
                                     - 9.f * curr[-4 * width]);
        default:
            // Unsupported order: report from the device and poison the
            // output with NaN so the error is visible in the results.
            printf("ERROR: Order %d not supported", order);
            return CUDART_NAN_F;
    }
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be very simple and only use global memory
* and 1d threads and blocks.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
* @param ny Number of grid points in the y dimension to which th full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order>
__global__
void gpuStencilGlobal(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
                float xcfl, float ycfl) {
    // One thread per interior point, flattened into a 1D launch.
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= (unsigned int)(nx * ny))
        return;
    // Width of the boundary region on each side of the interior.
    const unsigned int halo = (gx - nx) / 2;
    const unsigned int row = tid / ny + halo;
    const unsigned int col = tid % ny + halo;
    const unsigned int at = row * gx + col;
    next[at] = Stencil<order>(curr + at, gx, xcfl, ycfl);
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilGlobal kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
/**
 * Propagates the 2D heat-diffusion grid for params.iters() steps using the
 * simple global-memory kernel (HIP launch syntax).
 *
 * Fix over the original: curr_grid.fromGPU() was called on EVERY iteration,
 * forcing a device->host copy inside the timed loop. The copy is only
 * needed on the three iterations whose state is written to disk, so it is
 * now done only there; the saved files are identical.
 *
 * @param curr_grid The current state of the grid (updated in place).
 * @param params    Parameters for the finite difference computation.
 * @returns Time required for computation.
 */
double gpuComputationGlobal(Grid& curr_grid, const simParams& params) {
    boundary_conditions BC(params);
    Grid next_grid(curr_grid);

    float xcfl = params.xcfl();
    float ycfl = params.ycfl();
    int nx = params.nx();
    int ny = params.ny();
    int gx = params.gx();

    // 1D launch: one thread per interior point, rounded up.
    unsigned int block_size = 512;
    int numBlocks = (nx*ny+block_size-1)/block_size;

    event_pair timer;
    start_timer(&timer);

    for(int i = 0; i < params.iters(); ++i) {
        // update the values on the boundary only
        BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);

        // Dispatch on the runtime stencil order; unsupported orders are
        // silently skipped (matching the original behavior).
        if (params.order()==2) {
            hipLaunchKernelGGL(( gpuStencilGlobal<2>), dim3(numBlocks), dim3(block_size), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==4) {
            hipLaunchKernelGGL(( gpuStencilGlobal<4>), dim3(numBlocks), dim3(block_size), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==8) {
            hipLaunchKernelGGL(( gpuStencilGlobal<8>), dim3(numBlocks), dim3(block_size), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        }
        Grid::swap(curr_grid, next_grid);

        // Copy to host only when a snapshot is actually saved.
        if (i==0 || i==1000 || i==2000) {
            curr_grid.fromGPU();
            if (i==0) curr_grid.saveStateToFile("0.csv");
            if (i==1000) curr_grid.saveStateToFile("1000.csv");
            if (i==2000) curr_grid.saveStateToFile("2000.csv");
        }
    }

    check_launch("gpuStencilGlobal");
    return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread
* should calculate at most numYPerStep updates. It should still only use
* global memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
* @param ny Number of grid points in the y dimension to which th full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order, int numYPerStep>
__global__
void gpuStencilBlock(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
                    float xcfl, float ycfl) {
    // Each thread owns one column and updates up to numYPerStep consecutive
    // rows of it.
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= (unsigned int)nx)
        return;
    const unsigned int tile_row = blockIdx.y * blockDim.y + threadIdx.y;
    const int halo = (gx - nx) / 2;
    const int y_begin = tile_row * numYPerStep;
    for (int step = 0; step < numYPerStep; ++step) {
        const int y = y_begin + step;
        if (y >= ny)
            break;  // past the bottom of the interior
        const int at = (col + halo) + (y + halo) * gx;
        next[at] = Stencil<order>(curr + at, gx, xcfl, ycfl);
    }
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilBlock kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
/**
 * Propagates the 2D heat-diffusion grid for params.iters() steps using the
 * thread-coarsened gpuStencilBlock kernel (HIP launch syntax).
 *
 * Fix over the original: the grid dimensions used truncating division
 * (nx / block_size_x and ny / (numYPerStep * block_size_y)), so any tail
 * of the domain not an exact multiple of the tile size was never updated.
 * Both dimensions now round up (the kernel already bounds-checks).
 *
 * @param curr_grid The current state of the grid (updated in place).
 * @param params    Parameters for the finite difference computation.
 * @returns Time required for computation.
 */
double gpuComputationBlock(Grid& curr_grid, const simParams& params) {
    boundary_conditions BC(params);
    Grid next_grid(curr_grid);

    int nx = params.nx();
    int ny = params.ny();
    float xcfl = params.xcfl();
    float ycfl = params.ycfl();
    int gx = params.gx();

    int block_size_x = 1024;
    int block_size_y = 1;
    const int numYPerStep = 16;
    // Ceil-divide so the tail of the domain is covered.
    int numBlocks_x = (nx + block_size_x - 1) / block_size_x;
    int numBlocks_y = (ny + numYPerStep * block_size_y - 1) / (numYPerStep * block_size_y);
    dim3 threads(block_size_x, block_size_y);
    dim3 blocks(numBlocks_x, numBlocks_y);

    event_pair timer;
    start_timer(&timer);

    for(int i = 0; i < params.iters(); ++i) {
        // update the values on the boundary only
        BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);

        // Dispatch on the runtime stencil order.
        if (params.order()==2) {
            hipLaunchKernelGGL(( gpuStencilBlock<2, numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==4) {
            hipLaunchKernelGGL(( gpuStencilBlock<4, numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==8) {
            hipLaunchKernelGGL(( gpuStencilBlock<8, numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        }
        Grid::swap(curr_grid, next_grid);
    }

    check_launch("gpuStencilBlock");
    return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size side * side using shared memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param gy Number of grid points in the y dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int side, int order>
__global__
void gpuStencilShared(float* next, const float* __restrict__ curr, int gx, int gy,
                     float xcfl, float ycfl) {
    // Dynamically sized shared tile: side * side floats, set at launch.
    extern __shared__ float block[];
    int nx = gx - order;            // interior extent in x
    int ny = gy - order;            // interior extent in y
    int x_dim = blockDim.x - order; // usable (non-halo) tile width
    int y_dim = side - order;       // usable (non-halo) tile height
    // Tiles overlap by `order` columns/rows so each block can read its halo.
    int idx_x = blockIdx.x * x_dim + threadIdx.x;
    int idx_y = blockIdx.y * y_dim + threadIdx.y;
    int block_i = threadIdx.x + threadIdx.y * side; // offset inside the tile
    int idx = idx_x + idx_y * gx;                   // offset in the global grid
    // Cooperative load: each thread copies side/order elements of its
    // column, stepping `order` rows at a time. NOTE(review): this covers
    // the full tile only if blockDim.y == order (the host launcher uses
    // block_size_y = params.order()) -- confirm if the launch ever changes.
    for (int i=0; i<side/order; i++) {
        if ((idx_x < gx) && (idx_y + order*i < gy)) {
            block[block_i+i*order*side] = curr[idx+i*order*gx];
        }
    }
    // All loads must complete before any thread reads neighbors.
    __syncthreads();
    // Apply the stencil out of shared memory; halo threads and threads past
    // the interior of the global grid are masked out.
    for (int i=0; i<side/order; i++) {
        if ((threadIdx.x < side-order/2) && (threadIdx.x >= order/2)
            && (idx_x < nx+order/2) && (idx_y+i*order < ny + order/2)
            && (threadIdx.y+i*order < side-order/2) && (threadIdx.y+i*order >= order/2)) {
            next[idx+i*order*gx]=Stencil<order>(block+block_i+i*order*side, side, xcfl, ycfl);
        }
    }
    return;
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilShared kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
/**
 * Propagates the 2D heat-diffusion grid for params.iters() steps using the
 * shared-memory tiled kernel (HIP launch syntax).
 *
 * Each block computes a (side - order) x (side - order) patch of output;
 * neighboring blocks overlap by the halo width so every tile can be loaded
 * into shared memory in one pass.
 *
 * @param curr_grid The current state of the grid (updated in place).
 * @param params    Parameters for the finite difference computation.
 * @returns Time required for computation.
 */
template<int order>
double gpuComputationShared(Grid& curr_grid, const simParams& params) {
    boundary_conditions BC(params);
    Grid next_grid(curr_grid);

    int nx = params.nx();
    int ny = params.ny();
    float xcfl = params.xcfl();
    float ycfl = params.ycfl();
    int gx = params.gx();
    int gy = params.gy();

    const int side = 32;
    const int usable = side - order;            // output points per tile edge
    dim3 threads(side, params.order());         // one warp-row per halo step
    dim3 blocks((nx + usable - 1) / usable,     // ceil-divide over the domain
                (ny + usable - 1) / usable);
    const size_t smem = side * side * sizeof(float);

    event_pair timer;
    start_timer(&timer);

    for(int i = 0; i < params.iters(); ++i) {
        // update the values on the boundary only
        BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);

        switch (params.order()) {
            case 2:
                hipLaunchKernelGGL(( gpuStencilShared<side, 2>), dim3(blocks), dim3(threads), smem, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;
            case 4:
                hipLaunchKernelGGL(( gpuStencilShared<side, 4>), dim3(blocks), dim3(threads), smem, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;
            case 8:
                hipLaunchKernelGGL(( gpuStencilShared<side, 8>), dim3(blocks), dim3(threads), smem, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;
        }
        Grid::swap(curr_grid, next_grid);
    }

    check_launch("gpuStencilShared");
    return stop_timer(&timer);
}
| b42557bd2b1c93126fa4e87906804bbb43bb8718.cu | #include <math_constants.h>
#include "BC.h"
/**
* Calculates the next finite difference step given a
* grid point and step lengths.
*
* @param curr Pointer to the grid point that should be updated.
* @param width Number of grid points in the x dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
* @returns Grid value of next timestep.
*/
template<int order>
__device__
float Stencil(const float* curr, int width, float xcfl, float ycfl) {
    // `order` is a template parameter, so the switch is resolved at compile
    // time and dead branches are eliminated.
    switch(order) {
        case 2:
            // Classic 3-point central difference in each dimension.
            return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) +
                   ycfl * (curr[width] + curr[-width] - 2.f * curr[0]);
        case 4:
            // 5-point 4th-order central-difference weights; the 1/12
            // denominator is assumed to be folded into xcfl/ycfl by the
            // caller -- TODO confirm.
            return curr[0] + xcfl * (-curr[2] + 16.f * curr[1] - 30.f * curr[0]
                                     + 16.f * curr[-1] - curr[-2])
                           + ycfl * (- curr[2 * width] + 16.f * curr[width]
                                     - 30.f * curr[0] + 16.f * curr[-width]
                                     - curr[-2 * width]);
        case 8:
            // 9-point 8th-order central-difference weights; denominator
            // likewise assumed folded into xcfl/ycfl -- TODO confirm.
            return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3]
                                     - 1008.f * curr[2] + 8064.f * curr[1]
                                     - 14350.f * curr[0] + 8064.f * curr[-1]
                                     - 1008.f * curr[-2] + 128.f * curr[-3]
                                     - 9.f * curr[-4])
                           + ycfl * (-9.f * curr[4 * width]
                                     + 128.f * curr[3 * width]
                                     - 1008.f * curr[2 * width]
                                     + 8064.f * curr[width]
                                     - 14350.f * curr[0]
                                     + 8064.f * curr[-width]
                                     - 1008.f * curr[-2 * width]
                                     + 128.f * curr[-3 * width]
                                     - 9.f * curr[-4 * width]);
        default:
            // Unsupported order: report from the device and poison the
            // output with NaN so the error is visible in the results.
            printf("ERROR: Order %d not supported", order);
            return CUDART_NAN_F;
    }
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be very simple and only use global memory
* and 1d threads and blocks.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
* @param ny Number of grid points in the y dimension to which th full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order>
__global__
void gpuStencilGlobal(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
                float xcfl, float ycfl) {
    // One thread per interior point, flattened into a 1D launch.
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= (unsigned int)(nx * ny))
        return;
    // Width of the boundary region on each side of the interior.
    const unsigned int halo = (gx - nx) / 2;
    const unsigned int row = tid / ny + halo;
    const unsigned int col = tid % ny + halo;
    const unsigned int at = row * gx + col;
    next[at] = Stencil<order>(curr + at, gx, xcfl, ycfl);
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilGlobal kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
/**
 * Propagates the 2D heat-diffusion grid for params.iters() steps using the
 * simple global-memory kernel.
 *
 * Fix over the original: curr_grid.fromGPU() was called on EVERY iteration,
 * forcing a device->host copy inside the timed loop. The copy is only
 * needed on the three iterations whose state is written to disk, so it is
 * now done only there; the saved files are identical.
 *
 * @param curr_grid The current state of the grid (updated in place).
 * @param params    Parameters for the finite difference computation.
 * @returns Time required for computation.
 */
double gpuComputationGlobal(Grid& curr_grid, const simParams& params) {
    boundary_conditions BC(params);
    Grid next_grid(curr_grid);

    float xcfl = params.xcfl();
    float ycfl = params.ycfl();
    int nx = params.nx();
    int ny = params.ny();
    int gx = params.gx();

    // 1D launch: one thread per interior point, rounded up.
    unsigned int block_size = 512;
    int numBlocks = (nx*ny+block_size-1)/block_size;

    event_pair timer;
    start_timer(&timer);

    for(int i = 0; i < params.iters(); ++i) {
        // update the values on the boundary only
        BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);

        // Dispatch on the runtime stencil order; unsupported orders are
        // silently skipped (matching the original behavior).
        if (params.order()==2) {
            gpuStencilGlobal<2><<<numBlocks, block_size>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==4) {
            gpuStencilGlobal<4><<<numBlocks, block_size>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==8) {
            gpuStencilGlobal<8><<<numBlocks, block_size>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        }
        Grid::swap(curr_grid, next_grid);

        // Copy to host only when a snapshot is actually saved.
        if (i==0 || i==1000 || i==2000) {
            curr_grid.fromGPU();
            if (i==0) curr_grid.saveStateToFile("0.csv");
            if (i==1000) curr_grid.saveStateToFile("1000.csv");
            if (i==2000) curr_grid.saveStateToFile("2000.csv");
        }
    }

    check_launch("gpuStencilGlobal");
    return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread
* should calculate at most numYPerStep updates. It should still only use
* global memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param nx Number of grid points in the x dimension to which the full
* stencil can be applied (ie the number of points that are at least
* order/2 grid points away from the boundary).
* @param ny Number of grid points in the y dimension to which th full
* stencil can be applied.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int order, int numYPerStep>
__global__
void gpuStencilBlock(float* next, const float* __restrict__ curr, int gx, int nx, int ny,
                    float xcfl, float ycfl) {
    // Each thread owns one column and updates up to numYPerStep consecutive
    // rows of it.
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= (unsigned int)nx)
        return;
    const unsigned int tile_row = blockIdx.y * blockDim.y + threadIdx.y;
    const int halo = (gx - nx) / 2;
    const int y_begin = tile_row * numYPerStep;
    for (int step = 0; step < numYPerStep; ++step) {
        const int y = y_begin + step;
        if (y >= ny)
            break;  // past the bottom of the interior
        const int at = (col + halo) + (y + halo) * gx;
        next[at] = Stencil<order>(curr + at, gx, xcfl, ycfl);
    }
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilBlock kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
/**
 * Propagates the 2D heat-diffusion grid for params.iters() steps using the
 * thread-coarsened gpuStencilBlock kernel.
 *
 * Fix over the original: the grid dimensions used truncating division
 * (nx / block_size_x and ny / (numYPerStep * block_size_y)), so any tail
 * of the domain not an exact multiple of the tile size was never updated.
 * Both dimensions now round up (the kernel already bounds-checks).
 *
 * @param curr_grid The current state of the grid (updated in place).
 * @param params    Parameters for the finite difference computation.
 * @returns Time required for computation.
 */
double gpuComputationBlock(Grid& curr_grid, const simParams& params) {
    boundary_conditions BC(params);
    Grid next_grid(curr_grid);

    int nx = params.nx();
    int ny = params.ny();
    float xcfl = params.xcfl();
    float ycfl = params.ycfl();
    int gx = params.gx();

    int block_size_x = 1024;
    int block_size_y = 1;
    const int numYPerStep = 16;
    // Ceil-divide so the tail of the domain is covered.
    int numBlocks_x = (nx + block_size_x - 1) / block_size_x;
    int numBlocks_y = (ny + numYPerStep * block_size_y - 1) / (numYPerStep * block_size_y);
    dim3 threads(block_size_x, block_size_y);
    dim3 blocks(numBlocks_x, numBlocks_y);

    event_pair timer;
    start_timer(&timer);

    for(int i = 0; i < params.iters(); ++i) {
        // update the values on the boundary only
        BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);

        // Dispatch on the runtime stencil order.
        if (params.order()==2) {
            gpuStencilBlock<2, numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==4) {
            gpuStencilBlock<4, numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        } else if (params.order()==8) {
            gpuStencilBlock<8, numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl);
        }
        Grid::swap(curr_grid, next_grid);
    }

    check_launch("gpuStencilBlock");
    return stop_timer(&timer);
}
/**
* Kernel to propagate finite difference grid from the current
* time point to the next.
*
* This kernel should be optimized to compute finite difference updates
* in blocks of size side * side using shared memory.
*
* @param next[out] Next grid state.
* @param curr Current grid state.
* @param gx Number of grid points in the x dimension.
* @param gy Number of grid points in the y dimension.
* @param xcfl Courant number for x dimension.
* @param ycfl Courant number for y dimension.
*/
template<int side, int order>
__global__
void gpuStencilShared(float* next, const float* __restrict__ curr, int gx, int gy,
                     float xcfl, float ycfl) {
    // Dynamically sized shared tile: side * side floats, set at launch.
    extern __shared__ float block[];
    int nx = gx - order;            // interior extent in x
    int ny = gy - order;            // interior extent in y
    int x_dim = blockDim.x - order; // usable (non-halo) tile width
    int y_dim = side - order;       // usable (non-halo) tile height
    // Tiles overlap by `order` columns/rows so each block can read its halo.
    int idx_x = blockIdx.x * x_dim + threadIdx.x;
    int idx_y = blockIdx.y * y_dim + threadIdx.y;
    int block_i = threadIdx.x + threadIdx.y * side; // offset inside the tile
    int idx = idx_x + idx_y * gx;                   // offset in the global grid
    // Cooperative load: each thread copies side/order elements of its
    // column, stepping `order` rows at a time. NOTE(review): this covers
    // the full tile only if blockDim.y == order (the host launcher uses
    // block_size_y = params.order()) -- confirm if the launch ever changes.
    for (int i=0; i<side/order; i++) {
        if ((idx_x < gx) && (idx_y + order*i < gy)) {
            block[block_i+i*order*side] = curr[idx+i*order*gx];
        }
    }
    // All loads must complete before any thread reads neighbors.
    __syncthreads();
    // Apply the stencil out of shared memory; halo threads and threads past
    // the interior of the global grid are masked out.
    for (int i=0; i<side/order; i++) {
        if ((threadIdx.x < side-order/2) && (threadIdx.x >= order/2)
            && (idx_x < nx+order/2) && (idx_y+i*order < ny + order/2)
            && (threadIdx.y+i*order < side-order/2) && (threadIdx.y+i*order >= order/2)) {
            next[idx+i*order*gx]=Stencil<order>(block+block_i+i*order*side, side, xcfl, ycfl);
        }
    }
    return;
}
/**
* Propagates the finite difference 2D heat diffusion solver
* using the gpuStencilShared kernel.
*
* Use this function to do necessary setup and propagate params.iters()
* number of times.
*
* @param curr_grid The current state of the grid.
* @param params Parameters for the finite difference computation.
* @returns Time required for computation.
*/
/**
 * Propagates the 2D heat-diffusion grid for params.iters() steps using the
 * shared-memory tiled kernel.
 *
 * Each block computes a (side - order) x (side - order) patch of output;
 * neighboring blocks overlap by the halo width so every tile can be loaded
 * into shared memory in one pass.
 *
 * @param curr_grid The current state of the grid (updated in place).
 * @param params    Parameters for the finite difference computation.
 * @returns Time required for computation.
 */
template<int order>
double gpuComputationShared(Grid& curr_grid, const simParams& params) {
    boundary_conditions BC(params);
    Grid next_grid(curr_grid);

    int nx = params.nx();
    int ny = params.ny();
    float xcfl = params.xcfl();
    float ycfl = params.ycfl();
    int gx = params.gx();
    int gy = params.gy();

    const int side = 32;
    const int usable = side - order;            // output points per tile edge
    dim3 threads(side, params.order());         // one thread row per halo step
    dim3 blocks((nx + usable - 1) / usable,     // ceil-divide over the domain
                (ny + usable - 1) / usable);
    const size_t smem = side * side * sizeof(float);

    event_pair timer;
    start_timer(&timer);

    for(int i = 0; i < params.iters(); ++i) {
        // update the values on the boundary only
        BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_);

        switch (params.order()) {
            case 2:
                gpuStencilShared<side, 2><<<blocks, threads, smem>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;
            case 4:
                gpuStencilShared<side, 4><<<blocks, threads, smem>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;
            case 8:
                gpuStencilShared<side, 8><<<blocks, threads, smem>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl);
                break;
        }
        Grid::swap(curr_grid, next_grid);
    }

    check_launch("gpuStencilShared");
    return stop_timer(&timer);
}
|
79c854e70ba66b5025176a3ad268feee7b39f724.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s
#include "Inputs/cuda.h"
// CHECK-LABEL: @_Z16use_dispatch_ptrPi(
// CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
// Codegen test: the dispatch-ptr builtin must yield an addrspace(4)
// pointer that is addrspace-cast to generic (pinned by the CHECK lines
// above). Do not alter the body without updating the CHECK patterns.
__global__ void use_dispatch_ptr(int* out) {
  const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr();
  *out = *dispatch_ptr;
}
// CHECK-LABEL: @_Z12test_ds_fmaxf(
// CHECK: call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
// Codegen test: ds_fmax on a __shared__ (addrspace(3)) operand must lower
// to llvm.amdgcn.ds.fmax.f32 (pinned by the CHECK line above).
__global__
void test_ds_fmax(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
}
// CHECK-LABEL: @_Z12test_ds_faddf(
// CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
// Codegen test: ds_fadd on a __shared__ (addrspace(3)) operand must lower
// to llvm.amdgcn.ds.fadd.f32 (pinned by the CHECK line above).
__global__ void test_ds_fadd(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
}
// CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
// CHECK: %shared = alloca float*, align 8, addrspace(5)
// CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
// CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
// CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
// CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
// CHECK: store float* %[[S0]], float** %shared.ascast, align 8
// CHECK: %shared1 = load float*, float** %shared.ascast, align 8
// CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
// CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
// CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
__global__ void test_ds_fmin(float src, float *shared) {
volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
}
// CHECK: @_Z33test_ret_builtin_nondef_addrspace
// CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
// CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
// CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
// CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
// CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
__device__ void test_ret_builtin_nondef_addrspace() {
void *x = __builtin_amdgcn_dispatch_ptr();
}
// CHECK-LABEL: @_Z6endpgmv(
// CHECK: call void @llvm.amdgcn.endpgm()
__global__ void endpgm() {
__builtin_amdgcn_endpgm();
}
// Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.
// CHECK-LABEL: @_Z14test_uicmp_i64
// CHECK: store i64* %out1, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
// CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
// CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
// CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
// CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
// CHECK-NEXT: ret void
__global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b)
{
*out = __builtin_amdgcn_uicmpl(a, b, 30+5);
}
// Check the 64 bit return value is correctly returned without truncation or assertion.
// CHECK-LABEL: @_Z14test_s_memtime
// CHECK: %[[V1:.*]] = call i64 @llvm.amdgcn.s.memtime()
// CHECK-NEXT: %[[PTR:.*]] = load i64*, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %[[V1]], i64* %[[PTR]]
// CHECK-NEXT: ret void
__global__ void test_s_memtime(unsigned long long* out)
{
*out = __builtin_amdgcn_s_memtime();
}
// Check a generic pointer can be passed as a shared pointer and a generic pointer.
__device__ void func(float *x);
// CHECK: @_Z17test_ds_fmin_funcfPf
// CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
// CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
// CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
// CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
// CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
// CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
// CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
// CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
// CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
// CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
// CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
// CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
// CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
func(shared);
}
// CHECK: @_Z14test_is_sharedPf(float addrspace(1)* %[[X_COERCE:.*]])
// CHECK: %[[X:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[X_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X]] to float**
// CHECK: %[[X_ADDR:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[X_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X_ADDR]] to float**
// CHECK: %[[X_FP:.*]] = addrspacecast float addrspace(1)* %[[X_COERCE]] to float*
// CHECK: store float* %[[X_FP]], float** %[[X_ASCAST]], align 8
// CHECK: %[[X1:.*]] = load float*, float** %[[X_ASCAST]], align 8
// CHECK: store float* %[[X1]], float** %[[X_ADDR_ASCAST]], align 8
// CHECK: %[[X_TMP:.*]] = load float*, float** %[[X_ADDR_ASCAST]], align 8
// CHECK: %[[X_ARG:.*]] = bitcast float* %[[X_TMP]] to i8*
// CHECK: call i1 @llvm.amdgcn.is.shared(i8* %[[X_ARG]])
__global__ void test_is_shared(float *x){
bool ret = __builtin_amdgcn_is_shared(x);
}
| 79c854e70ba66b5025176a3ad268feee7b39f724.cu | // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s
#include "Inputs/cuda.h"
// CHECK-LABEL: @_Z16use_dispatch_ptrPi(
// CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
__global__ void use_dispatch_ptr(int* out) {
const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr();
*out = *dispatch_ptr;
}
// CHECK-LABEL: @_Z12test_ds_fmaxf(
// CHECK: call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
__global__
void test_ds_fmax(float src) {
__shared__ float shared;
volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
}
// CHECK-LABEL: @_Z12test_ds_faddf(
// CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
__global__ void test_ds_fadd(float src) {
__shared__ float shared;
volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
}
// CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
// CHECK: %shared = alloca float*, align 8, addrspace(5)
// CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
// CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
// CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
// CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
// CHECK: store float* %[[S0]], float** %shared.ascast, align 8
// CHECK: %shared1 = load float*, float** %shared.ascast, align 8
// CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
// CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
// CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
__global__ void test_ds_fmin(float src, float *shared) {
volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
}
// CHECK: @_Z33test_ret_builtin_nondef_addrspace
// CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
// CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
// CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
// CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
// CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
__device__ void test_ret_builtin_nondef_addrspace() {
void *x = __builtin_amdgcn_dispatch_ptr();
}
// CHECK-LABEL: @_Z6endpgmv(
// CHECK: call void @llvm.amdgcn.endpgm()
__global__ void endpgm() {
__builtin_amdgcn_endpgm();
}
// Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.
// CHECK-LABEL: @_Z14test_uicmp_i64
// CHECK: store i64* %out1, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
// CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
// CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
// CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
// CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
// CHECK-NEXT: ret void
__global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b)
{
*out = __builtin_amdgcn_uicmpl(a, b, 30+5);
}
// Check the 64 bit return value is correctly returned without truncation or assertion.
// CHECK-LABEL: @_Z14test_s_memtime
// CHECK: %[[V1:.*]] = call i64 @llvm.amdgcn.s.memtime()
// CHECK-NEXT: %[[PTR:.*]] = load i64*, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %[[V1]], i64* %[[PTR]]
// CHECK-NEXT: ret void
__global__ void test_s_memtime(unsigned long long* out)
{
*out = __builtin_amdgcn_s_memtime();
}
// Check a generic pointer can be passed as a shared pointer and a generic pointer.
__device__ void func(float *x);
// CHECK: @_Z17test_ds_fmin_funcfPf
// CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
// CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
// CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
// CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
// CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
// CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
// CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
// CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
// CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
// CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
// CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
// CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
// CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
func(shared);
}
// CHECK: @_Z14test_is_sharedPf(float addrspace(1)* %[[X_COERCE:.*]])
// CHECK: %[[X:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[X_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X]] to float**
// CHECK: %[[X_ADDR:.*]] = alloca float*, align 8, addrspace(5)
// CHECK: %[[X_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X_ADDR]] to float**
// CHECK: %[[X_FP:.*]] = addrspacecast float addrspace(1)* %[[X_COERCE]] to float*
// CHECK: store float* %[[X_FP]], float** %[[X_ASCAST]], align 8
// CHECK: %[[X1:.*]] = load float*, float** %[[X_ASCAST]], align 8
// CHECK: store float* %[[X1]], float** %[[X_ADDR_ASCAST]], align 8
// CHECK: %[[X_TMP:.*]] = load float*, float** %[[X_ADDR_ASCAST]], align 8
// CHECK: %[[X_ARG:.*]] = bitcast float* %[[X_TMP]] to i8*
// CHECK: call i1 @llvm.amdgcn.is.shared(i8* %[[X_ARG]])
__global__ void test_is_shared(float *x){
bool ret = __builtin_amdgcn_is_shared(x);
}
|
1d6d76517520a39d3a7b3a06a9747533ac798841.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
void identityData(int* I, int nElem) {
for (int i = 0; i < nElem; i++) {
I[i] = i;
}
}
void initialData(float *ip, int size){
time_t t;
srand((unsigned int) time (&t));
for (int i = 0; i < size; i++){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
void initialDataInt(int *ip, int size){
time_t t;
srand((unsigned int) time (&t));
for (int i = 0; i < size; i++){
ip[i] = floor((rand() & 0xFF) / 10.0f);
}
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx = 0; idx < N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGpu(float *A, float *B, float *C, int* I, int* R, int strike, const int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
int value = A[i];
value = __shfl(value, (threadIdx.x + 1)%32, 32);
C[i] = B[i] + value;
}
}
int main(int argc, char**argv) {
// Configura tamanho dos vetores
int nElem = 100 * 1.e6;
int strike = 1;
// Alocando memoria na CPU
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
int *R, *I;
h_A = (float *) malloc(nBytes);
h_B = (float *) malloc(nBytes);
R = (int *) malloc(nBytes);
I = (int *) malloc(nBytes);
hostRef = (float *) malloc(nBytes);
gpuRef = (float *) malloc(nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
initialDataInt(R, nElem);
identityData(I, nElem);
// Alocando memoria global (GPU)
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float **)&d_A, nBytes));
CHECK(hipMalloc((float **)&d_B, nBytes));
CHECK(hipMalloc((float **)&d_C, nBytes));
// Transferindo dados da CPU pra GPU
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
// CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
// Invocando o Kernel na CPU
int iLen = 512;
dim3 block(iLen);
dim3 grid((nElem + block.x - 1) / block.x);
hipLaunchKernelGGL(( sumArraysOnGpu), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, I, R, strike, nElem);
// Copia os resultados do Kernel de volta pra CPU
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// Libera memoria da GPU
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
    // Libera memória da CPU
free(h_A);
free(h_B);
free(R);
free(I);
free(hostRef);
free(gpuRef);
hipDeviceReset();
return 0;
} | 1d6d76517520a39d3a7b3a06a9747533ac798841.cu | #include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
void identityData(int* I, int nElem) {
for (int i = 0; i < nElem; i++) {
I[i] = i;
}
}
void initialData(float *ip, int size){
time_t t;
srand((unsigned int) time (&t));
for (int i = 0; i < size; i++){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
void initialDataInt(int *ip, int size){
time_t t;
srand((unsigned int) time (&t));
for (int i = 0; i < size; i++){
ip[i] = floor((rand() & 0xFF) / 10.0f);
}
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx = 0; idx < N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGpu(float *A, float *B, float *C, int* I, int* R, int strike, const int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
int value = A[i];
value = __shfl(value, (threadIdx.x + 1)%32, 32);
C[i] = B[i] + value;
}
}
int main(int argc, char**argv) {
// Configura tamanho dos vetores
int nElem = 100 * 1.e6;
int strike = 1;
// Alocando memoria na CPU
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
int *R, *I;
h_A = (float *) malloc(nBytes);
h_B = (float *) malloc(nBytes);
R = (int *) malloc(nBytes);
I = (int *) malloc(nBytes);
hostRef = (float *) malloc(nBytes);
gpuRef = (float *) malloc(nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
initialDataInt(R, nElem);
identityData(I, nElem);
// Alocando memoria global (GPU)
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float **)&d_A, nBytes));
CHECK(cudaMalloc((float **)&d_B, nBytes));
CHECK(cudaMalloc((float **)&d_C, nBytes));
// Transferindo dados da CPU pra GPU
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
// CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
// Invocando o Kernel na CPU
int iLen = 512;
dim3 block(iLen);
dim3 grid((nElem + block.x - 1) / block.x);
sumArraysOnGpu<<<grid, block>>>(d_A, d_B, d_C, I, R, strike, nElem);
// Copia os resultados do Kernel de volta pra CPU
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// Libera memoria da GPU
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
// Libera memória da CPU
free(h_A);
free(h_B);
free(R);
free(I);
free(hostRef);
free(gpuRef);
cudaDeviceReset();
return 0;
} |
160aff21c39ceb7b061a232ab1c72f7e60860e9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void) {
std::cout << "CUDA Compiled version: " << __CUDACC_VER__ << std::endl;
int runtime_ver;
hipRuntimeGetVersion(&runtime_ver);
std::cout << "CUDA Runtime version: " << runtime_ver << std::endl;
int driver_ver;
hipDriverGetVersion(&driver_ver);
std::cout << "CUDA Driver version: " << driver_ver << std::endl;
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 160aff21c39ceb7b061a232ab1c72f7e60860e9d.cu | #include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void) {
std::cout << "CUDA Compiled version: " << __CUDACC_VER__ << std::endl;
int runtime_ver;
cudaRuntimeGetVersion(&runtime_ver);
std::cout << "CUDA Runtime version: " << runtime_ver << std::endl;
int driver_ver;
cudaDriverGetVersion(&driver_ver);
std::cout << "CUDA Driver version: " << driver_ver << std::endl;
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
539cb99ee2312913cde380b727c51ee2e98aac6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Azzam Haidar
@author Ichitaro Yamazaki
@generated from magmablas/zlacpy_sym_out.cu, normal z -> c, Thu Oct 8 23:05:33 2020
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
static __device__
void clacpy_sym_out_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj+1];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_sym_out_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb);
}
/***************************************************************************//**
Purpose
-------
CLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as CLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the permutation array such that i-th row will be
the original perm[i]-th row after the pivots are applied.
On exit, it is restored to be identity permutation.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, the matrix after the symmetric pivoting is applied.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dB COMPLEX array, dimension (LDDB,N)
The M-by-N matrix dB.
On entry, dB stores the columns after row pivoting is applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_clacpy_sym_out(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( clacpy_sym_out_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( clacpy_sym_out_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
else {
hipLaunchKernelGGL(( clacpy_sym_out_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
}
| 539cb99ee2312913cde380b727c51ee2e98aac6a.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Azzam Haidar
@author Ichitaro Yamazaki
@generated from magmablas/zlacpy_sym_out.cu, normal z -> c, Thu Oct 8 23:05:33 2020
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
static __device__
void clacpy_sym_out_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj+1];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_C_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
}
}
/******************************************************************************/
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_out_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_sym_out_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_out_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb);
}
/***************************************************************************//**
Purpose
-------
CLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as CLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the permutation array such that i-th row will be
the original perm[i]-th row after the pivots are applied.
On exit, it is restored to be identity permutation.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, the matrix after the symmetric pivoting is applied.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dB COMPLEX array, dimension (LDDB,N)
The M-by-N matrix dB.
On entry, dB stores the columns after row pivoting is applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
extern "C" void
magmablas_clacpy_sym_out(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
clacpy_sym_out_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
clacpy_sym_out_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
else {
clacpy_sym_out_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
}
|
1d448ba52c3473f52034b6de1aa34cb86010d0dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "hardswish.h"
#include "utils.h"
namespace nvinfer1
{
HardSwishPlugin::HardSwishPlugin()
{
}
HardSwishPlugin::~HardSwishPlugin()
{
}
// create the plugin at runtime from a byte stream
HardSwishPlugin::HardSwishPlugin(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mInputSize);
assert(d == a + length);
}
void HardSwishPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer), *a = d;
write(d, mInputSize);
assert(d == a + getSerializationSize());
}
size_t HardSwishPlugin::getSerializationSize() const
{
return sizeof(mInputSize);
}
int HardSwishPlugin::initialize()
{
return 0;
}
Dims HardSwishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(nbInputDims == 1);
assert(index == 0);
return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]);
}
// Set plugin namespace
void HardSwishPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* HardSwishPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType HardSwishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool HardSwishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool HardSwishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void HardSwishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
mInputSize = in[0].dims.d[0] * in[0].dims.d[1] * in[0].dims.d[2];
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void HardSwishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void HardSwishPlugin::detachFromContext() {}
const char* HardSwishPlugin::getPluginType() const
{
return "HardSwishLayer_TRT";
}
const char* HardSwishPlugin::getPluginVersion() const
{
return "1";
}
void HardSwishPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* HardSwishPlugin::clone() const
{
HardSwishPlugin *p = new HardSwishPlugin();
p->setPluginNamespace(mPluginNamespace);
p->setInputSize(mInputSize);
return p;
}
__global__ void HardSwishKer(const float *in, float *out, int size) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= size)
return;
if (in[idx] >= 3.0f)
out[idx] = in[idx];
else if (in[idx] < -3.0f)
out[idx] = 0.0f;
else
out[idx] = in[idx] * (in[idx] + 3.0f) / 6.0f;
}
void HardSwishPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
int numElem = batchSize * mInputSize;
hipLaunchKernelGGL(( HardSwishKer), dim3((numElem + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[0], output, numElem);
}
int HardSwishPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection HardSwishPluginCreator::mFC{};
std::vector<PluginField> HardSwishPluginCreator::mPluginAttributes;
HardSwishPluginCreator::HardSwishPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* HardSwishPluginCreator::getPluginName() const
{
return "HardSwishLayer_TRT";
}
const char* HardSwishPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* HardSwishPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* HardSwishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
HardSwishPlugin* obj = new HardSwishPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* HardSwishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
HardSwishPlugin* obj = new HardSwishPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 1d448ba52c3473f52034b6de1aa34cb86010d0dc.cu | #include <assert.h>
#include "hardswish.h"
#include "utils.h"
namespace nvinfer1
{
HardSwishPlugin::HardSwishPlugin()
{
}
HardSwishPlugin::~HardSwishPlugin()
{
}
// create the plugin at runtime from a byte stream
HardSwishPlugin::HardSwishPlugin(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mInputSize);
assert(d == a + length);
}
void HardSwishPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer), *a = d;
write(d, mInputSize);
assert(d == a + getSerializationSize());
}
size_t HardSwishPlugin::getSerializationSize() const
{
return sizeof(mInputSize);
}
int HardSwishPlugin::initialize()
{
return 0;
}
Dims HardSwishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(nbInputDims == 1);
assert(index == 0);
return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]);
}
// Set plugin namespace
void HardSwishPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* HardSwishPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType HardSwishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool HardSwishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool HardSwishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void HardSwishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
mInputSize = in[0].dims.d[0] * in[0].dims.d[1] * in[0].dims.d[2];
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void HardSwishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void HardSwishPlugin::detachFromContext() {}
const char* HardSwishPlugin::getPluginType() const
{
return "HardSwishLayer_TRT";
}
const char* HardSwishPlugin::getPluginVersion() const
{
return "1";
}
void HardSwishPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* HardSwishPlugin::clone() const
{
HardSwishPlugin *p = new HardSwishPlugin();
p->setPluginNamespace(mPluginNamespace);
p->setInputSize(mInputSize);
return p;
}
__global__ void HardSwishKer(const float *in, float *out, int size) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= size)
return;
if (in[idx] >= 3.0f)
out[idx] = in[idx];
else if (in[idx] < -3.0f)
out[idx] = 0.0f;
else
out[idx] = in[idx] * (in[idx] + 3.0f) / 6.0f;
}
void HardSwishPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
int numElem = batchSize * mInputSize;
HardSwishKer<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[0], output, numElem);
}
int HardSwishPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection HardSwishPluginCreator::mFC{};
std::vector<PluginField> HardSwishPluginCreator::mPluginAttributes;
HardSwishPluginCreator::HardSwishPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* HardSwishPluginCreator::getPluginName() const
{
return "HardSwishLayer_TRT";
}
const char* HardSwishPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* HardSwishPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* HardSwishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
HardSwishPlugin* obj = new HardSwishPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* HardSwishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
HardSwishPlugin* obj = new HardSwishPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
69137e49144b5b865032122301202ceec15b4827.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 69137e49144b5b865032122301202ceec15b4827.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
24b704ebfd02ca652ef51e487264a9a8111d4704.hip | // !!! This is a file automatically generated by hipify!!!
/* -------------------------------------------------------------------------------
* Tomocam Copyright (c) 2018
*
* The Regents of the University of California, through Lawrence Berkeley National
* Laboratory (subject to receipt of any required approvals from the U.S. Dept. of
* Energy). All rights reserved.
*
* If you have questions about your rights to use or distribute this software,
* please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov.
*
* NOTICE. This Software was developed under funding from the U.S. Department of
* Energy and the U.S. Government consequently retains certain rights. As such, the
* U.S. Government has been granted for itself and others acting on its behalf a
* paid-up, nonexclusive, irrevocable, worldwide license in the Software to
* reproduce, distribute copies to the public, prepare derivative works, and
* perform publicly and display publicly, and to permit other to do so.
*---------------------------------------------------------------------------------
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "dev_array.h"
#include "utils.cuh"
#include "potential_function.cuh"
namespace tomocam {
__global__ void tvd_update_kernel(dev_arrayf model, dev_arrayf objfn, float p, float sigma, float lambda) {
// thread ids
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
// global offsets
int I = blockDim.z * blockIdx.z;
int J = blockDim.y * blockIdx.y;
int K = blockDim.x * blockIdx.x;
// global ids
int x = I + i;
int y = J + j;
int z = K + k;
// last thread in the block
dim3_t dims = objfn.dims();
int imax = min(dims.x - I - 1, blockDim.z - 1);
int jmax = min(dims.y - J - 1, blockDim.y - 1);
int kmax = min(dims.z - K - 1, blockDim.x - 1);
if ((x < dims.x) && (y < dims.y) && (z < dims.z)) {
// size of the array
dim3_t dims = objfn.dims();
/* copy values into shared memory. */
__shared__ float s_val[NX + 2][NY + 2][NZ + 2];
// copy from global memory
s_val[i + 1][j + 1][k + 1] = model.at(x, y, z);
__syncthreads();
/* copy ghost cells, on all 6 faces */
// x = 0 face
if (i == 0)
s_val[i][j + 1][k + 1] = model.at(x - 1, y, z);
// x = Nx-1 face
if (i == imax)
s_val[i + 2][j + 1][k + 1] = model.at(x + 1, y, z);
__syncthreads();
if (j == 0)
s_val[i + 1][j][k + 1] = model.at(x, y - 1, z);
if (j == jmax)
s_val[i + 1][j + 2][k + 1] = model.at(x, y + 1, z);
__syncthreads();
if (k == 0)
s_val[i + 1][j + 1][k] = model.at(x, y, z - 1);
if (k == kmax)
s_val[i + 1][j + 1][k + 2] = model.at(x, y, z + 1);
__syncthreads();
/* copy ghost cells along 12 edges */
if (i == 0) {
if (j == 0)
s_val[i][j][k + 1] = model.at(x - 1, y - 1, z);
if (j == jmax)
s_val[i][j + 2][k + 1] = model.at(x - 1, y + 1, z);
}
if (i == imax) {
if (j == 0)
s_val[i + 2][j][k + 1] = model.at(x + 1, y - 1, z);
if (j == jmax)
s_val[i + 2][j + 2][k + 1] = model.at(x + 1, y + 1, z);
}
__syncthreads();
if (j == 0) {
if (k == 0)
s_val[i + 1][j][k] = model.at(x, y - 1, z - 1);
if (k == kmax)
s_val[i + 1][j][k + 2] = model.at(x, y - 1, z + 1);
}
if (j == jmax) {
if (k == 0)
s_val[i + 1][j + 2][k] = model.at(x, y + 1, z - 1);
if (k == kmax)
s_val[i + 1][j + 2][k + 2] = model.at(x, y + 1, z + 1);
}
__syncthreads();
// copy ghost-cells along y-direction
if (k == 0) {
if (i == 0)
s_val[i][j + 1][k] = model.at(x - 1, y, z - 1);
if (i == imax)
s_val[i + 2][j + 1][k] = model.at(x + 1, y, z - 1);
}
if (k == kmax) {
if (i == 0)
s_val[i][j + 1][k + 2] = model.at(x - 1, y, z + 1);
if (i == imax)
s_val[i + 2][j + 1][k + 2] = model.at(x + 1, y, z + 1);
}
__syncthreads();
/* copy ghost cells along 16 corners */
if (k == 0) {
if (j == 0) {
if (i == 0)
s_val[i][j][k] = model.at(x - 1, y - 1, z - 1);
if (i == imax) {
s_val[i + 2][j][k] = model.at(x + 1, y - 1, z - 1);
}
}
if (j == jmax) {
if (i == 0)
s_val[i][j + 2][k] = model.at(x - 1, y + 1, z - 1);
if (i == imax)
s_val[i + 2][j + 2][k] = model.at(x + 1, y + 1, z - 1);
}
}
if (k == kmax) {
if (j == 0) {
if (i == 0)
s_val[i][j][k + 2] = model.at(x - 1, y - 1, z + 1);
if (i == imax)
s_val[i + 2][j][k + 2] = model.at(x + 1, y - 1, z + 1);
}
if (j == jmax) {
if (i == 0)
s_val[i][j + 2][k + 2] = model.at(x - 1, y + 1, z + 1);
if (i == imax)
s_val[i + 2][j + 2][k + 2] = model.at(x + 1, y + 1, z + 1);
}
}
__syncthreads();
float v = s_val[i + 1][j + 1][k + 1];
float temp = 0.f;
for (int ix = 0; ix < 3; ix++)
for (int jy = 0; jy < 3; jy++)
for (int kz = 0; kz < 3; kz++) {
float delta = v - s_val[i+ix][j+jy][k+kz];
temp += weight(ix, jy, kz) * d_pot_func(delta, p, sigma);
}
objfn(x, y, z) += lambda * temp;
}
}
void add_total_var(dev_arrayf &model, dev_arrayf &objfn, float p, float sigma, float lambda, hipStream_t stream) {
// CUDA kernel parameters
Grid grid(objfn.dims());
// update gradients of objective function inplace
hipLaunchKernelGGL(( tvd_update_kernel), dim3(grid.blocks()), dim3(grid.threads()), 0, stream , model, objfn, p, sigma, lambda);
}
} // namespace tomocam
| 24b704ebfd02ca652ef51e487264a9a8111d4704.cu | /* -------------------------------------------------------------------------------
* Tomocam Copyright (c) 2018
*
* The Regents of the University of California, through Lawrence Berkeley National
* Laboratory (subject to receipt of any required approvals from the U.S. Dept. of
* Energy). All rights reserved.
*
* If you have questions about your rights to use or distribute this software,
* please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov.
*
* NOTICE. This Software was developed under funding from the U.S. Department of
* Energy and the U.S. Government consequently retains certain rights. As such, the
* U.S. Government has been granted for itself and others acting on its behalf a
* paid-up, nonexclusive, irrevocable, worldwide license in the Software to
* reproduce, distribute copies to the public, prepare derivative works, and
* perform publicly and display publicly, and to permit other to do so.
*---------------------------------------------------------------------------------
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "dev_array.h"
#include "utils.cuh"
#include "potential_function.cuh"
namespace tomocam {
__global__ void tvd_update_kernel(dev_arrayf model, dev_arrayf objfn, float p, float sigma, float lambda) {
// thread ids
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
// global offsets
int I = blockDim.z * blockIdx.z;
int J = blockDim.y * blockIdx.y;
int K = blockDim.x * blockIdx.x;
// global ids
int x = I + i;
int y = J + j;
int z = K + k;
// last thread in the block
dim3_t dims = objfn.dims();
int imax = min(dims.x - I - 1, blockDim.z - 1);
int jmax = min(dims.y - J - 1, blockDim.y - 1);
int kmax = min(dims.z - K - 1, blockDim.x - 1);
if ((x < dims.x) && (y < dims.y) && (z < dims.z)) {
// size of the array
dim3_t dims = objfn.dims();
/* copy values into shared memory. */
__shared__ float s_val[NX + 2][NY + 2][NZ + 2];
// copy from global memory
s_val[i + 1][j + 1][k + 1] = model.at(x, y, z);
__syncthreads();
/* copy ghost cells, on all 6 faces */
// x = 0 face
if (i == 0)
s_val[i][j + 1][k + 1] = model.at(x - 1, y, z);
// x = Nx-1 face
if (i == imax)
s_val[i + 2][j + 1][k + 1] = model.at(x + 1, y, z);
__syncthreads();
if (j == 0)
s_val[i + 1][j][k + 1] = model.at(x, y - 1, z);
if (j == jmax)
s_val[i + 1][j + 2][k + 1] = model.at(x, y + 1, z);
__syncthreads();
if (k == 0)
s_val[i + 1][j + 1][k] = model.at(x, y, z - 1);
if (k == kmax)
s_val[i + 1][j + 1][k + 2] = model.at(x, y, z + 1);
__syncthreads();
/* copy ghost cells along 12 edges */
if (i == 0) {
if (j == 0)
s_val[i][j][k + 1] = model.at(x - 1, y - 1, z);
if (j == jmax)
s_val[i][j + 2][k + 1] = model.at(x - 1, y + 1, z);
}
if (i == imax) {
if (j == 0)
s_val[i + 2][j][k + 1] = model.at(x + 1, y - 1, z);
if (j == jmax)
s_val[i + 2][j + 2][k + 1] = model.at(x + 1, y + 1, z);
}
__syncthreads();
if (j == 0) {
if (k == 0)
s_val[i + 1][j][k] = model.at(x, y - 1, z - 1);
if (k == kmax)
s_val[i + 1][j][k + 2] = model.at(x, y - 1, z + 1);
}
if (j == jmax) {
if (k == 0)
s_val[i + 1][j + 2][k] = model.at(x, y + 1, z - 1);
if (k == kmax)
s_val[i + 1][j + 2][k + 2] = model.at(x, y + 1, z + 1);
}
__syncthreads();
// copy ghost-cells along y-direction
if (k == 0) {
if (i == 0)
s_val[i][j + 1][k] = model.at(x - 1, y, z - 1);
if (i == imax)
s_val[i + 2][j + 1][k] = model.at(x + 1, y, z - 1);
}
if (k == kmax) {
if (i == 0)
s_val[i][j + 1][k + 2] = model.at(x - 1, y, z + 1);
if (i == imax)
s_val[i + 2][j + 1][k + 2] = model.at(x + 1, y, z + 1);
}
__syncthreads();
/* copy ghost cells along 16 corners */
if (k == 0) {
if (j == 0) {
if (i == 0)
s_val[i][j][k] = model.at(x - 1, y - 1, z - 1);
if (i == imax) {
s_val[i + 2][j][k] = model.at(x + 1, y - 1, z - 1);
}
}
if (j == jmax) {
if (i == 0)
s_val[i][j + 2][k] = model.at(x - 1, y + 1, z - 1);
if (i == imax)
s_val[i + 2][j + 2][k] = model.at(x + 1, y + 1, z - 1);
}
}
if (k == kmax) {
if (j == 0) {
if (i == 0)
s_val[i][j][k + 2] = model.at(x - 1, y - 1, z + 1);
if (i == imax)
s_val[i + 2][j][k + 2] = model.at(x + 1, y - 1, z + 1);
}
if (j == jmax) {
if (i == 0)
s_val[i][j + 2][k + 2] = model.at(x - 1, y + 1, z + 1);
if (i == imax)
s_val[i + 2][j + 2][k + 2] = model.at(x + 1, y + 1, z + 1);
}
}
__syncthreads();
float v = s_val[i + 1][j + 1][k + 1];
float temp = 0.f;
for (int ix = 0; ix < 3; ix++)
for (int jy = 0; jy < 3; jy++)
for (int kz = 0; kz < 3; kz++) {
float delta = v - s_val[i+ix][j+jy][k+kz];
temp += weight(ix, jy, kz) * d_pot_func(delta, p, sigma);
}
objfn(x, y, z) += lambda * temp;
}
}
void add_total_var(dev_arrayf &model, dev_arrayf &objfn, float p, float sigma, float lambda, cudaStream_t stream) {
// CUDA kernel parameters
Grid grid(objfn.dims());
// update gradients of objective function inplace
tvd_update_kernel<<< grid.blocks(), grid.threads(), 0, stream >>>(model, objfn, p, sigma, lambda);
}
} // namespace tomocam
|
6ce9d8ca9fd72bbbd9f1054ff961d1455b9e7803.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/SlitGeometryFillerGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU
*/
#include "ParticleDataUtilities.h"
#include "SlitGeometryFillerGPU.cuh"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/RandomNumbers.h"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
/*!
* \param d_pos Particle positions
* \param d_vel Particle velocities
* \param d_tag Particle tags
* \param geom Slit geometry to fill
* \param z_min Lower bound to lower fill region
* \param z_max Upper bound to upper fill region
* \param box Local simulation box
* \param type Type of fill particles
* \param N_lo Number of particles to fill in lower region
* \param N_hi Number of particles to fill in upper region
* \param first_tag First tag of filled particles
* \param first_idx First (local) particle index of filled particles
* \param vel_factor Scale factor for uniform normal velocities consistent with particle mass /
* temperature \param timestep Current timestep \param seed User seed to PRNG for drawing velocities
*
* \b Implementation:
*
* Using one thread per particle (in both slabs), the thread is assigned to fill either the lower
* or upper region. This defines a local cuboid of volume to fill. The thread index is translated
* into a particle tag and local particle index. A random position is drawn within the cuboid. A
* random velocity is drawn consistent with the speed of the moving wall.
*/
__global__ void slit_draw_particles(Scalar4* d_pos,
Scalar4* d_vel,
unsigned int* d_tag,
const mpcd::detail::SlitGeometry geom,
const Scalar z_min,
const Scalar z_max,
const BoxDim box,
const unsigned int type,
const unsigned int N_lo,
const unsigned int N_tot,
const unsigned int first_tag,
const unsigned int first_idx,
const Scalar vel_factor,
const uint64_t timestep,
const uint16_t seed)
{
// one thread per particle
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
// determine the fill region based on current index
signed char sign = (idx >= N_lo) - (idx < N_lo);
Scalar3 lo = box.getLo();
Scalar3 hi = box.getHi();
if (sign == -1) // bottom
{
lo.z = z_min;
hi.z = -geom.getH();
}
else // top
{
lo.z = geom.getH();
hi.z = z_max;
}
// particle tag and index
const unsigned int tag = first_tag + idx;
const unsigned int pidx = first_idx + idx;
d_tag[pidx] = tag;
// initialize random number generator for positions and velocity
hoomd::RandomGenerator rng(
hoomd::Seed(hoomd::RNGIdentifier::SlitGeometryFiller, timestep, seed),
hoomd::Counter(tag));
d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng),
hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng),
hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng),
__int_as_scalar(type));
hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0);
Scalar3 vel;
gen(vel.x, vel.y, rng);
vel.z = gen(rng);
// TODO: should these be given zero net-momentum contribution (relative to the frame of
// reference?)
d_vel[pidx] = make_scalar4(vel.x + sign * geom.getVelocity(),
vel.y,
vel.z,
__int_as_scalar(mpcd::detail::NO_CELL));
}
} // end namespace kernel
/*!
* \param d_pos Particle positions
* \param d_vel Particle velocities
* \param d_tag Particle tags
* \param geom Slit geometry to fill
* \param z_min Lower bound to lower fill region
* \param z_max Upper bound to upper fill region
* \param box Local simulation box
* \param mass Mass of fill particles
* \param type Type of fill particles
* \param N_lo Number of particles to fill in lower region
* \param N_hi Number of particles to fill in upper region
* \param first_tag First tag of filled particles
* \param first_idx First (local) particle index of filled particles
* \param kT Temperature for fill particles
* \param timestep Current timestep
* \param seed User seed to PRNG for drawing velocities
* \param block_size Number of threads per block
*
* \sa kernel::slit_draw_particles
*/
hipError_t slit_draw_particles(Scalar4* d_pos,
Scalar4* d_vel,
unsigned int* d_tag,
const mpcd::detail::SlitGeometry& geom,
const Scalar z_min,
const Scalar z_max,
const BoxDim& box,
const Scalar mass,
const unsigned int type,
const unsigned int N_lo,
const unsigned int N_hi,
const unsigned int first_tag,
const unsigned int first_idx,
const Scalar kT,
const uint64_t timestep,
const uint16_t seed,
const unsigned int block_size)
{
const unsigned int N_tot = N_lo + N_hi;
if (N_tot == 0)
return hipSuccess;
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)kernel::slit_draw_particles);
max_block_size = attr.maxThreadsPerBlock;
// precompute factor for rescaling the velocities since it is the same for all particles
const Scalar vel_factor = fast::sqrt(kT / mass);
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
hipLaunchKernelGGL(( kernel::slit_draw_particles), dim3(grid), dim3(run_block_size), 0, 0, d_pos,
d_vel,
d_tag,
geom,
z_min,
z_max,
box,
type,
N_lo,
N_tot,
first_tag,
first_idx,
vel_factor,
timestep,
seed);
return hipSuccess;
}
} // end namespace gpu
} // end namespace mpcd
| 6ce9d8ca9fd72bbbd9f1054ff961d1455b9e7803.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/SlitGeometryFillerGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU
*/
#include "ParticleDataUtilities.h"
#include "SlitGeometryFillerGPU.cuh"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/RandomNumbers.h"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
/*!
* \param d_pos Particle positions
* \param d_vel Particle velocities
* \param d_tag Particle tags
* \param geom Slit geometry to fill
* \param z_min Lower bound to lower fill region
* \param z_max Upper bound to upper fill region
* \param box Local simulation box
* \param type Type of fill particles
* \param N_lo Number of particles to fill in lower region
* \param N_hi Number of particles to fill in upper region
* \param first_tag First tag of filled particles
* \param first_idx First (local) particle index of filled particles
* \param vel_factor Scale factor for uniform normal velocities consistent with particle mass /
* temperature \param timestep Current timestep \param seed User seed to PRNG for drawing velocities
*
* \b Implementation:
*
* Using one thread per particle (in both slabs), the thread is assigned to fill either the lower
* or upper region. This defines a local cuboid of volume to fill. The thread index is translated
* into a particle tag and local particle index. A random position is drawn within the cuboid. A
* random velocity is drawn consistent with the speed of the moving wall.
*/
// Fill kernel: one thread per virtual particle. Threads [0, N_lo) fill the slab
// below the lower wall, threads [N_lo, N_tot) fill the slab above the upper wall.
__global__ void slit_draw_particles(Scalar4* d_pos,
                                    Scalar4* d_vel,
                                    unsigned int* d_tag,
                                    const mpcd::detail::SlitGeometry geom,
                                    const Scalar z_min,
                                    const Scalar z_max,
                                    const BoxDim box,
                                    const unsigned int type,
                                    const unsigned int N_lo,
                                    const unsigned int N_tot,
                                    const unsigned int first_tag,
                                    const unsigned int first_idx,
                                    const Scalar vel_factor,
                                    const uint64_t timestep,
                                    const uint16_t seed)
    {
    // one thread per particle
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N_tot)
        return;
    // determine the fill region based on current index:
    // sign is -1 for the lower slab, +1 for the upper slab
    signed char sign = (idx >= N_lo) - (idx < N_lo);
    Scalar3 lo = box.getLo();
    Scalar3 hi = box.getHi();
    if (sign == -1) // bottom: fill between z_min and the lower wall at -H
        {
        lo.z = z_min;
        hi.z = -geom.getH();
        }
    else // top: fill between the upper wall at +H and z_max
        {
        lo.z = geom.getH();
        hi.z = z_max;
        }
    // particle tag and index: filled particles occupy a contiguous tag/index range
    const unsigned int tag = first_tag + idx;
    const unsigned int pidx = first_idx + idx;
    d_tag[pidx] = tag;
    // initialize random number generator for positions and velocity,
    // seeded per-tag and per-timestep so draws are reproducible yet decorrelated
    hoomd::RandomGenerator rng(
        hoomd::Seed(hoomd::RNGIdentifier::SlitGeometryFiller, timestep, seed),
        hoomd::Counter(tag));
    // NOTE(review): the x, y, z uniform draws must stay in this exact order —
    // reordering them changes the RNG stream and hence the generated configuration.
    d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng),
                               hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng),
                               hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng),
                               __int_as_scalar(type));
    // vel_factor = sqrt(kT/m) is precomputed on the host (same for all particles)
    hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0);
    Scalar3 vel;
    gen(vel.x, vel.y, rng);
    vel.z = gen(rng);
    // TODO: should these be given zero net-momentum contribution (relative to the frame of
    // reference?)
    // shift the x velocity by the wall speed, with opposite sign in each slab
    d_vel[pidx] = make_scalar4(vel.x + sign * geom.getVelocity(),
                               vel.y,
                               vel.z,
                               __int_as_scalar(mpcd::detail::NO_CELL));
    }
} // end namespace kernel
/*!
* \param d_pos Particle positions
* \param d_vel Particle velocities
* \param d_tag Particle tags
* \param geom Slit geometry to fill
* \param z_min Lower bound to lower fill region
* \param z_max Upper bound to upper fill region
* \param box Local simulation box
* \param mass Mass of fill particles
* \param type Type of fill particles
* \param N_lo Number of particles to fill in lower region
* \param N_hi Number of particles to fill in upper region
* \param first_tag First tag of filled particles
* \param first_idx First (local) particle index of filled particles
* \param kT Temperature for fill particles
* \param timestep Current timestep
* \param seed User seed to PRNG for drawing velocities
* \param block_size Number of threads per block
*
* \sa kernel::slit_draw_particles
*/
// Host wrapper: sizes the launch for kernel::slit_draw_particles and fires it.
// Returns the error from cudaFuncGetAttributes if the kernel query fails
// (the previous version silently ignored that status), otherwise cudaSuccess.
cudaError_t slit_draw_particles(Scalar4* d_pos,
                                Scalar4* d_vel,
                                unsigned int* d_tag,
                                const mpcd::detail::SlitGeometry& geom,
                                const Scalar z_min,
                                const Scalar z_max,
                                const BoxDim& box,
                                const Scalar mass,
                                const unsigned int type,
                                const unsigned int N_lo,
                                const unsigned int N_hi,
                                const unsigned int first_tag,
                                const unsigned int first_idx,
                                const Scalar kT,
                                const uint64_t timestep,
                                const uint16_t seed,
                                const unsigned int block_size)
    {
    const unsigned int N_tot = N_lo + N_hi;
    if (N_tot == 0)
        return cudaSuccess;

    // query the kernel's hardware limit so the requested block size can be clamped
    cudaFuncAttributes attr;
    cudaError_t error = cudaFuncGetAttributes(&attr, (const void*)kernel::slit_draw_particles);
    if (error != cudaSuccess)
        return error;
    unsigned int max_block_size = attr.maxThreadsPerBlock;

    // precompute factor for rescaling the velocities since it is the same for all particles
    const Scalar vel_factor = fast::sqrt(kT / mass);

    unsigned int run_block_size = min(block_size, max_block_size);
    // ceil-division: the previous "N_tot / run_block_size + 1" launched one extra,
    // fully idle block whenever N_tot was an exact multiple of the block size
    dim3 grid((N_tot + run_block_size - 1) / run_block_size);
    kernel::slit_draw_particles<<<grid, run_block_size>>>(d_pos,
                                                          d_vel,
                                                          d_tag,
                                                          geom,
                                                          z_min,
                                                          z_max,
                                                          box,
                                                          type,
                                                          N_lo,
                                                          N_tot,
                                                          first_tag,
                                                          first_idx,
                                                          vel_factor,
                                                          timestep,
                                                          seed);
    return cudaSuccess;
    }
} // end namespace gpu
} // end namespace mpcd
|
76ff3f8d24e5c16006f9d42e04e1e9de6c780e35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "VagueMath.h"
#include "IndiceTools_GPU.h"
using namespace gpu;
// Attention : le choix du nom est important !
// VagueDevice.cu et non Vague.cu
// Dans ce dernier cas, problème de linkage, car le nom du .cu est le même que le nom d'un .cpp (host)
// On a donc ajouté Device (ou n'importe quoi) pour que les noms soient différents !
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void vague(uchar4* ptrDevPixels,uint w, uint h,float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
 * Colors the w x h image with the wave pattern at animation time t.
 * Each thread handles every nbThread-th pixel (grid-stride style walk
 * over the flattened pixel array).
 */
__global__ void vague(uchar4* ptrDevPixels, uint w, uint h, float t)
    {
    VagueMath math(w, h);

    const int pixelCount = w * h;
    const int stride = Indice2D::nbThread();

    for (int s = Indice2D::tid(); s < pixelCount; s += stride)
        {
        int i = 0; // row in [0, h[
        int j = 0; // column in [0, w[
        IndiceTools::toIJ(s, w, &i, &j);       // flat index -> (i, j)
        math.colorIJ(&ptrDevPixels[s], i, j, t); // write pixel s
        }
    }
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 76ff3f8d24e5c16006f9d42e04e1e9de6c780e35.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "VagueMath.h"
#include "IndiceTools_GPU.h"
using namespace gpu;
// Attention : le choix du nom est important !
// VagueDevice.cu et non Vague.cu
// Dans ce dernier cas, problème de linkage, car le nom du .cu est le même que le nom d'un .cpp (host)
// On a donc ajouté Device (ou n'importe quoi) pour que les noms soient différents !
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void vague(uchar4* ptrDevPixels,uint w, uint h,float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
 * Renders the wave pattern into the w x h pixel buffer for time t.
 * Threads cooperatively sweep the flattened image: thread `tid` starts at
 * pixel `tid` and advances by the total thread count each iteration.
 */
__global__ void vague(uchar4* ptrDevPixels, uint w, uint h, float t)
    {
    VagueMath vagueMath = VagueMath(w, h);

    const int total = w * h;
    const int step = Indice2D::nbThread();
    int s = Indice2D::tid();

    while (s < total)
        {
        int i; // row index in [0, h[
        int j; // column index in [0, w[
        IndiceTools::toIJ(s, w, &i, &j); // recover (i, j) from the flat index
        vagueMath.colorIJ(&ptrDevPixels[s], i, j, t);
        s += step;
        }
    }
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
cd6042c38e8aad256e25b92e56f8a5d9e50eeace.hip | // !!! This is a file automatically generated by hipify!!!
//System header
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA header
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// One thread per output entry: accumulates the dot product of logical row `row`
// of A with b, then adds it into c[row]. A is stored transposed (column access),
// so a[k*size + row] lets adjacent threads read adjacent addresses — the
// coalesced pattern the original comments describe.
__global__ void CUParaSgemv(const float *a, float *b, float *c, unsigned int size)
{
    const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= size)
        return;

    float acc = 0.0f;
    for (unsigned int k = 0; k < size; ++k)
        acc += a[k * size + row] * b[k];

    c[row] += acc; // accumulate: host must zero c beforehand
}
//__global__ void transpose(float *odata, float *idata, int width, int height)
//{
// __shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
//
// unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
// unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
// if((xIndex < width) && (yIndex < height))
// {
// unsigned int index_in = yIndex * width + xIndex;
// block[threadIdx.y][threadIdx.x] = idata[index_in];
// }
//
// __syncthreads();
//
// xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
// yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
// if((xIndex < height) && (yIndex < width))
// {
// unsigned int index_out = yIndex * height + xIndex;
// odata[index_out] = block[threadIdx.x][threadIdx.y];
// }
//
// float temp = 0.0;
// unsigned int idx = blockIdx.x * BLOCK_DIM + threadIdx.x;
//
// if(idx<height){
// for(int i=0;i<width;i++)
// {
// temp = idata[idx*width+i];
// odata[i*]
// }
// }
//
//}
//void easyTranspose(float o_a[],float i_a[],int size)
//{
// int col = size*size;
// for(int i = 0;i<col;i++)
// {
// for(int j=0;j<col;j++)
// o_a[j*col+i]=i_a[i*col+j];
// }
//}
// CPU reference implementation of the matrix-vector product C = A * B,
// where A is a size x size row-major matrix and B, C are length-size
// vectors. C is overwritten (not accumulated into).
void simple_sgemv(float *A, float *B, float *C, unsigned int size)
{
    for (unsigned int row = 0; row < size; ++row)
    {
        const float *a_row = A + row * size; // start of logical row `row`
        float acc = 0.0f;
        for (unsigned int col = 0; col < size; ++col)
        {
            acc += a_row[col] * B[col];
        }
        C[row] = acc;
    }
}
/*
 * Reports a failed HIP API call: prints the original "Cuda Error(...)"
 * diagnostic, pauses (Windows-style), and returns nonzero so the caller
 * can bail out. `what` names the failing call.
 */
static int reportIfError(hipError_t status, const char *what)
{
    if (status != hipSuccess)
    {
        printf("\nCuda Error(%s):%s\n", what, hipGetErrorString(status));
        system("pause\n");
        return 1;
    }
    return 0;
}

/*
 * Benchmarks a dense matrix-vector product with 3*Nodes unknowns on the CPU
 * (simple_sgemv) and on the GPU (CUParaSgemv), timing host allocation,
 * initialization, the host-side transpose, the CPU sgemv, and the GPU
 * malloc / copy-in / kernel / copy-out phases separately.
 *
 * Fixes vs. the previous revision: h_cpu is no longer leaked, the CPU sgemv
 * time is actually converted to milliseconds (it used to print raw clock
 * ticks), the unused h_check buffer and large dead commented-out transpose
 * experiment were removed, and API returns are checked directly.
 */
int main()
{
    // number of nodes (equations); each node has a 3-direction displacement
    unsigned int Nodes = 100; // threshold observed around 3500 (old) / 4500 (new)
    unsigned int ARRAY_SIZE = 3 * Nodes;                // vector length
    unsigned int ARRAY_SIZE2 = ARRAY_SIZE * ARRAY_SIZE; // matrix element count

    // CPU timers
    clock_t start, finish;            // CPU sgemv
    clock_t malloc_start, malloc_fin; // host allocation
    clock_t init_start, init_fin;     // host initialization
    clock_t trans_start, trans_fin;   // host-side transpose
    float duration;
    float malloc_duration;
    float init_duration;
    float trans_duration;

    // GPU timers (one hipEvent pair per phase)
    float cal_time;
    float cudamalloctime;
    float cudamemcpytime;
    float cudamemcpyout;
    hipEvent_t d_start, d_stop;
    hipEvent_t cuda_mallocstart, cuda_mallocfin;
    hipEvent_t cuda_memcpystart, cuda_memcpyfin;
    hipEvent_t cuda_memcpyout_start, cuda_memcpyout_fin;

    printf("The nodes number is: %d\n", Nodes);
    printf("The total equations number is : %d\n", ARRAY_SIZE);
    printf("Total bytes will be transfered\n");
    printf("\tMatrix A: %d MB\n", ARRAY_SIZE2 * 4 / 1000000);
    printf("\tVector b: %d KB\n", ARRAY_SIZE * 4 / 1000);

    printf("Pre-processing in CPU...\n");

    /* ---------- host allocation ---------- */
    malloc_start = clock();
    float *h_a = (float *)malloc(sizeof(float) * ARRAY_SIZE2);   // A, row-major
    float *h_b = (float *)malloc(sizeof(float) * ARRAY_SIZE);    // b
    float *h_c = (float *)malloc(sizeof(float) * ARRAY_SIZE);    // GPU result
    float *h_cpu = (float *)malloc(sizeof(float) * ARRAY_SIZE);  // CPU reference
    float *h_atr = (float *)malloc(sizeof(float) * ARRAY_SIZE2); // A transposed
    malloc_fin = clock();
    malloc_duration = (float)(malloc_fin - malloc_start) / CLOCKS_PER_SEC;
    printf("\n%f seconds passed in mallocation\n", malloc_duration);
    printf("\n");

    /* ---------- host initialization ---------- */
    init_start = clock();
    srand((int)time(0));
    for (unsigned int i = 0; i < ARRAY_SIZE2; i++)
        h_a[i] = float(i);
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
        h_b[i] = float(i);
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
        h_c[i] = float(0);
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
        h_cpu[i] = float(0);

    // transpose A so the kernel's column access pattern is coalesced
    trans_start = clock();
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        for (unsigned int j = 0; j < ARRAY_SIZE; j++)
            h_atr[j * ARRAY_SIZE + i] = h_a[i * ARRAY_SIZE + j];
    }
    trans_fin = clock();
    trans_duration = (float)(trans_fin - trans_start) / CLOCKS_PER_SEC;
    printf("\n%f seconds passed in transpose..\n", trans_duration);

    init_fin = clock();
    init_duration = (float)(init_fin - init_start) / CLOCKS_PER_SEC;
    printf("\n%f seconds passed in initalizaton\n", init_duration);
    printf("\n");
    printf("******************End Pre-processing.**************\n");

    /* ---------- CPU reference sgemv ---------- */
    start = clock();
    simple_sgemv(h_a, h_b, h_cpu, ARRAY_SIZE);
    finish = clock();
    // BUG FIX: convert clock ticks to milliseconds; the old code printed raw
    // ticks (the CLOCKS_PER_SEC division was commented out) while the message
    // claimed milliseconds.
    duration = 1000.0f * (float)(finish - start) / CLOCKS_PER_SEC;
    printf("\n%f milliseconds passed in CPU_sgemv\n", duration);
    printf("\n");

    printf("Pre-processing in GPU...\n");

    /* ---------- device allocation ---------- */
    float *d_a;
    float *d_b;
    float *d_c;
    hipEventCreate(&cuda_mallocstart);
    hipEventCreate(&cuda_mallocfin);
    hipEventRecord(cuda_mallocstart, 0);
    if (reportIfError(hipMalloc((void **)&d_a, sizeof(float) * ARRAY_SIZE2), "hipMalloc Matrix"))
        return 0;
    if (reportIfError(hipMalloc((void **)&d_b, sizeof(float) * ARRAY_SIZE), "hipMalloc Vector"))
        return 0;
    if (reportIfError(hipMalloc((void **)&d_c, sizeof(float) * ARRAY_SIZE), "hipMalloc result"))
        return 0;
    hipDeviceSynchronize();
    hipEventRecord(cuda_mallocfin, 0);
    hipEventSynchronize(cuda_mallocfin);
    hipEventElapsedTime(&cudamalloctime, cuda_mallocstart, cuda_mallocfin);
    printf("\n%f milliseconds passed in GPU malloc\n", cudamalloctime);

    /* ---------- host -> device copies ---------- */
    hipEventCreate(&cuda_memcpystart);
    hipEventCreate(&cuda_memcpyfin);
    hipEventRecord(cuda_memcpystart, 0);
    // upload the transposed matrix so the kernel reads columns (coalesced)
    if (reportIfError(hipMemcpy(d_a, h_atr, sizeof(float) * ARRAY_SIZE2, hipMemcpyHostToDevice),
                      "hipMemcpy matrix"))
        return 0;
    if (reportIfError(hipMemcpy(d_b, h_b, sizeof(float) * ARRAY_SIZE, hipMemcpyHostToDevice),
                      "hipMemcpy vector"))
        return 0;
    // d_c must start zeroed because the kernel accumulates with +=
    if (reportIfError(hipMemcpy(d_c, h_c, sizeof(float) * ARRAY_SIZE, hipMemcpyHostToDevice),
                      "hipMemcpy result"))
        return 0;
    hipDeviceSynchronize();
    hipEventRecord(cuda_memcpyfin, 0);
    hipEventSynchronize(cuda_memcpyfin);
    hipEventElapsedTime(&cudamemcpytime, cuda_memcpystart, cuda_memcpyfin);
    printf("\n%f milliseconds passed in cuda memory copy\n", cudamemcpytime);

    printf("*****************End Pre-processing in GPU********************\n");

    printf("\n*****************A transpose before the calculation********************\n");
    // The transpose already happened on the host above; the old in-kernel
    // transpose experiment that lived here was dead (fully commented-out) code.
    printf("\n*****************End of transpose********************\n");

    /* ---------- kernel launch ---------- */
    printf("\n\nRunning Kernel...\n\n");
    hipEventCreate(&d_start);
    hipEventCreate(&d_stop);
    hipEventRecord(d_start, 0);
    // ceil-division: just enough 512-thread blocks to cover ARRAY_SIZE rows
    int nblocks = (ARRAY_SIZE + 511) / 512;
    hipLaunchKernelGGL((CUParaSgemv), dim3(nblocks), dim3(512), 0, 0, d_a, d_b, d_c, ARRAY_SIZE);
    hipDeviceSynchronize();
    hipEventRecord(d_stop, 0);
    hipEventSynchronize(d_stop);
    hipEventElapsedTime(&cal_time, d_start, d_stop);
    printf("\n%f milliseconds passed in GPU_CUParaSgemv\n", cal_time);
    if (reportIfError(hipGetLastError(), "GPU calculation"))
        return 0;

    printf("\n*********Copy Data to Host*********\n");

    /* ---------- device -> host copy ---------- */
    hipEventCreate(&cuda_memcpyout_start);
    hipEventCreate(&cuda_memcpyout_fin);
    hipEventRecord(cuda_memcpyout_start, 0);
    hipError_t cudaStatus = hipMemcpy(h_c, d_c, sizeof(float) * ARRAY_SIZE, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    hipEventRecord(cuda_memcpyout_fin, 0);
    hipEventSynchronize(cuda_memcpyout_fin);
    hipEventElapsedTime(&cudamemcpyout, cuda_memcpyout_start, cuda_memcpyout_fin);
    printf("\n%f milliseconds passed in cuda memory copy out\n", cudamemcpyout);
    if (cudaStatus != hipSuccess)
    {
        printf("\nCuda Error:%s\n", hipGetErrorString(cudaStatus));
        system("pause\n");
        return 0;
    }

    /* ---------- cleanup ---------- */
    hipEventDestroy(cuda_mallocstart);
    hipEventDestroy(cuda_mallocfin);
    hipEventDestroy(cuda_memcpystart);
    hipEventDestroy(cuda_memcpyfin);
    hipEventDestroy(d_start);
    hipEventDestroy(d_stop);
    hipEventDestroy(cuda_memcpyout_start);
    hipEventDestroy(cuda_memcpyout_fin);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    free(h_atr);
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_cpu); // BUG FIX: h_cpu was previously leaked

    system("pause");
    return 0;
}
| cd6042c38e8aad256e25b92e56f8a5d9e50eeace.cu | //System header
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA header
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Each thread owns one entry of c: it forms the dot product of logical row
// `row` of A with the vector b and accumulates it into c[row]. A is stored
// transposed, so the k-loop strides by `size` while neighbouring threads touch
// neighbouring addresses — the coalesced layout the original comments note.
__global__ void CUParaSgemv(const float *a, float *b, float *c, unsigned int size)
{
    const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= size)
        return;

    float dot = 0.0f;
    unsigned int k = 0;
    while (k < size)
    {
        dot += a[k * size + row] * b[k];
        ++k;
    }

    // accumulate into c: the host zeroes c before the launch
    c[row] += dot;
}
//__global__ void transpose(float *odata, float *idata, int width, int height)
//{
// __shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
//
// unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
// unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
// if((xIndex < width) && (yIndex < height))
// {
// unsigned int index_in = yIndex * width + xIndex;
// block[threadIdx.y][threadIdx.x] = idata[index_in];
// }
//
// __syncthreads();
//
// xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
// yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
// if((xIndex < height) && (yIndex < width))
// {
// unsigned int index_out = yIndex * height + xIndex;
// odata[index_out] = block[threadIdx.x][threadIdx.y];
// }
//
// float temp = 0.0;
// unsigned int idx = blockIdx.x * BLOCK_DIM + threadIdx.x;
//
// if(idx<height){
// for(int i=0;i<width;i++)
// {
// temp = idata[idx*width+i];
// odata[i*]
// }
// }
//
//}
//void easyTranspose(float o_a[],float i_a[],int size)
//{
// int col = size*size;
// for(int i = 0;i<col;i++)
// {
// for(int j=0;j<col;j++)
// o_a[j*col+i]=i_a[i*col+j];
// }
//}
// Host-side reference: dense matrix-vector product C = A * B, with A a
// size-by-size row-major matrix. Each entry of C is recomputed from scratch
// (C is overwritten, not accumulated into).
void simple_sgemv(float *A, float *B, float *C, unsigned int size)
{
    unsigned int i, j;
    for (i = 0; i < size; i++)
    {
        const unsigned int base = i * size; // offset of row i
        float sum = 0.0f;
        for (j = 0; j < size; j++)
            sum += A[base + j] * B[j];
        C[i] = sum;
    }
}
/*
 * Reports a failed CUDA API call: prints the original "Cuda Error(...)"
 * diagnostic, pauses (Windows-style), and returns nonzero so the caller
 * can bail out. `what` names the failing call.
 */
static int reportIfError(cudaError_t status, const char *what)
{
    if (status != cudaSuccess)
    {
        printf("\nCuda Error(%s):%s\n", what, cudaGetErrorString(status));
        system("pause\n");
        return 1;
    }
    return 0;
}

/*
 * Benchmarks a dense matrix-vector product with 3*Nodes unknowns on the CPU
 * (simple_sgemv) and on the GPU (CUParaSgemv), timing host allocation,
 * initialization, the host-side transpose, the CPU sgemv, and the GPU
 * malloc / copy-in / kernel / copy-out phases separately.
 *
 * Fixes vs. the previous revision: h_cpu is no longer leaked, the CPU sgemv
 * time is actually converted to milliseconds (it used to print raw clock
 * ticks), the unused h_check buffer and the dead commented-out transpose
 * experiment were removed, the deprecated cudaThreadSynchronize() was
 * replaced by cudaDeviceSynchronize(), and API returns are checked directly.
 */
int main()
{
    // number of nodes (equations); each node has a 3-direction displacement
    unsigned int Nodes = 100; // threshold observed around 3500 (old) / 4500 (new)
    unsigned int ARRAY_SIZE = 3 * Nodes;                // vector length
    unsigned int ARRAY_SIZE2 = ARRAY_SIZE * ARRAY_SIZE; // matrix element count

    // CPU timers
    clock_t start, finish;            // CPU sgemv
    clock_t malloc_start, malloc_fin; // host allocation
    clock_t init_start, init_fin;     // host initialization
    clock_t trans_start, trans_fin;   // host-side transpose
    float duration;
    float malloc_duration;
    float init_duration;
    float trans_duration;

    // GPU timers (one cudaEvent pair per phase)
    float cal_time;
    float cudamalloctime;
    float cudamemcpytime;
    float cudamemcpyout;
    cudaEvent_t d_start, d_stop;
    cudaEvent_t cuda_mallocstart, cuda_mallocfin;
    cudaEvent_t cuda_memcpystart, cuda_memcpyfin;
    cudaEvent_t cuda_memcpyout_start, cuda_memcpyout_fin;

    printf("The nodes number is: %d\n", Nodes);
    printf("The total equations number is : %d\n", ARRAY_SIZE);
    printf("Total bytes will be transfered\n");
    printf("\tMatrix A: %d MB\n", ARRAY_SIZE2 * 4 / 1000000);
    printf("\tVector b: %d KB\n", ARRAY_SIZE * 4 / 1000);

    printf("Pre-processing in CPU...\n");

    /* ---------- host allocation ---------- */
    malloc_start = clock();
    float *h_a = (float *)malloc(sizeof(float) * ARRAY_SIZE2);   // A, row-major
    float *h_b = (float *)malloc(sizeof(float) * ARRAY_SIZE);    // b
    float *h_c = (float *)malloc(sizeof(float) * ARRAY_SIZE);    // GPU result
    float *h_cpu = (float *)malloc(sizeof(float) * ARRAY_SIZE);  // CPU reference
    float *h_atr = (float *)malloc(sizeof(float) * ARRAY_SIZE2); // A transposed
    malloc_fin = clock();
    malloc_duration = (float)(malloc_fin - malloc_start) / CLOCKS_PER_SEC;
    printf("\n%f seconds passed in mallocation\n", malloc_duration);
    printf("\n");

    /* ---------- host initialization ---------- */
    init_start = clock();
    srand((int)time(0));
    for (unsigned int i = 0; i < ARRAY_SIZE2; i++)
        h_a[i] = float(i);
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
        h_b[i] = float(i);
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
        h_c[i] = float(0);
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
        h_cpu[i] = float(0);

    // transpose A so the kernel's column access pattern is coalesced
    trans_start = clock();
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        for (unsigned int j = 0; j < ARRAY_SIZE; j++)
            h_atr[j * ARRAY_SIZE + i] = h_a[i * ARRAY_SIZE + j];
    }
    trans_fin = clock();
    trans_duration = (float)(trans_fin - trans_start) / CLOCKS_PER_SEC;
    printf("\n%f seconds passed in transpose..\n", trans_duration);

    init_fin = clock();
    init_duration = (float)(init_fin - init_start) / CLOCKS_PER_SEC;
    printf("\n%f seconds passed in initalizaton\n", init_duration);
    printf("\n");
    printf("******************End Pre-processing.**************\n");

    /* ---------- CPU reference sgemv ---------- */
    start = clock();
    simple_sgemv(h_a, h_b, h_cpu, ARRAY_SIZE);
    finish = clock();
    // BUG FIX: convert clock ticks to milliseconds; the old code printed raw
    // ticks (the CLOCKS_PER_SEC division was commented out) while the message
    // claimed milliseconds.
    duration = 1000.0f * (float)(finish - start) / CLOCKS_PER_SEC;
    printf("\n%f milliseconds passed in CPU_sgemv\n", duration);
    printf("\n");

    printf("Pre-processing in GPU...\n");

    /* ---------- device allocation ---------- */
    float *d_a;
    float *d_b;
    float *d_c;
    cudaEventCreate(&cuda_mallocstart);
    cudaEventCreate(&cuda_mallocfin);
    cudaEventRecord(cuda_mallocstart, 0);
    if (reportIfError(cudaMalloc((void **)&d_a, sizeof(float) * ARRAY_SIZE2), "cudaMalloc Matrix"))
        return 0;
    if (reportIfError(cudaMalloc((void **)&d_b, sizeof(float) * ARRAY_SIZE), "cudaMalloc Vector"))
        return 0;
    if (reportIfError(cudaMalloc((void **)&d_c, sizeof(float) * ARRAY_SIZE), "cudaMalloc result"))
        return 0;
    cudaDeviceSynchronize();
    cudaEventRecord(cuda_mallocfin, 0);
    cudaEventSynchronize(cuda_mallocfin);
    cudaEventElapsedTime(&cudamalloctime, cuda_mallocstart, cuda_mallocfin);
    printf("\n%f milliseconds passed in GPU malloc\n", cudamalloctime);

    /* ---------- host -> device copies ---------- */
    cudaEventCreate(&cuda_memcpystart);
    cudaEventCreate(&cuda_memcpyfin);
    cudaEventRecord(cuda_memcpystart, 0);
    // upload the transposed matrix so the kernel reads columns (coalesced)
    if (reportIfError(cudaMemcpy(d_a, h_atr, sizeof(float) * ARRAY_SIZE2, cudaMemcpyHostToDevice),
                      "cudaMemcpy matrix"))
        return 0;
    if (reportIfError(cudaMemcpy(d_b, h_b, sizeof(float) * ARRAY_SIZE, cudaMemcpyHostToDevice),
                      "cudaMemcpy vector"))
        return 0;
    // d_c must start zeroed because the kernel accumulates with +=
    if (reportIfError(cudaMemcpy(d_c, h_c, sizeof(float) * ARRAY_SIZE, cudaMemcpyHostToDevice),
                      "cudaMemcpy result"))
        return 0;
    cudaDeviceSynchronize();
    cudaEventRecord(cuda_memcpyfin, 0);
    cudaEventSynchronize(cuda_memcpyfin);
    cudaEventElapsedTime(&cudamemcpytime, cuda_memcpystart, cuda_memcpyfin);
    printf("\n%f milliseconds passed in cuda memory copy\n", cudamemcpytime);

    printf("*****************End Pre-processing in GPU********************\n");

    printf("\n*****************A transpose before the calculation********************\n");
    // The transpose already happened on the host above; the old in-kernel
    // transpose experiment that lived here was dead (fully commented-out) code.
    printf("\n*****************End of transpose********************\n");

    /* ---------- kernel launch ---------- */
    printf("\n\nRunning Kernel...\n\n");
    cudaEventCreate(&d_start);
    cudaEventCreate(&d_stop);
    cudaEventRecord(d_start, 0);
    // ceil-division: just enough 512-thread blocks to cover ARRAY_SIZE rows
    int nblocks = (ARRAY_SIZE + 511) / 512;
    CUParaSgemv<<<nblocks, 512>>>(d_a, d_b, d_c, ARRAY_SIZE);
    cudaDeviceSynchronize();
    cudaEventRecord(d_stop, 0);
    cudaEventSynchronize(d_stop);
    cudaEventElapsedTime(&cal_time, d_start, d_stop);
    printf("\n%f milliseconds passed in GPU_CUParaSgemv\n", cal_time);
    if (reportIfError(cudaGetLastError(), "GPU calculation"))
        return 0;

    printf("\n*********Copy Data to Host*********\n");

    /* ---------- device -> host copy ---------- */
    cudaEventCreate(&cuda_memcpyout_start);
    cudaEventCreate(&cuda_memcpyout_fin);
    cudaEventRecord(cuda_memcpyout_start, 0);
    cudaError_t cudaStatus = cudaMemcpy(h_c, d_c, sizeof(float) * ARRAY_SIZE, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(cuda_memcpyout_fin, 0);
    cudaEventSynchronize(cuda_memcpyout_fin);
    cudaEventElapsedTime(&cudamemcpyout, cuda_memcpyout_start, cuda_memcpyout_fin);
    printf("\n%f milliseconds passed in cuda memory copy out\n", cudamemcpyout);
    if (cudaStatus != cudaSuccess)
    {
        printf("\nCuda Error:%s\n", cudaGetErrorString(cudaStatus));
        system("pause\n");
        return 0;
    }

    /* ---------- cleanup ---------- */
    cudaEventDestroy(cuda_mallocstart);
    cudaEventDestroy(cuda_mallocfin);
    cudaEventDestroy(cuda_memcpystart);
    cudaEventDestroy(cuda_memcpyfin);
    cudaEventDestroy(d_start);
    cudaEventDestroy(d_stop);
    cudaEventDestroy(cuda_memcpyout_start);
    cudaEventDestroy(cuda_memcpyout_fin);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_atr);
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_cpu); // BUG FIX: h_cpu was previously leaked

    system("pause");
    return 0;
}
|
16f6dd489a150c61b7e293993db73699489551ad.hip | // !!! This is a file automatically generated by hipify!!!
// Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <hip/hip_runtime.h>
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 64
#define THREAD_BLOCK_WIDTH 16
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
// FILL HERE: define constant variable
// MatrixMul kernel
/**
* CUDA Kernel Device code
*
* Computes the matrix multiplication of A and B into C. The 3 matrices have the same
* number of elements WIDTH*WIDTH.
*/
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
// Tiled matrix multiply: C = A * B for WIDTH x WIDTH row-major matrices.
// Expected launch configuration: 16x16 thread blocks on a 4x4 grid
// (one thread per output element; asserted nowhere, so callers must match).
// `runtime` receives per-thread clock64() cycle counts, indexed by the
// thread's position INSIDE its block.
// NOTE(review): because `runtime` is indexed by the intra-block tid, all
// blocks write the same 256 slots and the last writer wins — confirm this
// sampling is intended before relying on the timing numbers.
__global__ void
MatrixMul(float* A, float* B, float* C, unsigned long long* runtime)
{
    // Global linear thread id -> (row, col) of the output element this thread owns.
    int g_tid = (blockIdx.y * blockDim.x * blockDim.y * gridDim.x) + (threadIdx.y * blockDim.x * gridDim.x) + ((blockIdx.x * blockDim.x) + threadIdx.x);
    int row = g_tid / WIDTH;
    int col = g_tid % WIDTH;

    unsigned long long start_time = clock64();

    // Linear id inside the block; with a 16x16 block this reduces to
    // row1 == threadIdx.y and col1 == threadIdx.x.
    int tid = threadIdx.x + blockDim.x * threadIdx.y;
    int row1 = tid / THREAD_BLOCK_WIDTH;
    int col1 = tid % THREAD_BLOCK_WIDTH;

    // One 16x16 tile of A and one of B staged in shared memory per iteration.
    __shared__ float sA[THREAD_BLOCK_WIDTH][THREAD_BLOCK_WIDTH];
    __shared__ float sB[THREAD_BLOCK_WIDTH][THREAD_BLOCK_WIDTH];

    float cl = 0.0f;
    for (int t = 0; t < (WIDTH / THREAD_BLOCK_WIDTH); t++)
    {
        // Load tile t: sA gets A[row][t*16 + tx], sB gets B[t*16 + ty][col].
        sA[row1][col1] = A[(row * WIDTH) + threadIdx.x + t * THREAD_BLOCK_WIDTH];
        sB[row1][col1] = B[(threadIdx.y * WIDTH) + col + t * THREAD_BLOCK_WIDTH * WIDTH];
        __syncthreads();    // both tiles fully loaded before anyone reads them

        for (int k = 0; k < THREAD_BLOCK_WIDTH; k++)
        {
            cl += sA[row1][k] * sB[k][col1];
        }
        // BUG FIX: this barrier was missing. Without it, fast threads start
        // iteration t+1 and overwrite sA/sB while slower threads are still
        // reading the current tiles — a shared-memory data race.
        __syncthreads();
    }

    C[g_tid] = cl;

    unsigned long long stop_time = clock64();
    runtime[tid] = (unsigned long long)(stop_time - start_time);
}
/**
* Host main routine
*/
/**
 * Host main routine: allocates and initialises the host matrices, computes a
 * CPU reference product (computeGold), runs the tiled MatrixMul kernel on the
 * device and verifies the GPU result against the reference.
 *
 * Returns 0 on completion; exits with EXIT_FAILURE on any allocation or
 * HIP runtime error.
 */
int
main(void)
{
    // Error code to check return values for HIP runtime calls
    hipError_t err = hipSuccess;

    // Matrix is WIDTH x WIDTH floats.
    int size = WIDTH * WIDTH * sizeof(float);
    printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);

    // Host input matrices A and B, GPU output C, and the CPU reference result.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    float *reference = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input matrices with known values.
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            h_A[i*WIDTH + j] = 0.01f;
            h_B[i*WIDTH + j] = 1.0f;
        }
    }
    memset(h_C, 0, size);
    memset(reference, 0, size);

    // Compute the matrix multiplication on the CPU for comparison.
    computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);

    // Allocate device matrices.
    float* d_A = NULL;
    err = hipMalloc((void**)&d_A, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float* d_B = NULL;
    err = hipMalloc((void**)&d_B, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float* d_C = NULL;
    err = hipMalloc((void**)&d_C, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input matrices A and B to the device.
    printf("Copy input data from the host memory to the CUDA device\n");
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Per-thread clock-cycle buffer. The kernel writes runtime[tid]
    // unconditionally, so this must exist in every build.
    // BUG FIX: d_runtime used to be declared and allocated only under
    // #ifdef TM while the kernel launch below always passed it, which made
    // non-TM builds fail to compile. Allocate it unconditionally; only the
    // host-side readback stays behind the TM flag.
    unsigned long long* d_runtime = NULL;
    int r_size = WIDTH * WIDTH * sizeof(unsigned long long);
    err = hipMalloc((void**)&d_runtime, r_size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device runtime buffer (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
#ifdef TM
    // Host-side copy of the timing samples (only needed when timing is on).
    unsigned long long* runtime = (unsigned long long*)malloc(r_size);
    memset(runtime, 0, r_size);
#endif

    // Launch configuration: 4x4 grid of 16x16 blocks = 16 blocks x 256 threads,
    // one thread per output element.
    int blocksPerGrid = 16;
    int threadsPerBlock = 256;
    dim3 grid(4, 4, 1);
    dim3 block(16, 16, 1);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    hipLaunchKernelGGL(MatrixMul, grid, block, 0, 0, d_A, d_B, d_C, d_runtime);

    // Launch-configuration errors surface here, not at the launch itself.
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    hipDeviceSynchronize();

    // Copy the device result matrix back to the host.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    hipDeviceSynchronize();

    // Verify the GPU result against the CPU reference within a tolerance.
    bool res = 1;
    for (int i = 0; i < WIDTH*WIDTH; i++)
    {
        float diff = fabs(reference[i] - h_C[i]);
        if (diff > 0.001f)
        {
            res = 0;
            break;
        }
    }
    printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");

#ifdef TM
    // Take the longest per-thread sample as the kernel execution time.
    hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    unsigned long long elapsed_time = 0;
    for (int i = 0; i < WIDTH*WIDTH; i++)
        if (elapsed_time < runtime[i])
            elapsed_time = runtime[i];
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif

    // Free device global memory.
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    hipFree(d_runtime);

    // Free host memory.
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
#ifdef TM
    free(runtime);
#endif
    return 0;
}
// CPU reference implementation of C = A * B.
// A is hA x wA, B is wA x wB, C is hA x wB (all row-major).
// Each element is accumulated in double precision before narrowing to float,
// so the result can serve as a "gold" value for GPU comparison.
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row) {
        const float* aRow = A + row * wA;     // current row of A
        float* cRow = C + row * wB;           // current row of C
        for (unsigned int colIdx = 0; colIdx < wB; ++colIdx) {
            double acc = 0.0;
            for (unsigned int t = 0; t < wA; ++t)
                acc += (double)aRow[t] * (double)B[t * wB + colIdx];
            cRow[colIdx] = (float)acc;
        }
    }
}
| 16f6dd489a150c61b7e293993db73699489551ad.cu | // Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <cuda_runtime.h>
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 64
#define THREAD_BLOCK_WIDTH 16
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
// FILL HERE: define constant variable
// MatrixMul kernel
/**
* CUDA Kernel Device code
*
* Computes the matrix multiplication of A and B into C. The 3 matrices have the same
* number of elements WIDTH*WIDTH.
*/
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
// Tiled matrix multiply: C = A * B for WIDTH x WIDTH row-major matrices.
// Expected launch configuration: 16x16 thread blocks on a 4x4 grid
// (one thread per output element; asserted nowhere, so callers must match).
// `runtime` receives per-thread clock64() cycle counts, indexed by the
// thread's position INSIDE its block.
// NOTE(review): because `runtime` is indexed by the intra-block tid, all
// blocks write the same 256 slots and the last writer wins — confirm this
// sampling is intended before relying on the timing numbers.
__global__ void
MatrixMul(float* A, float* B, float* C, unsigned long long* runtime)
{
    // Global linear thread id -> (row, col) of the output element this thread owns.
    int g_tid = (blockIdx.y * blockDim.x * blockDim.y * gridDim.x) + (threadIdx.y * blockDim.x * gridDim.x) + ((blockIdx.x * blockDim.x) + threadIdx.x);
    int row = g_tid / WIDTH;
    int col = g_tid % WIDTH;

    unsigned long long start_time = clock64();

    // Linear id inside the block; with a 16x16 block this reduces to
    // row1 == threadIdx.y and col1 == threadIdx.x.
    int tid = threadIdx.x + blockDim.x * threadIdx.y;
    int row1 = tid / THREAD_BLOCK_WIDTH;
    int col1 = tid % THREAD_BLOCK_WIDTH;

    // One 16x16 tile of A and one of B staged in shared memory per iteration.
    __shared__ float sA[THREAD_BLOCK_WIDTH][THREAD_BLOCK_WIDTH];
    __shared__ float sB[THREAD_BLOCK_WIDTH][THREAD_BLOCK_WIDTH];

    float cl = 0.0f;
    for (int t = 0; t < (WIDTH / THREAD_BLOCK_WIDTH); t++)
    {
        // Load tile t: sA gets A[row][t*16 + tx], sB gets B[t*16 + ty][col].
        sA[row1][col1] = A[(row * WIDTH) + threadIdx.x + t * THREAD_BLOCK_WIDTH];
        sB[row1][col1] = B[(threadIdx.y * WIDTH) + col + t * THREAD_BLOCK_WIDTH * WIDTH];
        __syncthreads();    // both tiles fully loaded before anyone reads them

        for (int k = 0; k < THREAD_BLOCK_WIDTH; k++)
        {
            cl += sA[row1][k] * sB[k][col1];
        }
        // BUG FIX: this barrier was missing. Without it, fast threads start
        // iteration t+1 and overwrite sA/sB while slower threads are still
        // reading the current tiles — a shared-memory data race.
        __syncthreads();
    }

    C[g_tid] = cl;

    unsigned long long stop_time = clock64();
    runtime[tid] = (unsigned long long)(stop_time - start_time);
}
/**
* Host main routine
*/
/**
 * Host main routine: allocates and initialises the host matrices, computes a
 * CPU reference product (computeGold), runs the tiled MatrixMul kernel on the
 * device and verifies the GPU result against the reference.
 *
 * Returns 0 on completion; exits with EXIT_FAILURE on any allocation or
 * CUDA runtime error.
 */
int
main(void)
{
    // Error code to check return values for CUDA runtime calls
    cudaError_t err = cudaSuccess;

    // Matrix is WIDTH x WIDTH floats.
    int size = WIDTH * WIDTH * sizeof(float);
    printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);

    // Host input matrices A and B, GPU output C, and the CPU reference result.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    float *reference = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input matrices with known values.
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            h_A[i*WIDTH + j] = 0.01f;
            h_B[i*WIDTH + j] = 1.0f;
        }
    }
    memset(h_C, 0, size);
    memset(reference, 0, size);

    // Compute the matrix multiplication on the CPU for comparison.
    computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);

    // Allocate device matrices.
    float* d_A = NULL;
    err = cudaMalloc((void**)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float* d_B = NULL;
    err = cudaMalloc((void**)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float* d_C = NULL;
    err = cudaMalloc((void**)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input matrices A and B to the device.
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Per-thread clock-cycle buffer. The kernel writes runtime[tid]
    // unconditionally, so this must exist in every build.
    // BUG FIX: d_runtime used to be declared and allocated only under
    // #ifdef TM while the kernel launch below always passed it, which made
    // non-TM builds fail to compile. Allocate it unconditionally; only the
    // host-side readback stays behind the TM flag.
    unsigned long long* d_runtime = NULL;
    int r_size = WIDTH * WIDTH * sizeof(unsigned long long);
    err = cudaMalloc((void**)&d_runtime, r_size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device runtime buffer (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
#ifdef TM
    // Host-side copy of the timing samples (only needed when timing is on).
    unsigned long long* runtime = (unsigned long long*)malloc(r_size);
    memset(runtime, 0, r_size);
#endif

    // Launch configuration: 4x4 grid of 16x16 blocks = 16 blocks x 256 threads,
    // one thread per output element.
    int blocksPerGrid = 16;
    int threadsPerBlock = 256;
    dim3 grid(4, 4, 1);
    dim3 block(16, 16, 1);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    MatrixMul<<<grid, block>>>(d_A, d_B, d_C, d_runtime);

    // Launch-configuration errors surface here, not at the launch itself.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // supported equivalent.
    cudaDeviceSynchronize();

    // Copy the device result matrix back to the host.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();

    // Verify the GPU result against the CPU reference within a tolerance.
    bool res = 1;
    for (int i = 0; i < WIDTH*WIDTH; i++)
    {
        float diff = fabs(reference[i] - h_C[i]);
        if (diff > 0.001f)
        {
            res = 0;
            break;
        }
    }
    printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");

#ifdef TM
    // Take the longest per-thread sample as the kernel execution time.
    cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    unsigned long long elapsed_time = 0;
    for (int i = 0; i < WIDTH*WIDTH; i++)
        if (elapsed_time < runtime[i])
            elapsed_time = runtime[i];
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif

    // Free device global memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFree(d_runtime);

    // Free host memory.
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
#ifdef TM
    free(runtime);
#endif
    return 0;
}
// CPU reference implementation of C = A * B (the "gold" result).
// Dimensions: A is hA x wA, B is wA x wB, C is hA x wB, all row-major.
// The dot product is accumulated in double precision, then narrowed to
// float, matching the original reference semantics exactly.
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int r = 0; r < hA; ++r) {
        unsigned int aBase = r * wA;    // start of row r in A
        unsigned int cBase = r * wB;    // start of row r in C
        for (unsigned int c = 0; c < wB; ++c) {
            double dot = 0.0;
            for (unsigned int k = 0; k < wA; ++k) {
                dot += (double)A[aBase + k] * (double)B[k * wB + c];
            }
            C[cBase + c] = (float)dot;
        }
    }
}
|
43f26b360edcbd1c00cad852a1334a4bdaeadbf4.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <pointercast.h>
#include <types/types.h>
#include <types/float16.h>
#include <op_boilerplate.h>
#include <loops/summarystatsreduce.h>
#include <helpers/shape.h>
#include <helpers/TAD.h>
#include <dll.h>
#include <Environment.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helpers/DebugHelper.h>
#include <specials_cuda.h>
using namespace simdOps;
namespace functions {
namespace summarystats {
// __global__ trampoline for summary-stats reductions: forwards its arguments to
// the templated dispatcher SummaryStatsReduce<X,Z>::transform.
// Note: xRank, zRank and postProcessOrNot are accepted for launch-signature
// compatibility but are NOT forwarded; biasCorrected is passed in the slot that
// transform names postProcessOrNot.
template <typename X, typename Z>
void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets);
}
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
/**
 * Block-level tree reduction over the per-thread SummaryStatsData partials in
 * shared memory (*sPartialsRef). On exit, sPartials[0] holds the combined
 * statistics of partials [0, numElements).
 *
 * @param sPartialsRef  pointer to the shared-memory partials array
 * @param tid           this thread's index within the block (threadIdx.x)
 * @param numElements   number of valid partials to combine
 * @param vextraParams  op extra parameters, cast to Z*
 *
 * Contains __syncthreads(), so it must be called by ALL threads of the block.
 */
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto extraParams = static_cast<Z*>(vextraParams);
SummaryStatsData<X> *sPartials = *sPartialsRef;
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
// blockDim.x is not a power of two: strip bits until only the highest
// remains (largest power of two <= blockDim.x)
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
// fold the "overhang" threads [floorPow2, blockDim.x) into the lower range
if (tid >= floorPow2) {
SummaryStatsData<X> prev = sPartials[tid - floorPow2];
SummaryStatsData<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
// standard halving tree reduction; the combined result lands in sPartials[0]
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<X> curr = sPartials[tid];
SummaryStatsData<X> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
/**
 * Device-side summary-statistics reduction.
 *
 * Two modes, selected at runtime from the shapes (by thread 0 of each block):
 *  - per-TAD mode (!resultScalar): each block reduces one or more TADs of the
 *    input and writes one output value per TAD into z;
 *  - scalar mode (resultScalar): the whole input is reduced into z[0]. With
 *    more than one block, each block publishes its partial to reductionBuffer
 *    and the last block to finish (decided by an atomic ticket counter kept at
 *    tc[16384] inside reductionBuffer) combines all partials.
 *
 * sPartials lives in dynamic shared memory (one SummaryStatsData<X> per
 * thread), so the launch must provide at least
 * blockDim.x * sizeof(SummaryStatsData<X>) bytes of shared memory.
 * allocationBuffer is unused. postProcessOrNot is forwarded to
 * OpType::getValue (the kernel entry point passes biasCorrected in this slot).
 *
 * Changes vs. previous revision: removed the unused local `numElements` and
 * fixed a redundant double assignment ("z[0] = z[0] = ...") in the
 * single-block scalar path.
 */
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo,
                                                void *vextraParams,
                                                void *vz, Nd4jLong *zShapeInfo,
                                                int *dimension, int dimensionLength,
                                                int postProcessOrNot,
                                                int *allocationBuffer, void *vreductionBuffer,
                                                Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    auto dx = static_cast<X*>(vx);
    auto z = static_cast<Z*>(vz);
    auto extraParams = static_cast<Z*>(vextraParams);
    auto reductionBuffer = static_cast<Z*>(vreductionBuffer);

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ volatile int resultScalar;
    __shared__ int xElementWiseStride;

    // shared memory space for storing intermediate results
    __shared__ SummaryStatsData<X> *sPartials;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem);
    }
    __syncthreads();

    // seed this thread's partial with the op's neutral starting value
    Z startingVal = startingValue(dx);
    SummaryStatsData<X> val;
    val.initWithValue(startingVal);
    val.n = 0;
    sPartials[threadIdx.x] = val;

    //length for the tad
    __shared__ volatile int xLength;
    __shared__ volatile int resultLength;

    SummaryStatsData<X> reduction;
    reduction.initWithValue(0.0);
    reduction.n = 0;

    // Thread 0 decides whether this is a whole-array (scalar) or per-TAD
    // reduction and resolves the input stride.
    if (threadIdx.x == 0) {
        if (zShapeInfo != nullptr)
            resultLength = shape::length(zShapeInfo);
        else resultLength = 1;

        if (dimensionLength == 1) {
            if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION))
                resultScalar = 1;
            else
                resultScalar = 0;
        }
        else
            resultScalar = 0;

        if (resultLength == 1)
            resultScalar = 1;

        auto xStride = shape::stride(xShapeInfo);
        auto xOrder = shape::order(xShapeInfo);

        if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) {
            xElementWiseStride = xStride[dimension[0]];
        }
        else {
            xElementWiseStride = shape::elementWiseStride(xShapeInfo);
        }

        xLength = shape::length(xShapeInfo);
    }
    __syncthreads();

    if (!resultScalar) {
        // ---- per-TAD reduction: one output element per TAD ----
        __shared__ int tadLength;
        __shared__ int tadEWS;
        __shared__ int numTads;
        if (threadIdx.x == 0) {
            tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
            tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
            numTads = shape::length(xShapeInfo) / tadLength;
        }
        __syncthreads();

        if (tadEWS == 0) {
            // no usable linear stride: resolve each offset through the TAD shape info
            for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
                auto tadOffsetForBlock = tadOffsets[r];

                val.initWithValue(startingVal);
                val.n = 0;
                sPartials[threadIdx.x] = val;

                for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);
                    SummaryStatsData<X> indexVal2;
                    indexVal2.initWithValue(dx[xOffset]);
                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
                }
                __syncthreads();
                aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);

                __syncthreads();
                if (threadIdx.x == 0) {
                    z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
                }
            }
        }
        else {
            // strided TAD: simple linear addressing
            for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
                auto tadOffsetForBlock = tadOffsets[i];

                val.initWithValue(startingVal);
                val.n = 0;
                sPartials[threadIdx.x] = val;

                for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
                    auto indexX = tadOffsetForBlock + x * tadEWS;
                    SummaryStatsData<X> indexVal2;
                    indexVal2.initWithValue(dx[indexX]);
                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
                }
                __syncthreads();
                aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);

                __syncthreads();
                if (threadIdx.x == 0) {
                    z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams);
                }
            }
        }
    }
    else if (resultScalar) {
        // ---- whole-array reduction to a single scalar ----
        __shared__ int n;
        if (threadIdx.x == 0) {
            xElementWiseStride = shape::elementWiseStride(xShapeInfo);
            n = shape::length(xShapeInfo);
        }
        __syncthreads();

        if (xElementWiseStride >= 1) {
            // grid-stride loop over a linearly strided buffer
            for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) {
                SummaryStatsData<X> indexVal2;
                indexVal2.initWithValue(dx[i * xElementWiseStride]);
                reduction = update(reduction, indexVal2, extraParams);
            }
        }
        else {
            // non-contiguous input: translate each linear index via the shape info
            for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) {
                auto offset = shape::getIndexOffset(i, xShapeInfo, n);
                SummaryStatsData<X> indexVal2;
                indexVal2.initWithValue(dx[offset]);
                reduction = update(reduction, indexVal2, extraParams);
            }
        }
        sPartials[threadIdx.x] = reduction;
        __syncthreads();
        aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams);
        __syncthreads();

        if (gridDim.x > 1) {
            // Multi-block finish: publish this block's partial, then the last
            // block to grab a ticket combines all published partials.
            __shared__ bool amLast;
            unsigned int *tc = (unsigned int *)reductionBuffer;
            tid = threadIdx.x;
            if (threadIdx.x == 0) {
                SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer;
                pBuffer[blockIdx.x] = sPartials[0];
            }
            __syncthreads();
            __threadfence();   // make the published partial visible to other blocks

            if (tid == 0) {
                unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
                amLast = (ticket == gridDim.x - 1);
            }
            __syncthreads();

            if (amLast) {
                tc[16384] = 0;   // reset the ticket counter for the next launch
                SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer;

                Z startingVal = startingValue(dx);

                SummaryStatsData<X> val;
                val.initWithValue(startingVal);
                val.n = 0;
                sPartials[threadIdx.x] = val;

                for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
                }

                __syncthreads();
                aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams);
                __syncthreads();

                if (tid == 0) {
                    z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
                }
            }
        }
        else {
            if (tid == 0) {
                unsigned int *tc = (unsigned *)reductionBuffer;
                tc[16384] = 0;
                // BUG FIX: was the redundant "z[0] = z[0] = ..." double assignment
                z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
            }
        }
    }
};
// Runtime dispatcher: expands via DISPATCH_BY_OPNUM_TT into a switch over
// SUMMARY_STATS_OPS, invoking the matching templated transform<OpType> with
// the forwarded arguments.
template <typename X, typename Y>
_CUDA_D void SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS);
};
/**
 * Host-side launcher for a full-array (scalar) summary-stats reduction.
 * Passes dimension == nullptr (with dimensionLength 1) so the kernel takes its
 * scalar path, then blocks on checkErrorCode so the scalar result is ready
 * when this returns.
 * launchDims: x = grid size, y = block size, z = dynamic shared memory bytes.
 * hxShapeInfo/hzShapeInfo are host-side shape buffers used only for rank.
 */
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X*>(vx);
auto extraParams = static_cast<Z*>(vextraParams);
auto z = reinterpret_cast<Z*>(vz);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D16 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
nullptr,
1,
1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
// this is blocking method since method should return scalar
nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed");
}
/**
 * Host-side launcher for a summary-stats reduction over the whole input
 * (dimension == nullptr, same kernel arguments as the scalar variant), but
 * non-blocking: only DEBUG_KERNEL is issued after the launch instead of a
 * synchronizing error check.
 * launchDims: x = grid size, y = block size, z = dynamic shared memory bytes.
 */
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F17 opNum:[%i]\n", opNum);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
nullptr,
1,
1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
/**
 * Host-side launcher for a dimensional (per-TAD) summary-stats reduction:
 * forwards dimension/dimensionLength so the kernel reduces each TAD along the
 * given dimensions to one output element of z. Non-blocking (DEBUG_KERNEL only).
 * launchDims: x = grid size, y = block size, z = dynamic shared memory bytes.
 */
template<typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D18 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
dimension,
dimensionLength,
1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES);
}
} | 43f26b360edcbd1c00cad852a1334a4bdaeadbf4.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <pointercast.h>
#include <types/types.h>
#include <types/float16.h>
#include <op_boilerplate.h>
#include <loops/summarystatsreduce.h>
#include <helpers/shape.h>
#include <helpers/TAD.h>
#include <dll.h>
#include <Environment.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/DebugHelper.h>
#include <specials_cuda.h>
using namespace simdOps;
namespace functions {
namespace summarystats {
// __global__ trampoline for summary-stats reductions: forwards its arguments to
// the templated dispatcher SummaryStatsReduce<X,Z>::transform.
// Note: xRank, zRank and postProcessOrNot are accepted for launch-signature
// compatibility but are NOT forwarded; biasCorrected is passed in the slot that
// transform names postProcessOrNot.
template <typename X, typename Z>
void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets);
}
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
/**
 * Block-level tree reduction over the per-thread SummaryStatsData partials in
 * shared memory (*sPartialsRef). On exit, sPartials[0] holds the combined
 * statistics of partials [0, numElements).
 *
 * @param sPartialsRef  pointer to the shared-memory partials array
 * @param tid           this thread's index within the block (threadIdx.x)
 * @param numElements   number of valid partials to combine
 * @param vextraParams  op extra parameters, cast to Z*
 *
 * Contains __syncthreads(), so it must be called by ALL threads of the block.
 */
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto extraParams = static_cast<Z*>(vextraParams);
SummaryStatsData<X> *sPartials = *sPartialsRef;
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
// blockDim.x is not a power of two: strip bits until only the highest
// remains (largest power of two <= blockDim.x)
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
// fold the "overhang" threads [floorPow2, blockDim.x) into the lower range
if (tid >= floorPow2) {
SummaryStatsData<X> prev = sPartials[tid - floorPow2];
SummaryStatsData<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
// standard halving tree reduction; the combined result lands in sPartials[0]
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<X> curr = sPartials[tid];
SummaryStatsData<X> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo,
        void *vextraParams,
        void *vz, Nd4jLong *zShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationBuffer, void *vreductionBuffer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {

    // Summary-statistics reduction: either over the whole input (scalar
    // result) or per tensor-along-dimension (TAD) slice. Partial values are
    // accumulated per thread, combined in dynamic shared memory, and (in the
    // multi-block scalar case) merged across blocks through reductionBuffer.
    auto dx = static_cast<X*>(vx);
    auto z = static_cast<Z*>(vz);
    auto extraParams = static_cast<Z*>(vextraParams);
    auto reductionBuffer = static_cast<Z*>(vreductionBuffer);

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // block-wide flags, written by thread 0 below
    __shared__ volatile int resultScalar;
    __shared__ int xElementWiseStride;

    // shared memory space for storing intermediate results
    __shared__ SummaryStatsData<X> *sPartials;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem);
    }
    __syncthreads();

    Z startingVal = startingValue(dx);

    SummaryStatsData<X> val;
    val.initWithValue(startingVal);
    val.n = 0;
    sPartials[threadIdx.x] = val;

    //length for the tad
    __shared__ volatile int xLength;
    __shared__ volatile int resultLength;

    SummaryStatsData<X> reduction;
    reduction.initWithValue(0.0);
    reduction.n = 0;

    // thread 0 decides scalar-vs-TAD mode and picks the stride to walk with
    if (threadIdx.x == 0) {
        if (zShapeInfo != nullptr)
            resultLength = shape::length(zShapeInfo);
        else resultLength = 1;

        if (dimensionLength == 1) {
            if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION))
                resultScalar = 1;
            else
                resultScalar = 0;
        }
        else
            resultScalar = 0;

        if (resultLength == 1)
            resultScalar = 1;

        auto xStride = shape::stride(xShapeInfo);

        if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) {
            xElementWiseStride = xStride[dimension[0]];
        }
        else {
            xElementWiseStride = shape::elementWiseStride(xShapeInfo);
        }

        xLength = shape::length(xShapeInfo);
    }
    __syncthreads();

    if (!resultScalar) {
        // TAD mode: blocks pick up TADs round-robin, one reduction per TAD
        __shared__ int tadLength;
        __shared__ int tadEWS;
        __shared__ int numTads;

        if (threadIdx.x == 0) {
            tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
            tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
            numTads = shape::length(xShapeInfo) / tadLength;
        }
        __syncthreads();

        if (tadEWS == 0) {
            // no usable element-wise stride: resolve offsets through shape info
            for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
                auto tadOffsetForBlock = tadOffsets[r];

                val.initWithValue(startingVal);
                val.n = 0;
                sPartials[threadIdx.x] = val;

                for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);

                    SummaryStatsData<X> indexVal2;
                    indexVal2.initWithValue(dx[xOffset]);

                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
                }
                __syncthreads();
                aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);

                __syncthreads();
                if (threadIdx.x == 0) {
                    z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
                }
            }
        }
        else {
            // dense TAD: plain strided walk
            for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
                auto tadOffsetForBlock = tadOffsets[i];

                val.initWithValue(startingVal);
                val.n = 0;
                sPartials[threadIdx.x] = val;

                for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
                    auto indexX = tadOffsetForBlock + x * tadEWS;

                    SummaryStatsData<X> indexVal2;
                    indexVal2.initWithValue(dx[indexX]);
                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
                }
                __syncthreads();
                aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);

                __syncthreads();
                if (threadIdx.x == 0) {
                    z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams);
                }
            }
        }
    }
    else if (resultScalar) {
        // scalar mode: grid-stride accumulation, then block reduce
        __shared__ int n;
        if (threadIdx.x == 0) {
            xElementWiseStride = shape::elementWiseStride(xShapeInfo);
            n = shape::length(xShapeInfo);
        }
        __syncthreads();

        if (xElementWiseStride >= 1) {
            for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) {
                SummaryStatsData<X> indexVal2;
                indexVal2.initWithValue(dx[i * xElementWiseStride]);
                reduction = update(reduction, indexVal2, extraParams);
            }
        }
        else {
            for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) {
                auto offset = shape::getIndexOffset(i, xShapeInfo, n);
                SummaryStatsData<X> indexVal2;
                indexVal2.initWithValue(dx[offset]);
                reduction = update(reduction, indexVal2, extraParams);
            }
        }
        sPartials[threadIdx.x] = reduction;
        __syncthreads();

        aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams);
        __syncthreads();

        if (gridDim.x > 1) {
            // multi-block: publish per-block partials to global memory; the
            // last block to arrive (ticket via atomicInc on tc[16384]) merges
            __shared__ bool amLast;
            unsigned int *tc = (unsigned int *)reductionBuffer;
            tid = threadIdx.x;
            if (threadIdx.x == 0) {
                SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer;
                pBuffer[blockIdx.x] = sPartials[0];
            }
            __syncthreads();
            __threadfence();

            if (tid == 0) {
                unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
                amLast = (ticket == gridDim.x - 1);
            }
            __syncthreads();

            if (amLast) {
                tc[16384] = 0;
                SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer;

                Z startingVal = startingValue(dx);

                SummaryStatsData<X> val;
                val.initWithValue(startingVal);
                val.n = 0;
                sPartials[threadIdx.x] = val;

                for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
                }

                __syncthreads();
                aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams);
                __syncthreads();

                if (tid == 0) {
                    z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
                }
            }
        }
        else {
            if (tid == 0) {
                unsigned int *tc = (unsigned *)reductionBuffer;
                tc[16384] = 0;
                // fixed: was a redundant double assignment `z[0] = z[0] = ...`
                z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
            }
        }
    }
};
// Runtime opNum dispatcher: DISPATCH_BY_OPNUM_TT expands to a switch over
// SUMMARY_STATS_OPS and forwards every argument to the templated
// transform<OpType> overload above.
template <typename X, typename Y>
_CUDA_D void SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS);
};
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) {
    // Host-side launcher for the scalar (whole-array) summary-stats reduction.
    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("D16 opNum:[%i]\n", opNum);

    auto xData        = static_cast<X*>(vx);
    auto zData        = reinterpret_cast<Z*>(vz);
    auto extraArgs    = static_cast<Z*>(vextraParams);
    auto reductionPtr = reinterpret_cast<Z*>(reductionBuffer);

    summaryStatsReduceT<X,Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
            opNum,
            xData,
            xShapeInfo, shape::rank(hxShapeInfo),
            extraArgs,
            zData,
            zShapeInfo, shape::rank(hzShapeInfo),
            nullptr,
            1,
            1, biasCorrected, nullptr, reductionPtr, tadShapeInfo, tadOffsets);

    // this is blocking method since method should return scalar
    nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed");
}
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) {
    // Host-side launcher for the non-dimensional summary-stats reduction;
    // asynchronous, error checking deferred to DEBUG_KERNEL.
    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F17 opNum:[%i]\n", opNum);

    auto xData        = static_cast<X*>(vx);
    auto zData        = static_cast<Z*>(vz);
    auto extraArgs    = static_cast<Z*>(vextraParams);
    auto reductionPtr = reinterpret_cast<Z*>(reductionBuffer);

    summaryStatsReduceT<X,Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
            opNum,
            xData,
            xShapeInfo, shape::rank(hxShapeInfo),
            extraArgs,
            zData,
            zShapeInfo, shape::rank(hzShapeInfo),
            nullptr,
            1,
            1, biasCorrected, nullptr, reductionPtr, tadShapeInfo, tadOffsets);

    DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) {
    // Host-side launcher for the dimensional (per-TAD) summary-stats reduction.
    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("D18 opNum:[%i]\n", opNum);

    auto xData        = static_cast<X*>(vx);
    auto zData        = static_cast<Z*>(vz);
    auto extraArgs    = static_cast<Z*>(vextraParams);
    auto reductionPtr = reinterpret_cast<Z*>(reductionBuffer);

    summaryStatsReduceT<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
            opNum,
            xData,
            xShapeInfo, shape::rank(hxShapeInfo),
            extraArgs,
            zData,
            zShapeInfo, shape::rank(hzShapeInfo),
            dimension,
            dimensionLength,
            1, biasCorrected, nullptr, reductionPtr, tadShapeInfo, tadOffsets);

    DEBUG_KERNEL(stream, opNum);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES);
}
} |
e5b951b2e63ac723c01ecca53d93afdab1afb2f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include "shared.hpp"
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
// Launch geometry: 1D convolutions use THREADS-wide blocks, 2D uses
// THREADS_X x THREADS_Y tiles, 3D uses CUBE_X x CUBE_Y x CUBE_Z bricks.
static const int THREADS = 256;
static const int THREADS_X = 16;
static const int THREADS_Y = 16;
static const int CUBE_X = 8;
static const int CUBE_Y = 8;
static const int CUBE_Z = 4;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const int MAX_CONV1_FILTER_LEN = 129;
static const int MAX_CONV2_FILTER_LEN = 17;
static const int MAX_CONV3_FILTER_LEN = 5;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
// (raw bytes; the kernels reinterpret it as the accumulator type aT)
__constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)];
// 1D convolution along dim 0. nBBS0/nBBS1 are blocks-per-batch counts along
// grid x/y, so each block serves one (b1,b2,b3) batch slice; o1..o3 / s1..s3
// are extra output/signal offsets used when the filter itself is batched.
// Filter coefficients must already be staged in cFilter constant memory.
// Dynamic shared memory required: (blockDim.x + 2*(fLen-1)) * sizeof(T).
template<typename T, typename aT, bool expand>
__global__
void convolve1(Param<T> out, CParam<T> signal, int fLen,
               int nBBS0, int nBBS1,
               int o1, int o2, int o3,
               int s1, int s2, int s3)
{
    SharedMemory<T> shared;
    T * shrdMem = shared.getPointer();

    const int padding = fLen-1;
    const int shrdLen = blockDim.x + 2*padding;
    const unsigned b1 = blockIdx.x/nBBS0; /* [0 {1} 2 3] */
    const unsigned b3 = blockIdx.y/nBBS1; /* [0 1 2 {3}] */
    const unsigned b2 = blockIdx.y-nBBS1*b3;/* [0 1 {2} 3] */

    T *dst = (T *)out.ptr + (b1 * out.strides[1] + /* activated with batched input signal */
                             o1 * out.strides[1] + /* activated with batched input filter */
                             b2 * out.strides[2] + /* activated with batched input signal */
                             o2 * out.strides[2] + /* activated with batched input filter */
                             b3 * out.strides[3] + /* activated with batched input signal */
                             o3 * out.strides[3]); /* activated with batched input filter */

    const T *src = (const T *)signal.ptr + (b1 * signal.strides[1] + /* activated with batched input signal */
                                            s1 * signal.strides[1] + /* activated with batched input filter */
                                            b2 * signal.strides[2] + /* activated with batched input signal */
                                            s2 * signal.strides[2] + /* activated with batched input filter */
                                            b3 * signal.strides[3] + /* activated with batched input signal */
                                            s3 * signal.strides[3]); /* activated with batched input filter */

    const aT *impulse = (const aT *)cFilter;

    int gx = blockDim.x*(blockIdx.x-b1*nBBS0);
    int s0 = signal.strides[0];
    int d0 = signal.dims[0];
    // cooperatively stage the signal stretch plus halo; out-of-range taps are zero
    for (int i=threadIdx.x; i<shrdLen; i+=blockDim.x) {
        int idx= gx-padding + i;
        shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0);
    }
    __syncthreads();
    gx += threadIdx.x;

    if (gx<out.dims[0]) {
        // non-expanded mode re-centers the window by half the filter length
        int lx = threadIdx.x + padding + (expand ? 0 : fLen>>1);
        aT accum = scalar<aT>(0);
        for(int f=0; f<fLen; ++f) {
            accum = accum + (shrdMem[lx-f]*impulse[f]);
        }
        dst[gx] = (T)accum;
    }
}
// 2D convolution over dims 0/1 with compile-time filter extents (fLen0, fLen1)
// so the shared tile can be statically sized. Assumes the block is launched as
// THREADS_X x THREADS_Y (see prepareKernelArgs). nBBS0/nBBS1 fold the batch
// dims 2/3 into the grid; o2/o3 and s2/s3 are filter-batch offsets.
// Filter coefficients must already be staged in cFilter constant memory.
template<typename T, typename aT, bool expand, int fLen0, int fLen1>
__global__
void convolve2(Param<T> out, CParam<T> signal, int nBBS0,
               int nBBS1, int o2, int o3, int s2, int s3)
{
    const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1));
    __shared__ T shrdMem[C_SIZE];

    const int radius0 = fLen0-1;
    const int radius1 = fLen1-1;
    const int padding0 = 2*radius0;
    const int padding1 = 2*radius1;
    const int shrdLen0 = THREADS_X + padding0;
    const int shrdLen1 = THREADS_Y + padding1;

    unsigned b0 = blockIdx.x/nBBS0;
    unsigned b1 = blockIdx.y/nBBS1;

    T *dst = (T *)out.ptr + (b0 * out.strides[2] + /* activated with batched input signal */
                             o2 * out.strides[2] + /* activated with batched input filter */
                             b1 * out.strides[3] + /* activated with batched input signal */
                             o3 * out.strides[3]); /* activated with batched input filter */

    const T *src = (const T *)signal.ptr + (b0 * signal.strides[2] + /* activated with batched input signal */
                                            s2 * signal.strides[2] + /* activated with batched input filter */
                                            b1 * signal.strides[3] + /* activated with batched input signal */
                                            s3 * signal.strides[3]); /* activated with batched input filter */

    const aT *impulse = (const aT *)cFilter;

    int lx = threadIdx.x;
    int ly = threadIdx.y;
    int gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx;
    int gy = THREADS_Y * (blockIdx.y-b1*nBBS1) + ly;

    int s0 = signal.strides[0];
    int s1 = signal.strides[1];
    int d0 = signal.dims[0];
    int d1 = signal.dims[1];
    // below loops are traditional loops, they only run multiple
    // times filter length is more than launch size
#pragma unroll
    for (int b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) {
        int j = gy2-radius1;
        bool is_j = j>=0 && j<d1;
        // move row_set THREADS_Y along coloumns
#pragma unroll
        for (int a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) {
            int i = gx2-radius0;
            bool is_i = i>=0 && i<d0;
            shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0));
        }
    }
    __syncthreads();

    if (gx<out.dims[0] && gy<out.dims[1]) {
        // non-expanded mode re-centers the window by half the filter extents
        int ci = lx + radius0 + (expand ? 0 : fLen0>>1);
        int cj = ly + radius1 + (expand ? 0 : fLen1>>1);

        aT accum = scalar<aT>(0);
#pragma unroll
        for(int fj=0; fj<fLen1; ++fj) {
#pragma unroll
            for(int fi=0; fi<fLen0; ++fi) {
                aT f_val = impulse[fj*fLen0+fi];
                T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)];
                accum = accum + s_val*f_val;
            }
        }
        dst[gy*out.strides[1]+gx] = (T)accum;
    }
}
// Flatten a 3-D coordinate (i, j, k) into a linear offset given the
// strides of the j and k axes.
__inline__ __device__
int index(int i, int j, int k, int jstride, int kstride)
{
    return i + jstride * j + kstride * k;
}
// 3D convolution over dims 0..2. Batch dim 3 is folded into grid x via nBBS;
// o3/s3 are filter-batch offsets. The shared brick carries a halo of
// (fLen-1) on every side; the per-CUBE_* staging loops assume the block is
// launched as CUBE_X x CUBE_Y x CUBE_Z (see prepareKernelArgs).
// Filter coefficients must already be staged in cFilter constant memory.
template<typename T, typename aT, bool expand>
__global__
void convolve3(Param<T> out, CParam<T> signal, int fLen0, int fLen1,
               int fLen2, int nBBS, int o3, int s3)
{
    SharedMemory<T> shared;
    T * shrdMem = shared.getPointer();

    int radius0 = fLen0-1;
    int radius1 = fLen1-1;
    int radius2 = fLen2-1;
    int shrdLen0 = blockDim.x + 2*radius0;
    int shrdLen1 = blockDim.y + 2*radius1;
    int shrdLen2 = blockDim.z + 2*radius2;
    int skStride = shrdLen0 * shrdLen1;
    int fStride = fLen0 * fLen1;
    unsigned b2 = blockIdx.x/nBBS;

    T *dst = (T *)out.ptr + (b2 * out.strides[3] + /* activated with batched input signal */
                             o3 * out.strides[3]); /* activated with batched input filter */

    const T *src = (const T *)signal.ptr + (b2 * signal.strides[3] + /* activated with batched input signal */
                                            s3 * signal.strides[3]); /* activated with batched input filter */

    const aT *impulse = (const aT *)cFilter;

    int lx = threadIdx.x;
    int ly = threadIdx.y;
    int lz = threadIdx.z;
    int gx = blockDim.x * (blockIdx.x-b2*nBBS) + lx;
    int gy = blockDim.y * blockIdx.y + ly;
    int gz = blockDim.z * blockIdx.z + lz;

    int s0 = signal.strides[0];
    int s1 = signal.strides[1];
    int s2 = signal.strides[2];
    int d0 = signal.dims[0];
    int d1 = signal.dims[1];
    int d2 = signal.dims[2];
    // cooperatively stage the signal brick plus halo; out-of-range taps are zero
#pragma unroll
    for (int c=lz, gz2=gz; c<shrdLen2; c+=CUBE_Z, gz2+=CUBE_Z) {
        int k = gz2-radius2;
        bool is_k = k>=0 && k<d2;
#pragma unroll
        for (int b=ly, gy2=gy; b<shrdLen1; b+=CUBE_Y, gy2+=CUBE_Y) {
            int j = gy2-radius1;
            bool is_j = j>=0 && j<d1;
#pragma unroll
            for (int a=lx, gx2=gx; a<shrdLen0; a+=CUBE_X, gx2+=CUBE_X) {
                int i = gx2-radius0;
                bool is_i = i>=0 && i<d0;
                shrdMem[c*skStride+b*shrdLen0+a] =
                    (is_i && is_j && is_k ? src[i*s0+j*s1+k*s2] : scalar<T>(0));
            }
        }
    }
    __syncthreads();

    if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) {
        // non-expanded mode re-centers the window by half the filter extents
        int ci = lx + radius0 + (expand ? 0 : fLen0>>1);
        int cj = ly + radius1 + (expand ? 0 : fLen1>>1);
        int ck = lz + radius2 + (expand ? 0 : fLen2>>1);

        aT accum = scalar<aT>(0);
#pragma unroll
        for(int fk=0; fk<fLen2; ++fk) {
#pragma unroll
            for(int fj=0; fj<fLen1; ++fj) {
#pragma unroll
                for(int fi=0; fi<fLen0; ++fi) {
                    aT f_val = impulse[index(fi, fj, fk, fLen0, fStride)];
                    T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)];
                    accum = accum + s_val*f_val;
                }
            }
        }
        dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum;
    }
}
// Launch parameters shared by the convolution drivers below; filled in by
// prepareKernelArgs and consumed by the convolve_*d launchers.
struct conv_kparam_t {
    dim3 mBlocks;           // grid dimensions
    dim3 mThreads;          // block dimensions
    size_t mSharedSize;     // dynamic shared memory bytes (1D/3D kernels)
    int mBlk_x;             // blocks along dim 0 for one batch slice
    int mBlk_y;             // blocks along dim 1 (or batch) for one slice
    bool outHasNoOffset;    // output is not offset per filter batch
    bool inHasNoOffset;     // signal is not offset per filter batch
    bool launchMoreBlocks;  // fold signal batching into the grid
    int o[3];               // per-launch output batch offsets
    int s[3];               // per-launch signal batch offsets
};
// Derive launch geometry for the 1D/2D/3D convolution kernels.
// When the caller batches over the filter (launchMoreBlocks), the higher dims
// are launched one slice at a time, so the grid only folds in signal batches.
template<typename T>
void prepareKernelArgs(conv_kparam_t &params, dim_t oDims[], dim_t fDims[], int baseDim)
{
    int batchDims[4] = {1, 1, 1, 1};
    for(int i=baseDim; i<4; ++i) {
        batchDims[i] = (params.launchMoreBlocks ? 1 : oDims[i]);
    }

    if (baseDim==1) {
        params.mThreads = dim3(THREADS, 1);
        params.mBlk_x = divup(oDims[0], params.mThreads.x);
        params.mBlk_y = batchDims[2];
        params.mBlocks = dim3(params.mBlk_x * batchDims[1], params.mBlk_y * batchDims[3]);
        // dynamic shared memory: one blockDim.x tile plus a (fLen-1) halo on each side
        params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * sizeof(T);
    } else if (baseDim==2) {
        params.mThreads = dim3(THREADS_X, THREADS_Y);
        params.mBlk_x = divup(oDims[0], params.mThreads.x);
        params.mBlk_y = divup(oDims[1], params.mThreads.y);
        params.mBlocks = dim3(params.mBlk_x * batchDims[2], params.mBlk_y * batchDims[3]);
        // convolve2 uses a statically sized __shared__ tile; set the member
        // to 0 instead of leaving it uninitialized (was a latent indeterminate read)
        params.mSharedSize = 0;
    } else if (baseDim==3) {
        params.mThreads = dim3(CUBE_X, CUBE_Y, CUBE_Z);
        params.mBlk_x = divup(oDims[0], params.mThreads.x);
        params.mBlk_y = divup(oDims[1], params.mThreads.y);
        int blk_z = divup(oDims[2], params.mThreads.z);
        params.mBlocks = dim3(params.mBlk_x * batchDims[3], params.mBlk_y, blk_z);
        // dynamic shared memory: brick plus halo on all six faces
        params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) *
                             (params.mThreads.y+2*(fDims[1]-1)) *
                             (params.mThreads.z+2*(fDims[2]-1)) * sizeof(T);
    }
}
// Launch the statically-sized 2D convolution kernel. convolve2 declares a
// static __shared__ tile, so no dynamic shared memory is requested.
// NOTE: the previous hipify output mangled this launch into
// `(convolve2<...f1hipLaunchKernelGGL((>))`, which does not compile; restored
// to the canonical hipLaunchKernelGGL/HIP_KERNEL_NAME form.
template<typename T, typename aT, bool expand, int f0, int f1>
void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig)
{
    hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve2<T, aT, expand, f0, f1>),
                       dim3(p.mBlocks), dim3(p.mThreads), 0, 0,
                       out, sig, p.mBlk_x, p.mBlk_y, p.o[1], p.o[2], p.s[1], p.s[2]);
    POST_LAUNCH_CHECK();
}
// Map the runtime filter height f1 onto a compile-time template argument.
// Heights above 5 are only reachable through the square-filter path below.
template<typename T, typename aT, bool expand, int f0>
void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f1)
{
    if (f1 == 1)      conv2Helper<T, aT, expand, f0, 1>(p, out, sig);
    else if (f1 == 2) conv2Helper<T, aT, expand, f0, 2>(p, out, sig);
    else if (f1 == 3) conv2Helper<T, aT, expand, f0, 3>(p, out, sig);
    else if (f1 == 4) conv2Helper<T, aT, expand, f0, 4>(p, out, sig);
    else if (f1 == 5) conv2Helper<T, aT, expand, f0, 5>(p, out, sig);
    else              CUDA_NOT_SUPPORTED();
}
// Map the runtime filter width f0 onto a compile-time template argument.
// Rectangular filters are supported up to 5x5; larger filters must be square
// (6x6 .. 17x17, bounded by MAX_CONV2_FILTER_LEN).
template<typename T, typename aT, bool expand>
void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f0, int f1)
{
    switch(f0) {
        case 1: conv2Helper<T, aT, expand, 1>(p, out, sig, f1); break;
        case 2: conv2Helper<T, aT, expand, 2>(p, out, sig, f1); break;
        case 3: conv2Helper<T, aT, expand, 3>(p, out, sig, f1); break;
        case 4: conv2Helper<T, aT, expand, 4>(p, out, sig, f1); break;
        case 5: conv2Helper<T, aT, expand, 5>(p, out, sig, f1); break;
        default: {
                     // beyond 5, only square filters are instantiated
                     if (f0==f1) {
                         switch(f1) {
                             case  6: conv2Helper<T, aT, expand,  6,  6>(p, out, sig); break;
                             case  7: conv2Helper<T, aT, expand,  7,  7>(p, out, sig); break;
                             case  8: conv2Helper<T, aT, expand,  8,  8>(p, out, sig); break;
                             case  9: conv2Helper<T, aT, expand,  9,  9>(p, out, sig); break;
                             case 10: conv2Helper<T, aT, expand, 10, 10>(p, out, sig); break;
                             case 11: conv2Helper<T, aT, expand, 11, 11>(p, out, sig); break;
                             case 12: conv2Helper<T, aT, expand, 12, 12>(p, out, sig); break;
                             case 13: conv2Helper<T, aT, expand, 13, 13>(p, out, sig); break;
                             case 14: conv2Helper<T, aT, expand, 14, 14>(p, out, sig); break;
                             case 15: conv2Helper<T, aT, expand, 15, 15>(p, out, sig); break;
                             case 16: conv2Helper<T, aT, expand, 16, 16>(p, out, sig); break;
                             case 17: conv2Helper<T, aT, expand, 17, 17>(p, out, sig); break;
                             default: CUDA_NOT_SUPPORTED();
                         }
                     } else
                         CUDA_NOT_SUPPORTED();
                 } break;
    }
}
// 1D batched convolution driver: for every filter batch (dims 1..3) stage
// that filter's taps into constant memory and launch convolve1 on the
// matching output/signal slice. The o/s offsets stay 0 when the respective
// side is not batched per filter.
// NOTE: the previous hipify output mangled the kernel launch into
// `(convolve1<...expandhipLaunchKernelGGL((>))`; restored to the canonical
// hipLaunchKernelGGL/HIP_KERNEL_NAME form.
template<typename T, typename aT, bool expand>
void convolve_1d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt)
{
    prepareKernelArgs<T>(p, out.dims, filt.dims, 1);

    int filterLen = filt.dims[0];

    for (int b3=0; b3<filt.dims[3]; ++b3) {
        int f3Off = b3 * filt.strides[3];

        for (int b2=0; b2<filt.dims[2]; ++b2) {
            int f2Off = b2 * filt.strides[2];

            for (int b1=0; b1<filt.dims[1]; ++b1) {
                int f1Off = b1 * filt.strides[1];

                // FIXME: if the filter array is strided, direct copy of symbols
                // might cause issues
                CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter,
                                             filt.ptr+(f1Off+f2Off+f3Off),
                                             filterLen*sizeof(aT),
                                             0, hipMemcpyDeviceToDevice));

                p.o[0] = (p.outHasNoOffset ? 0 : b1);
                p.o[1] = (p.outHasNoOffset ? 0 : b2);
                p.o[2] = (p.outHasNoOffset ? 0 : b3);
                p.s[0] = (p.inHasNoOffset ? 0 : b1);
                p.s[1] = (p.inHasNoOffset ? 0 : b2);
                p.s[2] = (p.inHasNoOffset ? 0 : b3);

                hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve1<T, aT, expand>),
                                   dim3(p.mBlocks), dim3(p.mThreads), p.mSharedSize, 0,
                                   out, sig, filt.dims[0], p.mBlk_x, p.mBlk_y,
                                   p.o[0], p.o[1], p.o[2], p.s[0], p.s[1], p.s[2]);

                POST_LAUNCH_CHECK();
            }
        }
    }
}
// 2D batched convolution driver: for every filter batch (dims 2/3) stage that
// filter's taps into constant memory, then dispatch through conv2Helper to
// the compile-time-sized kernel. Offsets stay 0 when the respective side is
// not batched per filter.
template<typename T, typename aT, bool expand>
void convolve_2d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt)
{
    prepareKernelArgs<T>(p, out.dims, filt.dims, 2);

    int filterLen = filt.dims[0] * filt.dims[1];

    for (int b3=0; b3<filt.dims[3]; ++b3) {
        int f3Off = b3 * filt.strides[3];

        for (int b2=0; b2<filt.dims[2]; ++b2) {
            int f2Off = b2 * filt.strides[2];

            // FIXME: if the filter array is strided, direct copy of symbols
            // might cause issues
            CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter,
                                         filt.ptr+(f2Off+f3Off),
                                         filterLen*sizeof(aT),
                                         0, hipMemcpyDeviceToDevice));

            p.o[1] = (p.outHasNoOffset ? 0 : b2);
            p.o[2] = (p.outHasNoOffset ? 0 : b3);
            p.s[1] = (p.inHasNoOffset ? 0 : b2);
            p.s[2] = (p.inHasNoOffset ? 0 : b3);

            conv2Helper<T, aT, expand>(p, out, sig, filt.dims[0], filt.dims[1]);
        }
    }
}
// 3D batched convolution driver: for every filter batch (dim 3) stage that
// filter's taps into constant memory and launch convolve3 on the matching
// slice.
// NOTE: the previous hipify output mangled the kernel launch into
// `(convolve3<...expandhipLaunchKernelGGL((>))`; restored to the canonical
// hipLaunchKernelGGL/HIP_KERNEL_NAME form.
template<typename T, typename aT, bool expand>
void convolve_3d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt)
{
    prepareKernelArgs<T>(p, out.dims, filt.dims, 3);

    int filterLen = filt.dims[0] * filt.dims[1] * filt.dims[2];

    for (int b3=0; b3<filt.dims[3]; ++b3) {
        int f3Off = b3 * filt.strides[3];

        // FIXME: if the filter array is strided, direct copy of symbols
        // might cause issues
        CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter,
                                     filt.ptr+f3Off,
                                     filterLen*sizeof(aT),
                                     0, hipMemcpyDeviceToDevice));

        p.o[2] = (p.outHasNoOffset ? 0 : b3);
        p.s[2] = (p.inHasNoOffset ? 0 : b3);

        hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve3<T, aT, expand>),
                           dim3(p.mBlocks), dim3(p.mThreads), p.mSharedSize, 0,
                           out, sig, filt.dims[0], filt.dims[1], filt.dims[2],
                           p.mBlk_x, p.o[2], p.s[2]);

        POST_LAUNCH_CHECK();
    }
}
// Entry point for N-dimensional convolution: validates that the filter fits
// the constant-memory staging area, derives batching flags from `kind`, and
// forwards to the dimension-specific driver.
template<typename T, typename aT, int baseDim, bool expand>
void convolve_nd(Param<T> out, CParam<T> signal, CParam<aT> filt, ConvolveBatchKind kind)
{
    const int maxLen2 = kernel::MAX_CONV2_FILTER_LEN;
    const int maxLen3 = kernel::MAX_CONV3_FILTER_LEN;

    bool filterFits = true;
    if (baseDim == 1)
        filterFits = (filt.dims[0] <= kernel::MAX_CONV1_FILTER_LEN);
    else if (baseDim == 2)
        filterFits = ((filt.dims[0]*filt.dims[1]) <= (maxLen2 * maxLen2));
    else if (baseDim == 3)
        filterFits = ((filt.dims[0]*filt.dims[1]*filt.dims[2]) <= (maxLen3 * maxLen3 * maxLen3));

    if (!filterFits) { CUDA_NOT_SUPPORTED(); }

    conv_kparam_t kp;
    kp.o[0] = kp.o[1] = kp.o[2] = 0;
    kp.s[0] = kp.s[1] = kp.s[2] = 0;
    // batch over the grid only when the signal side carries the batching
    kp.launchMoreBlocks = kind==CONVOLVE_BATCH_SAME || kind==CONVOLVE_BATCH_KERNEL;
    kp.outHasNoOffset   = kind==CONVOLVE_BATCH_SIGNAL || kind==CONVOLVE_BATCH_NONE;
    kp.inHasNoOffset    = kind!=CONVOLVE_BATCH_SAME;

    if (baseDim == 1)
        convolve_1d<T, aT, expand>(kp, out, signal, filt);
    else if (baseDim == 2)
        convolve_2d<T, aT, expand>(kp, out, signal, filt);
    else if (baseDim == 3)
        convolve_3d<T, aT, expand>(kp, out, signal, filt);

    POST_LAUNCH_CHECK();
}
// Explicit instantiations for every supported (signal type, accumulator type)
// pair: complex and floating types accumulate in themselves, integral and
// char types accumulate in float.
#define INSTANTIATE(T, aT)  \
	template void convolve_nd<T, aT, 1, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
	template void convolve_nd<T, aT, 1, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
	template void convolve_nd<T, aT, 2, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
	template void convolve_nd<T, aT, 2, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
	template void convolve_nd<T, aT, 3, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
	template void convolve_nd<T, aT, 3, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\

INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat ,  cfloat)
INSTANTIATE(double ,  double)
INSTANTIATE(float  ,   float)
INSTANTIATE(uint   ,   float)
INSTANTIATE(int    ,   float)
INSTANTIATE(uchar  ,   float)
INSTANTIATE(char   ,   float)
}
}
| e5b951b2e63ac723c01ecca53d93afdab1afb2f2.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include "shared.hpp"
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
// Launch geometry: 1D convolutions use THREADS-wide blocks, 2D uses
// THREADS_X x THREADS_Y tiles, 3D uses CUBE_X x CUBE_Y x CUBE_Z bricks.
static const int THREADS = 256;
static const int THREADS_X = 16;
static const int THREADS_Y = 16;
static const int CUBE_X = 8;
static const int CUBE_Y = 8;
static const int CUBE_Z = 4;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const int MAX_CONV1_FILTER_LEN = 129;
static const int MAX_CONV2_FILTER_LEN = 17;
static const int MAX_CONV3_FILTER_LEN = 5;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
// (raw bytes; the kernels reinterpret it as the accumulator type aT)
__constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)];
// 1D convolution along dim 0. nBBS0/nBBS1 are blocks-per-batch counts along
// grid x/y, so each block serves one (b1,b2,b3) batch slice; o1..o3 / s1..s3
// are extra output/signal offsets used when the filter itself is batched.
// Filter coefficients must already be staged in cFilter constant memory.
// Dynamic shared memory required: (blockDim.x + 2*(fLen-1)) * sizeof(T).
template<typename T, typename aT, bool expand>
__global__
void convolve1(Param<T> out, CParam<T> signal, int fLen,
               int nBBS0, int nBBS1,
               int o1, int o2, int o3,
               int s1, int s2, int s3)
{
    SharedMemory<T> shared;
    T * shrdMem = shared.getPointer();

    const int padding = fLen-1;
    const int shrdLen = blockDim.x + 2*padding;
    const unsigned b1 = blockIdx.x/nBBS0; /* [0 {1} 2 3] */
    const unsigned b3 = blockIdx.y/nBBS1; /* [0 1 2 {3}] */
    const unsigned b2 = blockIdx.y-nBBS1*b3;/* [0 1 {2} 3] */

    T *dst = (T *)out.ptr + (b1 * out.strides[1] + /* activated with batched input signal */
                             o1 * out.strides[1] + /* activated with batched input filter */
                             b2 * out.strides[2] + /* activated with batched input signal */
                             o2 * out.strides[2] + /* activated with batched input filter */
                             b3 * out.strides[3] + /* activated with batched input signal */
                             o3 * out.strides[3]); /* activated with batched input filter */

    const T *src = (const T *)signal.ptr + (b1 * signal.strides[1] + /* activated with batched input signal */
                                            s1 * signal.strides[1] + /* activated with batched input filter */
                                            b2 * signal.strides[2] + /* activated with batched input signal */
                                            s2 * signal.strides[2] + /* activated with batched input filter */
                                            b3 * signal.strides[3] + /* activated with batched input signal */
                                            s3 * signal.strides[3]); /* activated with batched input filter */

    const aT *impulse = (const aT *)cFilter;

    int gx = blockDim.x*(blockIdx.x-b1*nBBS0);
    int s0 = signal.strides[0];
    int d0 = signal.dims[0];
    // cooperatively stage the signal stretch plus halo; out-of-range taps are zero
    for (int i=threadIdx.x; i<shrdLen; i+=blockDim.x) {
        int idx= gx-padding + i;
        shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0);
    }
    __syncthreads();
    gx += threadIdx.x;

    if (gx<out.dims[0]) {
        // non-expanded mode re-centers the window by half the filter length
        int lx = threadIdx.x + padding + (expand ? 0 : fLen>>1);
        aT accum = scalar<aT>(0);
        for(int f=0; f<fLen; ++f) {
            accum = accum + (shrdMem[lx-f]*impulse[f]);
        }
        dst[gx] = (T)accum;
    }
}
// 2D convolution over dims 0/1 with compile-time filter extents (fLen0, fLen1)
// so the shared tile can be statically sized. Assumes the block is launched as
// THREADS_X x THREADS_Y (see prepareKernelArgs). nBBS0/nBBS1 fold the batch
// dims 2/3 into the grid; o2/o3 and s2/s3 are filter-batch offsets.
// Filter coefficients must already be staged in cFilter constant memory.
template<typename T, typename aT, bool expand, int fLen0, int fLen1>
__global__
void convolve2(Param<T> out, CParam<T> signal, int nBBS0,
               int nBBS1, int o2, int o3, int s2, int s3)
{
    const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1));
    __shared__ T shrdMem[C_SIZE];

    const int radius0 = fLen0-1;
    const int radius1 = fLen1-1;
    const int padding0 = 2*radius0;
    const int padding1 = 2*radius1;
    const int shrdLen0 = THREADS_X + padding0;
    const int shrdLen1 = THREADS_Y + padding1;

    unsigned b0 = blockIdx.x/nBBS0;
    unsigned b1 = blockIdx.y/nBBS1;

    T *dst = (T *)out.ptr + (b0 * out.strides[2] + /* activated with batched input signal */
                             o2 * out.strides[2] + /* activated with batched input filter */
                             b1 * out.strides[3] + /* activated with batched input signal */
                             o3 * out.strides[3]); /* activated with batched input filter */

    const T *src = (const T *)signal.ptr + (b0 * signal.strides[2] + /* activated with batched input signal */
                                            s2 * signal.strides[2] + /* activated with batched input filter */
                                            b1 * signal.strides[3] + /* activated with batched input signal */
                                            s3 * signal.strides[3]); /* activated with batched input filter */

    const aT *impulse = (const aT *)cFilter;

    int lx = threadIdx.x;
    int ly = threadIdx.y;
    int gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx;
    int gy = THREADS_Y * (blockIdx.y-b1*nBBS1) + ly;

    int s0 = signal.strides[0];
    int s1 = signal.strides[1];
    int d0 = signal.dims[0];
    int d1 = signal.dims[1];
    // below loops are traditional loops, they only run multiple
    // times filter length is more than launch size
#pragma unroll
    for (int b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) {
        int j = gy2-radius1;
        bool is_j = j>=0 && j<d1;
        // move row_set THREADS_Y along coloumns
#pragma unroll
        for (int a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) {
            int i = gx2-radius0;
            bool is_i = i>=0 && i<d0;
            shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0));
        }
    }
    __syncthreads();

    if (gx<out.dims[0] && gy<out.dims[1]) {
        // non-expanded mode re-centers the window by half the filter extents
        int ci = lx + radius0 + (expand ? 0 : fLen0>>1);
        int cj = ly + radius1 + (expand ? 0 : fLen1>>1);

        aT accum = scalar<aT>(0);
#pragma unroll
        for(int fj=0; fj<fLen1; ++fj) {
#pragma unroll
            for(int fi=0; fi<fLen0; ++fi) {
                aT f_val = impulse[fj*fLen0+fi];
                T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)];
                accum = accum + s_val*f_val;
            }
        }
        dst[gy*out.strides[1]+gx] = (T)accum;
    }
}
// Flatten a 3-D coordinate (i, j, k) into a linear offset given the
// strides of the j and k axes.
__inline__ __device__
int index(int i, int j, int k, int jstride, int kstride)
{
    return i + jstride * j + kstride * k;
}
// 3D convolution over dims 0..2. Batch dim 3 is folded into grid x via nBBS;
// o3/s3 are filter-batch offsets. The shared brick carries a halo of
// (fLen-1) on every side; the per-CUBE_* staging loops assume the block is
// launched as CUBE_X x CUBE_Y x CUBE_Z (see prepareKernelArgs).
// Filter coefficients must already be staged in cFilter constant memory.
template<typename T, typename aT, bool expand>
__global__
void convolve3(Param<T> out, CParam<T> signal, int fLen0, int fLen1,
               int fLen2, int nBBS, int o3, int s3)
{
    SharedMemory<T> shared;
    T * shrdMem = shared.getPointer();

    int radius0 = fLen0-1;
    int radius1 = fLen1-1;
    int radius2 = fLen2-1;
    int shrdLen0 = blockDim.x + 2*radius0;
    int shrdLen1 = blockDim.y + 2*radius1;
    int shrdLen2 = blockDim.z + 2*radius2;
    int skStride = shrdLen0 * shrdLen1;
    int fStride = fLen0 * fLen1;
    unsigned b2 = blockIdx.x/nBBS;

    T *dst = (T *)out.ptr + (b2 * out.strides[3] + /* activated with batched input signal */
                             o3 * out.strides[3]); /* activated with batched input filter */

    const T *src = (const T *)signal.ptr + (b2 * signal.strides[3] + /* activated with batched input signal */
                                            s3 * signal.strides[3]); /* activated with batched input filter */

    const aT *impulse = (const aT *)cFilter;

    int lx = threadIdx.x;
    int ly = threadIdx.y;
    int lz = threadIdx.z;
    int gx = blockDim.x * (blockIdx.x-b2*nBBS) + lx;
    int gy = blockDim.y * blockIdx.y + ly;
    int gz = blockDim.z * blockIdx.z + lz;

    int s0 = signal.strides[0];
    int s1 = signal.strides[1];
    int s2 = signal.strides[2];
    int d0 = signal.dims[0];
    int d1 = signal.dims[1];
    int d2 = signal.dims[2];
    // cooperatively stage the signal brick plus halo; out-of-range taps are zero
#pragma unroll
    for (int c=lz, gz2=gz; c<shrdLen2; c+=CUBE_Z, gz2+=CUBE_Z) {
        int k = gz2-radius2;
        bool is_k = k>=0 && k<d2;
#pragma unroll
        for (int b=ly, gy2=gy; b<shrdLen1; b+=CUBE_Y, gy2+=CUBE_Y) {
            int j = gy2-radius1;
            bool is_j = j>=0 && j<d1;
#pragma unroll
            for (int a=lx, gx2=gx; a<shrdLen0; a+=CUBE_X, gx2+=CUBE_X) {
                int i = gx2-radius0;
                bool is_i = i>=0 && i<d0;
                shrdMem[c*skStride+b*shrdLen0+a] =
                    (is_i && is_j && is_k ? src[i*s0+j*s1+k*s2] : scalar<T>(0));
            }
        }
    }
    __syncthreads();

    if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) {
        // non-expanded mode re-centers the window by half the filter extents
        int ci = lx + radius0 + (expand ? 0 : fLen0>>1);
        int cj = ly + radius1 + (expand ? 0 : fLen1>>1);
        int ck = lz + radius2 + (expand ? 0 : fLen2>>1);

        aT accum = scalar<aT>(0);
#pragma unroll
        for(int fk=0; fk<fLen2; ++fk) {
#pragma unroll
            for(int fj=0; fj<fLen1; ++fj) {
#pragma unroll
                for(int fi=0; fi<fLen0; ++fi) {
                    aT f_val = impulse[index(fi, fj, fk, fLen0, fStride)];
                    T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)];
                    accum = accum + s_val*f_val;
                }
            }
        }
        dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum;
    }
}
// Launch configuration shared by the convolution host wrappers; filled in by
// prepareKernelArgs() and consumed by convolve_1d/2d/3d.
struct conv_kparam_t {
    dim3 mBlocks;          // grid dimensions
    dim3 mThreads;         // block dimensions
    size_t mSharedSize;    // dynamic shared-memory bytes (used by 1D/3D kernels)
    int mBlk_x;            // blocks along x per batch slice
    int mBlk_y;            // blocks along y per batch slice
    bool outHasNoOffset;   // output is not offset per filter-batch iteration
    bool inHasNoOffset;    // input signal is not offset per filter-batch iteration
    bool launchMoreBlocks; // fold batching into the grid instead of host loops
    int o[3];              // per-iteration output batch offsets (dims 1..3)
    int s[3];              // per-iteration signal batch offsets (dims 1..3)
};
// Compute grid/block dimensions and the dynamic shared-memory size for the
// 1/2/3-D convolution kernels. When launchMoreBlocks is set, the batch
// dimensions beyond baseDim are folded into the grid; otherwise the host
// wrappers iterate over them and batchDims stays at the output extents.
template<typename T>
void prepareKernelArgs(conv_kparam_t &params, dim_t oDims[], dim_t fDims[], int baseDim)
{
    int batchDims[4] = {1, 1, 1, 1};
    for(int i=baseDim; i<4; ++i) {
        batchDims[i] = (params.launchMoreBlocks ? 1 : oDims[i]);
    }
    // BUGFIX(robustness): mSharedSize was left uninitialized on the 2D path.
    // The 2D kernel requests no dynamic shared memory, so default it to zero.
    params.mSharedSize = 0;
    if (baseDim==1) {
        params.mThreads = dim3(THREADS, 1);
        params.mBlk_x = divup(oDims[0], params.mThreads.x);
        params.mBlk_y = batchDims[2];
        params.mBlocks = dim3(params.mBlk_x * batchDims[1], params.mBlk_y * batchDims[3]);
        // Block extent plus a full (fLen-1) halo on each side.
        params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * sizeof(T);
    } else if (baseDim==2) {
        params.mThreads = dim3(THREADS_X, THREADS_Y);
        params.mBlk_x = divup(oDims[0], params.mThreads.x);
        params.mBlk_y = divup(oDims[1], params.mThreads.y);
        params.mBlocks = dim3(params.mBlk_x * batchDims[2], params.mBlk_y * batchDims[3]);
    } else if (baseDim==3) {
        params.mThreads = dim3(CUBE_X, CUBE_Y, CUBE_Z);
        params.mBlk_x = divup(oDims[0], params.mThreads.x);
        params.mBlk_y = divup(oDims[1], params.mThreads.y);
        int blk_z = divup(oDims[2], params.mThreads.z);
        params.mBlocks = dim3(params.mBlk_x * batchDims[3], params.mBlk_y, blk_z);
        // Padded 3-D tile: block extent plus a full (fLen-1) halo per side, per axis.
        params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) *
                             (params.mThreads.y+2*(fDims[1]-1)) *
                             (params.mThreads.z+2*(fDims[2]-1)) * sizeof(T);
    }
}
// Launch the fully specialized 2D convolution kernel — both filter dimensions
// are compile-time constants so the kernel's inner loops can unroll. No
// dynamic shared-memory size is passed (the 2D kernel requests none).
template<typename T, typename aT, bool expand, int f0, int f1>
void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig)
{
    (convolve2<T, aT, expand, f0, f1>)
        <<<p.mBlocks, p.mThreads>>>(out, sig, p.mBlk_x, p.mBlk_y, p.o[1], p.o[2], p.s[1], p.s[2]);
    POST_LAUNCH_CHECK();
}
// Map the runtime second filter dimension f1 (1..5) onto a compile-time
// template argument once f0 has already been fixed. Larger f1 values are
// only reachable through the square-filter fallback in the runtime-f0
// overload and are rejected here.
template<typename T, typename aT, bool expand, int f0>
void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f1)
{
    switch(f1) {
        case  1: conv2Helper<T, aT, expand, f0,  1>(p, out, sig); break;
        case  2: conv2Helper<T, aT, expand, f0,  2>(p, out, sig); break;
        case  3: conv2Helper<T, aT, expand, f0,  3>(p, out, sig); break;
        case  4: conv2Helper<T, aT, expand, f0,  4>(p, out, sig); break;
        case  5: conv2Helper<T, aT, expand, f0,  5>(p, out, sig); break;
        default: CUDA_NOT_SUPPORTED();
    }
}
// Dispatch runtime filter dimensions to template instantiations:
// rectangular filters are supported up to 5x5 (any f0 in 1..5 combined with
// f1 in 1..5); beyond that only square filters from 6x6 to 17x17 are
// instantiated — everything else raises CUDA_NOT_SUPPORTED.
template<typename T, typename aT, bool expand>
void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f0, int f1)
{
    switch(f0) {
        case  1: conv2Helper<T, aT, expand,  1>(p, out, sig, f1); break;
        case  2: conv2Helper<T, aT, expand,  2>(p, out, sig, f1); break;
        case  3: conv2Helper<T, aT, expand,  3>(p, out, sig, f1); break;
        case  4: conv2Helper<T, aT, expand,  4>(p, out, sig, f1); break;
        case  5: conv2Helper<T, aT, expand,  5>(p, out, sig, f1); break;
        default: {
                     // Square-filter fallback for larger kernels.
                     if (f0==f1) {
                         switch(f1) {
                             case  6: conv2Helper<T, aT, expand,  6,  6>(p, out, sig); break;
                             case  7: conv2Helper<T, aT, expand,  7,  7>(p, out, sig); break;
                             case  8: conv2Helper<T, aT, expand,  8,  8>(p, out, sig); break;
                             case  9: conv2Helper<T, aT, expand,  9,  9>(p, out, sig); break;
                             case 10: conv2Helper<T, aT, expand, 10, 10>(p, out, sig); break;
                             case 11: conv2Helper<T, aT, expand, 11, 11>(p, out, sig); break;
                             case 12: conv2Helper<T, aT, expand, 12, 12>(p, out, sig); break;
                             case 13: conv2Helper<T, aT, expand, 13, 13>(p, out, sig); break;
                             case 14: conv2Helper<T, aT, expand, 14, 14>(p, out, sig); break;
                             case 15: conv2Helper<T, aT, expand, 15, 15>(p, out, sig); break;
                             case 16: conv2Helper<T, aT, expand, 16, 16>(p, out, sig); break;
                             case 17: conv2Helper<T, aT, expand, 17, 17>(p, out, sig); break;
                             default: CUDA_NOT_SUPPORTED();
                         }
                     } else
                         CUDA_NOT_SUPPORTED();
                 } break;
    }
}
// Host wrapper for 1D convolution. Iterates over the filter's batch
// dimensions (dims 1..3), copying each filter slice into the constant-memory
// buffer (cFilter) before every launch, and selecting matching output/signal
// batch offsets — zero when the respective side is not batched per filter.
template<typename T, typename aT, bool expand>
void convolve_1d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt)
{
    prepareKernelArgs<T>(p, out.dims, filt.dims, 1);
    int filterLen = filt.dims[0];
    for (int b3=0; b3<filt.dims[3]; ++b3) {
        int f3Off = b3 * filt.strides[3];
        for (int b2=0; b2<filt.dims[2]; ++b2) {
            int f2Off = b2 * filt.strides[2];
            for (int b1=0; b1<filt.dims[1]; ++b1) {
                int f1Off = b1 * filt.strides[1];
                // FIXME: if the filter array is strided, direct copy of symbols
                // might cause issues
                CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter,
                            filt.ptr+(f1Off+f2Off+f3Off),
                            filterLen*sizeof(aT),
                            0, cudaMemcpyDeviceToDevice));
                p.o[0] = (p.outHasNoOffset ? 0 : b1);
                p.o[1] = (p.outHasNoOffset ? 0 : b2);
                p.o[2] = (p.outHasNoOffset ? 0 : b3);
                p.s[0] = (p.inHasNoOffset ? 0 : b1);
                p.s[1] = (p.inHasNoOffset ? 0 : b2);
                p.s[2] = (p.inHasNoOffset ? 0 : b3);
                // Dynamic shared memory sized by prepareKernelArgs (tile + halo).
                (convolve1<T, aT, expand>)
                    <<<p.mBlocks, p.mThreads, p.mSharedSize>>>
                    (out, sig, filt.dims[0], p.mBlk_x, p.mBlk_y,
                     p.o[0], p.o[1], p.o[2], p.s[0], p.s[1], p.s[2]);
                POST_LAUNCH_CHECK();
            }
        }
    }
}
// Host wrapper for 2D convolution: copies each f0*f1 filter slab into
// constant memory per batch iteration (dims 2..3; the first two filter dims
// are the kernel itself), then dispatches on the runtime filter size.
template<typename T, typename aT, bool expand>
void convolve_2d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt)
{
    prepareKernelArgs<T>(p, out.dims, filt.dims, 2);
    int filterLen = filt.dims[0] * filt.dims[1];
    for (int b3=0; b3<filt.dims[3]; ++b3) {
        int f3Off = b3 * filt.strides[3];
        for (int b2=0; b2<filt.dims[2]; ++b2) {
            int f2Off = b2 * filt.strides[2];
            // FIXME: if the filter array is strided, direct copy of symbols
            // might cause issues
            CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter,
                        filt.ptr+(f2Off+f3Off),
                        filterLen*sizeof(aT),
                        0, cudaMemcpyDeviceToDevice));
            p.o[1] = (p.outHasNoOffset ? 0 : b2);
            p.o[2] = (p.outHasNoOffset ? 0 : b3);
            p.s[1] = (p.inHasNoOffset ? 0 : b2);
            p.s[2] = (p.inHasNoOffset ? 0 : b3);
            conv2Helper<T, aT, expand>(p, out, sig, filt.dims[0], filt.dims[1]);
        }
    }
}
// Host wrapper for 3D convolution: the filter volume (dims 0..2) goes into
// constant memory; only dim 3 remains as a filter batch to iterate over.
template<typename T, typename aT, bool expand>
void convolve_3d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt)
{
    prepareKernelArgs<T>(p, out.dims, filt.dims, 3);
    int filterLen = filt.dims[0] * filt.dims[1] * filt.dims[2];
    for (int b3=0; b3<filt.dims[3]; ++b3) {
        int f3Off = b3 * filt.strides[3];
        // FIXME: if the filter array is strided, direct copy of symbols
        // might cause issues
        CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter,
                    filt.ptr+f3Off,
                    filterLen*sizeof(aT),
                    0, cudaMemcpyDeviceToDevice));
        p.o[2] = (p.outHasNoOffset ? 0 : b3);
        p.s[2] = (p.inHasNoOffset ? 0 : b3);
        (convolve3<T, aT, expand>)
            <<<p.mBlocks, p.mThreads, p.mSharedSize>>>
            (out, sig, filt.dims[0], filt.dims[1], filt.dims[2], p.mBlk_x, p.o[2], p.s[2]);
        POST_LAUNCH_CHECK();
    }
}
// Entry point for N-dimensional convolution. Rejects filters that exceed the
// constant-memory buffer, derives the batching strategy from `kind`, and
// dispatches to the dimension-specific host wrapper.
template<typename T, typename aT, int baseDim, bool expand>
void convolve_nd(Param<T> out, CParam<T> signal, CParam<aT> filt, ConvolveBatchKind kind)
{
    const int maxLen2 = kernel::MAX_CONV2_FILTER_LEN;
    const int maxLen3 = kernel::MAX_CONV3_FILTER_LEN;
    // The filter must fit in the cFilter constant-memory buffer.
    bool filterFits = true;
    if (baseDim == 1) {
        filterFits = (filt.dims[0] <= kernel::MAX_CONV1_FILTER_LEN);
    } else if (baseDim == 2) {
        filterFits = ((filt.dims[0]*filt.dims[1]) <= (maxLen2 * maxLen2));
    } else if (baseDim == 3) {
        filterFits = ((filt.dims[0]*filt.dims[1]*filt.dims[2]) <= (maxLen3 * maxLen3 * maxLen3));
    }
    if (!filterFits) { CUDA_NOT_SUPPORTED(); }
    conv_kparam_t param;
    for (int i = 0; i < 3; ++i) {
        param.o[i] = 0;
        param.s[i] = 0;
    }
    // SAME/KERNEL batching folds the batch into the launch grid; the offset
    // flags tell the wrappers which side advances per filter iteration.
    param.launchMoreBlocks = (kind==CONVOLVE_BATCH_SAME || kind==CONVOLVE_BATCH_KERNEL);
    param.outHasNoOffset   = (kind==CONVOLVE_BATCH_SIGNAL || kind==CONVOLVE_BATCH_NONE);
    param.inHasNoOffset    = (kind!=CONVOLVE_BATCH_SAME);
    if (baseDim == 1)      convolve_1d<T, aT, expand>(param, out, signal, filt);
    else if (baseDim == 2) convolve_2d<T, aT, expand>(param, out, signal, filt);
    else if (baseDim == 3) convolve_3d<T, aT, expand>(param, out, signal, filt);
    POST_LAUNCH_CHECK();
}
// Explicit instantiations: 1/2/3-D, expand and same-size variants for every
// supported (signal, accumulator) type pair.
// BUGFIX: the macro's last line carried a stray trailing '\', which pulled the
// following INSTANTIATE(cdouble, cdouble) line into the macro body — that
// instantiation was silently consumed and every expansion emitted an
// unexpandable self-reference.
#define INSTANTIATE(T, aT)  \
    template void convolve_nd<T, aT, 1, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
    template void convolve_nd<T, aT, 1, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
    template void convolve_nd<T, aT, 2, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
    template void convolve_nd<T, aT, 2, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
    template void convolve_nd<T, aT, 3, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\
    template void convolve_nd<T, aT, 3, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);
INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat ,  cfloat)
INSTANTIATE(double ,  double)
INSTANTIATE(float  ,   float)
INSTANTIATE(uint   ,   float)
INSTANTIATE(int    ,   float)
INSTANTIATE(uchar  ,   float)
INSTANTIATE(char   ,   float)
}
}
|
40621bac5a7759ff06b8352cbd1dd9d0334e8561.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>  /* strcmp — layer-name matching in forward/backward */
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
//#define PRUNE_UPDATE
//#define PRUNE
#define TBLOCK_SIZE 128
// qsort comparator producing DESCENDING order for int-typed elements.
// BUGFIX: the previous `*(int *)b - *(int *)a` overflows (undefined
// behavior) when the operands have opposite signs; compare explicitly.
// NOTE(review): the disabled pruning code sorts a float array with this
// comparator — interpreting float bits as int only orders correctly for
// non-negative values; confirm before re-enabling that path.
inline int compare(const void*a, const void*b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (y > x) - (y < x);
}
// Elementwise mask application: d_weights[i] *= d_prune_index[i]
// (mask values are 0.f / 1.f, so pruned weights become zero).
__global__ void prune_weights_kernel( float* d_weights, const float* __restrict__ d_prune_index, const int N )
{
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    if( tidx < N ) d_weights[tidx] *= __ldg(d_prune_index+tidx);
}
// Sign-binarization: one thread per element, mapping x to +1 when x >= 0
// and to -1 otherwise.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < n) {
        binary[idx] = (x[idx] >= 0) ? 1 : -1;
    }
}
// Host launcher: binarize n floats of x into `binary` (+1/-1 by sign).
void binarize_gpu(float *x, int n, float *binary)
{
    hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary);
    check_error(hipPeekAtLastError());
}
// For each spatial position s (one thread each): compute the mean absolute
// value across the n slices at that position, then write +/-mean according
// to each element's sign.
// NOTE(review): `abs` on a float relies on the device C++ overload; if it
// ever resolved to the int version the value would be truncated — prefer
// fabsf. Confirm which overload this build selects.
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
    int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (s >= size) return;
    int i = 0;
    float mean = 0;
    for(i = 0; i < n; ++i){
        mean += abs(input[i*size + s]);
    }
    mean = mean / n;
    for(i = 0; i < n; ++i){
        // Note: strictly positive test here (> 0), unlike binarize_kernel (>= 0).
        binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
    }
}
// Host launcher: one thread per spatial position (size total).
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
    hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary);
    check_error(hipPeekAtLastError());
}
// XNOR-style weight binarization: one thread per filter f computes the mean
// absolute weight over its `size` elements and writes +/-mean by sign.
// NOTE(review): same `abs`-on-float concern as binarize_input_kernel.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    int i = 0;
    float mean = 0;
    for(i = 0; i < size; ++i){
        mean += abs(weights[f*size + i]);
    }
    mean = mean / size;
    for(i = 0; i < size; ++i){
        binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
        //binary[f*size + i] = weights[f*size + i];
    }
}
// Host launcher: one thread per filter (n total).
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary);
    check_error(hipPeekAtLastError());
}
//code by lh
//kernel for PRUNE_UPDATE
// Block-wide reduction over the weights (expects blockDim.x == 128, cf.
// TBLOCK_SIZE): atomically accumulates each block's partial of mean(|w|)
// into *weights_mean_gpu and of sum(w^2) into *weights_sq_gpu, and resets
// the prune mask to 1 for every weight. The caller later converts the
// squared sum into a standard deviation on the host.
// NOTE(review): uses legacy mask-less __shfl_down — fine for HIP wavefronts
// and pre-Volta CUDA, but should become __shfl_down_sync on Volta+.
__global__ void mean_sq_gpu(const float * __restrict__ weights_gpu, float *weights_mean_gpu,float *weights_sq_gpu,const int N,float *d_prune_index)
{
  const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
  __shared__ float SMEM_FABS[128];
  __shared__ float SMEM_SQ[128];
  float weights_per_thread = 0;
  float temp = 0;
  // Out-of-range threads contribute zeros and skip the mask reset.
  if(tidx<N) {
    weights_per_thread = __ldg(weights_gpu+tidx); //FOR FABS MEAN
    temp = weights_per_thread * weights_per_thread;//FOR SQ
    d_prune_index[tidx] = 1.0f;
  }
  SMEM_FABS[threadIdx.x] = fabs(weights_per_thread);
  SMEM_SQ[threadIdx.x] = temp;
  __syncthreads();
  // Shared-memory tree reduction 128 -> 64 -> 32 partials...
  float mySum_FABS = fabs(weights_per_thread);
  float mySum_SQ = temp;
  if(threadIdx.x<64) {
    SMEM_FABS[threadIdx.x] = mySum_FABS = mySum_FABS + SMEM_FABS[threadIdx.x+64];
    SMEM_SQ[threadIdx.x] = mySum_SQ = mySum_SQ + SMEM_SQ[threadIdx.x+64];
  }
  __syncthreads();
  if(threadIdx.x<32) {
    SMEM_FABS[threadIdx.x] = mySum_FABS = mySum_FABS + SMEM_FABS[threadIdx.x+32];
    SMEM_SQ[threadIdx.x] = mySum_SQ = mySum_SQ + SMEM_SQ[threadIdx.x+32];
  }
  // ...then warp shuffles collapse the remaining 32 lanes into lane 0.
  mySum_FABS+=__shfl_down(mySum_FABS,16);
  mySum_SQ  +=__shfl_down(mySum_SQ,16);
  mySum_FABS+=__shfl_down(mySum_FABS,8);
  mySum_SQ  +=__shfl_down(mySum_SQ,8);
  mySum_FABS+=__shfl_down(mySum_FABS,4);
  mySum_SQ  +=__shfl_down(mySum_SQ,4);
  mySum_FABS+=__shfl_down(mySum_FABS,2);
  mySum_SQ  +=__shfl_down(mySum_SQ,2);
  mySum_FABS+=__shfl_down(mySum_FABS,1);
  mySum_SQ  +=__shfl_down(mySum_SQ,1);
  // One atomic per block; the /N makes the global accumulation a mean.
  if(threadIdx.x == 0){
      atomicAdd(weights_mean_gpu,mySum_FABS/N);
      atomicAdd(weights_sq_gpu,mySum_SQ);
  }
}
// Hysteresis pruning. With threshold MAX_ = max(mean + rate*std, 0):
// currently-kept weights (mask == 1) falling to <= 0.9*MAX_ are pruned
// (mask and shadow copy zeroed); pruned weights (mask == 0) rising above
// 1.1*MAX_ are restored into weights_gpu_tmp. The 0.9/1.1 band prevents
// weights near the threshold from flapping every iteration.
// NOTE(review): compares the SIGNED weight, not |w| — negative weights are
// always pruned and can never be re-enabled; confirm this is intended.
__global__ void forward_prune( float* weights_gpu_tmp, const float * __restrict__ weights_gpu, float* d_prune_index,
               const int N, const float rate, const float weights_mean_gpu, const float weights_std_gpu )
{
  const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
  if(tidx<N)
  {
    const float MAX_ = max(weights_mean_gpu+rate*weights_std_gpu,0.f);
    float d_data = __ldg(weights_gpu + tidx);
    if(d_prune_index[tidx] == 1.f && d_data <= 0.9f*MAX_){
      d_prune_index[tidx] = 0.f;
      weights_gpu_tmp[tidx] = 0.f;
    }
    else if(d_prune_index[tidx] == 0.f && d_data > 1.1f*MAX_){
      d_prune_index[tidx] = 1.f;
      weights_gpu_tmp[tidx] = d_data;
    }
  }
}
//coded by linhao for darknet prune
/*
 * Forward pass of a convolutional layer on the GPU (HIP build).
 *
 * Locates the layer's input — either the network input when fed by the
 * "data" layer, or the producing layer's top blob matched by name — copies
 * it into l.bottom_data_gpu, then runs the convolution (cuDNN when enabled,
 * otherwise im2col + GEMM per image), followed by optional batch-norm,
 * bias add and the activation function.
 *
 * NOTE(review): the large commented-out PRUNE / PRUNE_UPDATE experiment
 * blocks that used to live here were dead code and have been removed.
 * The `n` parameter is unused (kept for the caller's signature).
 */
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state,int n)
{
    int bottom_size = l.batch*l.c*l.w*l.h;
    int top_size = l.batch*l.out_c*l.out_w*l.out_h;
    /* BUGFIX: name matching used pointer equality (`char* == "data"`), the
     * input copy size was the ill-formed `sizeof(bottom_size)*float`, and the
     * producer copy used sizeof(top_size) (4 bytes). The amount copied is this
     * layer's input volume in both cases. Assumes the name fields are
     * NUL-terminated C strings — confirm against the parser. */
    if(strcmp(l.bottom_name, "data") == 0) {
        hipMemcpy(l.bottom_data_gpu, state.input, bottom_size*sizeof(float), hipMemcpyDeviceToDevice);
    }
    else{
        for(int i = 0; i < state.net.n; i++){
            if(strcmp(state.net.layer[i].name, l.bottom_name) == 0)
                hipMemcpy(l.bottom_data_gpu, state.net.layer[i].top_data, bottom_size*sizeof(float), hipMemcpyDeviceToDevice);
        }
    }
    /* Clear the output accumulator; the GEMM below accumulates with beta = 1. */
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
    if(l.binary){
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
    }
    if(l.xnor){
        /* XNOR-net: binarize both the weights and the input activations. */
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
        binarize_gpu(l.bottom_data_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
        l.bottom_data_gpu = l.binary_input_gpu;
    }

#ifdef CUDNN
    float one = 1;
    cudnnConvolutionForward(cudnn_handle(),
                &one,
                l.srcTensorDesc,
                l.bottom_data_gpu,
                l.weightDesc,
                l.weights_gpu,
                l.convDesc,
                l.fw_algo,
                state.workspace,
                l.workspace_size,
                &one,
                l.dstTensorDesc,
                l.output_gpu);
#else
    /* im2col + GEMM fallback: one GEMM per image in the batch. */
    int i;
    int m = l.n;                    /* rows: number of filters            */
    int k = l.size*l.size*l.c;      /* inner dim: weights per filter      */
    int gemm_n = l.out_w*l.out_h;   /* cols: output spatial positions.
                                       BUGFIX: was `int n`, which redeclares
                                       the function parameter (ill-formed). */
    for(i = 0; i < l.batch; ++i){
        im2col_ongpu(l.bottom_data_gpu + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
        float * a = l.weights_gpu;
        float * b = state.workspace;
        float * c = l.output_gpu;
        gemm_ongpu(0,0,m,gemm_n,k,1.,a,k,b,gemm_n,1.,c+i*m*gemm_n,gemm_n);
    }
#endif

    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
    if(l.binary || l.xnor) swap_binary(&l);
    return;
}
/*
 * Backward pass of a convolutional layer on the GPU (HIP build).
 *
 * Fetches the gradient arriving at this layer's top blob (matched by name),
 * back-propagates through the activation, bias and optional batch-norm, then
 * computes the weight gradient and — when state.delta is set — the gradient
 * with respect to the layer input (cuDNN or im2col/GEMM path).
 *
 * The `n` parameter is unused (kept for the caller's signature).
 */
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state,int n)
{
    /* BUGFIX: top_size was referenced while its declaration was commented out
     * (did not compile), the name match used pointer equality, and
     * sizeof(top_size) copied only 4 bytes instead of the gradient volume. */
    int top_size = l.batch*l.out_c*l.out_w*l.out_h;
    for(int i = 0; i < state.net.n; i++){
        if(strcmp(state.net.layer[i].name, l.top_name) == 0)
            hipMemcpy(l.delta_gpu, state.net.layer[i].delta_gpu, top_size*sizeof(float), hipMemcpyDeviceToDevice);
    }
    /* Back-prop through the activation, then accumulate the bias gradient. */
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
    if(l.batch_normalize){
        backward_batchnorm_layer_gpu(l, state);
    }
    float *original_input = state.input;
    if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
    float one = 1;
    cudnnConvolutionBackwardFilter(cudnn_handle(),
            &one,
            l.srcTensorDesc,
            state.input,
            l.ddstTensorDesc,
            l.delta_gpu,
            l.convDesc,
            l.bf_algo,
            state.workspace,
            l.workspace_size,
            &one,
            l.dweightDesc,
            l.weight_updates_gpu);
    if(state.delta){
        if(l.binary || l.xnor) swap_binary(&l);
        cudnnConvolutionBackwardData(cudnn_handle(),
                &one,
                l.weightDesc,
                l.weights_gpu,
                l.ddstTensorDesc,
                l.delta_gpu,
                l.convDesc,
                l.bd_algo,
                state.workspace,
                l.workspace_size,
                &one,
                l.dsrcTensorDesc,
                state.delta);
        if(l.binary || l.xnor) swap_binary(&l);
        if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
    }
#else
    int m = l.n;                     /* filters */
    int gemm_n = l.size*l.size*l.c;  /* weights per filter.
                                        BUGFIX: was `int n`, which redeclares
                                        the function parameter (ill-formed). */
    int k = l.out_w*l.out_h;         /* output spatial positions */
    for(int i = 0; i < l.batch; ++i)
    {
        /* Weight gradient: dW += delta_i * im2col(input_i)^T */
        float * a = l.delta_gpu;
        float * b = state.workspace;
        float * c = l.weight_updates_gpu;
        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
        gemm_ongpu(0,1,m,gemm_n,k,1,a + i*m*k,k,b,k,1,c,gemm_n);
        if(state.delta){
            if(l.binary || l.xnor) swap_binary(&l);
            /* BUGFIX: propagate through the live weights. weights_gpu_temp
             * belongs to the disabled PRUNE_UPDATE experiment and is never
             * filled when that path is off; the forward pass convolves with
             * weights_gpu, so the input gradient must use the same buffer. */
            float * wa = l.weights_gpu;
            float * wb = l.delta_gpu;      /* top diff */
            float * wc = state.workspace;  /* bottom diff in column form */
            gemm_ongpu(1,0,gemm_n,k,m,1,wa,gemm_n,wb + i*k*m,k,0,wc,k);
            col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
            if(l.binary || l.xnor) {
                swap_binary(&l);
            }
            if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
        }
    }
#endif
    return;
}
/* Copy all learnable state of the layer from the device back to the host:
 * weights, biases, their accumulated updates, batch-norm statistics when
 * present, and the Adam moment buffers when Adam is enabled. */
void pull_convolutional_layer(convolutional_layer layer)
{
    const int num_weights = layer.c*layer.n*layer.size*layer.size;
    const int num_filters = layer.n;
    cuda_pull_array(layer.weights_gpu, layer.weights, num_weights);
    cuda_pull_array(layer.biases_gpu, layer.biases, num_filters);
    cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, num_weights);
    cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, num_filters);
    if (layer.batch_normalize){
        cuda_pull_array(layer.scales_gpu, layer.scales, num_filters);
        cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, num_filters);
        cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, num_filters);
    }
    if (layer.adam){
        cuda_pull_array(layer.m_gpu, layer.m, num_weights);
        cuda_pull_array(layer.v_gpu, layer.v, num_weights);
    }
}
/* Mirror of pull_convolutional_layer: upload all learnable state of the
 * layer from host buffers to the device. */
void push_convolutional_layer(convolutional_layer layer)
{
    const int num_weights = layer.c*layer.n*layer.size*layer.size;
    const int num_filters = layer.n;
    cuda_push_array(layer.weights_gpu, layer.weights, num_weights);
    cuda_push_array(layer.biases_gpu, layer.biases, num_filters);
    cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, num_weights);
    cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, num_filters);
    if (layer.batch_normalize){
        cuda_push_array(layer.scales_gpu, layer.scales, num_filters);
        cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, num_filters);
        cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, num_filters);
    }
    if (layer.adam){
        cuda_push_array(layer.m_gpu, layer.m, num_weights);
        cuda_push_array(layer.v_gpu, layer.v, num_weights);
    }
}
// Apply one optimizer step on the GPU: SGD-with-momentum for biases and
// batch-norm scales, then either Adam or SGD-with-momentum (+ weight decay)
// for the weights. The order of the in-place operations below matters.
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
    int size = layer.size*layer.size*layer.c*layer.n;
    // Biases: step, then decay the accumulated update by momentum.
    axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
    scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
    if(layer.scales_gpu){
        axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
        scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
    }
    if(layer.adam){
        // Adam: decay the moments, fold weight decay into the gradient, feed
        // the raw gradient into m, square the gradient buffer IN PLACE, feed
        // the squared gradient into v, take the step, then clear the gradient.
        scal_ongpu(size, layer.B1, layer.m_gpu, 1);
        scal_ongpu(size, layer.B2, layer.v_gpu, 1);
        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
        // After this call weight_updates_gpu holds gradient^2 — it must come
        // after the m update and before the v update.
        mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
        adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
        fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
    }else{
        // SGD: weight decay folded into the update, step, then momentum decay.
        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
        scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
    }
}
| 40621bac5a7759ff06b8352cbd1dd9d0334e8561.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
//#define PRUNE_UPDATE
//#define PRUNE
#define TBLOCK_SIZE 128
// qsort comparator producing DESCENDING order for int-typed elements.
// BUGFIX: `*(int *)b - *(int *)a` overflows (undefined behavior) when the
// operands have opposite signs; compare explicitly instead.
// NOTE(review): the disabled pruning code sorts a float array with this
// comparator — that only orders correctly for non-negative floats; confirm
// before re-enabling.
inline int compare(const void*a, const void*b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (y > x) - (y < x);
}
// Elementwise mask application: d_weights[i] *= d_prune_index[i]
// (mask values are 0.f / 1.f, so pruned weights become zero).
__global__ void prune_weights_kernel( float* d_weights, const float* __restrict__ d_prune_index, const int N )
{
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    if( tidx < N ) d_weights[tidx] *= __ldg(d_prune_index+tidx);
}
// Sign-binarization: one thread per element; +1 when x >= 0, otherwise -1.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= n) return;
    binary[i] = (x[i] >= 0) ? 1 : -1;
}
// Host launcher: binarize n floats of x into `binary` (+1/-1 by sign).
void binarize_gpu(float *x, int n, float *binary)
{
    binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary);
    check_error(cudaPeekAtLastError());
}
// For each spatial position s (one thread each): compute the mean absolute
// value across the n slices at that position, then write +/-mean according
// to each element's sign.
// NOTE(review): `abs` on a float relies on the device C++ overload; prefer
// fabsf to rule out int truncation — confirm which overload this build uses.
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
    int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (s >= size) return;
    int i = 0;
    float mean = 0;
    for(i = 0; i < n; ++i){
        mean += abs(input[i*size + s]);
    }
    mean = mean / n;
    for(i = 0; i < n; ++i){
        // Note: strictly positive test here (> 0), unlike binarize_kernel (>= 0).
        binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
    }
}
// Host launcher: one thread per spatial position (size total).
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
    binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary);
    check_error(cudaPeekAtLastError());
}
// XNOR-style weight binarization: one thread per filter f computes the mean
// absolute weight over its `size` elements and writes +/-mean by sign.
// NOTE(review): same `abs`-on-float concern as binarize_input_kernel.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    int i = 0;
    float mean = 0;
    for(i = 0; i < size; ++i){
        mean += abs(weights[f*size + i]);
    }
    mean = mean / size;
    for(i = 0; i < size; ++i){
        binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
        //binary[f*size + i] = weights[f*size + i];
    }
}
// Host launcher: one thread per filter (n total).
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary);
    check_error(cudaPeekAtLastError());
}
//code by lh
//kernel for PRUNE_UPDATE
// Block-wide reduction over the weights (expects blockDim.x == 128, cf.
// TBLOCK_SIZE): atomically accumulates each block's partial of mean(|w|)
// into *weights_mean_gpu and of sum(w^2) into *weights_sq_gpu, and resets
// the prune mask to 1 for every weight.
// NOTE(review): uses legacy mask-less __shfl_down — removed on Volta+;
// should migrate to __shfl_down_sync with an explicit mask.
__global__ void mean_sq_gpu(const float * __restrict__ weights_gpu, float *weights_mean_gpu,float *weights_sq_gpu,const int N,float *d_prune_index)
{
  const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
  __shared__ float SMEM_FABS[128];
  __shared__ float SMEM_SQ[128];
  float weights_per_thread = 0;
  float temp = 0;
  // Out-of-range threads contribute zeros and skip the mask reset.
  if(tidx<N) {
    weights_per_thread = __ldg(weights_gpu+tidx); //FOR FABS MEAN
    temp = weights_per_thread * weights_per_thread;//FOR SQ
    d_prune_index[tidx] = 1.0f;
  }
  SMEM_FABS[threadIdx.x] = fabs(weights_per_thread);
  SMEM_SQ[threadIdx.x] = temp;
  __syncthreads();
  // Shared-memory tree reduction 128 -> 64 -> 32 partials...
  float mySum_FABS = fabs(weights_per_thread);
  float mySum_SQ = temp;
  if(threadIdx.x<64) {
    SMEM_FABS[threadIdx.x] = mySum_FABS = mySum_FABS + SMEM_FABS[threadIdx.x+64];
    SMEM_SQ[threadIdx.x] = mySum_SQ = mySum_SQ + SMEM_SQ[threadIdx.x+64];
  }
  __syncthreads();
  if(threadIdx.x<32) {
    SMEM_FABS[threadIdx.x] = mySum_FABS = mySum_FABS + SMEM_FABS[threadIdx.x+32];
    SMEM_SQ[threadIdx.x] = mySum_SQ = mySum_SQ + SMEM_SQ[threadIdx.x+32];
  }
  // ...then warp shuffles collapse the remaining 32 lanes into lane 0.
  mySum_FABS+=__shfl_down(mySum_FABS,16);
  mySum_SQ  +=__shfl_down(mySum_SQ,16);
  mySum_FABS+=__shfl_down(mySum_FABS,8);
  mySum_SQ  +=__shfl_down(mySum_SQ,8);
  mySum_FABS+=__shfl_down(mySum_FABS,4);
  mySum_SQ  +=__shfl_down(mySum_SQ,4);
  mySum_FABS+=__shfl_down(mySum_FABS,2);
  mySum_SQ  +=__shfl_down(mySum_SQ,2);
  mySum_FABS+=__shfl_down(mySum_FABS,1);
  mySum_SQ  +=__shfl_down(mySum_SQ,1);
  // One atomic per block; the /N makes the global accumulation a mean.
  if(threadIdx.x == 0){
      atomicAdd(weights_mean_gpu,mySum_FABS/N);
      atomicAdd(weights_sq_gpu,mySum_SQ);
  }
}
// Hysteresis pruning. With threshold MAX_ = max(mean + rate*std, 0):
// currently-kept weights (mask == 1) at <= 0.9*MAX_ are pruned; pruned
// weights (mask == 0) above 1.1*MAX_ are restored into weights_gpu_tmp.
// The 0.9/1.1 band prevents threshold flapping.
// NOTE(review): compares the SIGNED weight, not |w| — negative weights are
// always pruned and can never be re-enabled; confirm this is intended.
__global__ void forward_prune( float* weights_gpu_tmp, const float * __restrict__ weights_gpu, float* d_prune_index,
               const int N, const float rate, const float weights_mean_gpu, const float weights_std_gpu )
{
  const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
  if(tidx<N)
  {
    const float MAX_ = max(weights_mean_gpu+rate*weights_std_gpu,0.f);
    float d_data = __ldg(weights_gpu + tidx);
    if(d_prune_index[tidx] == 1.f && d_data <= 0.9f*MAX_){
      d_prune_index[tidx] = 0.f;
      weights_gpu_tmp[tidx] = 0.f;
    }
    else if(d_prune_index[tidx] == 0.f && d_data > 1.1f*MAX_){
      d_prune_index[tidx] = 1.f;
      weights_gpu_tmp[tidx] = d_data;
    }
  }
}
//coded by linhao for darknet prune
/* Case-sensitive C-string equality. Local helper because <string.h> is not
 * guaranteed to be pulled in by the project headers included above. */
static int conv_fw_name_eq(const char *a, const char *b)
{
    while (*a && *a == *b) { ++a; ++b; }
    return *a == *b;
}
/*
 * Forward pass of a convolutional layer on the GPU (CUDA build).
 *
 * Locates the layer's input — either the network input when fed by the
 * "data" layer, or the producing layer's top blob matched by name — copies
 * it into l.bottom_data_gpu, then runs the convolution (cuDNN when enabled,
 * otherwise im2col + GEMM per image), followed by optional batch-norm,
 * bias add and the activation function.
 *
 * NOTE(review): the large commented-out PRUNE / PRUNE_UPDATE experiment
 * blocks that used to live here were dead code and have been removed.
 * The `n` parameter is unused (kept for the caller's signature).
 */
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state,int n)
{
    int bottom_size = l.batch*l.c*l.w*l.h;
    int top_size = l.batch*l.out_c*l.out_w*l.out_h;
    /* BUGFIX: name matching used pointer equality (`char* == "data"`), the
     * input copy size was the ill-formed `sizeof(bottom_size)*float`, and the
     * producer copy used sizeof(top_size) (4 bytes). The amount copied is this
     * layer's input volume in both cases. Assumes the name fields are
     * NUL-terminated C strings — confirm against the parser. */
    if(conv_fw_name_eq(l.bottom_name, "data")) {
        cudaMemcpy(l.bottom_data_gpu, state.input, bottom_size*sizeof(float), cudaMemcpyDeviceToDevice);
    }
    else{
        for(int i = 0; i < state.net.n; i++){
            if(conv_fw_name_eq(state.net.layer[i].name, l.bottom_name))
                cudaMemcpy(l.bottom_data_gpu, state.net.layer[i].top_data, bottom_size*sizeof(float), cudaMemcpyDeviceToDevice);
        }
    }
    /* Clear the output accumulator; the GEMM below accumulates with beta = 1. */
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
    if(l.binary){
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
    }
    if(l.xnor){
        /* XNOR-net: binarize both the weights and the input activations. */
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
        binarize_gpu(l.bottom_data_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
        l.bottom_data_gpu = l.binary_input_gpu;
    }

#ifdef CUDNN
    float one = 1;
    cudnnConvolutionForward(cudnn_handle(),
                &one,
                l.srcTensorDesc,
                l.bottom_data_gpu,
                l.weightDesc,
                l.weights_gpu,
                l.convDesc,
                l.fw_algo,
                state.workspace,
                l.workspace_size,
                &one,
                l.dstTensorDesc,
                l.output_gpu);
#else
    /* im2col + GEMM fallback: one GEMM per image in the batch. */
    int i;
    int m = l.n;                    /* rows: number of filters            */
    int k = l.size*l.size*l.c;      /* inner dim: weights per filter      */
    int gemm_n = l.out_w*l.out_h;   /* cols: output spatial positions.
                                       BUGFIX: was `int n`, which redeclares
                                       the function parameter (ill-formed). */
    for(i = 0; i < l.batch; ++i){
        im2col_ongpu(l.bottom_data_gpu + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
        float * a = l.weights_gpu;
        float * b = state.workspace;
        float * c = l.output_gpu;
        gemm_ongpu(0,0,m,gemm_n,k,1.,a,k,b,gemm_n,1.,c+i*m*gemm_n,gemm_n);
    }
#endif

    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
    if(l.binary || l.xnor) swap_binary(&l);
    return;
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state,int n)
{
//l.bottom_dif,l.top_dif:need add this value to layer
//l.top_dif:l.delta l.bottom_dif
//constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
//int bottom_size = l.batch*l.c*l.w*l.h;
//int top_size = l.batch*l.out_c*l.out_w*l.out_h;
//if(l.bottom_name == "data") {
//cudaMemcpy(l.bottom_data_gpu,state.input,sizeof(bottom_size)*float,cudaMemcpyDeviceToDevice);
//}
//else{
for(int i =0;i<state.net.n;i++){
if(state.net.layer[i].name == l.top_name) cudaMemcpy(l.delta_gpu, state.net.layer[i].delta_gpu, sizeof(top_size), cudaMemcpyDeviceToDevice);
}
//}
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
} else {
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
for(int i = 0; i < l.batch; ++i)
{
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
//float * a = l.weights_gpu; // weights
//coded by linhao August 22
float* a = NULL;
if(l.size > 1){a = l.weights_gpu_temp;}
else{a = l.weights_gpu;}
float * b = l.delta_gpu; // top_diff
float * c = state.workspace; // bottom_dif
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
//coded by linhao for prune
// #ifdef PRUNE
// if( 3 == l.size)
// {
// const int count = l.n*l.c*l.size*l.size;
// prune_weights_kernel<<<(count + TBLOCK_SIZE - 1)/TBLOCK_SIZE, TBLOCK_SIZE>>>(l.weight_updates_gpu, l.d_prune_index, count);
// }
// #endif
#endif
return;
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
|
93fea0a7a64f13d4856b4d8a510ba010108e3cb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
\file lanczos.cu
\brief Controller code for general Lanczos diagonalization
*/
// Katharine Hyatt
// A set of functions to implement the Lanczos method for a generic Hamiltonian
// Based on the codes Lanczos_07.cpp and Lanczos07.h by Roger Melko
//-------------------------------------------------------------------------------
#include"lanczos.h"
/*Function lanczos: takes a hermitian matrix H, tridiagonalizes it, and finds the n smallest eigenvalues - this version only returns eigenvalues, not
eigenvectors.
---------------------------------------------------------------------------------------------------------------------------------------------------
Input: howMany, the number of Hamiltonians to process
numElem - the number of nonzero elements per matrix
Hamiltonian - an array of Hamiltonians, each element being a custom struct containing the rows, cols, and vals in COO format as well as the dimensions
maxIter, the starting number of iterations we'll try
numEig, the number of eigenvalues we're interested in seeing
convReq, the convergence we'd like to see
---------------------------------------------------------------------------------------------------------------------------------------------------
Output: h_ordered, the array of the numEig smallest eigenvalues, ordered from smallest to largest
---------------------------------------------------------------------------------------------------------------------------------------------------
*/
__host__ void lanczos(const int howMany, const int* numElem, d_hamiltonian*& Hamiltonian, double**& groundstates, double**& eigenvalues, int maxIter, const int numEig, const double convReq)
{
//----------Initializing CUBLAS and CUSPARSE libraries as well as storage on GPU----------------
int* dim = (int*)malloc(howMany*sizeof(int));
for(int i = 0; i < howMany; i++)
{
dim[i] = Hamiltonian[i].sectorDim;
}
/*!
First it is necessary to create handles, streams, and to initialize the two CUDA libraries which will be used:
\verbatim
*/
hipStream_t stream[howMany];
hipError_t status[howMany];
hipblasStatus_t cublasStatus[howMany];
hipblasHandle_t linAlgHandle;
status[0] = hipPeekAtLastError();
if (status[0] != hipSuccess)
{
cout<<"Error before lanczos: "<<hipGetErrorString(status[0])<<endl;
}
cublasStatus[0] = hipblasCreate(&linAlgHandle);
if (cublasStatus[0] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Initializing CUBLAS failed! Error: "<<cublasStatus[0]<<std::endl;
}
hipsparseHandle_t sparseHandle;
hipsparseStatus_t cusparseStatus[howMany];
cusparseStatus[0] = hipsparseCreate(&sparseHandle);
if (cusparseStatus[0] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Failed to initialize CUSPARSE! Error: "<<cusparseStatus[0]<<std::endl;
}
/*!
\endverbatim
The function also transforms the Hamiltonian into CSR format so that CUSPARSE can use it for matrix-vector multiplications.
\verbatim
*/
hipsparseMatDescr_t H_descr[howMany];
for(int i = 0; i<howMany; i++)
{
cusparseStatus[i] = hipsparseCreateMatDescr(&H_descr[i]);
if (cusparseStatus[i] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error creating matrix description: "<<cusparseStatus[i]<<std::endl;
}
cusparseStatus[i] = hipsparseSetMatType(H_descr[i], HIPSPARSE_MATRIX_TYPE_GENERAL);
if (cusparseStatus[i] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error setting matrix type: "<<cusparseStatus[i]<<std::endl;
}
cusparseStatus[i] = hipsparseSetMatIndexBase(H_descr[i], HIPSPARSE_INDEX_BASE_ZERO);
if (cusparseStatus[i] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error setting matrix index base: "<<cusparseStatus[i]<<std::endl;
}
}
int** d_H_rowPtrs;
d_H_rowPtrs = (int**)malloc(howMany*sizeof(int*));
for(int i = 0; i < howMany; i++)
{
status[i] = hipStreamCreate(&stream[i]);
if (status[i] != hipSuccess)
{
std::cout<<"Error creating streams: "<<hipGetErrorString(status[i])<<std::endl;
}
status[i] = hipMalloc(&d_H_rowPtrs[i], (dim[i] + 1)*sizeof(int));
if (status[i] != hipSuccess)
{
std::cout<<"Error allocating d_H_rowPtrs: "<<hipGetErrorString(status[i])<<std::endl;
}
}
//---------------Converting from COO to CSR format for Hamiltonians----------------
//cusparseHybMat_t hyb_Ham[howMany];
for(int i = 0; i < howMany; i++)
{
/*cusparseStatus[i] = cusparseCreateHybMat(&hyb_Ham[i]);
if (cusparseStatus[i] != cusparseStatus_SUCCESS)
{
std::cout<<"Error creating HYB matrix: "<<cusparseStatus[i]<<std::endl;
}
cout<<"Done creating HYB matrices"<<endl;*/
cusparseStatus[i] = hipsparseSetStream(sparseHandle, stream[i]);
if (cusparseStatus[i] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error switching streams: "<<cusparseStatus[i]<<std::endl;
}
status[i] = hipPeekAtLastError();
if (status[i] != hipSuccess)
{
std::cout<<"Error synchronizing stream: "<<hipGetErrorString(status[i])<<std::endl;
}
cusparseStatus[i] = hipsparseXcoo2csr(sparseHandle, Hamiltonian[i].rows, numElem[i], dim[i], d_H_rowPtrs[i], HIPSPARSE_INDEX_BASE_ZERO);
if (cusparseStatus[i] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error converting to CSR: "<<cusparseStatus[i]<<std::endl;
}
status[i] = hipPeekAtLastError();
if (status[i] != hipSuccess)
{
std::cout<<"Error synchronizing stream: "<<hipGetErrorString(status[i])<<std::endl;
}
/*cusparseStatus[i] = cusparseDcsr2hyb(sparseHandle, dim[i], dim[i], H_descr[i], Hamiltonian[i].vals, d_H_rowPtrs[i], Hamiltonian[i].cols, hyb_Ham[i], 0, CUSPARSE_HYB_PARTITION_AUTO);
if (cusparseStatus[i] != cusparseStatus_SUCCESS)
{
std::cout<<"Error converting to HYB: "<<cusparseStatus[i]<<std::endl;
}*/
}
/*!
\endverbatim
status[0] = hipPeekAtLastError();
if (status[0] != hipSuccess)
{
std::cout<<"Error before thread sync: "<<hipGetErrorString(status[0])<<std::endl;
}
*/
//----------------Create arrays to hold current Lanczos vectors----------
vector< vector<double> > h_a(howMany);
vector< vector<double> > h_b(howMany);
//Making the "random" starting vector
/*!
The function then sets up Lanczos diagonalization by initializing a random starting vector on the CPU, creating storage for the Lanczos vectors on the GPU, and copying this starting vector across.
\verbatim
*/
double** v0 = (double**)malloc(howMany*sizeof(double*));
double** v1 = (double**)malloc(howMany*sizeof(double*));
double** v2 = (double**)malloc(howMany*sizeof(double*));
double*** lanczosStore = (double***)malloc(howMany*sizeof(double**));
double** host_v0 = (double**)malloc(howMany*sizeof(double*));
for(int i = 0; i < howMany; i++)
{
status[i] = hipMalloc(&v0[i], dim[i]*sizeof(double));
if (status[i] != hipSuccess)
{
std::cout<<"Error creating storage for v0 on GPU: "<<hipGetErrorString(status[i])<<std::endl;
}
status[i] = hipMalloc(&v1[i], dim[i]*sizeof(double));
if (status[i] != hipSuccess)
{
std::cout<<"Error creating storage for v1 on GPU: "<<hipGetErrorString(status[i])<<std::endl;
}
status[i] = hipMalloc(&v2[i], dim[i]*sizeof(double));
if (status[i] != hipSuccess)
{
std::cout<<"Error creating storage for v2 on GPU: "<<hipGetErrorString(status[i])<<std::endl;
}
lanczosStore[i] = (double**)malloc(maxIter*sizeof(double*));
host_v0[i] = (double*)malloc(dim[i]*sizeof(double));
for(int j = 0; j<dim[i]; j++)
{
host_v0[i][j] = 0.;
if (j%4 == 0) host_v0[i][j] = 1. ;
else if (j%5 == 0) host_v0[i][j] = -2.;
else if (j%7 == 0) host_v0[i][j] = 3.;
else if (j%9 == 0) host_v0[i][j] = -4.;
}
status[i] = hipMalloc(&lanczosStore[i][0], dim[i]*sizeof(double));
if (status[i] != hipSuccess)
{
std::cout<<"Error creating storage for v0 in lanczosStore: "<<hipGetErrorString(status[i])<<std::endl;
}
status[i] = hipMemcpyAsync(v0[i], host_v0[i], dim[i]*sizeof(double), hipMemcpyHostToDevice, stream[i]);
if (status[i] != hipSuccess)
{
std::cout<<"Error copying v0 to the device: "<<hipGetErrorString(status[i])<<std::endl;
}
}
/*!
\endverbatim
First, storage variables are created to hold the results of the CUBLAS functions.
\verbatim
*/
//--------------Create dummy variables for CUBLAS functions----------------
double* normTemp = (double*)malloc(howMany*sizeof(double));
double* alpha = (double*)malloc(howMany*sizeof(double));
double* beta = (double*)malloc(howMany*sizeof(double));
double* dotTemp = (double*)malloc(howMany*sizeof(double));
double* axpyTemp = (double*)malloc(howMany*sizeof(double));
double** y = (double**)malloc(howMany*sizeof(double*));
/*!
\endverbatim
*/
//--------------Generate first Lanczos vector--------------------------
for(int i = 0; i < howMany; i++)
{
hipblasSetStream(linAlgHandle, stream[i]);
hipsparseSetStream(sparseHandle, stream[i]);
/*!
Then the initial multiplication to generate the first Lanczos vector is performed.
\verbatim
*/
cublasStatus[i] = hipblasDnrm2(linAlgHandle, dim[i], v0[i], 1, &normTemp[i]);
normTemp[i] = 1./normTemp[i];
cublasStatus[i] = hipblasDscal(linAlgHandle, dim[i], &normTemp[i], v0[i], 1);
alpha[i] = 1.;
beta[i] = 0.;
hipMemcpyAsync(lanczosStore[i][0], v0[i], dim[i]*sizeof(double), hipMemcpyDeviceToDevice, stream[i]);
//-----------Apply Hamiltonian to V0--------------------
cusparseStatus[i] = hipsparseDcsrmv(sparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, dim[i], dim[i], numElem[i], &alpha[i], H_descr[i], Hamiltonian[i].vals, d_H_rowPtrs[i], Hamiltonian[i].cols, v0[i], &beta[i], v1[i]); // the Hamiltonian is applied here
/*!
\endverbatim
*/
if (cusparseStatus[i] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Getting V1 = H*V0 failed! Error: ";
std::cout<<cusparseStatus[i]<<std::endl;
}
//hipStreamSynchronize(stream[i]);
if (hipPeekAtLastError() != 0 )
{
std::cout<<"Getting V1 = H*V0 failed! Error: ";
std::cout<<hipGetErrorString(hipPeekAtLastError())<<std::endl;
}
}
for(int i = 0; i < howMany; i++)
{
hipblasSetStream(linAlgHandle, stream[i]);
dotTemp[i] = 0.;
cublasStatus[i] = hipblasDdot(linAlgHandle, dim[i], v1[i], 1, v0[i], 1, &dotTemp[i]);
h_a[i].push_back(dotTemp[i]);
h_b[i].push_back(0.);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Getting d_a[0] failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
//hipStreamSynchronize(stream[i]);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Getting h_a[0] failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (status[i] != hipSuccess)
{
std::cout<<"Memory allocation of y dummy vector failed! Error:";
std::cout<<hipGetErrorString( status[i] )<<std::endl;
}
status[i] = hipMalloc(&y[i], dim[i]*sizeof(double));
/*!
The new vector must be rescaled and stored before Lanczos iteration can begin.
\verbatim
*/
cublasStatus[i] = hipblasDscal(linAlgHandle, dim[i], &beta[i], y[i], 1);
//hipStreamSynchronize(stream[i]);
axpyTemp[i] = -1*h_a[i][0];
cublasStatus[i] = hipblasDaxpy(linAlgHandle, 0, &axpyTemp[i], v0[i], 1, v1[i], 1);
//hipStreamSynchronize(stream[i]);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"V1 = V1 - alpha*V0 failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (hipPeekAtLastError() != 0 )
{
std::cout<<"Getting V1 = V1 - a*V0 failed! Error: ";
std::cout<<hipGetErrorString(hipPeekAtLastError())<<std::endl;
}
//---------Normalize V1 and copy it to Lanczos storage-----------
normTemp[i] = 0.;
cublasStatus[i] = hipblasDnrm2(linAlgHandle, dim[i], v1[i], 1, &normTemp[i]); //this is slow for some reason
//hipStreamSynchronize(stream[i]);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Getting the norm of v1 failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (hipPeekAtLastError() != 0 )
{
std::cout<<"Getting nrm(V1) failed! Error: ";
std::cout<<hipGetErrorString(hipPeekAtLastError())<<std::endl;
}
}
double* gamma = (double*)malloc(howMany*sizeof(double));
for(int i = 0; i < howMany; i++)
{
hipblasSetStream(linAlgHandle, stream[i]);
h_b[i].push_back(normTemp[i]);
normTemp[i] = 1./normTemp[i];
gamma[i] = 1./h_b[i][1]; //alpha = 1/beta in v1 = v1 - alpha*v0
cublasStatus[i] = hipblasDscal(linAlgHandle, dim[i], &normTemp[i], v1[i], 1);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Normalizing v1 failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (hipPeekAtLastError() != 0 )
{
std::cout<<"Normalizing V1 failed! Error: ";
std::cout<<hipGetErrorString(hipPeekAtLastError())<<std::endl;
}
hipMalloc(&lanczosStore[i][1], dim[i]*sizeof(double));
hipMemcpyAsync(lanczosStore[i][1], v1[i], dim[i]*sizeof(double), hipMemcpyDeviceToDevice, stream[i]);
}
/*!
\endverbatim
*/
/*!
Storage space for the tridiagonal matrix is created and flags are initialized to track progress:
\verbatim
*/
double* gsEnergy = (double*)malloc(howMany*sizeof(double));
double* eigTemp = (double*)malloc(howMany*sizeof(double));
int* returned = (int*)malloc(howMany*sizeof(int));
int* iter = (int*)malloc(howMany*sizeof(int));
bool* doneFlag = (bool*)malloc(howMany*sizeof(bool));
double** h_H_eigen = (double**)malloc(howMany*sizeof(double*));
double** d_H_eigen = (double**)malloc(howMany*sizeof(double*));
double** h_diag = (double**)malloc(howMany*sizeof(double*));
double** h_offdia = (double**)malloc(howMany*sizeof(double*));
vector< vector < double > > h_ordered(howMany);
/*!
\endverbatim
*/
/*!
The flags and storage are initialized for the interations
\verbatim
*/
for(int i = 0; i<howMany; i++)
{
gsEnergy[i] = 1.;
eigTemp[i] = 0.;
iter[i] = 0;
doneFlag[i] = false;
h_ordered[i].resize(numEig, 0);
h_H_eigen[i] = (double*)malloc(maxIter*maxIter*sizeof(double));
hipMalloc(&d_H_eigen[i], maxIter*maxIter*sizeof(double));
h_diag[i] = (double*)malloc(h_a[i].size()*sizeof(double));
h_offdia[i] = (double*)malloc(h_b[i].size()*sizeof(double));
}
/*!
\endverbatim
*/
//---------Begin Lanczos iteration-----------------------------
bool allDone = false;
while( !allDone )
{
allDone = true;
for(int i = 0; i < howMany; i++)
{
hipblasSetStream(linAlgHandle, stream[i]);
hipsparseSetStream(sparseHandle, stream[i]);
hipStreamSynchronize(stream[i]);
/*!
If the current diagonalization is not complete, multiply H*V1 to get a new V2
\verbatim
*/
if (!doneFlag[i])
{
iter[i]++;
cusparseStatus[i] = hipsparseDcsrmv(sparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, dim[i], dim[i], numElem[i], &alpha[i], H_descr[i], Hamiltonian[i].vals, d_H_rowPtrs[i], Hamiltonian[i].cols, v1[i], &beta[i], v2[i]);
if( cusparseStatus[i] != 0)
{
cout<<"Error applying H to V1 in "<<iter[i]<<"th iteration"<<endl;
}
//cusparseStatus[i] = cusparseDhybmv(sparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha[i], H_descr[i], hyb_Ham[i], v1[i], &beta[i], v2[i]); // the Hamiltonian is applied here, in this gross expression
}
/*!
\endverbatim
*/
}
for(int i = 0; i < howMany; i++)
{
hipblasSetStream(linAlgHandle, stream[i]);
if (!doneFlag[i])
{
if (cusparseStatus[i] != HIPSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error applying the Hamiltonian in "<<iter[i]<<"th iteration!";
std::cout<<"Error: "<<cusparseStatus[i]<<std::endl;
}
cublasStatus[i] = hipblasDdot(linAlgHandle, dim[i], v1[i], 1, v2[i], 1, &dotTemp[i]);
//hipStreamSynchronize(stream[i]);
h_a[i].push_back(dotTemp[i]);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting v1 * v2 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
axpyTemp[i] = -1.*h_b[i][iter[i]];
cublasStatus[i] = hipblasDaxpy( linAlgHandle, dim[i], &axpyTemp[i], v0[i], 1, v2[i], 1);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting (d_b/d_a)*v0 + v1 in "<<iter[i]<<"th iteration!";
std::cout<<"Error: "<<cublasStatus[i]<<std::endl;
}
}
}
//--------Find next set of elements in Lanczos Hamiltonian----
for(int i = 0; i < howMany; i++)
{
hipblasSetStream(linAlgHandle, stream[i]);
//hipStreamSynchronize(stream[i]);
if (!doneFlag[i])
{
/*!
Similarly to setting up V1, V2 must be rescaled
\verbatim
*/
axpyTemp[i] = -1.*h_a[i][iter[i]];
cublasStatus[i] = hipblasDaxpy( linAlgHandle, dim[i], &axpyTemp[i], v1[i], 1, v2[i], 1);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting v2 + d_a*v1 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
cublasStatus[i] = hipblasDnrm2( linAlgHandle, dim[i], v2[i], 1, &normTemp[i]);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting norm of v2 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
h_b[i].push_back(normTemp[i]);
gamma[i] = 1./normTemp[i];
/*!
\endverbatim
*/
cublasStatus[i] = hipblasDscal(linAlgHandle, dim[i], &gamma[i], v2[i], 1);
if (cublasStatus[i] != HIPBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting 1/d_b * v2 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
}
}
for(int i = 0; i < howMany; i++)
{
hipblasSetStream(linAlgHandle, stream[i]);
//status[i] = hipStreamSynchronize(stream[i]);
if (status[i] != hipSuccess)
{
std::cout<<"Error syncing before copying v1 to v0: "<<hipGetErrorString(status[i])<<std::endl;
}
if (!doneFlag[i])
{
/*!
Reorthogonalization is performed on v2 to ensure that the excited states do not collapse into the groundstate
\verbatim
*/
for(int j = 0; j < iter[i] + 1; j++)
{
hipblasDdot(linAlgHandle, dim[i], v2[i], 1, lanczosStore[i][j], 1, &dotTemp[i]);
dotTemp[i] *= -1.;
hipblasDaxpy(linAlgHandle, dim[i], &dotTemp[i], lanczosStore[i][j], 1, v2[i], 1);
dotTemp[i] = 1. - dotTemp[i]*dotTemp[i];
hipblasDscal(linAlgHandle, dim[i], &dotTemp[i], v2[i], 1);
}
/*!
\endverbatim
The vectors are copied down one and stored to prepare for the next iteration
\verbatim
*/
status[i] = hipMemcpyAsync(v0[i], v1[i], dim[i]*sizeof(double), hipMemcpyDeviceToDevice, stream[i]);
if (status[i] != hipSuccess)
{
std::cout<<"Error copying v1 to v0: "<<hipGetErrorString(status[i])<<std::endl;
}
status[i] = hipMemcpyAsync(v1[i], v2[i], dim[i]*sizeof(double), hipMemcpyDeviceToDevice, stream[i]);
if (status[i] != hipSuccess)
{
std::cout<<"Error copying v2 to v1: "<<hipGetErrorString(status[i])<<std::endl;
}
status[i] = hipMalloc(&lanczosStore[i][iter[i] + 1], dim[i]*sizeof(double));
status[i] = hipMemcpyAsync(lanczosStore[i][iter[i] + 1], v2[i], dim[i]*sizeof(double), hipMemcpyDeviceToDevice, stream[i]);
/*!
\endverbatim
*/
}
}
for(int i = 0; i < howMany; i++)
{
if (!doneFlag[i] && iter[i] > 5)
{
//---Copy Lanczos matrix information for diagonalization-----
free(h_diag[i]);
free(h_offdia[i]);
h_diag[i] = (double*)malloc(h_a[i].size()*sizeof(double));
h_offdia[i] = (double*)malloc(h_b[i].size()*sizeof(double));
h_diag[i][0] = h_a[i][0];
for (int ii=1; ii<=iter[i]; ii++)
{
h_diag[i][ii] = h_a[i][ii];
h_offdia[i][ii] = h_b[i][ii];
h_offdia[i][ii-1] = h_offdia[i][ii];
}
h_offdia[i][iter[i]] = 0;
//hipStreamSynchronize(stream[i]);
//---------Diagonalize Lanczos matrix and check for convergence------------------
returned[i] = tqli(h_diag[i], h_offdia[i], iter[i] + 1, maxIter, h_H_eigen[i]);
status[i] = hipPeekAtLastError();
if( status[i] != hipSuccess)
{
cout<<"Error in identity! Error: "<<hipGetErrorString(status[i])<<endl;
}
//cout<<"Done tqli in "<<iter[i]<<"th iteration"<<endl;
hipMemcpyAsync(d_H_eigen[i], h_H_eigen[i], maxIter*maxIter*sizeof(double), hipMemcpyHostToDevice, stream[i]);
std::sort(h_diag[i], h_diag[i] + h_a[i].size());
for (int j = 0; j < numEig; j++)
{
h_ordered[i][j] = h_diag[i][j];
//cout<<h_ordered[i][j]<<" ";
}
//cout<<endl;
gsEnergy[i] = h_ordered[i][numEig - 1];
doneFlag[i] = (fabs(gsEnergy[i] - eigTemp[i]) < convReq);// && iter[i] > 10;// ? (iter[i] > 10) : false;
//doneFlag[i] = iter[i] == maxIter - 2;
eigTemp[i] = h_ordered[i][numEig - 1];
if (iter[i] == maxIter - 2) // have to use this or d_b will overflow
{
//this stuff here is used to resize the main arrays in the case that we aren't converging quickly enough
h_a[i].resize(2*maxIter);
h_b[i].resize(2*maxIter);
maxIter *= 2;
}
}
}
allDone = true;
for(int i = 0; i< howMany; i++)
{
allDone = (allDone && doneFlag[i]);
}
}
cout<<"Done finding the eigenvalues"<<endl;
//-------------Get groundstates------------------------------------------
for( int i = 0; i < howMany; i++)
{
//hipStreamSynchronize(stream[i]);
status[i] = hipMalloc(&groundstates[i], dim[i]*sizeof(double));
if( status[i] != hipSuccess)
{
cout<<"Error allocating space for eigenvectors: "<<hipGetErrorString(status[i]);
}
hipLaunchKernelGGL(( GetGroundstate), dim3(dim[i]/512 + 1), dim3(512), 0, stream[i], groundstates[i], lanczosStore[i], d_H_eigen[i], iter[i], dim[i]);
}
//--------------Free arrays to prevent memory leaks------------------------
for(int i = 0; i < howMany; i++)
{
for(int j = 0; j < numEig; j++)
{
eigenvalues[i][j] = h_ordered[i][j];
}
for(int j = 0; j < iter[i]; j++)
{
hipFree(lanczosStore[i][j]);
}
free(lanczosStore[i]);
hipFree(d_H_rowPtrs[i]);
hipFree(v0[i]);
hipFree(v1[i]);
hipFree(v2[i]);
hipFree(y[i]);
free(h_H_eigen[i]);
hipFree(d_H_eigen[i]);
free(host_v0[i]);
free(h_diag[i]);
free(h_offdia[i]);
//cusparseDestroyHybMat(hyb_Ham[i]);
}
free(gsEnergy);
free(eigTemp);
free(alpha);
free(beta);
free(returned);
free(iter);
free(doneFlag);
free(h_H_eigen);
free(d_H_eigen);
free(gamma);
free(y);
free(normTemp);
free(axpyTemp);
free(dotTemp);
free(host_v0);
free(v0);
free(v1);
free(v2);
free(h_diag);
free(h_offdia);
free(lanczosStore);
//free(dim);
free(d_H_rowPtrs);
cublasStatus[0] = hipblasDestroy(linAlgHandle);
//----------Output groundstate to file to check for correctness------
double* host_groundstate = (double*)malloc(dim[0]*sizeof(double));
std::ofstream fout;
fout.open("lanczos.log");
hipMemcpy(host_groundstate, groundstates[0], dim[0]*sizeof(double), hipMemcpyDeviceToHost);
cout<<"Outputting GS to file"<<endl;
for(int i = 0; i < dim[0] ; i++)
{
fout<<host_groundstate[i]<<std::endl;
}
fout.close();
free(host_groundstate);
free(dim);
if (cublasStatus[0] != HIPBLAS_STATUS_SUCCESS)
{
printf("CUBLAS failed to shut down properly! \n");
}
cusparseStatus[0] = hipsparseDestroy(sparseHandle);
if (cusparseStatus[0] != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE failed to release handle! \n");
}
/*if (iter == 1) {
std::ofstream fout;
fout.open("lanczos.log");
//fout<<normTemp<<std::endl;
fout<<std::endl;
//int* h_H_vals = (int*)malloc((dim+1)*sizeof(int));
hipMemcpy(host_v0, v2, dim*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost);
for(int i = 0; i < dim ; i++){
fout<<host_v0[i].x<<std::endl;
}
fout.close();
}*/
}
// things left to do:
// write a thing (separate file) to call routines to find expectation values, should be faster on GPU
// make the tqli thing better!
int tqli(double* d, double* e, int n, int maxIter, double *z)
{
int m,l,iter,i,k;
double s,r,p,g,f,dd,c,b;
for (l=0; l<n; l++)
{
iter=0;
do
{
for (m=l; m<n-1; m++)
{
dd=fabs(d[m])+fabs(d[m+1]);
if (fabs(e[m])+dd == dd) break;
}
if (m!=l)
{
if (iter++ == 60)
{
std::cout <<"Too many iterations in tqli() \n";
return 0;
}
g=(d[l+1]-d[l])/(2.0*e[l]);
r=sqrt((g*g)+1.0);
g=d[m]-d[l]+e[l]/(g+SIGN(r,g));
s=c=1.0;
p=0.0;
for (i=m-1; i>=l; i--)
{
f=s*e[i];
b=c*e[i];
if (fabs(f) >= fabs(g))
{
c=g/f;
r=sqrt((c*c)+1.0);
e[i + 1]=f*r;
c *= (s=1.0/r);
}
else
{
s=f/g;
r=sqrt((s*s)+1.0);
e[i+1]=g*r;
s *= (c=1.0/r);
}
g=d[i+1]-p;
r=(d[i]-g)*s+2.0*c*b;
p=s*r;
d[i+1]=g+p;
g=c*r-b;
/*EVECTS*/
for (k=0; k<n; k++)
{
f=z[k * n + i+1];
z[k*n + i+1]=s*z[k*n + i]+c*f;
z[k*n + i ]=c*z[k*n+i]-s*f;
}
}
d[l]=d[l]-p;
e[l]=g;
e[m]=0.0;
}
}
while (m!=l);
}
return 1;
}
double pythag(double a, double b)
{
double absa, absb;
absa=fabs(a);
absb=fabs(b);
if (absa > absb) return absa*sqrt(1.0+(absb/absa)*(absb/absa));
else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+(absa/absb)*(absa/absb)));
}
__global__ void GetGroundstate(double* groundstates, double** lanczosStore, double* H_eigen, int mat_dim, int vec_dim)
{
int element = blockIdx.x*blockDim.x + threadIdx.x;
if ( element < vec_dim )
{
groundstates[element] = H_eigen[0]*lanczosStore[0][element];
for (int lancIter = 1; lancIter < mat_dim; lancIter++)
{
groundstates[element] += H_eigen[lancIter]*lanczosStore[lancIter][element];
}
}
};
| 93fea0a7a64f13d4856b4d8a510ba010108e3cb0.cu | /*!
\file lanczos.cu
\brief Controller code for general Lanczos diagonalization
*/
// Katharine Hyatt
// A set of functions to implement the Lanczos method for a generic Hamiltonian
// Based on the codes Lanczos_07.cpp and Lanczos07.h by Roger Melko
//-------------------------------------------------------------------------------
#include"lanczos.h"
/*Function lanczos: takes a hermitian matrix H, tridiagonalizes it, and finds the n smallest eigenvalues - this version only returns eigenvalues, not
eigenvectors.
---------------------------------------------------------------------------------------------------------------------------------------------------
Input: howMany, the number of Hamiltonians to process
numElem - the number of nonzero elements per matrix
Hamiltonian - an array of Hamiltonians, each element being a custom struct containing the rows, cols, and vals in COO format as well as the dimensions
maxIter, the starting number of iterations we'll try
numEig, the number of eigenvalues we're interested in seeing
convReq, the convergence we'd like to see
---------------------------------------------------------------------------------------------------------------------------------------------------
Output: h_ordered, the array of the numEig smallest eigenvalues, ordered from smallest to largest
---------------------------------------------------------------------------------------------------------------------------------------------------
*/
__host__ void lanczos(const int howMany, const int* numElem, d_hamiltonian*& Hamiltonian, double**& groundstates, double**& eigenvalues, int maxIter, const int numEig, const double convReq)
{
//----------Initializing CUBLAS and CUSPARSE libraries as well as storage on GPU----------------
int* dim = (int*)malloc(howMany*sizeof(int));
for(int i = 0; i < howMany; i++)
{
dim[i] = Hamiltonian[i].sectorDim;
}
/*!
First it is necessary to create handles, streams, and to initialize the two CUDA libraries which will be used:
\verbatim
*/
cudaStream_t stream[howMany];
cudaError_t status[howMany];
cublasStatus_t cublasStatus[howMany];
cublasHandle_t linAlgHandle;
status[0] = cudaPeekAtLastError();
if (status[0] != cudaSuccess)
{
cout<<"Error before lanczos: "<<cudaGetErrorString(status[0])<<endl;
}
cublasStatus[0] = cublasCreate(&linAlgHandle);
if (cublasStatus[0] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Initializing CUBLAS failed! Error: "<<cublasStatus[0]<<std::endl;
}
cusparseHandle_t sparseHandle;
cusparseStatus_t cusparseStatus[howMany];
cusparseStatus[0] = cusparseCreate(&sparseHandle);
if (cusparseStatus[0] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Failed to initialize CUSPARSE! Error: "<<cusparseStatus[0]<<std::endl;
}
/*!
\endverbatim
The function also transforms the Hamiltonian into CSR format so that CUSPARSE can use it for matrix-vector multiplications.
\verbatim
*/
cusparseMatDescr_t H_descr[howMany];
for(int i = 0; i<howMany; i++)
{
cusparseStatus[i] = cusparseCreateMatDescr(&H_descr[i]);
if (cusparseStatus[i] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error creating matrix description: "<<cusparseStatus[i]<<std::endl;
}
cusparseStatus[i] = cusparseSetMatType(H_descr[i], CUSPARSE_MATRIX_TYPE_GENERAL);
if (cusparseStatus[i] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error setting matrix type: "<<cusparseStatus[i]<<std::endl;
}
cusparseStatus[i] = cusparseSetMatIndexBase(H_descr[i], CUSPARSE_INDEX_BASE_ZERO);
if (cusparseStatus[i] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error setting matrix index base: "<<cusparseStatus[i]<<std::endl;
}
}
int** d_H_rowPtrs;
d_H_rowPtrs = (int**)malloc(howMany*sizeof(int*));
for(int i = 0; i < howMany; i++)
{
status[i] = cudaStreamCreate(&stream[i]);
if (status[i] != cudaSuccess)
{
std::cout<<"Error creating streams: "<<cudaGetErrorString(status[i])<<std::endl;
}
status[i] = cudaMalloc(&d_H_rowPtrs[i], (dim[i] + 1)*sizeof(int));
if (status[i] != cudaSuccess)
{
std::cout<<"Error allocating d_H_rowPtrs: "<<cudaGetErrorString(status[i])<<std::endl;
}
}
//---------------Converting from COO to CSR format for Hamiltonians----------------
//cusparseHybMat_t hyb_Ham[howMany];
for(int i = 0; i < howMany; i++)
{
/*cusparseStatus[i] = cusparseCreateHybMat(&hyb_Ham[i]);
if (cusparseStatus[i] != cusparseStatus_SUCCESS)
{
std::cout<<"Error creating HYB matrix: "<<cusparseStatus[i]<<std::endl;
}
cout<<"Done creating HYB matrices"<<endl;*/
cusparseStatus[i] = cusparseSetStream(sparseHandle, stream[i]);
if (cusparseStatus[i] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error switching streams: "<<cusparseStatus[i]<<std::endl;
}
status[i] = cudaPeekAtLastError();
if (status[i] != cudaSuccess)
{
std::cout<<"Error synchronizing stream: "<<cudaGetErrorString(status[i])<<std::endl;
}
cusparseStatus[i] = cusparseXcoo2csr(sparseHandle, Hamiltonian[i].rows, numElem[i], dim[i], d_H_rowPtrs[i], CUSPARSE_INDEX_BASE_ZERO);
if (cusparseStatus[i] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error converting to CSR: "<<cusparseStatus[i]<<std::endl;
}
status[i] = cudaPeekAtLastError();
if (status[i] != cudaSuccess)
{
std::cout<<"Error synchronizing stream: "<<cudaGetErrorString(status[i])<<std::endl;
}
/*cusparseStatus[i] = cusparseDcsr2hyb(sparseHandle, dim[i], dim[i], H_descr[i], Hamiltonian[i].vals, d_H_rowPtrs[i], Hamiltonian[i].cols, hyb_Ham[i], 0, CUSPARSE_HYB_PARTITION_AUTO);
if (cusparseStatus[i] != cusparseStatus_SUCCESS)
{
std::cout<<"Error converting to HYB: "<<cusparseStatus[i]<<std::endl;
}*/
}
/*!
\endverbatim
status[0] = cudaPeekAtLastError();
if (status[0] != cudaSuccess)
{
std::cout<<"Error before thread sync: "<<cudaGetErrorString(status[0])<<std::endl;
}
*/
//----------------Create arrays to hold current Lanczos vectors----------
vector< vector<double> > h_a(howMany);
vector< vector<double> > h_b(howMany);
//Making the "random" starting vector
/*!
The function then sets up Lanczos diagonalization by initializing a random starting vector on the CPU, creating storage for the Lanczos vectors on the GPU, and copying this starting vector across.
\verbatim
*/
double** v0 = (double**)malloc(howMany*sizeof(double*));
double** v1 = (double**)malloc(howMany*sizeof(double*));
double** v2 = (double**)malloc(howMany*sizeof(double*));
double*** lanczosStore = (double***)malloc(howMany*sizeof(double**));
double** host_v0 = (double**)malloc(howMany*sizeof(double*));
for(int i = 0; i < howMany; i++)
{
status[i] = cudaMalloc(&v0[i], dim[i]*sizeof(double));
if (status[i] != cudaSuccess)
{
std::cout<<"Error creating storage for v0 on GPU: "<<cudaGetErrorString(status[i])<<std::endl;
}
status[i] = cudaMalloc(&v1[i], dim[i]*sizeof(double));
if (status[i] != cudaSuccess)
{
std::cout<<"Error creating storage for v1 on GPU: "<<cudaGetErrorString(status[i])<<std::endl;
}
status[i] = cudaMalloc(&v2[i], dim[i]*sizeof(double));
if (status[i] != cudaSuccess)
{
std::cout<<"Error creating storage for v2 on GPU: "<<cudaGetErrorString(status[i])<<std::endl;
}
lanczosStore[i] = (double**)malloc(maxIter*sizeof(double*));
host_v0[i] = (double*)malloc(dim[i]*sizeof(double));
for(int j = 0; j<dim[i]; j++)
{
host_v0[i][j] = 0.;
if (j%4 == 0) host_v0[i][j] = 1. ;
else if (j%5 == 0) host_v0[i][j] = -2.;
else if (j%7 == 0) host_v0[i][j] = 3.;
else if (j%9 == 0) host_v0[i][j] = -4.;
}
status[i] = cudaMalloc(&lanczosStore[i][0], dim[i]*sizeof(double));
if (status[i] != cudaSuccess)
{
std::cout<<"Error creating storage for v0 in lanczosStore: "<<cudaGetErrorString(status[i])<<std::endl;
}
status[i] = cudaMemcpyAsync(v0[i], host_v0[i], dim[i]*sizeof(double), cudaMemcpyHostToDevice, stream[i]);
if (status[i] != cudaSuccess)
{
std::cout<<"Error copying v0 to the device: "<<cudaGetErrorString(status[i])<<std::endl;
}
}
/*!
\endverbatim
First, storage variables are created to hold the results of the CUBLAS functions.
\verbatim
*/
//--------------Create dummy variables for CUBLAS functions----------------
double* normTemp = (double*)malloc(howMany*sizeof(double));
double* alpha = (double*)malloc(howMany*sizeof(double));
double* beta = (double*)malloc(howMany*sizeof(double));
double* dotTemp = (double*)malloc(howMany*sizeof(double));
double* axpyTemp = (double*)malloc(howMany*sizeof(double));
double** y = (double**)malloc(howMany*sizeof(double*));
/*!
\endverbatim
*/
//--------------Generate first Lanczos vector--------------------------
for(int i = 0; i < howMany; i++)
{
cublasSetStream(linAlgHandle, stream[i]);
cusparseSetStream(sparseHandle, stream[i]);
/*!
Then the initial multiplication to generate the first Lanczos vector is performed.
\verbatim
*/
cublasStatus[i] = cublasDnrm2(linAlgHandle, dim[i], v0[i], 1, &normTemp[i]);
normTemp[i] = 1./normTemp[i];
cublasStatus[i] = cublasDscal(linAlgHandle, dim[i], &normTemp[i], v0[i], 1);
alpha[i] = 1.;
beta[i] = 0.;
cudaMemcpyAsync(lanczosStore[i][0], v0[i], dim[i]*sizeof(double), cudaMemcpyDeviceToDevice, stream[i]);
//-----------Apply Hamiltonian to V0--------------------
cusparseStatus[i] = cusparseDcsrmv(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, dim[i], dim[i], numElem[i], &alpha[i], H_descr[i], Hamiltonian[i].vals, d_H_rowPtrs[i], Hamiltonian[i].cols, v0[i], &beta[i], v1[i]); // the Hamiltonian is applied here
/*!
\endverbatim
*/
if (cusparseStatus[i] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Getting V1 = H*V0 failed! Error: ";
std::cout<<cusparseStatus[i]<<std::endl;
}
//cudaStreamSynchronize(stream[i]);
if (cudaPeekAtLastError() != 0 )
{
std::cout<<"Getting V1 = H*V0 failed! Error: ";
std::cout<<cudaGetErrorString(cudaPeekAtLastError())<<std::endl;
}
}
for(int i = 0; i < howMany; i++)
{
cublasSetStream(linAlgHandle, stream[i]);
dotTemp[i] = 0.;
cublasStatus[i] = cublasDdot(linAlgHandle, dim[i], v1[i], 1, v0[i], 1, &dotTemp[i]);
h_a[i].push_back(dotTemp[i]);
h_b[i].push_back(0.);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Getting d_a[0] failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
//cudaStreamSynchronize(stream[i]);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Getting h_a[0] failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (status[i] != cudaSuccess)
{
std::cout<<"Memory allocation of y dummy vector failed! Error:";
std::cout<<cudaGetErrorString( status[i] )<<std::endl;
}
status[i] = cudaMalloc(&y[i], dim[i]*sizeof(double));
/*!
The new vector must be rescaled and stored before Lanczos iteration can begin.
\verbatim
*/
cublasStatus[i] = cublasDscal(linAlgHandle, dim[i], &beta[i], y[i], 1);
//cudaStreamSynchronize(stream[i]);
axpyTemp[i] = -1*h_a[i][0];
cublasStatus[i] = cublasDaxpy(linAlgHandle, 0, &axpyTemp[i], v0[i], 1, v1[i], 1);
//cudaStreamSynchronize(stream[i]);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"V1 = V1 - alpha*V0 failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (cudaPeekAtLastError() != 0 )
{
std::cout<<"Getting V1 = V1 - a*V0 failed! Error: ";
std::cout<<cudaGetErrorString(cudaPeekAtLastError())<<std::endl;
}
//---------Normalize V1 and copy it to Lanczos storage-----------
normTemp[i] = 0.;
cublasStatus[i] = cublasDnrm2(linAlgHandle, dim[i], v1[i], 1, &normTemp[i]); //this is slow for some reason
//cudaStreamSynchronize(stream[i]);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Getting the norm of v1 failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (cudaPeekAtLastError() != 0 )
{
std::cout<<"Getting nrm(V1) failed! Error: ";
std::cout<<cudaGetErrorString(cudaPeekAtLastError())<<std::endl;
}
}
double* gamma = (double*)malloc(howMany*sizeof(double));
for(int i = 0; i < howMany; i++)
{
cublasSetStream(linAlgHandle, stream[i]);
h_b[i].push_back(normTemp[i]);
normTemp[i] = 1./normTemp[i];
gamma[i] = 1./h_b[i][1]; //alpha = 1/beta in v1 = v1 - alpha*v0
cublasStatus[i] = cublasDscal(linAlgHandle, dim[i], &normTemp[i], v1[i], 1);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Normalizing v1 failed! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
if (cudaPeekAtLastError() != 0 )
{
std::cout<<"Normalizing V1 failed! Error: ";
std::cout<<cudaGetErrorString(cudaPeekAtLastError())<<std::endl;
}
cudaMalloc(&lanczosStore[i][1], dim[i]*sizeof(double));
cudaMemcpyAsync(lanczosStore[i][1], v1[i], dim[i]*sizeof(double), cudaMemcpyDeviceToDevice, stream[i]);
}
/*!
\endverbatim
*/
/*!
Storage space for the tridiagonal matrix is created and flags are initialized to track progress:
\verbatim
*/
double* gsEnergy = (double*)malloc(howMany*sizeof(double));
double* eigTemp = (double*)malloc(howMany*sizeof(double));
int* returned = (int*)malloc(howMany*sizeof(int));
int* iter = (int*)malloc(howMany*sizeof(int));
bool* doneFlag = (bool*)malloc(howMany*sizeof(bool));
double** h_H_eigen = (double**)malloc(howMany*sizeof(double*));
double** d_H_eigen = (double**)malloc(howMany*sizeof(double*));
double** h_diag = (double**)malloc(howMany*sizeof(double*));
double** h_offdia = (double**)malloc(howMany*sizeof(double*));
vector< vector < double > > h_ordered(howMany);
/*!
\endverbatim
*/
/*!
The flags and storage are initialized for the interations
\verbatim
*/
for(int i = 0; i<howMany; i++)
{
gsEnergy[i] = 1.;
eigTemp[i] = 0.;
iter[i] = 0;
doneFlag[i] = false;
h_ordered[i].resize(numEig, 0);
h_H_eigen[i] = (double*)malloc(maxIter*maxIter*sizeof(double));
cudaMalloc(&d_H_eigen[i], maxIter*maxIter*sizeof(double));
h_diag[i] = (double*)malloc(h_a[i].size()*sizeof(double));
h_offdia[i] = (double*)malloc(h_b[i].size()*sizeof(double));
}
/*!
\endverbatim
*/
//---------Begin Lanczos iteration-----------------------------
bool allDone = false;
while( !allDone )
{
allDone = true;
for(int i = 0; i < howMany; i++)
{
cublasSetStream(linAlgHandle, stream[i]);
cusparseSetStream(sparseHandle, stream[i]);
cudaStreamSynchronize(stream[i]);
/*!
If the current diagonalization is not complete, multiply H*V1 to get a new V2
\verbatim
*/
if (!doneFlag[i])
{
iter[i]++;
cusparseStatus[i] = cusparseDcsrmv(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, dim[i], dim[i], numElem[i], &alpha[i], H_descr[i], Hamiltonian[i].vals, d_H_rowPtrs[i], Hamiltonian[i].cols, v1[i], &beta[i], v2[i]);
if( cusparseStatus[i] != 0)
{
cout<<"Error applying H to V1 in "<<iter[i]<<"th iteration"<<endl;
}
//cusparseStatus[i] = cusparseDhybmv(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha[i], H_descr[i], hyb_Ham[i], v1[i], &beta[i], v2[i]); // the Hamiltonian is applied here, in this gross expression
}
/*!
\endverbatim
*/
}
for(int i = 0; i < howMany; i++)
{
cublasSetStream(linAlgHandle, stream[i]);
if (!doneFlag[i])
{
if (cusparseStatus[i] != CUSPARSE_STATUS_SUCCESS)
{
std::cout<<"Error applying the Hamiltonian in "<<iter[i]<<"th iteration!";
std::cout<<"Error: "<<cusparseStatus[i]<<std::endl;
}
cublasStatus[i] = cublasDdot(linAlgHandle, dim[i], v1[i], 1, v2[i], 1, &dotTemp[i]);
//cudaStreamSynchronize(stream[i]);
h_a[i].push_back(dotTemp[i]);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting v1 * v2 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
axpyTemp[i] = -1.*h_b[i][iter[i]];
cublasStatus[i] = cublasDaxpy( linAlgHandle, dim[i], &axpyTemp[i], v0[i], 1, v2[i], 1);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting (d_b/d_a)*v0 + v1 in "<<iter[i]<<"th iteration!";
std::cout<<"Error: "<<cublasStatus[i]<<std::endl;
}
}
}
//--------Find next set of elements in Lanczos Hamiltonian----
for(int i = 0; i < howMany; i++)
{
cublasSetStream(linAlgHandle, stream[i]);
//cudaStreamSynchronize(stream[i]);
if (!doneFlag[i])
{
/*!
Similarly to setting up V1, V2 must be rescaled
\verbatim
*/
axpyTemp[i] = -1.*h_a[i][iter[i]];
cublasStatus[i] = cublasDaxpy( linAlgHandle, dim[i], &axpyTemp[i], v1[i], 1, v2[i], 1);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting v2 + d_a*v1 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
cublasStatus[i] = cublasDnrm2( linAlgHandle, dim[i], v2[i], 1, &normTemp[i]);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting norm of v2 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
h_b[i].push_back(normTemp[i]);
gamma[i] = 1./normTemp[i];
/*!
\endverbatim
*/
cublasStatus[i] = cublasDscal(linAlgHandle, dim[i], &gamma[i], v2[i], 1);
if (cublasStatus[i] != CUBLAS_STATUS_SUCCESS)
{
std::cout<<"Error getting 1/d_b * v2 in "<<iter[i]<<"th iteration! Error: ";
std::cout<<cublasStatus[i]<<std::endl;
}
}
}
for(int i = 0; i < howMany; i++)
{
cublasSetStream(linAlgHandle, stream[i]);
//status[i] = cudaStreamSynchronize(stream[i]);
if (status[i] != cudaSuccess)
{
std::cout<<"Error syncing before copying v1 to v0: "<<cudaGetErrorString(status[i])<<std::endl;
}
if (!doneFlag[i])
{
/*!
Reorthogonalization is performed on v2 to ensure that the excited states do not collapse into the groundstate
\verbatim
*/
for(int j = 0; j < iter[i] + 1; j++)
{
cublasDdot(linAlgHandle, dim[i], v2[i], 1, lanczosStore[i][j], 1, &dotTemp[i]);
dotTemp[i] *= -1.;
cublasDaxpy(linAlgHandle, dim[i], &dotTemp[i], lanczosStore[i][j], 1, v2[i], 1);
dotTemp[i] = 1. - dotTemp[i]*dotTemp[i];
cublasDscal(linAlgHandle, dim[i], &dotTemp[i], v2[i], 1);
}
/*!
\endverbatim
The vectors are copied down one and stored to prepare for the next iteration
\verbatim
*/
status[i] = cudaMemcpyAsync(v0[i], v1[i], dim[i]*sizeof(double), cudaMemcpyDeviceToDevice, stream[i]);
if (status[i] != cudaSuccess)
{
std::cout<<"Error copying v1 to v0: "<<cudaGetErrorString(status[i])<<std::endl;
}
status[i] = cudaMemcpyAsync(v1[i], v2[i], dim[i]*sizeof(double), cudaMemcpyDeviceToDevice, stream[i]);
if (status[i] != cudaSuccess)
{
std::cout<<"Error copying v2 to v1: "<<cudaGetErrorString(status[i])<<std::endl;
}
status[i] = cudaMalloc(&lanczosStore[i][iter[i] + 1], dim[i]*sizeof(double));
status[i] = cudaMemcpyAsync(lanczosStore[i][iter[i] + 1], v2[i], dim[i]*sizeof(double), cudaMemcpyDeviceToDevice, stream[i]);
/*!
\endverbatim
*/
}
}
for(int i = 0; i < howMany; i++)
{
if (!doneFlag[i] && iter[i] > 5)
{
//---Copy Lanczos matrix information for diagonalization-----
free(h_diag[i]);
free(h_offdia[i]);
h_diag[i] = (double*)malloc(h_a[i].size()*sizeof(double));
h_offdia[i] = (double*)malloc(h_b[i].size()*sizeof(double));
h_diag[i][0] = h_a[i][0];
for (int ii=1; ii<=iter[i]; ii++)
{
h_diag[i][ii] = h_a[i][ii];
h_offdia[i][ii] = h_b[i][ii];
h_offdia[i][ii-1] = h_offdia[i][ii];
}
h_offdia[i][iter[i]] = 0;
//cudaStreamSynchronize(stream[i]);
//---------Diagonalize Lanczos matrix and check for convergence------------------
returned[i] = tqli(h_diag[i], h_offdia[i], iter[i] + 1, maxIter, h_H_eigen[i]);
status[i] = cudaPeekAtLastError();
if( status[i] != cudaSuccess)
{
cout<<"Error in identity! Error: "<<cudaGetErrorString(status[i])<<endl;
}
//cout<<"Done tqli in "<<iter[i]<<"th iteration"<<endl;
cudaMemcpyAsync(d_H_eigen[i], h_H_eigen[i], maxIter*maxIter*sizeof(double), cudaMemcpyHostToDevice, stream[i]);
std::sort(h_diag[i], h_diag[i] + h_a[i].size());
for (int j = 0; j < numEig; j++)
{
h_ordered[i][j] = h_diag[i][j];
//cout<<h_ordered[i][j]<<" ";
}
//cout<<endl;
gsEnergy[i] = h_ordered[i][numEig - 1];
doneFlag[i] = (fabs(gsEnergy[i] - eigTemp[i]) < convReq);// && iter[i] > 10;// ? (iter[i] > 10) : false;
//doneFlag[i] = iter[i] == maxIter - 2;
eigTemp[i] = h_ordered[i][numEig - 1];
if (iter[i] == maxIter - 2) // have to use this or d_b will overflow
{
//this stuff here is used to resize the main arrays in the case that we aren't converging quickly enough
h_a[i].resize(2*maxIter);
h_b[i].resize(2*maxIter);
maxIter *= 2;
}
}
}
allDone = true;
for(int i = 0; i< howMany; i++)
{
allDone = (allDone && doneFlag[i]);
}
}
cout<<"Done finding the eigenvalues"<<endl;
//-------------Get groundstates------------------------------------------
for( int i = 0; i < howMany; i++)
{
//cudaStreamSynchronize(stream[i]);
status[i] = cudaMalloc(&groundstates[i], dim[i]*sizeof(double));
if( status[i] != cudaSuccess)
{
cout<<"Error allocating space for eigenvectors: "<<cudaGetErrorString(status[i]);
}
GetGroundstate<<<dim[i]/512 + 1, 512, 0, stream[i]>>>(groundstates[i], lanczosStore[i], d_H_eigen[i], iter[i], dim[i]);
}
//--------------Free arrays to prevent memory leaks------------------------
for(int i = 0; i < howMany; i++)
{
for(int j = 0; j < numEig; j++)
{
eigenvalues[i][j] = h_ordered[i][j];
}
for(int j = 0; j < iter[i]; j++)
{
cudaFree(lanczosStore[i][j]);
}
free(lanczosStore[i]);
cudaFree(d_H_rowPtrs[i]);
cudaFree(v0[i]);
cudaFree(v1[i]);
cudaFree(v2[i]);
cudaFree(y[i]);
free(h_H_eigen[i]);
cudaFree(d_H_eigen[i]);
free(host_v0[i]);
free(h_diag[i]);
free(h_offdia[i]);
//cusparseDestroyHybMat(hyb_Ham[i]);
}
free(gsEnergy);
free(eigTemp);
free(alpha);
free(beta);
free(returned);
free(iter);
free(doneFlag);
free(h_H_eigen);
free(d_H_eigen);
free(gamma);
free(y);
free(normTemp);
free(axpyTemp);
free(dotTemp);
free(host_v0);
free(v0);
free(v1);
free(v2);
free(h_diag);
free(h_offdia);
free(lanczosStore);
//free(dim);
free(d_H_rowPtrs);
cublasStatus[0] = cublasDestroy(linAlgHandle);
//----------Output groundstate to file to check for correctness------
double* host_groundstate = (double*)malloc(dim[0]*sizeof(double));
std::ofstream fout;
fout.open("lanczos.log");
cudaMemcpy(host_groundstate, groundstates[0], dim[0]*sizeof(double), cudaMemcpyDeviceToHost);
cout<<"Outputting GS to file"<<endl;
for(int i = 0; i < dim[0] ; i++)
{
fout<<host_groundstate[i]<<std::endl;
}
fout.close();
free(host_groundstate);
free(dim);
if (cublasStatus[0] != CUBLAS_STATUS_SUCCESS)
{
printf("CUBLAS failed to shut down properly! \n");
}
cusparseStatus[0] = cusparseDestroy(sparseHandle);
if (cusparseStatus[0] != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE failed to release handle! \n");
}
/*if (iter == 1) {
std::ofstream fout;
fout.open("lanczos.log");
//fout<<normTemp<<std::endl;
fout<<std::endl;
//int* h_H_vals = (int*)malloc((dim+1)*sizeof(int));
cudaMemcpy(host_v0, v2, dim*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
for(int i = 0; i < dim ; i++){
fout<<host_v0[i].x<<std::endl;
}
fout.close();
}*/
}
// things left to do:
// write a thing (separate file) to call routines to find expectation values, should be faster on GPU
// make the tqli thing better!
int tqli(double* d, double* e, int n, int maxIter, double *z)
{
int m,l,iter,i,k;
double s,r,p,g,f,dd,c,b;
for (l=0; l<n; l++)
{
iter=0;
do
{
for (m=l; m<n-1; m++)
{
dd=fabs(d[m])+fabs(d[m+1]);
if (fabs(e[m])+dd == dd) break;
}
if (m!=l)
{
if (iter++ == 60)
{
std::cout <<"Too many iterations in tqli() \n";
return 0;
}
g=(d[l+1]-d[l])/(2.0*e[l]);
r=sqrt((g*g)+1.0);
g=d[m]-d[l]+e[l]/(g+SIGN(r,g));
s=c=1.0;
p=0.0;
for (i=m-1; i>=l; i--)
{
f=s*e[i];
b=c*e[i];
if (fabs(f) >= fabs(g))
{
c=g/f;
r=sqrt((c*c)+1.0);
e[i + 1]=f*r;
c *= (s=1.0/r);
}
else
{
s=f/g;
r=sqrt((s*s)+1.0);
e[i+1]=g*r;
s *= (c=1.0/r);
}
g=d[i+1]-p;
r=(d[i]-g)*s+2.0*c*b;
p=s*r;
d[i+1]=g+p;
g=c*r-b;
/*EVECTS*/
for (k=0; k<n; k++)
{
f=z[k * n + i+1];
z[k*n + i+1]=s*z[k*n + i]+c*f;
z[k*n + i ]=c*z[k*n+i]-s*f;
}
}
d[l]=d[l]-p;
e[l]=g;
e[m]=0.0;
}
}
while (m!=l);
}
return 1;
}
double pythag(double a, double b)
{
double absa, absb;
absa=fabs(a);
absb=fabs(b);
if (absa > absb) return absa*sqrt(1.0+(absb/absa)*(absb/absa));
else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+(absa/absb)*(absa/absb)));
}
__global__ void GetGroundstate(double* groundstates, double** lanczosStore, double* H_eigen, int mat_dim, int vec_dim)
{
int element = blockIdx.x*blockDim.x + threadIdx.x;
if ( element < vec_dim )
{
groundstates[element] = H_eigen[0]*lanczosStore[0][element];
for (int lancIter = 1; lancIter < mat_dim; lancIter++)
{
groundstates[element] += H_eigen[lancIter]*lanczosStore[lancIter][element];
}
}
};
|
29cdf0a894048373e354c4e409a9e06aadb7e085.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/square_sum_kernel_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <hipcub/hipcub.hpp>
namespace oneflow {
namespace {
template<typename T, bool ONE_BLOCK>
__global__ void SquareSumGpu(int64_t n, const T* x, T* y) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i] * x[i]; }
typedef hipcub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) {
if (ONE_BLOCK) {
*y = b_sum;
} else {
cuda::atomic::Add(y, b_sum);
}
}
}
constexpr int64_t kMultiSquareSumMaxSize = 64;
template<typename T>
struct MultiSquareSumParams {
SquareSumParam<T> params[kMultiSquareSumMaxSize];
int32_t size;
};
template<typename T>
__global__ void MultiSquareSumGpu(const MultiSquareSumParams<T> params, T* y) {
T t_sum = 0;
for (int i = 0; i < params.size; ++i) {
const SquareSumParam<T> param = params.params[i];
CUDA_1D_KERNEL_LOOP(j, param.count) { t_sum += param.ptr[j] * param.ptr[j]; }
}
typedef hipcub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { cuda::atomic::Add(y, b_sum); }
}
} // namespace
template<typename T>
struct SquareSumKernelUtil<DeviceType::kGPU, T> {
static void SquareSum(DeviceCtx* ctx, int64_t n, const T* x, T* y) {
const int32_t num_blocks = BlocksNum4ThreadsNum(n);
CHECK_GE(num_blocks, 0);
if (num_blocks == 0) {
Memset<DeviceType::kGPU>(ctx, y, 0, sizeof(T));
} else if (num_blocks == 1) {
hipLaunchKernelGGL(( SquareSumGpu<T, true>), dim3(1), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y);
} else {
Memset<DeviceType::kGPU>(ctx, y, 0, sizeof(T));
hipLaunchKernelGGL(( SquareSumGpu<T, false>)
, dim3(num_blocks), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y);
}
}
static void MultiSquareSum(DeviceCtx* ctx, const std::vector<SquareSumParam<T>>& params, T* y) {
Memset<DeviceType::kGPU>(ctx, y, 0, sizeof(T));
for (int64_t start = 0; start < params.size(); start += kMultiSquareSumMaxSize) {
MultiSquareSumParams<T> gpu_params{};
int64_t max_count = 0;
gpu_params.size = std::min<int64_t>(start + kMultiSquareSumMaxSize, params.size()) - start;
for (int64_t i = 0; i < gpu_params.size; ++i) {
gpu_params.params[i] = params[start + i];
max_count = ::max(max_count, gpu_params.params[i].count);
}
hipLaunchKernelGGL(( MultiSquareSumGpu<T>)
, dim3(BlocksNum4ThreadsNum(max_count)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
gpu_params, y);
}
}
};
#define INSTANTIATE_SQUARE_SUM_KERNEL_UTIL_GPU(type_cpp, type_proto) \
template struct SquareSumKernelUtil<DeviceType::kGPU, type_cpp>;
OF_PP_FOR_EACH_TUPLE(INSTANTIATE_SQUARE_SUM_KERNEL_UTIL_GPU, FLOATING_DATA_TYPE_SEQ);
#undef INSTANTIATE_SQUARE_SUM_KERNEL_UTIL_GPU
} // namespace oneflow
| 29cdf0a894048373e354c4e409a9e06aadb7e085.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/square_sum_kernel_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <cub/cub.cuh>
namespace oneflow {
namespace {
template<typename T, bool ONE_BLOCK>
__global__ void SquareSumGpu(int64_t n, const T* x, T* y) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i] * x[i]; }
typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) {
if (ONE_BLOCK) {
*y = b_sum;
} else {
cuda::atomic::Add(y, b_sum);
}
}
}
constexpr int64_t kMultiSquareSumMaxSize = 64;
template<typename T>
struct MultiSquareSumParams {
SquareSumParam<T> params[kMultiSquareSumMaxSize];
int32_t size;
};
template<typename T>
__global__ void MultiSquareSumGpu(const MultiSquareSumParams<T> params, T* y) {
T t_sum = 0;
for (int i = 0; i < params.size; ++i) {
const SquareSumParam<T> param = params.params[i];
CUDA_1D_KERNEL_LOOP(j, param.count) { t_sum += param.ptr[j] * param.ptr[j]; }
}
typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { cuda::atomic::Add(y, b_sum); }
}
} // namespace
template<typename T>
struct SquareSumKernelUtil<DeviceType::kGPU, T> {
static void SquareSum(DeviceCtx* ctx, int64_t n, const T* x, T* y) {
const int32_t num_blocks = BlocksNum4ThreadsNum(n);
CHECK_GE(num_blocks, 0);
if (num_blocks == 0) {
Memset<DeviceType::kGPU>(ctx, y, 0, sizeof(T));
} else if (num_blocks == 1) {
SquareSumGpu<T, true><<<1, kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y);
} else {
Memset<DeviceType::kGPU>(ctx, y, 0, sizeof(T));
SquareSumGpu<T, false>
<<<num_blocks, kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y);
}
}
static void MultiSquareSum(DeviceCtx* ctx, const std::vector<SquareSumParam<T>>& params, T* y) {
Memset<DeviceType::kGPU>(ctx, y, 0, sizeof(T));
for (int64_t start = 0; start < params.size(); start += kMultiSquareSumMaxSize) {
MultiSquareSumParams<T> gpu_params{};
int64_t max_count = 0;
gpu_params.size = std::min<int64_t>(start + kMultiSquareSumMaxSize, params.size()) - start;
for (int64_t i = 0; i < gpu_params.size; ++i) {
gpu_params.params[i] = params[start + i];
max_count = std::max(max_count, gpu_params.params[i].count);
}
MultiSquareSumGpu<T>
<<<BlocksNum4ThreadsNum(max_count), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
gpu_params, y);
}
}
};
#define INSTANTIATE_SQUARE_SUM_KERNEL_UTIL_GPU(type_cpp, type_proto) \
template struct SquareSumKernelUtil<DeviceType::kGPU, type_cpp>;
OF_PP_FOR_EACH_TUPLE(INSTANTIATE_SQUARE_SUM_KERNEL_UTIL_GPU, FLOATING_DATA_TYPE_SEQ);
#undef INSTANTIATE_SQUARE_SUM_KERNEL_UTIL_GPU
} // namespace oneflow
|
293102a8f46d771948e2ecc4896232074d7fbe2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Based on CUDA SDK template from NVIDIA
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
#define max(a,b) (((a)>(b))?(a):(b))
#define min(a,b) (((a)<(b))?(a):(b))
#define MAX_BRIGHTNESS 255
// Use int instead `unsigned char' so that we can
// store negative values.
typedef int pixel_t;
// convolution of in image to out image using kernel of kn width
void convolution(const pixel_t *in, pixel_t *out, const float *kernel,
const int nx, const int ny, const int kn)
{
assert(kn % 2 == 1);
assert(nx > kn && ny > kn);
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++)
for (int i = -khalf; i <= khalf; i++) {
pixel += in[(n + j) * nx + m + i] * kernel[c];
c++;
}
out[n * nx + m] = (pixel_t)pixel;
}
}
// convolution of in image to out image using kernel of kn width
__global__ void CUDA_convolution_kernel(pixel_t *A, pixel_t *B, float *kirnel, int nx, int ny, int kn)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
int id = m + n * nx;
int khalf = kn/2;
if( (m < khalf) || (n < khalf) || (m >= nx - khalf) || (n >= ny - khalf) ){
return;
}
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++){
for (int i = -khalf; i <= khalf; i++) {
pixel += A[(n + j) * nx + m + i] * kirnel[c];
c++;
}
}
B[id] = (pixel_t)pixel;
}
void CUDA_convolution(pixel_t *in, pixel_t *out, float *kernel,
int nx, int ny, int kn)
{
//assert(kn % 2 == 1);
//assert(nx > kn && ny > kn);
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
float* kirnel;
hipMalloc((void**)&A, memsize);
hipMalloc((void**)&B, memsize);
hipMalloc((void**)&kirnel, kn*kn*sizeof(float));
hipMemcpy(A, in, memsize, hipMemcpyHostToDevice);
hipMemcpy(kirnel, kernel, kn*kn*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
hipLaunchKernelGGL(( CUDA_convolution_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, kirnel, nx, ny, kn);
hipMemcpy(out, B, memsize, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(B);
hipFree(kirnel);
}
// determines min and max of in image
void min_max(const pixel_t *in, const int nx, const int ny, pixel_t *pmin, pixel_t *pmax)
{
int min = INT_MAX, max = -INT_MAX;
for (int m = 0; m < nx; m++)
for (int n = 0; n < ny ; n++) {
int pixel = in[n*nx + m];
if (pixel < min)
min = pixel;
if (pixel > max)
max = pixel;
}
*pmin = min; *pmax = max;
}
// normalizes inout image using min and max values
void normalize( pixel_t *inout,
const int nx, const int ny, const int kn,
const int min, const int max)
{
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
pixel_t pixel = MAX_BRIGHTNESS * ((int)inout[n * nx + m] -(float) min) / ((float)max - (float)min);
inout[n * nx + m] = pixel;
}
}
/*
* gaussianFilter:
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
* determine size of kernel (odd #)
* 0.0 <= sigma < 0.5 : 3
* 0.5 <= sigma < 1.0 : 5
* 1.0 <= sigma < 1.5 : 7
* 1.5 <= sigma < 2.0 : 9
* 2.0 <= sigma < 2.5 : 11
* 2.5 <= sigma < 3.0 : 13 ...
* kernelSize = 2 * int(2*sigma) + 3;
*/
void gaussian_filter(const pixel_t *in, pixel_t *out,
const int nx, const int ny, const float sigma)
{
const int n = 2 * (int)(2 * sigma) + 3;
const float mean = (float)floor(n / 2.0);
float kernel[n * n]; // variable length array
fprintf(stderr, "gaussian_filter: kernel size %d, sigma=%g\n",
n, sigma);
size_t c = 0;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
kernel[c] = exp(-0.5 * (pow((i - mean) / sigma, 2.0) +
pow((j - mean) / sigma, 2.0)))
/ (2 * M_PI * sigma * sigma);
c++;
}
convolution(in, out, kernel, nx, ny, n);
pixel_t max, min;
min_max(out, nx, ny, &min, &max);
normalize(out, nx, ny, n, min, max);
}
// Canny non-maximum suppression
void non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms,
const int nx, const int ny)
{
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2(after_Gy[c],
after_Gx[c]) + M_PI,
M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && G[c] > G[ee] &&
G[c] > G[ww]) || // 0 deg
((dir > 1 && dir <= 3) && G[c] > G[nw] &&
G[c] > G[se]) || // 45 deg
((dir > 3 && dir <= 5) && G[c] > G[nn] &&
G[c] > G[ss]) || // 90 deg
((dir > 5 && dir <= 7) && G[c] > G[ne] &&
G[c] > G[sw])) // 135 deg
nms[c] = G[c];
else
nms[c] = 0;
}
}
__global__ void CUDA_non_maximum_supression_kernel(pixel_t *A, pixel_t *B, pixel_t *C, pixel_t *out, int nx, int ny)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
const int c = m + nx * n; // id
if( (m < 1) || (n < 1) || (m >= nx - 1) || (n >= ny - 1) ){
return;
}
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2((double)B[c],(double)A[c]) + M_PI, M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && C[c] > C[ee] &&
C[c] > C[ww]) || // 0 deg
((dir > 1 && dir <= 3) && C[c] > C[nw] &&
C[c] > C[se]) || // 45 deg
((dir > 3 && dir <= 5) && C[c] > C[nn] &&
C[c] > C[ss]) || // 90 deg
((dir > 5 && dir <= 7) && C[c] > C[ne] &&
C[c] > C[sw])) // 135 deg
out[c] = C[c];
else
out[c] = 0;
}
void CUDA_non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms, const int nx, const int ny)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
pixel_t* C;
pixel_t* out;
// float* kirnel;
hipMalloc((void**)&A, memsize);
hipMalloc((void**)&B, memsize);
hipMalloc((void**)&C, memsize);
hipMalloc((void**)&out, memsize);
hipMemcpy(A, after_Gx, memsize, hipMemcpyHostToDevice);
hipMemcpy(B, after_Gy, memsize, hipMemcpyHostToDevice);
hipMemset( out, 0, memsize);
hipMemcpy(C, G, memsize, hipMemcpyHostToDevice);
// hipMemcpy(GG, kernel, kn*kn*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
hipLaunchKernelGGL(( CUDA_non_maximum_supression_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C, out, nx, ny);
hipMemcpy(nms, out, memsize, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(out);
}
// edges found in first pass for nms > tmax
void first_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmax)
{
size_t c = 1;
for (int j = 1; j < ny - 1; j++) {
for (int i = 1; i < nx - 1; i++) {
if (nms[c] >= tmax) { // trace edges
reference[c] = MAX_BRIGHTNESS;
}
c++;
}
c+=2; // because borders are not considered
}
}
__global__ void CUDA_first_edges_kernel(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax){
int xx = threadIdx.x + blockIdx.x * blockDim.x;
int yy = threadIdx.y + blockIdx.y * blockDim.y;
int id = xx + yy * nx;
if ((xx>0 && xx<nx-1) && (yy>0 && yy<ny-1))
{
if (nms[id+1] >= tmax) { // trace edges
reference[id+1] = MAX_BRIGHTNESS;
}
}
}
void CUDA_first_edges(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* out;
hipMalloc((void**)&A, memsize);
hipMalloc((void**)&out, memsize);
hipMemcpy(A, nms, memsize, hipMemcpyHostToDevice);
hipMemset(out, 0, memsize);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
hipLaunchKernelGGL(( CUDA_first_edges_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, A, out, nx, ny, tmax);
hipMemcpy(reference, out, memsize, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(out);
}
// edges found in after first passes for nms > tmin && neighbor is edge
void hysteresis_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmin, bool *pchanged)
{
// Tracing edges with hysteresis . Non-recursive implementation.
for (int i = 1; i < nx - 1; i++) {
for (int j = 1; j < ny - 1; j++) {
size_t t = i + j * nx;
int nbs[8]; // neighbours
nbs[0] = t - nx; // nn
nbs[1] = t + nx; // ss
nbs[2] = t + 1; // ww
nbs[3] = t - 1; // ee
nbs[4] = nbs[0] + 1; // nw
nbs[5] = nbs[0] - 1; // ne
nbs[6] = nbs[1] + 1; // sw
nbs[7] = nbs[1] - 1; // se
if (nms[t] >= tmin && reference[t] == 0) {
for(int k = 0; k < 8; k++)
if (reference[nbs[k]] != 0) {
reference[t] = MAX_BRIGHTNESS;
*pchanged = true;
}
}
}
}
}
/*
* Links:
* http://en.wikipedia.org/wiki/Canny_edge_detector
* http://www.tomgibara.com/computer-vision/CannyEdgeDetector.java
* http://fourier.eng.hmc.edu/e161/lectures/canny/node1.html
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
*
* Note: T1 and T2 are lower and upper thresholds.
*/
//canny edge detector code to run on the host
void cannyHost( const int *h_idata, const int w, const int h,
const int tmin, // tmin canny parameter
const int tmax, // tmax canny parameter
const float sigma, // sigma canny parameter
int * reference)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || reference == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, reference, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
convolution(reference, after_Gx, Gx, nx, ny, 3);
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
convolution(reference, after_Gy, Gy, nx, ny, 3);
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmax
memset(reference, 0, sizeof(pixel_t) * nx * ny);
first_edges(nms, reference, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
hysteresis_edges(nms, reference, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// canny edge detector code to run on the GPU
void cannyDevice( const int *h_idata, const int w, const int h,
const int tmin, const int tmax,
const float sigma,
int * h_odata)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || h_odata == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, h_odata, nx, ny, sigma);
//CUDA_gaussian_filter(h_idata, h_odata, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
//convolution(h_odata, after_Gx, Gx, nx, ny, 3);
CUDA_convolution(h_odata, after_Gx,(float*) Gx, nx, ny, 3); // convolution function in CUDA
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
//convolution(h_odata, after_Gy, Gy, nx, ny, 3);
CUDA_convolution(h_odata, after_Gy, (float *) Gy, nx, ny, 3); // convolution function in CUDA
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
//non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
CUDA_non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmaxstaff-1879.wirel
memset(h_odata, 0, sizeof(pixel_t) * nx * ny);
//first_edges(nms, h_odata, nx, ny, tmax);
CUDA_first_edges(nms, h_odata, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
//CUDA_hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-i inputfile] [-o outputfile] [-r referenceFile] [-w windowsize] [-t threshold]\n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
char *fileIn=(char *)"lena.pgm",*fileOut=(char *)"lenaOut.pgm",*referenceOut=(char *)"reference.pgm";
int tmin = 45, tmax = 50;
float sigma=1.0f;
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:i:o:r:n:x:s:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'i': // input image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 'r': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'n': // tmin
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmin)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'x': // tmax
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmax)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 's': // sigma
if(strlen(optarg)==0 || sscanf(optarg,"%f",&sigma)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
// select cuda device
cutilSafeCall( hipSetDevice( deviceId ) );
// create events to measure host canny detector time and device canny detector time
hipEvent_t startH, stopH, startD, stopD;
hipEventCreate(&startH);
hipEventCreate(&stopH);
hipEventCreate(&startD);
hipEventCreate(&stopD);
// allocate host memory
int* h_idata=NULL;
unsigned int h,w;
//load pgm
if (cutLoadPGMi(fileIn, (unsigned int **)&h_idata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", fileIn);
exit(1);
}
// allocate mem for the result on host side
//int* h_odata = (int*) malloc( h*w*sizeof(unsigned int));
//int* reference = (int*) malloc( h*w*sizeof(unsigned int));
int* h_odata = (int*) calloc( h*w, sizeof(unsigned int)); // fazer hipMalloc??
int* reference = (int*) calloc( h*w, sizeof(unsigned int));
// detect edges at host
hipEventRecord( startH, 0 );
cannyHost(h_idata, w, h, tmin, tmax, sigma, reference);
hipEventRecord( stopH, 0 );
hipEventSynchronize( stopH );
// detect edges at GPU
hipEventRecord( startD, 0 );
cannyDevice(h_idata, w, h, tmin, tmax, sigma, h_odata);
hipEventRecord( stopD, 0 );
hipEventSynchronize( stopD );
// check if kernel execution generated and error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
hipEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
hipEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_idata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
| 293102a8f46d771948e2ecc4896232074d7fbe2a.cu |
// Based on CUDA SDK template from NVIDIA
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
#define max(a,b) (((a)>(b))?(a):(b))
#define min(a,b) (((a)<(b))?(a):(b))
#define MAX_BRIGHTNESS 255
// Use int instead `unsigned char' so that we can
// store negative values.
typedef int pixel_t;
// convolution of in image to out image using kernel of kn width
void convolution(const pixel_t *in, pixel_t *out, const float *kernel,
const int nx, const int ny, const int kn)
{
assert(kn % 2 == 1);
assert(nx > kn && ny > kn);
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++)
for (int i = -khalf; i <= khalf; i++) {
pixel += in[(n + j) * nx + m + i] * kernel[c];
c++;
}
out[n * nx + m] = (pixel_t)pixel;
}
}
// convolution of in image to out image using kernel of kn width
__global__ void CUDA_convolution_kernel(pixel_t *A, pixel_t *B, float *kirnel, int nx, int ny, int kn)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
int id = m + n * nx;
int khalf = kn/2;
if( (m < khalf) || (n < khalf) || (m >= nx - khalf) || (n >= ny - khalf) ){
return;
}
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++){
for (int i = -khalf; i <= khalf; i++) {
pixel += A[(n + j) * nx + m + i] * kirnel[c];
c++;
}
}
B[id] = (pixel_t)pixel;
}
void CUDA_convolution(pixel_t *in, pixel_t *out, float *kernel,
int nx, int ny, int kn)
{
//assert(kn % 2 == 1);
//assert(nx > kn && ny > kn);
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
float* kirnel;
cudaMalloc((void**)&A, memsize);
cudaMalloc((void**)&B, memsize);
cudaMalloc((void**)&kirnel, kn*kn*sizeof(float));
cudaMemcpy(A, in, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(kirnel, kernel, kn*kn*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
CUDA_convolution_kernel <<<dimGrid, dimBlock>>> (A, B, kirnel, nx, ny, kn);
cudaMemcpy(out, B, memsize, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(B);
cudaFree(kirnel);
}
// determines min and max of in image
void min_max(const pixel_t *in, const int nx, const int ny, pixel_t *pmin, pixel_t *pmax)
{
int min = INT_MAX, max = -INT_MAX;
for (int m = 0; m < nx; m++)
for (int n = 0; n < ny ; n++) {
int pixel = in[n*nx + m];
if (pixel < min)
min = pixel;
if (pixel > max)
max = pixel;
}
*pmin = min; *pmax = max;
}
// normalizes inout image using min and max values
void normalize( pixel_t *inout,
const int nx, const int ny, const int kn,
const int min, const int max)
{
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
pixel_t pixel = MAX_BRIGHTNESS * ((int)inout[n * nx + m] -(float) min) / ((float)max - (float)min);
inout[n * nx + m] = pixel;
}
}
/*
* gaussianFilter:
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
* determine size of kernel (odd #)
* 0.0 <= sigma < 0.5 : 3
* 0.5 <= sigma < 1.0 : 5
* 1.0 <= sigma < 1.5 : 7
* 1.5 <= sigma < 2.0 : 9
* 2.0 <= sigma < 2.5 : 11
* 2.5 <= sigma < 3.0 : 13 ...
* kernelSize = 2 * int(2*sigma) + 3;
*/
void gaussian_filter(const pixel_t *in, pixel_t *out,
const int nx, const int ny, const float sigma)
{
const int n = 2 * (int)(2 * sigma) + 3;
const float mean = (float)floor(n / 2.0);
float kernel[n * n]; // variable length array
fprintf(stderr, "gaussian_filter: kernel size %d, sigma=%g\n",
n, sigma);
size_t c = 0;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
kernel[c] = exp(-0.5 * (pow((i - mean) / sigma, 2.0) +
pow((j - mean) / sigma, 2.0)))
/ (2 * M_PI * sigma * sigma);
c++;
}
convolution(in, out, kernel, nx, ny, n);
pixel_t max, min;
min_max(out, nx, ny, &min, &max);
normalize(out, nx, ny, n, min, max);
}
// Canny non-maximum suppression
void non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms,
const int nx, const int ny)
{
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2(after_Gy[c],
after_Gx[c]) + M_PI,
M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && G[c] > G[ee] &&
G[c] > G[ww]) || // 0 deg
((dir > 1 && dir <= 3) && G[c] > G[nw] &&
G[c] > G[se]) || // 45 deg
((dir > 3 && dir <= 5) && G[c] > G[nn] &&
G[c] > G[ss]) || // 90 deg
((dir > 5 && dir <= 7) && G[c] > G[ne] &&
G[c] > G[sw])) // 135 deg
nms[c] = G[c];
else
nms[c] = 0;
}
}
__global__ void CUDA_non_maximum_supression_kernel(pixel_t *A, pixel_t *B, pixel_t *C, pixel_t *out, int nx, int ny)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
const int c = m + nx * n; // id
if( (m < 1) || (n < 1) || (m >= nx - 1) || (n >= ny - 1) ){
return;
}
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2((double)B[c],(double)A[c]) + M_PI, M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && C[c] > C[ee] &&
C[c] > C[ww]) || // 0 deg
((dir > 1 && dir <= 3) && C[c] > C[nw] &&
C[c] > C[se]) || // 45 deg
((dir > 3 && dir <= 5) && C[c] > C[nn] &&
C[c] > C[ss]) || // 90 deg
((dir > 5 && dir <= 7) && C[c] > C[ne] &&
C[c] > C[sw])) // 135 deg
out[c] = C[c];
else
out[c] = 0;
}
void CUDA_non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms, const int nx, const int ny)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
pixel_t* C;
pixel_t* out;
// float* kirnel;
cudaMalloc((void**)&A, memsize);
cudaMalloc((void**)&B, memsize);
cudaMalloc((void**)&C, memsize);
cudaMalloc((void**)&out, memsize);
cudaMemcpy(A, after_Gx, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(B, after_Gy, memsize, cudaMemcpyHostToDevice);
cudaMemset( out, 0, memsize);
cudaMemcpy(C, G, memsize, cudaMemcpyHostToDevice);
// cudaMemcpy(GG, kernel, kn*kn*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
CUDA_non_maximum_supression_kernel <<<dimGrid, dimBlock>>> (A, B, C, out, nx, ny);
cudaMemcpy(nms, out, memsize, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(out);
}
// edges found in first pass for nms > tmax
void first_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmax)
{
size_t c = 1;
for (int j = 1; j < ny - 1; j++) {
for (int i = 1; i < nx - 1; i++) {
if (nms[c] >= tmax) { // trace edges
reference[c] = MAX_BRIGHTNESS;
}
c++;
}
c+=2; // because borders are not considered
}
}
__global__ void CUDA_first_edges_kernel(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax){
int xx = threadIdx.x + blockIdx.x * blockDim.x;
int yy = threadIdx.y + blockIdx.y * blockDim.y;
int id = xx + yy * nx;
if ((xx>0 && xx<nx-1) && (yy>0 && yy<ny-1))
{
if (nms[id+1] >= tmax) { // trace edges
reference[id+1] = MAX_BRIGHTNESS;
}
}
}
void CUDA_first_edges(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* out;
cudaMalloc((void**)&A, memsize);
cudaMalloc((void**)&out, memsize);
cudaMemcpy(A, nms, memsize, cudaMemcpyHostToDevice);
cudaMemset(out, 0, memsize);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
CUDA_first_edges_kernel <<<dimGrid, dimBlock>>> (A, out, nx, ny, tmax);
cudaMemcpy(reference, out, memsize, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(out);
}
// edges found in after first passes for nms > tmin && neighbor is edge
void hysteresis_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmin, bool *pchanged)
{
// Tracing edges with hysteresis . Non-recursive implementation.
for (int i = 1; i < nx - 1; i++) {
for (int j = 1; j < ny - 1; j++) {
size_t t = i + j * nx;
int nbs[8]; // neighbours
nbs[0] = t - nx; // nn
nbs[1] = t + nx; // ss
nbs[2] = t + 1; // ww
nbs[3] = t - 1; // ee
nbs[4] = nbs[0] + 1; // nw
nbs[5] = nbs[0] - 1; // ne
nbs[6] = nbs[1] + 1; // sw
nbs[7] = nbs[1] - 1; // se
if (nms[t] >= tmin && reference[t] == 0) {
for(int k = 0; k < 8; k++)
if (reference[nbs[k]] != 0) {
reference[t] = MAX_BRIGHTNESS;
*pchanged = true;
}
}
}
}
}
/*
* Links:
* http://en.wikipedia.org/wiki/Canny_edge_detector
* http://www.tomgibara.com/computer-vision/CannyEdgeDetector.java
* http://fourier.eng.hmc.edu/e161/lectures/canny/node1.html
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
*
* Note: T1 and T2 are lower and upper thresholds.
*/
//canny edge detector code to run on the host
void cannyHost( const int *h_idata, const int w, const int h,
const int tmin, // tmin canny parameter
const int tmax, // tmax canny parameter
const float sigma, // sigma canny parameter
int * reference)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || reference == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, reference, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
convolution(reference, after_Gx, Gx, nx, ny, 3);
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
convolution(reference, after_Gy, Gy, nx, ny, 3);
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmax
memset(reference, 0, sizeof(pixel_t) * nx * ny);
first_edges(nms, reference, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
hysteresis_edges(nms, reference, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// canny edge detector code to run on the GPU
void cannyDevice( const int *h_idata, const int w, const int h,
const int tmin, const int tmax,
const float sigma,
int * h_odata)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || h_odata == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, h_odata, nx, ny, sigma);
//CUDA_gaussian_filter(h_idata, h_odata, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
//convolution(h_odata, after_Gx, Gx, nx, ny, 3);
CUDA_convolution(h_odata, after_Gx,(float*) Gx, nx, ny, 3); // convolution function in CUDA
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
//convolution(h_odata, after_Gy, Gy, nx, ny, 3);
CUDA_convolution(h_odata, after_Gy, (float *) Gy, nx, ny, 3); // convolution function in CUDA
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
//non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
CUDA_non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmaxstaff-1879.wirel
memset(h_odata, 0, sizeof(pixel_t) * nx * ny);
//first_edges(nms, h_odata, nx, ny, tmax);
CUDA_first_edges(nms, h_odata, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
//CUDA_hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-i inputfile] [-o outputfile] [-r referenceFile] [-w windowsize] [-t threshold]\n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
char *fileIn=(char *)"lena.pgm",*fileOut=(char *)"lenaOut.pgm",*referenceOut=(char *)"reference.pgm";
int tmin = 45, tmax = 50;
float sigma=1.0f;
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:i:o:r:n:x:s:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'i': // input image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 'r': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'n': // tmin
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmin)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'x': // tmax
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmax)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 's': // sigma
if(strlen(optarg)==0 || sscanf(optarg,"%f",&sigma)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
// select cuda device
cutilSafeCall( cudaSetDevice( deviceId ) );
// create events to measure host canny detector time and device canny detector time
cudaEvent_t startH, stopH, startD, stopD;
cudaEventCreate(&startH);
cudaEventCreate(&stopH);
cudaEventCreate(&startD);
cudaEventCreate(&stopD);
// allocate host memory
int* h_idata=NULL;
unsigned int h,w;
//load pgm
if (cutLoadPGMi(fileIn, (unsigned int **)&h_idata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", fileIn);
exit(1);
}
// allocate mem for the result on host side
//int* h_odata = (int*) malloc( h*w*sizeof(unsigned int));
//int* reference = (int*) malloc( h*w*sizeof(unsigned int));
int* h_odata = (int*) calloc( h*w, sizeof(unsigned int)); // fazer cudaMalloc??
int* reference = (int*) calloc( h*w, sizeof(unsigned int));
// detect edges at host
cudaEventRecord( startH, 0 );
cannyHost(h_idata, w, h, tmin, tmax, sigma, reference);
cudaEventRecord( stopH, 0 );
cudaEventSynchronize( stopH );
// detect edges at GPU
cudaEventRecord( startD, 0 );
cannyDevice(h_idata, w, h, tmin, tmax, sigma, h_odata);
cudaEventRecord( stopD, 0 );
cudaEventSynchronize( stopD );
// check if kernel execution generated and error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
cudaEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
cudaEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_idata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
|
8a224e81884832f6fa954174268a4d0d9c2a6d28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 8a224e81884832f6fa954174268a4d0d9c2a6d28.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
e5677e4e2ae5e0e40f28ffb88c0190748d5899c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/index_sample_grad_kernel.h"
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace {
template <typename Context>
void LimitGridDim(const Context& ctx, dim3* grid_dim) {
auto max_grid_dim =
reinterpret_cast<const phi::GPUContext&>(ctx).GetCUDAMaxGridDimSize();
grid_dim->x = grid_dim->x < max_grid_dim[0] ? grid_dim->x : max_grid_dim[0];
grid_dim->y = grid_dim->y < max_grid_dim[1] ? grid_dim->y : max_grid_dim[1];
}
#define PREDEFINED_BLOCK_SIZE_X 512
#define PREDEFINED_BLOCK_SIZE 1024
#define MIN(a, b) ((a) < (b) ? (a) : (b))
} // namespace
template <typename T, typename IndexT = int>
__global__ void IndexSampleGrad(const IndexT* index,
T* in_grad,
const T* out_grad,
size_t index_length,
size_t input_length,
size_t batch_size,
bool same_data_in_row = true) {
unsigned int index_i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int index_j = blockDim.y * blockIdx.y + threadIdx.y;
for (; index_j < batch_size; index_j += blockDim.y * gridDim.y) {
index_i = blockDim.x * blockIdx.x + threadIdx.x;
for (; index_i < index_length; index_i += blockDim.x * gridDim.x) {
unsigned int index_idx = index_j * index_length + index_i;
unsigned int in_idx = index_j * input_length + index_i;
IndexT sample_idx = index[index_idx];
if (same_data_in_row) {
paddle::platform::CudaAtomicAdd(
&(in_grad[in_idx - index_i + sample_idx]), out_grad[sample_idx]);
} else {
in_grad[in_idx - index_i + sample_idx] = out_grad[index_idx];
}
}
}
}
template <typename T, typename Context>
void IndexSampleGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& index,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
const T* output_grad_data = out_grad.data<T>();
T* input_grad_data = ctx.template Alloc<T>(x_grad);
auto index_type = index.dtype();
bool index_type_match =
index_type == DataType::INT32 || index_type == DataType::INT64;
PADDLE_ENFORCE_EQ(
index_type_match,
true,
errors::InvalidArgument(
"Input(Index) holds the wrong type, it holds %s, but "
"desires to be %s or %s",
paddle::framework::DataTypeToString(
paddle::framework::TransToProtoVarType(index_type)),
paddle::framework::DataTypeToString(
paddle::framework::TransToProtoVarType(DataType::INT32)),
paddle::framework::DataTypeToString(
paddle::framework::TransToProtoVarType((DataType::INT64)))));
auto stream = reinterpret_cast<const phi::GPUContext&>(ctx).stream();
auto input_num = x.numel();
auto input_dim = x.dims();
auto index_dim = index.dims();
size_t batch_size = index_dim[0];
size_t input_length = input_dim[1];
size_t index_length = index_dim[1];
bool same_data_in_index_row = index_length == 1 ? false : true;
auto block_width = paddle::platform::RoundToPowerOfTwo(index_length);
block_width = MIN(block_width, PREDEFINED_BLOCK_SIZE_X);
auto block_height =
paddle::platform::RoundToPowerOfTwo(index_length * batch_size) /
block_width;
block_height = MIN(block_height, PREDEFINED_BLOCK_SIZE / block_width);
dim3 block_dim(block_width, block_height);
dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x,
(batch_size + block_dim.y - 1) / block_dim.y);
LimitGridDim(ctx, &grid_dim);
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, x_grad, static_cast<T>(0));
if (index_type == DataType::INT64) {
const int64_t* index_data = index.data<int64_t>();
hipLaunchKernelGGL(( IndexSampleGrad<T, int64_t>), dim3(grid_dim), dim3(block_dim), 0, stream,
index_data,
input_grad_data,
output_grad_data,
index_length,
input_length,
batch_size,
same_data_in_index_row);
} else if (index_type == DataType::INT32) {
const int* index_data = index.data<int>();
hipLaunchKernelGGL(( IndexSampleGrad<T, int>), dim3(grid_dim), dim3(block_dim), 0, stream,
index_data,
input_grad_data,
output_grad_data,
index_length,
input_length,
batch_size,
same_data_in_index_row);
}
}
} // namespace phi
PD_REGISTER_KERNEL(index_sample_grad,
GPU,
ALL_LAYOUT,
phi::IndexSampleGradKernel,
float,
double,
int,
int64_t) {}
| e5677e4e2ae5e0e40f28ffb88c0190748d5899c1.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/index_sample_grad_kernel.h"
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace {
template <typename Context>
void LimitGridDim(const Context& ctx, dim3* grid_dim) {
auto max_grid_dim =
reinterpret_cast<const phi::GPUContext&>(ctx).GetCUDAMaxGridDimSize();
grid_dim->x = grid_dim->x < max_grid_dim[0] ? grid_dim->x : max_grid_dim[0];
grid_dim->y = grid_dim->y < max_grid_dim[1] ? grid_dim->y : max_grid_dim[1];
}
#define PREDEFINED_BLOCK_SIZE_X 512
#define PREDEFINED_BLOCK_SIZE 1024
#define MIN(a, b) ((a) < (b) ? (a) : (b))
} // namespace
template <typename T, typename IndexT = int>
__global__ void IndexSampleGrad(const IndexT* index,
T* in_grad,
const T* out_grad,
size_t index_length,
size_t input_length,
size_t batch_size,
bool same_data_in_row = true) {
unsigned int index_i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int index_j = blockDim.y * blockIdx.y + threadIdx.y;
for (; index_j < batch_size; index_j += blockDim.y * gridDim.y) {
index_i = blockDim.x * blockIdx.x + threadIdx.x;
for (; index_i < index_length; index_i += blockDim.x * gridDim.x) {
unsigned int index_idx = index_j * index_length + index_i;
unsigned int in_idx = index_j * input_length + index_i;
IndexT sample_idx = index[index_idx];
if (same_data_in_row) {
paddle::platform::CudaAtomicAdd(
&(in_grad[in_idx - index_i + sample_idx]), out_grad[sample_idx]);
} else {
in_grad[in_idx - index_i + sample_idx] = out_grad[index_idx];
}
}
}
}
template <typename T, typename Context>
void IndexSampleGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& index,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
const T* output_grad_data = out_grad.data<T>();
T* input_grad_data = ctx.template Alloc<T>(x_grad);
auto index_type = index.dtype();
bool index_type_match =
index_type == DataType::INT32 || index_type == DataType::INT64;
PADDLE_ENFORCE_EQ(
index_type_match,
true,
errors::InvalidArgument(
"Input(Index) holds the wrong type, it holds %s, but "
"desires to be %s or %s",
paddle::framework::DataTypeToString(
paddle::framework::TransToProtoVarType(index_type)),
paddle::framework::DataTypeToString(
paddle::framework::TransToProtoVarType(DataType::INT32)),
paddle::framework::DataTypeToString(
paddle::framework::TransToProtoVarType((DataType::INT64)))));
auto stream = reinterpret_cast<const phi::GPUContext&>(ctx).stream();
auto input_num = x.numel();
auto input_dim = x.dims();
auto index_dim = index.dims();
size_t batch_size = index_dim[0];
size_t input_length = input_dim[1];
size_t index_length = index_dim[1];
bool same_data_in_index_row = index_length == 1 ? false : true;
auto block_width = paddle::platform::RoundToPowerOfTwo(index_length);
block_width = MIN(block_width, PREDEFINED_BLOCK_SIZE_X);
auto block_height =
paddle::platform::RoundToPowerOfTwo(index_length * batch_size) /
block_width;
block_height = MIN(block_height, PREDEFINED_BLOCK_SIZE / block_width);
dim3 block_dim(block_width, block_height);
dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x,
(batch_size + block_dim.y - 1) / block_dim.y);
LimitGridDim(ctx, &grid_dim);
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, x_grad, static_cast<T>(0));
if (index_type == DataType::INT64) {
const int64_t* index_data = index.data<int64_t>();
IndexSampleGrad<T, int64_t><<<grid_dim, block_dim, 0, stream>>>(
index_data,
input_grad_data,
output_grad_data,
index_length,
input_length,
batch_size,
same_data_in_index_row);
} else if (index_type == DataType::INT32) {
const int* index_data = index.data<int>();
IndexSampleGrad<T, int><<<grid_dim, block_dim, 0, stream>>>(
index_data,
input_grad_data,
output_grad_data,
index_length,
input_length,
batch_size,
same_data_in_index_row);
}
}
} // namespace phi
PD_REGISTER_KERNEL(index_sample_grad,
GPU,
ALL_LAYOUT,
phi::IndexSampleGradKernel,
float,
double,
int,
int64_t) {}
|
df93966048a99874b2f1499fc26bec03d4705ec7.hip | // !!! This is a file automatically generated by hipify!!!
#include "DL/layers/innerproduct_layer.h"
#include <cblas.h>
#include <malloc.h>
#include <stdlib.h>
#include "DL/util/common_function.h"
void setup_innerproduct(struct LayerParameter* layer_parameter){
struct InnerproductParameter innerproduct_param = layer_parameter->parameter.innerproduct_param;
int num_out = innerproduct_param.num_output;
struct Blob* output = layer_parameter->tops[0];
struct Blob* input = layer_parameter->bottoms[0];
int num_in = input->count/input->n;
MakeBlob(input->n,num_out, 1, 1, output);
struct Blob* weights = layer_parameter->learn_parameter[0];
struct Blob* bias = layer_parameter->learn_parameter[1];
MakeBlob(num_out, num_in, 1, 1, weights);
MakeBlob(num_out, 1, 1, 1, bias);
MakeBlob(input->n, 1, 1, 1, layer_parameter->meds[0]);
set(layer_parameter->meds[0]->gpu_data, input->n, 1.0);
Doutput_shape_info();
}
void forward_innerproduct(struct LayerParameter* layer_parameter){
Doutput_info();
struct Blob* input = layer_parameter->bottoms[0];
struct Blob* output = layer_parameter->tops[0];
struct Blob* weights = layer_parameter->learn_parameter[0];
struct Blob* bias = layer_parameter->learn_parameter[1];
int num_in = input->count/input->n;
int num_out = output->c;
int batch = input->n;
struct Blob temp;
MakeBlob(batch, 1, 1, 1, &temp);
set(temp.gpu_data, batch, 1.0);
const float a = 1.0;
const float b = 0.0;
hipblasSgemm(*(layer_parameter->p_cublas_handle), HIPBLAS_OP_T, HIPBLAS_OP_N, num_out, batch, num_in,
&a, weights->gpu_data, num_in, input->gpu_data, num_in, &b, output->gpu_data, num_out);
hipblasSgemm(*(layer_parameter->p_cublas_handle), HIPBLAS_OP_N, HIPBLAS_OP_N, num_out, batch, 1,
&a, bias->gpu_data, num_out, layer_parameter->meds[0]->gpu_data, 1, &a, output->gpu_data, num_out);
// hipblasSgemm(*(layer_parameter->p_cublas_handle), HIPBLAS_OP_N, HIPBLAS_OP_N, num_out, batch, 1,
// &a, bias->gpu_data, num_out, temp.gpu_data, 1, &a, output->gpu_data, num_out);
FreeBlob(&temp);
}
| df93966048a99874b2f1499fc26bec03d4705ec7.cu | #include "DL/layers/innerproduct_layer.h"
#include <cblas.h>
#include <malloc.h>
#include <stdlib.h>
#include "DL/util/common_function.h"
void setup_innerproduct(struct LayerParameter* layer_parameter){
struct InnerproductParameter innerproduct_param = layer_parameter->parameter.innerproduct_param;
int num_out = innerproduct_param.num_output;
struct Blob* output = layer_parameter->tops[0];
struct Blob* input = layer_parameter->bottoms[0];
int num_in = input->count/input->n;
MakeBlob(input->n,num_out, 1, 1, output);
struct Blob* weights = layer_parameter->learn_parameter[0];
struct Blob* bias = layer_parameter->learn_parameter[1];
MakeBlob(num_out, num_in, 1, 1, weights);
MakeBlob(num_out, 1, 1, 1, bias);
MakeBlob(input->n, 1, 1, 1, layer_parameter->meds[0]);
set(layer_parameter->meds[0]->gpu_data, input->n, 1.0);
Doutput_shape_info();
}
void forward_innerproduct(struct LayerParameter* layer_parameter){
Doutput_info();
struct Blob* input = layer_parameter->bottoms[0];
struct Blob* output = layer_parameter->tops[0];
struct Blob* weights = layer_parameter->learn_parameter[0];
struct Blob* bias = layer_parameter->learn_parameter[1];
int num_in = input->count/input->n;
int num_out = output->c;
int batch = input->n;
struct Blob temp;
MakeBlob(batch, 1, 1, 1, &temp);
set(temp.gpu_data, batch, 1.0);
const float a = 1.0;
const float b = 0.0;
cublasSgemm(*(layer_parameter->p_cublas_handle), CUBLAS_OP_T, CUBLAS_OP_N, num_out, batch, num_in,
&a, weights->gpu_data, num_in, input->gpu_data, num_in, &b, output->gpu_data, num_out);
cublasSgemm(*(layer_parameter->p_cublas_handle), CUBLAS_OP_N, CUBLAS_OP_N, num_out, batch, 1,
&a, bias->gpu_data, num_out, layer_parameter->meds[0]->gpu_data, 1, &a, output->gpu_data, num_out);
// cublasSgemm(*(layer_parameter->p_cublas_handle), CUBLAS_OP_N, CUBLAS_OP_N, num_out, batch, 1,
// &a, bias->gpu_data, num_out, temp.gpu_data, 1, &a, output->gpu_data, num_out);
FreeBlob(&temp);
}
|
74ed2a727e22217bdada6c4d977391b42136401b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void compute_Gamma_kernel(double* Gamma, int Gamma_n, int Gamma_ld, double* N, int N_r, int N_c, int N_ld, double* G, int G_r, int G_c, int G_ld, int* random_vertex_vector, double* exp_V, double* exp_delta_V) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int vertex_index = N_c - G_c;
if (i < Gamma_n and j < Gamma_n) {
int configuration_e_spin_index_i = random_vertex_vector[i];
int configuration_e_spin_index_j = random_vertex_vector[j];
if (configuration_e_spin_index_j < vertex_index) {
double delta = 0;
if (configuration_e_spin_index_i == configuration_e_spin_index_j)
delta = 1.;
double N_ij = N[configuration_e_spin_index_i + configuration_e_spin_index_j * N_ld];
Gamma[i + j * Gamma_ld] = (N_ij * exp_V[j] - delta) / (exp_V[j] - 1.);
}
else
Gamma[i + j * Gamma_ld] =
G[configuration_e_spin_index_i + (configuration_e_spin_index_j - vertex_index) * G_ld];
}
if (i < Gamma_n and j < Gamma_n and i == j) {
double gamma_k = exp_delta_V[j];
Gamma[i + j * Gamma_ld] -= (gamma_k) / (gamma_k - 1.);
}
} | 74ed2a727e22217bdada6c4d977391b42136401b.cu | #include "includes.h"
__global__ void compute_Gamma_kernel(double* Gamma, int Gamma_n, int Gamma_ld, double* N, int N_r, int N_c, int N_ld, double* G, int G_r, int G_c, int G_ld, int* random_vertex_vector, double* exp_V, double* exp_delta_V) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int vertex_index = N_c - G_c;
if (i < Gamma_n and j < Gamma_n) {
int configuration_e_spin_index_i = random_vertex_vector[i];
int configuration_e_spin_index_j = random_vertex_vector[j];
if (configuration_e_spin_index_j < vertex_index) {
double delta = 0;
if (configuration_e_spin_index_i == configuration_e_spin_index_j)
delta = 1.;
double N_ij = N[configuration_e_spin_index_i + configuration_e_spin_index_j * N_ld];
Gamma[i + j * Gamma_ld] = (N_ij * exp_V[j] - delta) / (exp_V[j] - 1.);
}
else
Gamma[i + j * Gamma_ld] =
G[configuration_e_spin_index_i + (configuration_e_spin_index_j - vertex_index) * G_ld];
}
if (i < Gamma_n and j < Gamma_n and i == j) {
double gamma_k = exp_delta_V[j];
Gamma[i + j * Gamma_ld] -= (gamma_k) / (gamma_k - 1.);
}
} |
12533a994409bf5dc041a8f2a936a8b5021086eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void execFillIsMax(void *vdZ, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
auto dz = reinterpret_cast<T*>(vdZ);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x)
dz[shape::getIndexOffset(i, xShapeInfo)] = (i == idx ? (T) 1 : (T) 0);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
__host__ void fillIsMaxGeneric(dim3 &launchDims, hipStream_t *stream, void *dx, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
hipLaunchKernelGGL(( execFillIsMax<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, xShapeInfo, length, idx);
sd::DebugHelper::checkErrorCode(stream, "fillIsMax(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT fillIsMaxGeneric, (dim3& launchDims, hipStream_t *stream, void* dz, Nd4jLong *zShapeInfo, Nd4jLong length, long idx), LIBND4J_TYPES);
} | 12533a994409bf5dc041a8f2a936a8b5021086eb.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void execFillIsMax(void *vdZ, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
auto dz = reinterpret_cast<T*>(vdZ);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x)
dz[shape::getIndexOffset(i, xShapeInfo)] = (i == idx ? (T) 1 : (T) 0);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
__host__ void fillIsMaxGeneric(dim3 &launchDims, cudaStream_t *stream, void *dx, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
execFillIsMax<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, xShapeInfo, length, idx);
sd::DebugHelper::checkErrorCode(stream, "fillIsMax(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT fillIsMaxGeneric, (dim3& launchDims, cudaStream_t *stream, void* dz, Nd4jLong *zShapeInfo, Nd4jLong length, long idx), LIBND4J_TYPES);
} |
701ea910a49567c28b7e8ac400f49be9765dfdd2.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/pair.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/detail/config.h>
#if THRUST_CPP_DIALECT >= 2011
#include <thrust/random.h>
#include <thrust/shuffle.h>
#include <random>
#endif
#include <algorithm>
#include <numeric>
#include <map>
#include <string>
#include <exception>
#include <iostream>
#include <cassert>
#include <cstdlib> // For `atoi`.
#include <climits> // For CHAR_BIT.
#include <cmath> // For `sqrt` and `abs`.
#include <stdint.h> // For `intN_t`.
#include "random.h"
#include "timer.h"
#if defined(HAVE_TBB)
#include "tbb_algos.h"
#endif
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
#include <thrust/system_error.h> // For `thrust::system_error`
#include <thrust/system/hip/error.h> // For `thrust::cuda_category`
#endif
// We don't use THRUST_PP_STRINGIZE and THRUST_PP_CAT because they are new, and
// we want this benchmark to be backwards-compatible to older versions of Thrust.
#define PP_STRINGIZE_(expr) #expr
#define PP_STRINGIZE(expr) PP_STRINGIZE_(expr)
#define PP_CAT(a, b) a ## b
// We don't use THRUST_NOEXCEPT because it's new, and we want this benchmark to
// be backwards-compatible to older versions of Thrust.
#if THRUST_CPP_DIALECT >= 2011
#define NOEXCEPT noexcept
#else
#define NOEXCEPT throw()
#endif
///////////////////////////////////////////////////////////////////////////////
template <typename T>
struct squared_difference
{
private:
T const average;
public:
__host__ __device__
squared_difference(squared_difference const& rhs) : average(rhs.average) {}
__host__ __device__
squared_difference(T average_) : average(average_) {}
__host__ __device__
T operator()(T x) const
{
return (x - average) * (x - average);
}
};
template <typename T>
struct value_and_count
{
T value;
uint64_t count;
__host__ __device__
value_and_count(value_and_count const& other)
: value(other.value), count(other.count) {}
__host__ __device__
value_and_count(T const& value_)
: value(value_), count(1) {}
__host__ __device__
value_and_count(T const& value_, uint64_t count_)
: value(value_), count(count_) {}
__host__ __device__
value_and_count& operator=(value_and_count const& other)
{
value = other.value;
count = other.count;
return *this;
}
__host__ __device__
value_and_count& operator=(T const& value_)
{
value = value_;
count = 1;
return *this;
}
};
template <typename T, typename ReduceOp>
struct counting_op
{
private:
ReduceOp reduce;
public:
__host__ __device__
counting_op() : reduce() {}
__host__ __device__
counting_op(counting_op const& other) : reduce(other.reduce) {}
__host__ __device__
counting_op(ReduceOp const& reduce_) : reduce(reduce_) {}
__host__ __device__
value_and_count<T> operator()(
value_and_count<T> const& x
, T const& y
) const
{
return value_and_count<T>(reduce(x.value, y), x.count + 1);
}
__host__ __device__
value_and_count<T> operator()(
value_and_count<T> const& x
, value_and_count<T> const& y
) const
{
return value_and_count<T>(reduce(x.value, y.value), x.count + y.count);
}
};
template <typename InputIt, typename T>
T arithmetic_mean(InputIt first, InputIt last, T init)
{
value_and_count<T> init_vc(init, 0);
counting_op<T, thrust::plus<T> > reduce_vc;
value_and_count<T> vc
= thrust::reduce(first, last, init_vc, reduce_vc);
return vc.value / vc.count;
}
template <typename InputIt>
typename thrust::iterator_traits<InputIt>::value_type
arithmetic_mean(InputIt first, InputIt last)
{
typedef typename thrust::iterator_traits<InputIt>::value_type T;
return arithmetic_mean(first, last, T());
}
template <typename InputIt, typename T>
T sample_standard_deviation(InputIt first, InputIt last, T average)
{
value_and_count<T> init_vc(T(), 0);
counting_op<T, thrust::plus<T> > reduce_vc;
squared_difference<T> transform(average);
value_and_count<T> vc
= thrust::transform_reduce(first, last, transform, init_vc, reduce_vc);
return std::sqrt(vc.value / T(vc.count - 1));
}
///////////////////////////////////////////////////////////////////////////////
// Formulas for propagation of uncertainty from:
//
// https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
//
// Even though it's Wikipedia, I trust it as I helped write that table.
//
// XXX Replace with a proper reference.
// Compute the propagated uncertainty from the multiplication of two uncertain
// values, `A +/- A_unc` and `B +/- B_unc`. Given `f = AB` or `f = A/B`, where
// `A != 0` and `B != 0`, the uncertainty in `f` is approximately:
//
// f_unc = abs(f) * sqrt((A_unc / A) ^ 2 + (B_unc / B) ^ 2)
//
template <typename T>
__host__ __device__
T uncertainty_multiplicative(
T const& f
, T const& A, T const& A_unc
, T const& B, T const& B_unc
)
{
return std::abs(f)
* std::sqrt((A_unc / A) * (A_unc / A) + (B_unc / B) * (B_unc / B));
}
// Compute the propagated uncertainty from addition of two uncertain values,
// `A +/- A_unc` and `B +/- B_unc`. Given `f = cA + dB` (where `c` and `d` are
// certain constants), the uncertainty in `f` is approximately:
//
// f_unc = sqrt(c ^ 2 * A_unc ^ 2 + d ^ 2 * B_unc ^ 2)
//
template <typename T>
__host__ __device__
T uncertainty_additive(
T const& c, T const& A_unc
, T const& d, T const& B_unc
)
{
return std::sqrt((c * c * A_unc * A_unc) + (d * d * B_unc * B_unc));
}
///////////////////////////////////////////////////////////////////////////////
// Return the significant digit of `x`. The result is the number of digits
// after the decimal place to round to (negative numbers indicate rounding
// before the decimal place)
template <typename T>
int find_significant_digit(T x)
{
if (x == T(0)) return T(0);
return -int(::floor(std::log10(std::abs(x))));
}
// Round `x` to `ndigits` after the decimal place (Python-style).
template <typename T, typename N>
T round_to_precision(T x, N ndigits)
{
double m = (x < 0.0) ? -1.0 : 1.0;
double pwr = ::pow(T(10.0), ndigits);
return (::floor(x * m * pwr + 0.5) / pwr) * m;
}
///////////////////////////////////////////////////////////////////////////////
void print_experiment_header()
{ // {{{
std::cout << "Thrust Version"
<< "," << "Algorithm"
<< "," << "Element Type"
<< "," << "Element Size"
<< "," << "Elements per Trial"
<< "," << "Total Input Size"
<< "," << "STL Trials"
<< "," << "STL Average Walltime"
<< "," << "STL Walltime Uncertainty"
<< "," << "STL Average Throughput"
<< "," << "STL Throughput Uncertainty"
<< "," << "Thrust Trials"
<< "," << "Thrust Average Walltime"
<< "," << "Thrust Walltime Uncertainty"
<< "," << "Thrust Average Throughput"
<< "," << "Thrust Throughput Uncertainty"
#if defined(HAVE_TBB)
<< "," << "TBB Trials"
<< "," << "TBB Average Walltime"
<< "," << "TBB Walltime Uncertainty"
<< "," << "TBB Average Throughput"
<< "," << "TBB Throughput Uncertainty"
#endif
<< std::endl;
std::cout << "" // Thrust Version.
<< "," << "" // Algorithm.
<< "," << "" // Element Type.
<< "," << "bits/element" // Element Size.
<< "," << "elements" // Elements per Trial.
<< "," << "MiBs" // Total Input Size.
<< "," << "trials" // STL Trials.
<< "," << "secs" // STL Average Walltime.
<< "," << "secs" // STL Walltime Uncertainty.
<< "," << "elements/sec" // STL Average Throughput.
<< "," << "elements/sec" // STL Throughput Uncertainty.
<< "," << "trials" // Thrust Trials.
<< "," << "secs" // Thrust Average Walltime.
<< "," << "secs" // Thrust Walltime Uncertainty.
<< "," << "elements/sec" // Thrust Average Throughput.
<< "," << "elements/sec" // Thrust Throughput Uncertainty.
#if defined(HAVE_TBB)
<< "," << "trials" // TBB Trials.
<< "," << "secs" // TBB Average Walltime.
<< "," << "secs" // TBB Walltime Uncertainty.
<< "," << "elements/sec" // TBB Average Throughput.
<< "," << "elements/sec" // TBB Throughput Uncertainty.
#endif
<< std::endl;
} // }}}
///////////////////////////////////////////////////////////////////////////////
struct experiment_results
{
double const average_time; // Arithmetic mean of trial times in seconds.
double const stdev_time; // Sample standard deviation of trial times.
experiment_results(double average_time_, double stdev_time_)
: average_time(average_time_), stdev_time(stdev_time_) {}
};
///////////////////////////////////////////////////////////////////////////////
template <
template <typename> class Test
, typename ElementMetaType // Has an embedded typedef `type,
// and a static method `name` that
// returns a char const*.
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
struct experiment_driver
{
typedef typename ElementMetaType::type element_type;
static char const* const test_name;
static char const* const element_type_name; // Element type name as a string.
static uint64_t const elements; // # of elements per trial.
static uint64_t const element_size; // Size of each element in bits.
static double const input_size; // `elements` * `element_size` in MiB.
static uint64_t const baseline_trials; // # of gbdt trials per experiment.
static uint64_t const regular_trials; // # of regular trials per experiment.
static void run_experiment()
{ // {{{
experiment_results stl = std_experiment();
experiment_results thrust = thrust_experiment();
#if defined(HAVE_TBB)
experiment_results tbb = tbb_experiment();
#endif
double stl_average_walltime = stl.average_time;
double thrust_average_walltime = thrust.average_time;
#if defined(HAVE_TBB)
double tbb_average_walltime = tbb.average_time;
#endif
double stl_average_throughput = elements / stl.average_time;
double thrust_average_throughput = elements / thrust.average_time;
#if defined(HAVE_TBB)
double tbb_average_throughput = elements / tbb.average_time;
#endif
double stl_walltime_uncertainty = stl.stdev_time;
double thrust_walltime_uncertainty = thrust.stdev_time;
#if defined(HAVE_TBB)
double tbb_walltime_uncertainty = tbb.stdev_time;
#endif
double stl_throughput_uncertainty = uncertainty_multiplicative(
stl_average_throughput
, double(elements), 0.0
, stl_average_walltime, stl_walltime_uncertainty
);
double thrust_throughput_uncertainty = uncertainty_multiplicative(
thrust_average_throughput
, double(elements), 0.0
, thrust_average_walltime, thrust_walltime_uncertainty
);
#if defined(HAVE_TBB)
double tbb_throughput_uncertainty = uncertainty_multiplicative(
tbb_average_throughput
, double(elements), 0.0
, tbb_average_walltime, tbb_walltime_uncertainty
);
#endif
// Round the average walltime and walltime uncertainty to the
// significant figure of the walltime uncertainty.
int stl_walltime_precision = ::max(
find_significant_digit(stl.average_time)
, find_significant_digit(stl.stdev_time)
);
int thrust_walltime_precision = ::max(
find_significant_digit(thrust.average_time)
, find_significant_digit(thrust.stdev_time)
);
#if defined(HAVE_TBB)
int tbb_walltime_precision = ::max(
find_significant_digit(tbb.average_time)
, find_significant_digit(tbb.stdev_time)
);
#endif
stl_average_walltime = round_to_precision(
stl_average_walltime, stl_walltime_precision
);
thrust_average_walltime = round_to_precision(
thrust_average_walltime, thrust_walltime_precision
);
#if defined(HAVE_TBB)
tbb_average_walltime = round_to_precision(
tbb_average_walltime, tbb_walltime_precision
);
#endif
stl_walltime_uncertainty = round_to_precision(
stl_walltime_uncertainty, stl_walltime_precision
);
thrust_walltime_uncertainty = round_to_precision(
thrust_walltime_uncertainty, thrust_walltime_precision
);
#if defined(HAVE_TBB)
tbb_walltime_uncertainty = round_to_precision(
tbb_walltime_uncertainty, tbb_walltime_precision
);
#endif
// Round the average throughput and throughput uncertainty to the
// significant figure of the throughput uncertainty.
int stl_throughput_precision = ::max(
find_significant_digit(stl_average_throughput)
, find_significant_digit(stl_throughput_uncertainty)
);
int thrust_throughput_precision = ::max(
find_significant_digit(thrust_average_throughput)
, find_significant_digit(thrust_throughput_uncertainty)
);
#if defined(HAVE_TBB)
int tbb_throughput_precision = ::max(
find_significant_digit(tbb_average_throughput)
, find_significant_digit(tbb_throughput_uncertainty)
);
#endif
stl_average_throughput = round_to_precision(
stl_average_throughput, stl_throughput_precision
);
thrust_average_throughput = round_to_precision(
thrust_average_throughput, thrust_throughput_precision
);
#if defined(HAVE_TBB)
tbb_average_throughput = round_to_precision(
tbb_average_throughput, tbb_throughput_precision
);
#endif
stl_throughput_uncertainty = round_to_precision(
stl_throughput_uncertainty, stl_throughput_precision
);
thrust_throughput_uncertainty = round_to_precision(
thrust_throughput_uncertainty, thrust_throughput_precision
);
#if defined(HAVE_TBB)
tbb_throughput_uncertainty = round_to_precision(
tbb_throughput_uncertainty, tbb_throughput_precision
);
#endif
std::cout << THRUST_VERSION // Thrust Version.
<< "," << test_name // Algorithm.
<< "," << element_type_name // Element Type.
<< "," << element_size // Element Size.
<< "," << elements // Elements per Trial.
<< "," << input_size // Total Input Size.
<< "," << baseline_trials // STL Trials.
<< "," << stl_average_walltime // STL Average Walltime.
<< "," << stl_walltime_uncertainty // STL Walltime Uncertainty.
<< "," << stl_average_throughput // STL Average Throughput.
<< "," << stl_throughput_uncertainty // STL Throughput Uncertainty.
<< "," << regular_trials // Thrust Trials.
<< "," << thrust_average_walltime // Thrust Average Walltime.
<< "," << thrust_walltime_uncertainty // Thrust Walltime Uncertainty.
<< "," << thrust_average_throughput // Thrust Average Throughput.
<< "," << thrust_throughput_uncertainty // Thrust Throughput Uncertainty.
#if defined(HAVE_TBB)
<< "," << regular_trials // TBB Trials.
<< "," << tbb_average_walltime // TBB Average Walltime.
<< "," << tbb_walltime_uncertainty // TBB Walltime Uncertainty.
<< "," << tbb_average_throughput // TBB Average Throughput.
<< "," << tbb_throughput_uncertainty // TBB Throughput Uncertainty.
#endif
<< std::endl;
} // }}}
private:
  // Run the STL baseline trials for this test and return their timing stats.
  static experiment_results std_experiment()
  {
    return experiment<typename Test<element_type>::std_trial>();
  }
  // Run the Thrust trials for this test and return their timing stats.
  static experiment_results thrust_experiment()
  {
    return experiment<typename Test<element_type>::thrust_trial>();
  }
#if defined(HAVE_TBB)
  // Run the TBB trials for this test (only compiled when TBB is available).
  static experiment_results tbb_experiment()
  {
    return experiment<typename Test<element_type>::tbb_trial>();
  }
#endif
  // Run one untimed warmup invocation of `Trial`, then time `baseline_trials`
  // or `regular_trials` invocations (depending on `Trial::is_baseline()`) and
  // return the mean and sample standard deviation of the per-trial walltimes.
  // Input regeneration (`setup`) is excluded from the timed region.
  template <typename Trial>
  static experiment_results experiment()
  { // {{{
    Trial trial;

    // Allocate storage and generate random input for the warmup trial.
    trial.setup(elements);

    // Warmup trial.
    trial();

    uint64_t const trials
      = trial.is_baseline() ? baseline_trials : regular_trials;

    std::vector<double> times;
    times.reserve(trials);

    for (uint64_t t = 0; t < trials; ++t)
    {
      // Generate random input for next trial.
      trial.setup(elements);

      steady_timer e; // See timer.h.

      // Benchmark.
      e.start();
      trial();
      e.stop();

      times.push_back(e.seconds_elapsed());
    }

    double average_time
      = arithmetic_mean(times.begin(), times.end());
    double stdev_time
      = sample_standard_deviation(times.begin(), times.end(), average_time);

    return experiment_results(average_time, stdev_time);
  } // }}}
};
// Out-of-line definitions of `experiment_driver`'s static data members
// (namespace-scope definitions are required pre-C++17, as there are no
// inline variables).

// Name of the algorithm under test, e.g. "reduce" or "sort".
template <
  template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
char const* const
experiment_driver<
  Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::test_name
  = Test<typename ElementMetaType::type>::test_name();

// Name of the element type as a string, e.g. "int32_t".
template <
  template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
char const* const
experiment_driver<
  Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::element_type_name
  = ElementMetaType::name();

// Size of one element in bits.
template <
  template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
  Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::element_size
  = CHAR_BIT * sizeof(typename ElementMetaType::type);

// Number of elements processed per trial.
template <
  template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
  Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::elements
  = Elements;

// Total input size per trial, in MiB.
template <
  template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
double const
experiment_driver<
  Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::input_size
  = double( Elements                               /* [elements] */
          * sizeof(typename ElementMetaType::type) /* [bytes/element] */
          )
  / double(1024 * 1024 /* [bytes/MiB] */);

// Number of trials run for the STL baseline implementation.
template <
  template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
  Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::baseline_trials
  = BaselineTrials;

// Number of trials run for the non-baseline (Thrust/TBB) implementations.
template <
  template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
  Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::regular_trials
  = RegularTrials;
///////////////////////////////////////////////////////////////////////////////
// Never create variables, pointers or references of any of the `*_trial_base`
// classes. They are purely mixin base classes and do not have vtables and
// virtual destructors. Using them for polymorphism instead of composition will
// probably cause slicing.
// Tag types selecting how many trials an experiment runs (see
// `experiment_driver::experiment`).
struct baseline_trial {};
struct regular_trial {};

// Primary template, defined only through the two specializations below.
template <typename TrialKind = regular_trial>
struct trial_base;

// Baseline trials (the STL reference implementations) report
// `is_baseline() == true` and are run `BaselineTrials` times.
template <>
struct trial_base<baseline_trial>
{
  static bool is_baseline() { return true; }
};

// Regular trials (Thrust/TBB) are run `RegularTrials` times.
template <>
struct trial_base<regular_trial>
{
  static bool is_baseline() { return false; }
};
// Mixin for trials that operate on a single container in place. `setup` must
// be called before each invocation to (re)allocate and re-randomize the input.
template <typename Container, typename TrialKind = regular_trial>
struct inplace_trial_base : trial_base<TrialKind>
{
  Container input;

  void setup(uint64_t elements)
  {
    input.resize(elements);
    randomize(input); // Fill with random values (see random.h).
  }
};
// Mixin for trials that read `input` and write to a separate, equally sized
// `output` container. Only the input is randomized; the output is left as
// resized.
template <typename Container, typename TrialKind = regular_trial>
struct copy_trial_base : trial_base<TrialKind>
{
  Container input;
  Container output;

  void setup(uint64_t elements)
  {
    input.resize(elements);
    output.resize(elements);
    randomize(input); // Fill with random values (see random.h).
  }
};
#if THRUST_CPP_DIALECT >= 2011
// Mixin for shuffle trials (C++11 only, since the shuffle algorithms require
// it). Identical setup to `inplace_trial_base`.
template <typename Container, typename TrialKind = regular_trial>
struct shuffle_trial_base : trial_base<TrialKind>
{
  Container input;

  void setup(uint64_t elements)
  {
    input.resize(elements);
    randomize(input); // Fill with random values (see random.h).
  }
};
#endif
///////////////////////////////////////////////////////////////////////////////
// Benchmarks a full reduction (sum) of the input sequence.
template <typename T>
struct reduce_tester
{
  static char const* test_name() { return "reduce"; }

  // Baseline: single-threaded `std::accumulate` on host memory.
  struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
  {
    void operator()()
    {
      // The comparison + print exist solely so the optimizer cannot discard
      // the otherwise-unused accumulation.
      if (std::accumulate(this->input.begin(), this->input.end(), T(0)) == 0)
        // Prevent optimizer from removing body.
        std::cout << "xyz";
    }
  };

  struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
  {
    void operator()()
    {
      thrust::reduce(this->input.begin(), this->input.end());
    }
  };

#if defined(HAVE_TBB)
  struct tbb_trial : inplace_trial_base<std::vector<T> >
  {
    void operator()()
    {
      tbb_reduce(this->input); // See tbb_algos.h.
    }
  };
#endif
};
// Benchmarks an in-place ascending sort of the input sequence.
template <typename T>
struct sort_tester
{
  static char const* test_name() { return "sort"; }

  // Baseline: single-threaded `std::sort` on host memory.
  struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
  {
    void operator()()
    {
      std::sort(this->input.begin(), this->input.end());
    }
  };

  struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
  {
    void operator()()
    {
      thrust::sort(this->input.begin(), this->input.end());
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
      // Block so the timer covers the whole sort, and surface any
      // device-side error as an exception.
      hipError_t err = hipDeviceSynchronize();
      if (err != hipSuccess)
        throw thrust::error_code(err, thrust::cuda_category());
#endif
    }
  };

#if defined(HAVE_TBB)
  struct tbb_trial : inplace_trial_base<std::vector<T> >
  {
    void operator()()
    {
      tbb_sort(this->input); // See tbb_algos.h.
    }
  }; // FIX: this struct definition was missing its terminating semicolon,
     // which broke compilation whenever HAVE_TBB was defined.
#endif
};
// Benchmarks an in-place elementwise negation of the input sequence.
template <typename T>
struct transform_inplace_tester
{
  static char const* test_name() { return "transform_inplace"; }

  // Baseline: single-threaded `std::transform` on host memory.
  struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
  {
    void operator()()
    {
      std::transform(
        this->input.begin(), this->input.end(), this->input.begin()
      , thrust::negate<T>()
      );
    }
  };

  struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
  {
    void operator()()
    {
      thrust::transform(
        this->input.begin(), this->input.end(), this->input.begin()
      , thrust::negate<T>()
      );
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
      // Block so the timer covers the whole transform, and surface any
      // device-side error as an exception.
      hipError_t err = hipDeviceSynchronize();
      if (err != hipSuccess)
        throw thrust::error_code(err, thrust::cuda_category());
#endif
    }
  };

#if defined(HAVE_TBB)
  struct tbb_trial : inplace_trial_base<std::vector<T> >
  {
    void operator()()
    {
      tbb_transform(this->input); // See tbb_algos.h.
    }
  };
#endif
};
// Benchmarks an in-place inclusive prefix sum of the input sequence.
template <typename T>
struct inclusive_scan_inplace_tester
{
  static char const* test_name() { return "inclusive_scan_inplace"; }

  // Baseline: single-threaded `std::partial_sum` on host memory.
  struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
  {
    void operator()()
    {
      std::partial_sum(
        this->input.begin(), this->input.end(), this->input.begin()
      );
    }
  };

  struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
  {
    void operator()()
    {
      thrust::inclusive_scan(
        this->input.begin(), this->input.end(), this->input.begin()
      );
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
      // Block so the timer covers the whole scan, and surface any
      // device-side error as an exception.
      hipError_t err = hipDeviceSynchronize();
      if (err != hipSuccess)
        throw thrust::error_code(err, thrust::cuda_category());
#endif
    }
  };

#if defined(HAVE_TBB)
  struct tbb_trial : inplace_trial_base<std::vector<T> >
  {
    void operator()()
    {
      tbb_scan(this->input); // See tbb_algos.h.
    }
  };
#endif
};
// Benchmarks copying the input sequence into a separate output sequence.
template <typename T>
struct copy_tester
{
  static char const* test_name() { return "copy"; }

  // Baseline: single-threaded `std::copy` on host memory.
  struct std_trial : copy_trial_base<std::vector<T> >
  {
    void operator()()
    {
      std::copy(this->input.begin(), this->input.end(), this->output.begin());
    }
  };

  struct thrust_trial : copy_trial_base<thrust::device_vector<T> >
  {
    void operator()()
    {
      // FIX: copy into `output`, matching the STL and TBB trials. The
      // original passed `this->input.begin()` as the destination, which
      // benchmarked a self-copy and never touched `output`.
      thrust::copy(this->input.begin(), this->input.end(), this->output.begin());
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
      // Block so the timer covers the whole copy, and surface any
      // device-side error as an exception.
      hipError_t err = hipDeviceSynchronize();
      if (err != hipSuccess)
        throw thrust::error_code(err, thrust::cuda_category());
#endif
    }
  };

#if defined(HAVE_TBB)
  struct tbb_trial : copy_trial_base<std::vector<T> >
  {
    void operator()()
    {
      tbb_copy(this->input, this->output); // See tbb_algos.h.
    }
  };
#endif
};
#if THRUST_CPP_DIALECT >= 2011
// Benchmarks an in-place random shuffle of the input sequence (C++11 only).
// Note: no TBB variant is defined for this test.
template <typename T>
struct shuffle_tester
{
  static char const* test_name() { return "shuffle"; }

  // Baseline: single-threaded `std::shuffle` on host memory.
  struct std_trial : shuffle_trial_base<std::vector<T>, baseline_trial>
  {
    std::default_random_engine g; // Engine state persists across trials.

    void operator()()
    {
      std::shuffle(this->input.begin(), this->input.end(), this->g);
    }
  };

  struct thrust_trial : shuffle_trial_base<thrust::device_vector<T> >
  {
    thrust::default_random_engine g; // Engine state persists across trials.

    void operator()()
    {
      thrust::shuffle(this->input.begin(), this->input.end(), this->g);
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
      // Block so the timer covers the whole shuffle, and surface any
      // device-side error as an exception.
      hipError_t err = hipDeviceSynchronize();
      if (err != hipSuccess)
        throw thrust::error_code(err, thrust::cuda_category());
#endif
    }
  };
};
#endif
///////////////////////////////////////////////////////////////////////////////
// Run every core-primitive benchmark for one element type. `Elements` acts as
// a byte budget: each experiment divides it by sizeof(element) so all element
// types process roughly the same amount of memory. Sort is the exception and
// uses a fixed element count (`Elements >> 6`) instead.
template <
  typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
void run_core_primitives_experiments_for_type()
{
  experiment_driver<
    reduce_tester
  , ElementMetaType
  , Elements / sizeof(typename ElementMetaType::type)
  , BaselineTrials
  , RegularTrials
  >::run_experiment();

  experiment_driver<
    transform_inplace_tester
  , ElementMetaType
  , Elements / sizeof(typename ElementMetaType::type)
  , BaselineTrials
  , RegularTrials
  >::run_experiment();

  experiment_driver<
    inclusive_scan_inplace_tester
  , ElementMetaType
  , Elements / sizeof(typename ElementMetaType::type)
  , BaselineTrials
  , RegularTrials
  >::run_experiment();

  experiment_driver<
    sort_tester
  , ElementMetaType
//, Elements / sizeof(typename ElementMetaType::type)
  , (Elements >> 6) // Sorting is more sensitive to element count than
                    // memory footprint.
  , BaselineTrials
  , RegularTrials
  >::run_experiment();

  experiment_driver<
    copy_tester
  , ElementMetaType
  , Elements / sizeof(typename ElementMetaType::type)
  , BaselineTrials
  , RegularTrials
  >::run_experiment();

#if THRUST_CPP_DIALECT >= 2011
  experiment_driver<
    shuffle_tester
  , ElementMetaType
  , Elements / sizeof(typename ElementMetaType::type)
  , BaselineTrials
  , RegularTrials
  >::run_experiment();
#endif
}
///////////////////////////////////////////////////////////////////////////////
// Defines a meta-type `<T>_meta` that carries an element type `T` together
// with its name as a string, for use as the `ElementMetaType` parameter of
// `experiment_driver`.
#define DEFINE_ELEMENT_META_TYPE(T)                       \
  struct PP_CAT(T, _meta)                                 \
  {                                                       \
    typedef T type;                                       \
                                                          \
    static char const* name() { return PP_STRINGIZE(T); } \
  };                                                      \
  /**/

DEFINE_ELEMENT_META_TYPE(char);
DEFINE_ELEMENT_META_TYPE(int);

DEFINE_ELEMENT_META_TYPE(int8_t);
DEFINE_ELEMENT_META_TYPE(int16_t);
DEFINE_ELEMENT_META_TYPE(int32_t);
DEFINE_ELEMENT_META_TYPE(int64_t);

DEFINE_ELEMENT_META_TYPE(float);
DEFINE_ELEMENT_META_TYPE(double);
///////////////////////////////////////////////////////////////////////////////
// Run the full benchmark suite for every supported element type at one input
// size (`Elements` bytes) and trial-count configuration.
template <
  uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
void run_core_primitives_experiments()
{
  run_core_primitives_experiments_for_type<
    char_meta, Elements, BaselineTrials, RegularTrials
  >();
  run_core_primitives_experiments_for_type<
    int_meta, Elements, BaselineTrials, RegularTrials
  >();

  run_core_primitives_experiments_for_type<
    int8_t_meta, Elements, BaselineTrials, RegularTrials
  >();
  run_core_primitives_experiments_for_type<
    int16_t_meta, Elements, BaselineTrials, RegularTrials
  >();
  run_core_primitives_experiments_for_type<
    int32_t_meta, Elements, BaselineTrials, RegularTrials
  >();
  run_core_primitives_experiments_for_type<
    int64_t_meta, Elements, BaselineTrials, RegularTrials
  >();

  run_core_primitives_experiments_for_type<
    float_meta, Elements, BaselineTrials, RegularTrials
  >();
  run_core_primitives_experiments_for_type<
    double_meta, Elements, BaselineTrials, RegularTrials
  >();
}
///////////////////////////////////////////////////////////////////////////////
// XXX Use `std::string_view` when possible.
// Break `str` into the substrings separated by `delim`, discarding empty
// tokens (so consecutive, leading, or trailing delimiters contribute no
// entries).
std::vector<std::string> split(std::string const& str, std::string const& delim)
{
  std::vector<std::string> tokens;

  std::string::size_type start = 0;
  std::string::size_type end   = 0;

  do
  {
    end = str.find(delim, start);
    if (std::string::npos == end) end = str.length();

    // Skip empty tokens produced by adjacent delimiters.
    if (end != start)
      tokens.push_back(str.substr(start, end - start));

    start = end + delim.length();
  }
  while (end < str.length() && start < str.length());

  return tokens;
}
///////////////////////////////////////////////////////////////////////////////
// Abstract base for all command-line processing errors, so callers can catch
// every option error with one handler.
struct command_line_option_error : std::exception
{
  virtual ~command_line_option_error() NOEXCEPT {}
  virtual const char* what() const NOEXCEPT = 0;
};
// Thrown when an option that may appear at most once was passed several
// times. `key` is the option name and `[first, last)` is a sequence of
// `std::pair<std::string const, std::string>`s holding the received values.
struct only_one_option_allowed : command_line_option_error
{
  template <typename InputIt>
  only_one_option_allowed(std::string const& key, InputIt first, InputIt last)
    : message("Only one `--")
  {
    message += key;
    message += "` option is allowed, but multiple were received: ";

    while (first != last)
    {
      message += "`";
      message += (*first).second;
      message += "` ";
      ++first;
    }

    // Drop the trailing space appended by the final loop iteration.
    message.resize(message.size() - 1);

    message += ".";
  }

  virtual ~only_one_option_allowed() NOEXCEPT {}

  virtual const char* what() const NOEXCEPT
  {
    return message.c_str();
  }

private:
  std::string message;
};
// Thrown when a mandatory option was not supplied. `key` is the option name.
struct required_option_missing : command_line_option_error
{
  required_option_missing(std::string const& key)
    : message("`--" + key + "` option is required.")
  {}

  virtual ~required_option_missing() NOEXCEPT {}

  virtual const char* what() const NOEXCEPT
  {
    return message.c_str();
  }

private:
  std::string message;
};
// Minimal command-line parser. Collects positional arguments and `--key` /
// `--key=value` options; options are stored in a multimap so repeated keys
// are preserved and can be diagnosed by `operator()`.
struct command_line_processor
{
  typedef std::vector<std::string> positional_options_type;

  typedef std::multimap<std::string, std::string> keyword_options_type;

  typedef std::pair<
    keyword_options_type::const_iterator
  , keyword_options_type::const_iterator
  > keyword_option_values;

  command_line_processor(int argc, char** argv)
    : pos_args(), kw_args()
  { // {{{
    for (int i = 1; i < argc; ++i)
    {
      std::string arg(argv[i]);

      // Look for --key or --key=value options.
      if (arg.substr(0, 2) == "--")
      {
        std::string::size_type n = arg.find('=', 2);

        if (n == std::string::npos) // --key
          kw_args.insert(keyword_options_type::value_type(
            arg.substr(2), ""
          ));
        else                        // --key=value
          kw_args.insert(keyword_options_type::value_type(
            arg.substr(2, n - 2), arg.substr(n + 1)
          ));
        // FIX: the original also inserted a default-constructed `value_type`
        // here, which added a spurious ("", "") entry to `kw_args` for every
        // `--` option seen. That stray insert has been removed.
      }
      else // Assume it's positional.
        pos_args.push_back(arg);
    }
  } // }}}

  // Return the value for option `key`.
  //
  // Throws:
  // * `only_one_option_allowed` if there is more than one value for `key`.
  // * `required_option_missing` if there is no value for `key`.
  std::string operator()(std::string const& key) const
  {
    keyword_option_values v = kw_args.equal_range(key);

    keyword_options_type::difference_type d = std::distance(v.first, v.second);

    if (1 < d)       // Too many options.
      throw only_one_option_allowed(key, v.first, v.second);
    else if (0 == d) // No option.
      throw required_option_missing(key);

    return (*v.first).second;
  }

  // Return the value for option `key`, or `dflt` if `key` has no value.
  //
  // Throws: `only_one_option_allowed` if there is more than one value for `key`.
  std::string operator()(std::string const& key, std::string const& dflt) const
  {
    keyword_option_values v = kw_args.equal_range(key);

    keyword_options_type::difference_type d = std::distance(v.first, v.second);

    if (1 < d) // Too many options.
      throw only_one_option_allowed(key, v.first, v.second);

    if (0 == d) // No option.
      return dflt;
    else       // 1 option.
      return (*v.first).second;
  }

  // Returns `true` if the option `key` was specified at least once.
  bool has(std::string const& key) const
  {
    return kw_args.count(key) > 0;
  }

private:
  positional_options_type pos_args;
  keyword_options_type    kw_args;
};
///////////////////////////////////////////////////////////////////////////////
// Parse options (`--device=N`, `--no-header`), select the GPU, and run the
// benchmark suite at the enabled input sizes, writing CSV rows to stdout.
int main(int argc, char** argv)
{
  command_line_processor clp(argc, argv);

#if defined(HAVE_TBB)
  tbb::task_scheduler_init init;

  test_tbb(); // Sanity-check the TBB algorithms before benchmarking.
#endif

#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
  // Set the CUDA device to use for the benchmark - `0` by default.

  int device = std::atoi(clp("device", "0").c_str());
  // `std::atoi` returns 0 if the conversion fails.

  // NOTE(review): the return code of hipSetDevice is ignored; an invalid
  // --device value would go undetected here — consider checking it.
  hipSetDevice(device);
#endif

  if (!clp.has("no-header"))
    print_experiment_header();

  /* Elements | Trials */
  /*          | Baseline | Regular */
  //run_core_primitives_experiments< 1LLU << 21LLU , 4 , 16 >();
  //run_core_primitives_experiments< 1LLU << 22LLU , 4 , 16 >();
  //run_core_primitives_experiments< 1LLU << 23LLU , 4 , 16 >();
  //run_core_primitives_experiments< 1LLU << 24LLU , 4 , 16 >();
  //run_core_primitives_experiments< 1LLU << 25LLU , 4 , 16 >();
  run_core_primitives_experiments< 1LLU << 26LLU , 4 , 16 >();
  run_core_primitives_experiments< 1LLU << 27LLU , 4 , 16 >();
  //run_core_primitives_experiments< 1LLU << 28LLU , 4 , 16 >();
  //run_core_primitives_experiments< 1LLU << 29LLU , 4 , 16 >();

  return 0;
}
// TODO: Add different input sizes and half precision
| 701ea910a49567c28b7e8ac400f49be9765dfdd2.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/pair.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/detail/config.h>
#if THRUST_CPP_DIALECT >= 2011
#include <thrust/random.h>
#include <thrust/shuffle.h>
#include <random>
#endif
#include <algorithm>
#include <numeric>
#include <map>
#include <string>
#include <exception>
#include <iostream>
#include <cassert>
#include <cstdlib> // For `atoi`.
#include <climits> // For CHAR_BIT.
#include <cmath> // For `sqrt` and `abs`.
#include <stdint.h> // For `intN_t`.
#include "random.h"
#include "timer.h"
#if defined(HAVE_TBB)
#include "tbb_algos.h"
#endif
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
#include <thrust/system_error.h> // For `thrust::system_error`
#include <thrust/system/cuda/error.h> // For `thrust::cuda_category`
#endif
// We don't use THRUST_PP_STRINGIZE and THRUST_PP_CAT because they are new, and
// we want this benchmark to be backwards-compatible to older versions of Thrust.
// Two-level stringize so macro arguments are expanded before being quoted.
#define PP_STRINGIZE_(expr) #expr
#define PP_STRINGIZE(expr) PP_STRINGIZE_(expr)

// Token-pasting helper.
#define PP_CAT(a, b) a ## b

// We don't use THRUST_NOEXCEPT because it's new, and we want this benchmark to
// be backwards-compatible to older versions of Thrust.
#if THRUST_CPP_DIALECT >= 2011
#define NOEXCEPT noexcept
#else
#define NOEXCEPT throw()
#endif
///////////////////////////////////////////////////////////////////////////////
// Unary functor returning (x - average)^2 — the per-element variance term
// used by `sample_standard_deviation`. Callable on host and device.
template <typename T>
struct squared_difference
{
private:
  T const average;

public:
  __host__ __device__
  squared_difference(squared_difference const& rhs) : average(rhs.average) {}

  __host__ __device__
  squared_difference(T average_) : average(average_) {}

  __host__ __device__
  T operator()(T x) const
  {
    return (x - average) * (x - average);
  }
};
// A running reduction value paired with the number of elements folded into
// it, enabling a mean to be computed in a single reduction pass (see
// `counting_op` and `arithmetic_mean`).
template <typename T>
struct value_and_count
{
  T        value;
  uint64_t count;

  __host__ __device__
  value_and_count(value_and_count const& other)
    : value(other.value), count(other.count) {}

  // A single element: carries `value_` with a count of 1.
  __host__ __device__
  value_and_count(T const& value_)
    : value(value_), count(1) {}

  __host__ __device__
  value_and_count(T const& value_, uint64_t count_)
    : value(value_), count(count_) {}

  __host__ __device__
  value_and_count& operator=(value_and_count const& other)
  {
    value = other.value;
    count = other.count;
    return *this;
  }

  // Assigning a raw value resets the count to 1 (single element).
  __host__ __device__
  value_and_count& operator=(T const& value_)
  {
    value = value_;
    count = 1;
    return *this;
  }
};
// Adapts a binary reduction `ReduceOp` to operate on `value_and_count`:
// values are combined with `reduce` while element counts are summed.
template <typename T, typename ReduceOp>
struct counting_op
{
private:
  ReduceOp reduce;

public:
  __host__ __device__
  counting_op() : reduce() {}

  __host__ __device__
  counting_op(counting_op const& other) : reduce(other.reduce) {}

  __host__ __device__
  counting_op(ReduceOp const& reduce_) : reduce(reduce_) {}

  // Fold one raw element into a running (value, count) pair.
  __host__ __device__
  value_and_count<T> operator()(
    value_and_count<T> const& x
  , T const& y
  ) const
  {
    return value_and_count<T>(reduce(x.value, y), x.count + 1);
  }

  // Merge two partial (value, count) reductions.
  __host__ __device__
  value_and_count<T> operator()(
    value_and_count<T> const& x
  , value_and_count<T> const& y
  ) const
  {
    return value_and_count<T>(reduce(x.value, y.value), x.count + y.count);
  }
};
// Arithmetic mean of [first, last), seeded with `init`, computed with a
// single `thrust::reduce` that sums values and counts elements at once.
template <typename InputIt, typename T>
T arithmetic_mean(InputIt first, InputIt last, T init)
{
  value_and_count<T> init_vc(init, 0);

  counting_op<T, thrust::plus<T> > reduce_vc;

  value_and_count<T> vc
    = thrust::reduce(first, last, init_vc, reduce_vc);

  return vc.value / vc.count;
}
// Convenience overload: arithmetic mean with a value-initialized accumulator.
template <typename InputIt>
typename thrust::iterator_traits<InputIt>::value_type
arithmetic_mean(InputIt first, InputIt last)
{
  typedef typename thrust::iterator_traits<InputIt>::value_type T;
  return arithmetic_mean(first, last, T());
}
// Sample standard deviation (Bessel-corrected, n - 1 denominator) of
// [first, last) about the precomputed mean `average`, computed in one
// `thrust::transform_reduce` pass.
template <typename InputIt, typename T>
T sample_standard_deviation(InputIt first, InputIt last, T average)
{
  value_and_count<T> init_vc(T(), 0);

  counting_op<T, thrust::plus<T> > reduce_vc;

  squared_difference<T> transform(average);

  value_and_count<T> vc
    = thrust::transform_reduce(first, last, transform, init_vc, reduce_vc);

  return std::sqrt(vc.value / T(vc.count - 1));
}
///////////////////////////////////////////////////////////////////////////////
// Formulas for propagation of uncertainty from:
//
// https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
//
// Even though it's Wikipedia, I trust it as I helped write that table.
//
// XXX Replace with a proper reference.
// Compute the propagated uncertainty from the multiplication of two uncertain
// values, `A +/- A_unc` and `B +/- B_unc`. Given `f = AB` or `f = A/B`, where
// `A != 0` and `B != 0`, the uncertainty in `f` is approximately:
//
// f_unc = abs(f) * sqrt((A_unc / A) ^ 2 + (B_unc / B) ^ 2)
//
// Propagated uncertainty of `f = A * B` or `f = A / B` given the
// uncertainties of A and B. Precondition: `A != 0` and `B != 0` (see the
// formula reference in the comment block above).
template <typename T>
__host__ __device__
T uncertainty_multiplicative(
  T const& f
, T const& A, T const& A_unc
, T const& B, T const& B_unc
)
{
  return std::abs(f)
       * std::sqrt((A_unc / A) * (A_unc / A) + (B_unc / B) * (B_unc / B));
}
// Compute the propagated uncertainty from addition of two uncertain values,
// `A +/- A_unc` and `B +/- B_unc`. Given `f = cA + dB` (where `c` and `d` are
// certain constants), the uncertainty in `f` is approximately:
//
// f_unc = sqrt(c ^ 2 * A_unc ^ 2 + d ^ 2 * B_unc ^ 2)
//
// Propagated uncertainty of `f = cA + dB` where `c` and `d` are exact
// (uncertainty-free) constants.
template <typename T>
__host__ __device__
T uncertainty_additive(
  T const& c, T const& A_unc
, T const& d, T const& B_unc
)
{
  return std::sqrt((c * c * A_unc * A_unc) + (d * d * B_unc * B_unc));
}
///////////////////////////////////////////////////////////////////////////////
// Return the significant digit of `x`: the number of digits after the
// decimal point to round to in order to keep one significant figure
// (negative values indicate rounding before the decimal point). By
// convention, 0 is returned for x == 0.
template <typename T>
int find_significant_digit(T x)
{
  if (T(0) == x)
    return 0;

  double const order_of_magnitude = std::floor(std::log10(std::abs(x)));
  return -static_cast<int>(order_of_magnitude);
}
// Round `x` to `ndigits` digits after the decimal point (Python-style:
// negative `ndigits` rounds before the point, halves round away from zero).
template <typename T, typename N>
T round_to_precision(T x, N ndigits)
{
  double const sign  = (x < 0.0) ? -1.0 : 1.0;
  double const scale = std::pow(T(10.0), ndigits);

  double const scaled_magnitude = std::floor(x * sign * scale + 0.5);
  return (scaled_magnitude / scale) * sign;
}
///////////////////////////////////////////////////////////////////////////////
// Print the two CSV header rows (column names, then units) to stdout. The
// column order must stay in sync with the row emitted by
// `experiment_driver::run_experiment`.
void print_experiment_header()
{ // {{{
  std::cout << "Thrust Version"
            << "," << "Algorithm"
            << "," << "Element Type"
            << "," << "Element Size"
            << "," << "Elements per Trial"
            << "," << "Total Input Size"
            << "," << "STL Trials"
            << "," << "STL Average Walltime"
            << "," << "STL Walltime Uncertainty"
            << "," << "STL Average Throughput"
            << "," << "STL Throughput Uncertainty"
            << "," << "Thrust Trials"
            << "," << "Thrust Average Walltime"
            << "," << "Thrust Walltime Uncertainty"
            << "," << "Thrust Average Throughput"
            << "," << "Thrust Throughput Uncertainty"
#if defined(HAVE_TBB)
            << "," << "TBB Trials"
            << "," << "TBB Average Walltime"
            << "," << "TBB Walltime Uncertainty"
            << "," << "TBB Average Throughput"
            << "," << "TBB Throughput Uncertainty"
#endif
            << std::endl;

  // Units row (blank where a column is dimensionless).
  std::cout << ""                    // Thrust Version.
            << "," << ""             // Algorithm.
            << "," << ""             // Element Type.
            << "," << "bits/element" // Element Size.
            << "," << "elements"     // Elements per Trial.
            << "," << "MiBs"         // Total Input Size.
            << "," << "trials"       // STL Trials.
            << "," << "secs"         // STL Average Walltime.
            << "," << "secs"         // STL Walltime Uncertainty.
            << "," << "elements/sec" // STL Average Throughput.
            << "," << "elements/sec" // STL Throughput Uncertainty.
            << "," << "trials"       // Thrust Trials.
            << "," << "secs"         // Thrust Average Walltime.
            << "," << "secs"         // Thrust Walltime Uncertainty.
            << "," << "elements/sec" // Thrust Average Throughput.
            << "," << "elements/sec" // Thrust Throughput Uncertainty.
#if defined(HAVE_TBB)
            << "," << "trials"       // TBB Trials.
            << "," << "secs"         // TBB Average Walltime.
            << "," << "secs"         // TBB Walltime Uncertainty.
            << "," << "elements/sec" // TBB Average Throughput.
            << "," << "elements/sec" // TBB Throughput Uncertainty.
#endif
            << std::endl;
} // }}}
///////////////////////////////////////////////////////////////////////////////
// Timing statistics for one set of trials of a single implementation.
struct experiment_results
{
  double const average_time; // Arithmetic mean of trial times in seconds.
  double const stdev_time;   // Sample standard deviation of trial times.

  experiment_results(double average_time_, double stdev_time_)
    : average_time(average_time_), stdev_time(stdev_time_) {}
};
///////////////////////////////////////////////////////////////////////////////
template <
template <typename> class Test
, typename ElementMetaType // Has an embedded typedef `type,
// and a static method `name` that
// returns a char const*.
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
struct experiment_driver
{
typedef typename ElementMetaType::type element_type;
static char const* const test_name;
static char const* const element_type_name; // Element type name as a string.
static uint64_t const elements; // # of elements per trial.
static uint64_t const element_size; // Size of each element in bits.
static double const input_size; // `elements` * `element_size` in MiB.
static uint64_t const baseline_trials; // # of gbdt trials per experiment.
static uint64_t const regular_trials; // # of regular trials per experiment.
  // Run the STL baseline, Thrust, and (optionally) TBB experiments for this
  // configuration, derive throughput with propagated uncertainty, round every
  // figure to the significant digit of its uncertainty, and emit one CSV row
  // (column order must match `print_experiment_header`) to stdout.
  static void run_experiment()
  { // {{{
    experiment_results stl    = std_experiment();
    experiment_results thrust = thrust_experiment();
#if defined(HAVE_TBB)
    experiment_results tbb    = tbb_experiment();
#endif

    double stl_average_walltime    = stl.average_time;
    double thrust_average_walltime = thrust.average_time;
#if defined(HAVE_TBB)
    double tbb_average_walltime    = tbb.average_time;
#endif

    // Throughput is elements per second of average walltime.
    double stl_average_throughput    = elements / stl.average_time;
    double thrust_average_throughput = elements / thrust.average_time;
#if defined(HAVE_TBB)
    double tbb_average_throughput    = elements / tbb.average_time;
#endif

    double stl_walltime_uncertainty    = stl.stdev_time;
    double thrust_walltime_uncertainty = thrust.stdev_time;
#if defined(HAVE_TBB)
    double tbb_walltime_uncertainty    = tbb.stdev_time;
#endif

    // Propagate the walltime uncertainty through the throughput division;
    // the element count is exact, so its uncertainty is 0.
    double stl_throughput_uncertainty = uncertainty_multiplicative(
      stl_average_throughput
    , double(elements), 0.0
    , stl_average_walltime, stl_walltime_uncertainty
    );
    double thrust_throughput_uncertainty = uncertainty_multiplicative(
      thrust_average_throughput
    , double(elements), 0.0
    , thrust_average_walltime, thrust_walltime_uncertainty
    );
#if defined(HAVE_TBB)
    double tbb_throughput_uncertainty = uncertainty_multiplicative(
      tbb_average_throughput
    , double(elements), 0.0
    , tbb_average_walltime, tbb_walltime_uncertainty
    );
#endif

    // Round the average walltime and walltime uncertainty to the
    // significant figure of the walltime uncertainty.
    int stl_walltime_precision = std::max(
      find_significant_digit(stl.average_time)
    , find_significant_digit(stl.stdev_time)
    );
    int thrust_walltime_precision = std::max(
      find_significant_digit(thrust.average_time)
    , find_significant_digit(thrust.stdev_time)
    );
#if defined(HAVE_TBB)
    int tbb_walltime_precision = std::max(
      find_significant_digit(tbb.average_time)
    , find_significant_digit(tbb.stdev_time)
    );
#endif

    stl_average_walltime = round_to_precision(
      stl_average_walltime, stl_walltime_precision
    );
    thrust_average_walltime = round_to_precision(
      thrust_average_walltime, thrust_walltime_precision
    );
#if defined(HAVE_TBB)
    tbb_average_walltime = round_to_precision(
      tbb_average_walltime, tbb_walltime_precision
    );
#endif

    stl_walltime_uncertainty = round_to_precision(
      stl_walltime_uncertainty, stl_walltime_precision
    );
    thrust_walltime_uncertainty = round_to_precision(
      thrust_walltime_uncertainty, thrust_walltime_precision
    );
#if defined(HAVE_TBB)
    tbb_walltime_uncertainty = round_to_precision(
      tbb_walltime_uncertainty, tbb_walltime_precision
    );
#endif

    // Round the average throughput and throughput uncertainty to the
    // significant figure of the throughput uncertainty.
    int stl_throughput_precision = std::max(
      find_significant_digit(stl_average_throughput)
    , find_significant_digit(stl_throughput_uncertainty)
    );
    int thrust_throughput_precision = std::max(
      find_significant_digit(thrust_average_throughput)
    , find_significant_digit(thrust_throughput_uncertainty)
    );
#if defined(HAVE_TBB)
    int tbb_throughput_precision = std::max(
      find_significant_digit(tbb_average_throughput)
    , find_significant_digit(tbb_throughput_uncertainty)
    );
#endif

    stl_average_throughput = round_to_precision(
      stl_average_throughput, stl_throughput_precision
    );
    thrust_average_throughput = round_to_precision(
      thrust_average_throughput, thrust_throughput_precision
    );
#if defined(HAVE_TBB)
    tbb_average_throughput = round_to_precision(
      tbb_average_throughput, tbb_throughput_precision
    );
#endif

    stl_throughput_uncertainty = round_to_precision(
      stl_throughput_uncertainty, stl_throughput_precision
    );
    thrust_throughput_uncertainty = round_to_precision(
      thrust_throughput_uncertainty, thrust_throughput_precision
    );
#if defined(HAVE_TBB)
    tbb_throughput_uncertainty = round_to_precision(
      tbb_throughput_uncertainty, tbb_throughput_precision
    );
#endif

    // Emit the CSV row.
    std::cout << THRUST_VERSION                      // Thrust Version.
              << "," << test_name                    // Algorithm.
              << "," << element_type_name            // Element Type.
              << "," << element_size                 // Element Size.
              << "," << elements                     // Elements per Trial.
              << "," << input_size                   // Total Input Size.
              << "," << baseline_trials              // STL Trials.
              << "," << stl_average_walltime         // STL Average Walltime.
              << "," << stl_walltime_uncertainty     // STL Walltime Uncertainty.
              << "," << stl_average_throughput       // STL Average Throughput.
              << "," << stl_throughput_uncertainty   // STL Throughput Uncertainty.
              << "," << regular_trials               // Thrust Trials.
              << "," << thrust_average_walltime      // Thrust Average Walltime.
              << "," << thrust_walltime_uncertainty  // Thrust Walltime Uncertainty.
              << "," << thrust_average_throughput    // Thrust Average Throughput.
              << "," << thrust_throughput_uncertainty // Thrust Throughput Uncertainty.
#if defined(HAVE_TBB)
              << "," << regular_trials               // TBB Trials.
              << "," << tbb_average_walltime         // TBB Average Walltime.
              << "," << tbb_walltime_uncertainty     // TBB Walltime Uncertainty.
              << "," << tbb_average_throughput       // TBB Average Throughput.
              << "," << tbb_throughput_uncertainty   // TBB Throughput Uncertainty.
#endif
              << std::endl;
  } // }}}
private:
// Run the STL (baseline) variant of the test and return its timing stats.
static experiment_results std_experiment()
{
return experiment<typename Test<element_type>::std_trial>();
}
// Run the Thrust variant of the test and return its timing stats.
static experiment_results thrust_experiment()
{
return experiment<typename Test<element_type>::thrust_trial>();
}
#if defined(HAVE_TBB)
// Run the TBB variant of the test and return its timing stats.
static experiment_results tbb_experiment()
{
return experiment<typename Test<element_type>::tbb_trial>();
}
#endif
// Run `trials` timed executions of Trial and return the arithmetic mean and
// sample standard deviation of the wall-clock times.  A Trial provides
// setup(elements) (allocate + randomize input), operator() (the measured
// work) and is_baseline() (selects the trial count).
template <typename Trial>
static experiment_results experiment()
{ // {{{
Trial trial;
// Allocate storage and generate random input for the warmup trial.
trial.setup(elements);
// Warmup trial: untimed first run to absorb one-time startup effects.
trial();
// Baseline (STL) trials use a separate, typically smaller, trial count.
uint64_t const trials
= trial.is_baseline() ? baseline_trials : regular_trials;
std::vector<double> times;
times.reserve(trials);
for (uint64_t t = 0; t < trials; ++t)
{
// Generate random input for next trial (kept outside the timed region).
trial.setup(elements);
steady_timer e;
// Benchmark: only the trial's operator() is timed.
e.start();
trial();
e.stop();
times.push_back(e.seconds_elapsed());
}
double average_time
= arithmetic_mean(times.begin(), times.end());
double stdev_time
= sample_standard_deviation(times.begin(), times.end(), average_time);
return experiment_results(average_time, stdev_time);
} // }}}
};
// Out-of-line definitions for experiment_driver's static data members.
// Each is computed once per template instantiation.
// Human-readable algorithm name, e.g. "reduce" or "sort".
template <
template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
char const* const
experiment_driver<
Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::test_name
= Test<typename ElementMetaType::type>::test_name();
// Human-readable element type name, e.g. "int32_t".
template <
template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
char const* const
experiment_driver<
Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::element_type_name
= ElementMetaType::name();
// Element size in bits.
template <
template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::element_size
= CHAR_BIT * sizeof(typename ElementMetaType::type);
// Number of elements processed per trial.
template <
template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::elements
= Elements;
// Total input size in MiB (elements * bytes-per-element / 2^20).
template <
template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
double const
experiment_driver<
Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::input_size
= double( Elements /* [elements] */
* sizeof(typename ElementMetaType::type) /* [bytes/element] */
)
/ double(1024 * 1024 /* [bytes/MiB] */);
// Trial count for the STL baseline runs.
template <
template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::baseline_trials
= BaselineTrials;
// Trial count for the Thrust/TBB runs.
template <
template <typename> class Test
, typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
uint64_t const
experiment_driver<
Test, ElementMetaType, Elements, BaselineTrials, RegularTrials
>::regular_trials
= RegularTrials;
///////////////////////////////////////////////////////////////////////////////
// Never create variables, pointers or references of any of the `*_trial_base`
// classes. They are purely mixin base classes and do not have vtables and
// virtual destructors. Using them for polymorphism instead of composition will
// probably cause slicing.
// Tag types selecting the trial count: baseline trials (STL) vs. regular
// trials (Thrust/TBB).
struct baseline_trial {};
struct regular_trial {};
template <typename TrialKind = regular_trial>
struct trial_base;
// Baseline trials report is_baseline() == true so the driver uses
// baseline_trials as the trial count.
template <>
struct trial_base<baseline_trial>
{
static bool is_baseline() { return true; }
};
// Regular trials use regular_trials as the trial count.
template <>
struct trial_base<regular_trial>
{
static bool is_baseline() { return false; }
};
// Mixin for trials that operate in place on a single container: setup()
// resizes the input and fills it with random values before each run.
template <typename Container, typename TrialKind = regular_trial>
struct inplace_trial_base : trial_base<TrialKind>
{
Container input;
void setup(uint64_t elements)
{
input.resize(elements);
randomize(input);
}
};
// Mixin for trials that read `input` and write `output`; only the input is
// randomized, the output is merely sized to match.
template <typename Container, typename TrialKind = regular_trial>
struct copy_trial_base : trial_base<TrialKind>
{
Container input;
Container output;
void setup(uint64_t elements)
{
input.resize(elements);
output.resize(elements);
randomize(input);
}
};
#if THRUST_CPP_DIALECT >= 2011
// Mixin for shuffle trials (C++11+ only): identical setup to
// inplace_trial_base — resize and randomize a single container.
template <typename Container, typename TrialKind = regular_trial>
struct shuffle_trial_base : trial_base<TrialKind>
{
Container input;
void setup(uint64_t elements)
{
input.resize(elements);
randomize(input);
}
};
#endif
///////////////////////////////////////////////////////////////////////////////
// Benchmark: sum-reduction.  STL baseline uses std::accumulate, the Thrust
// variant uses thrust::reduce on a device_vector, and the optional TBB
// variant forwards to the project-local tbb_reduce helper.
template <typename T>
struct reduce_tester
{
static char const* test_name() { return "reduce"; }
struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
{
void operator()()
{
// The result is consumed by a (practically never-taken) branch so the
// compiler cannot discard the whole reduction as dead code.
if (std::accumulate(this->input.begin(), this->input.end(), T(0)) == 0)
// Prevent optimizer from removing body.
std::cout << "xyz";
}
};
struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
{
void operator()()
{
thrust::reduce(this->input.begin(), this->input.end());
}
};
#if defined(HAVE_TBB)
struct tbb_trial : inplace_trial_base<std::vector<T> >
{
void operator()()
{
tbb_reduce(this->input);
}
};
#endif
};
// Benchmark: sort.  STL baseline uses std::sort; the Thrust variant sorts a
// device_vector and synchronizes so the timer captures the full device-side
// work; the optional TBB variant forwards to the project-local tbb_sort.
template <typename T>
struct sort_tester
{
static char const* test_name() { return "sort"; }
struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
{
void operator()()
{
std::sort(this->input.begin(), this->input.end());
}
};
struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
{
void operator()()
{
thrust::sort(this->input.begin(), this->input.end());
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// Block until the asynchronous sort completes so that the benchmark
// timer measures the whole operation, and surface any kernel error.
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess)
throw thrust::error_code(err, thrust::cuda_category());
#endif
}
};
#if defined(HAVE_TBB)
struct tbb_trial : inplace_trial_base<std::vector<T> >
{
void operator()()
{
tbb_sort(this->input);
}
};  // FIX: the terminating ';' was missing, which fails to compile when
    // HAVE_TBB is defined (all sibling testers end their trials with "};").
#endif
};
// Benchmark: element-wise in-place negation via transform.  The Thrust
// variant synchronizes after the call so the timer measures the whole
// device-side operation.
template <typename T>
struct transform_inplace_tester
{
static char const* test_name() { return "transform_inplace"; }
struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
{
void operator()()
{
std::transform(
this->input.begin(), this->input.end(), this->input.begin()
, thrust::negate<T>()
);
}
};
struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
{
void operator()()
{
thrust::transform(
this->input.begin(), this->input.end(), this->input.begin()
, thrust::negate<T>()
);
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// Wait for the asynchronous transform and report any kernel failure.
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess)
throw thrust::error_code(err, thrust::cuda_category());
#endif
}
};
#if defined(HAVE_TBB)
struct tbb_trial : inplace_trial_base<std::vector<T> >
{
void operator()()
{
tbb_transform(this->input);
}
};
#endif
};
// Benchmark: in-place inclusive prefix sum.  STL baseline uses
// std::partial_sum; the Thrust variant uses thrust::inclusive_scan and
// synchronizes so the timer measures the whole device-side operation.
template <typename T>
struct inclusive_scan_inplace_tester
{
static char const* test_name() { return "inclusive_scan_inplace"; }
struct std_trial : inplace_trial_base<std::vector<T>, baseline_trial>
{
void operator()()
{
std::partial_sum(
this->input.begin(), this->input.end(), this->input.begin()
);
}
};
struct thrust_trial : inplace_trial_base<thrust::device_vector<T> >
{
void operator()()
{
thrust::inclusive_scan(
this->input.begin(), this->input.end(), this->input.begin()
);
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// Wait for the asynchronous scan and report any kernel failure.
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess)
throw thrust::error_code(err, thrust::cuda_category());
#endif
}
};
#if defined(HAVE_TBB)
struct tbb_trial : inplace_trial_base<std::vector<T> >
{
void operator()()
{
tbb_scan(this->input);
}
};
#endif
};
// Benchmark: element-wise copy from `input` to `output` (copy_trial_base
// provides both containers; only `input` is randomized).
template <typename T>
struct copy_tester
{
static char const* test_name() { return "copy"; }
struct std_trial : copy_trial_base<std::vector<T> >
{
void operator()()
{
std::copy(this->input.begin(), this->input.end(), this->output.begin());
}
};
struct thrust_trial : copy_trial_base<thrust::device_vector<T> >
{
void operator()()
{
// FIX: the destination was this->input.begin(), i.e. the benchmark
// copied the input onto itself instead of into `output` as the STL and
// TBB trials do.  Copy into `output` so all variants measure the same
// operation.
thrust::copy(this->input.begin(), this->input.end(), this->output.begin());
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// Wait for the asynchronous copy and report any kernel failure.
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess)
throw thrust::error_code(err, thrust::cuda_category());
#endif
}
};
#if defined(HAVE_TBB)
struct tbb_trial : copy_trial_base<std::vector<T> >
{
void operator()()
{
tbb_copy(this->input, this->output);
}
};
#endif
};
#if THRUST_CPP_DIALECT >= 2011
// Benchmark: random shuffle (C++11+ only).  Each trial keeps its own random
// engine as a member, so successive runs advance the engine state.  No TBB
// variant is provided for this test.
template <typename T>
struct shuffle_tester
{
static char const* test_name() { return "shuffle"; }
struct std_trial : shuffle_trial_base<std::vector<T>, baseline_trial>
{
std::default_random_engine g;
void operator()()
{
std::shuffle(this->input.begin(), this->input.end(), this->g);
}
};
struct thrust_trial : shuffle_trial_base<thrust::device_vector<T> >
{
thrust::default_random_engine g;
void operator()()
{
thrust::shuffle(this->input.begin(), this->input.end(), this->g);
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// Wait for the asynchronous shuffle and report any kernel failure.
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess)
throw thrust::error_code(err, thrust::cuda_category());
#endif
}
};
};
#endif
///////////////////////////////////////////////////////////////////////////////
// Run every core-primitive benchmark for one element type.  `Elements` is a
// byte budget: most tests divide it by sizeof(element) so every type
// processes the same memory footprint; sort instead uses a fixed element
// count (Elements >> 6) because its cost scales with element count.
template <
typename ElementMetaType
, uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
void run_core_primitives_experiments_for_type()
{
experiment_driver<
reduce_tester
, ElementMetaType
, Elements / sizeof(typename ElementMetaType::type)
, BaselineTrials
, RegularTrials
>::run_experiment();
experiment_driver<
transform_inplace_tester
, ElementMetaType
, Elements / sizeof(typename ElementMetaType::type)
, BaselineTrials
, RegularTrials
>::run_experiment();
experiment_driver<
inclusive_scan_inplace_tester
, ElementMetaType
, Elements / sizeof(typename ElementMetaType::type)
, BaselineTrials
, RegularTrials
>::run_experiment();
experiment_driver<
sort_tester
, ElementMetaType
// , Elements / sizeof(typename ElementMetaType::type)
, (Elements >> 6) // Sorting is more sensitive to element count than
// memory footprint.
, BaselineTrials
, RegularTrials
>::run_experiment();
experiment_driver<
copy_tester
, ElementMetaType
, Elements / sizeof(typename ElementMetaType::type)
, BaselineTrials
, RegularTrials
>::run_experiment();
#if THRUST_CPP_DIALECT >= 2011
experiment_driver<
shuffle_tester
, ElementMetaType
, Elements / sizeof(typename ElementMetaType::type)
, BaselineTrials
, RegularTrials
>::run_experiment();
#endif
}
///////////////////////////////////////////////////////////////////////////////
// Generates a meta-type struct `<T>_meta` exposing the element type and its
// stringized name; used below to instantiate the experiments per type.
#define DEFINE_ELEMENT_META_TYPE(T) \
struct PP_CAT(T, _meta) \
{ \
typedef T type; \
\
static char const* name() { return PP_STRINGIZE(T); } \
}; \
/**/
// Meta types for every element type benchmarked by this program.
DEFINE_ELEMENT_META_TYPE(char);
DEFINE_ELEMENT_META_TYPE(int);
DEFINE_ELEMENT_META_TYPE(int8_t);
DEFINE_ELEMENT_META_TYPE(int16_t);
DEFINE_ELEMENT_META_TYPE(int32_t);
DEFINE_ELEMENT_META_TYPE(int64_t);
DEFINE_ELEMENT_META_TYPE(float);
DEFINE_ELEMENT_META_TYPE(double);
///////////////////////////////////////////////////////////////////////////////
// Run the full benchmark suite for every supported element type with the
// given byte budget and trial counts.
template <
uint64_t Elements
, uint64_t BaselineTrials
, uint64_t RegularTrials
>
void run_core_primitives_experiments()
{
run_core_primitives_experiments_for_type<
char_meta, Elements, BaselineTrials, RegularTrials
>();
run_core_primitives_experiments_for_type<
int_meta, Elements, BaselineTrials, RegularTrials
>();
run_core_primitives_experiments_for_type<
int8_t_meta, Elements, BaselineTrials, RegularTrials
>();
run_core_primitives_experiments_for_type<
int16_t_meta, Elements, BaselineTrials, RegularTrials
>();
run_core_primitives_experiments_for_type<
int32_t_meta, Elements, BaselineTrials, RegularTrials
>();
run_core_primitives_experiments_for_type<
int64_t_meta, Elements, BaselineTrials, RegularTrials
>();
run_core_primitives_experiments_for_type<
float_meta, Elements, BaselineTrials, RegularTrials
>();
run_core_primitives_experiments_for_type<
double_meta, Elements, BaselineTrials, RegularTrials
>();
}
///////////////////////////////////////////////////////////////////////////////
// XXX Use `std::string_view` when possible.
// Split `str` on every occurrence of `delim`, discarding empty tokens.
// Returns the (possibly empty) list of non-empty pieces in order.
// XXX Use `std::string_view` when possible.
std::vector<std::string> split(std::string const& str, std::string const& delim)
{
std::vector<std::string> pieces;
std::string::size_type start = 0;
std::string::size_type end = 0;
do
{
// Find the next delimiter; treat "not found" as end-of-string.
end = str.find(delim, start);
if (end == std::string::npos) end = str.length();
// Keep only non-empty pieces (consecutive delimiters produce none).
if (end > start) pieces.push_back(str.substr(start, end - start));
start = end + delim.length();
}
while (end < str.length() && start < str.length());
return pieces;
}
///////////////////////////////////////////////////////////////////////////////
// Base class for all command-line parsing errors thrown by
// command_line_processor.
struct command_line_option_error : std::exception
{
virtual ~command_line_option_error() NOEXCEPT {}
virtual const char* what() const NOEXCEPT = 0;
};
// Thrown when an option that may appear only once was supplied multiple
// times; the message lists every value that was received.
struct only_one_option_allowed : command_line_option_error
{
// Construct a new `only_one_option_allowed` exception. `key` is the
// option name and `[first, last)` is a sequence of
// `std::pair<std::string const, std::string>`s (the values).
template <typename InputIt>
only_one_option_allowed(std::string const& key, InputIt first, InputIt last)
: message()
{
message = "Only one `--";
message += key;
message += "` option is allowed, but multiple were received: ";
for (; first != last; ++first)
{
message += "`";
message += (*first).second;
message += "` ";
}
// Remove the trailing space added by the last iteration of the above loop.
message.erase(message.size() - 1, 1);
message += ".";
}
virtual ~only_one_option_allowed() NOEXCEPT {}
virtual const char* what() const NOEXCEPT
{
return message.c_str();
}
private:
// Fully formatted diagnostic, built once in the constructor.
std::string message;
};
// Thrown when a required option was not supplied on the command line.
struct required_option_missing : command_line_option_error
{
// Construct a new `requirement_option_missing` exception. `key` is the
// option name.
required_option_missing(std::string const& key)
: message()
{
message = "`--";
message += key;
message += "` option is required.";
}
virtual ~required_option_missing() NOEXCEPT {}
virtual const char* what() const NOEXCEPT
{
return message.c_str();
}
private:
// Fully formatted diagnostic, built once in the constructor.
std::string message;
};
// Minimal command-line parser: `--key` / `--key=value` arguments go into a
// multimap (so repeated keys are detected); everything else is collected as
// a positional argument.
struct command_line_processor
{
typedef std::vector<std::string> positional_options_type;
typedef std::multimap<std::string, std::string> keyword_options_type;
typedef std::pair<
keyword_options_type::const_iterator
, keyword_options_type::const_iterator
> keyword_option_values;
command_line_processor(int argc, char** argv)
: pos_args(), kw_args()
{ // {{{
for (int i = 1; i < argc; ++i)
{
std::string arg(argv[i]);
// Look for --key or --key=value options.
if (arg.substr(0, 2) == "--")
{
std::string::size_type n = arg.find('=', 2);
if (n == std::string::npos) // --key
kw_args.insert(keyword_options_type::value_type(
arg.substr(2), ""
));
else // --key=value
kw_args.insert(keyword_options_type::value_type(
arg.substr(2, n - 2), arg.substr(n + 1)
));
// FIX: a default-constructed `key_value` pair was declared here and
// unconditionally inserted after the branches, adding a spurious
// ("", "") entry to kw_args for every `--` argument (e.g. making
// has("") report true).  The stray variable and insert are removed.
}
else // Assume it's positional.
pos_args.push_back(arg);
}
} // }}}
// Return the value for option `key`.
//
// Throws:
// * `only_one_option_allowed` if there is more than one value for `key`.
// * `required_option_missing` if there is no value for `key`.
std::string operator()(std::string const& key) const
{
keyword_option_values v = kw_args.equal_range(key);
keyword_options_type::difference_type d = std::distance(v.first, v.second);
if (1 < d) // Too many options.
throw only_one_option_allowed(key, v.first, v.second);
else if (0 == d) // No option.
throw required_option_missing(key);
return (*v.first).second;
}
// Return the value for option `key`, or `dflt` if `key` has no value.
//
// Throws: `only_one_option_allowed` if there is more than one value for `key`.
std::string operator()(std::string const& key, std::string const& dflt) const
{
keyword_option_values v = kw_args.equal_range(key);
keyword_options_type::difference_type d = std::distance(v.first, v.second);
if (1 < d) // Too many options.
throw only_one_option_allowed(key, v.first, v.second);
if (0 == d) // No option.
return dflt;
else // 1 option.
return (*v.first).second;
}
// Returns `true` if the option `key` was specified at least once.
bool has(std::string const& key) const
{
return kw_args.count(key) > 0;
}
private:
positional_options_type pos_args;
keyword_options_type kw_args;
};
///////////////////////////////////////////////////////////////////////////////
// Entry point: parses options (--device=N, --no-header), initializes the
// optional TBB scheduler, selects the CUDA device, prints the CSV header
// unless suppressed, then runs the benchmark suite at two byte budgets
// (64 MiB and 128 MiB; other sizes are kept commented out).
int main(int argc, char** argv)
{
command_line_processor clp(argc, argv);
#if defined(HAVE_TBB)
tbb::task_scheduler_init init;
test_tbb();
#endif
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// Set the CUDA device to use for the benchmark - `0` by default.
int device = std::atoi(clp("device", "0").c_str());
// `std::atoi` returns 0 if the conversion fails.
cudaSetDevice(device);
#endif
if (!clp.has("no-header"))
print_experiment_header();
/* Elements | Trials */
/* | Baseline | Regular */
//run_core_primitives_experiments< 1LLU << 21LLU , 4 , 16 >();
//run_core_primitives_experiments< 1LLU << 22LLU , 4 , 16 >();
//run_core_primitives_experiments< 1LLU << 23LLU , 4 , 16 >();
//run_core_primitives_experiments< 1LLU << 24LLU , 4 , 16 >();
//run_core_primitives_experiments< 1LLU << 25LLU , 4 , 16 >();
run_core_primitives_experiments< 1LLU << 26LLU , 4 , 16 >();
run_core_primitives_experiments< 1LLU << 27LLU , 4 , 16 >();
//run_core_primitives_experiments< 1LLU << 28LLU , 4 , 16 >();
//run_core_primitives_experiments< 1LLU << 29LLU , 4 , 16 >();
return 0;
}
// TODO: Add different input sizes and half precision
|
1e5fd220c704896c810ca173f0ea919278f6f741.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
// TODO use intrinsics like __sinf and so on
// Camera/warp parameters in device constant memory; filled by the host-side
// buildWarp*Maps() wrappers below via hipMemcpyToSymbol before each launch.
namespace build_warp_maps
{
__constant__ float ck_rinv[9]; // 3x3 matrix applied to the back-projected ray
__constant__ float cr_kinv[9]; // uploaded by the wrappers, not read by any kernel in this file
__constant__ float ct[3];      // translation terms (used by PlaneMapper only)
__constant__ float cscale;     // scale divisor for warped-image coordinates
}
// Backward map for planar warping: converts a warped coordinate (u,v) to a
// source-image point (x,y) by building a 3-vector from the scaled inputs,
// multiplying by ck_rinv, and doing a perspective divide by z.
// NOTE(review): presumably ck_rinv is (K*R)^-1 from the stitching pipeline —
// confirm against the host code that fills these constants.
class PlaneMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
float x_ = u / cscale - ct[0];
float y_ = v / cscale - ct[1];
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]);
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]);
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]);
x /= z;
y /= z;
}
};
// Backward map for cylindrical warping: lifts (u,v) onto a unit cylinder
// (sin u, y, cos u), applies ck_rinv, and divides by z.  Points that project
// behind the camera (z <= 0) are marked invalid with x = y = -1.
class CylindricalMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
u /= cscale;
float x_ = ::sinf(u);
float y_ = v / cscale;
float z_ = ::cosf(u);
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
};
// Backward map for spherical warping: lifts (u,v) onto a unit sphere
// direction (sin v sin u, -cos v, sin v cos u), applies ck_rinv, and divides
// by z.  Points behind the camera (z <= 0) are marked invalid with -1.
class SphericalMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
v /= cscale;
u /= cscale;
float sinv = ::sinf(v);
float x_ = sinv * ::sinf(u);
float y_ = -::cosf(v);
float z_ = sinv * ::cosf(u);
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
};
// One thread per output pixel (2D launch, bounds-checked).  For the tile
// whose top-left corner is (tl_u, tl_v), computes the backward map via
// Mapper::mapBackward and writes the source coordinates to map_x / map_y
// (presumably consumed by a remap step — confirm with the caller).
template <typename Mapper>
__global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows,
PtrStepf map_x, PtrStepf map_y)
{
int du = blockIdx.x * blockDim.x + threadIdx.x;
int dv = blockIdx.y * blockDim.y + threadIdx.y;
if (du < cols && dv < rows)
{
float u = tl_u + du;
float v = tl_v + dv;
float x, y;
Mapper::mapBackward(u, v, x, y);
map_x.ptr(dv)[du] = x;
map_y.ptr(dv)[du] = y;
}
}
// Host wrapper: uploads the plane-warp constants, launches the backward-map
// kernel over map_x/map_y with 32x8 thread blocks, and checks for launch
// errors.  NOTE(review): the kernel is always launched on the null stream;
// `stream` only controls whether the final device synchronization is
// performed — confirm this matches the caller's stream expectations.
void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], const float t[3],
float scale, hipStream_t stream)
{
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ct, t, 3*sizeof(float)));
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( buildWarpMapsKernel<PlaneMapper>), dim3(grid),dim3(threads), 0, 0, tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(hipGetLastError());
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Host wrapper: same as buildWarpPlaneMaps but for the cylindrical mapper
// (no translation vector is needed).
void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
hipStream_t stream)
{
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( buildWarpMapsKernel<CylindricalMapper>), dim3(grid),dim3(threads), 0, 0, tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(hipGetLastError());
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Host wrapper: same as buildWarpPlaneMaps but for the spherical mapper
// (no translation vector is needed).
void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
hipStream_t stream)
{
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( buildWarpMapsKernel<SphericalMapper>), dim3(grid),dim3(threads), 0, 0, tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(hipGetLastError());
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace cudev {
#endif /* CUDA_DISABLER */
| 1e5fd220c704896c810ca173f0ea919278f6f741.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
// TODO use intrinsics like __sinf and so on
// Camera/warp parameters in device constant memory; filled by the host-side
// buildWarp*Maps() wrappers below via cudaMemcpyToSymbol before each launch.
namespace build_warp_maps
{
__constant__ float ck_rinv[9]; // 3x3 matrix applied to the back-projected ray
__constant__ float cr_kinv[9]; // uploaded by the wrappers, not read by any kernel in this file
__constant__ float ct[3];      // translation terms (used by PlaneMapper only)
__constant__ float cscale;     // scale divisor for warped-image coordinates
}
// Backward map for planar warping: converts a warped coordinate (u,v) to a
// source-image point (x,y) by building a 3-vector from the scaled inputs,
// multiplying by ck_rinv, and doing a perspective divide by z.
// NOTE(review): presumably ck_rinv is (K*R)^-1 from the stitching pipeline —
// confirm against the host code that fills these constants.
class PlaneMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
float x_ = u / cscale - ct[0];
float y_ = v / cscale - ct[1];
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]);
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]);
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]);
x /= z;
y /= z;
}
};
// Backward map for cylindrical warping: lifts (u,v) onto a unit cylinder
// (sin u, y, cos u), applies ck_rinv, and divides by z.  Points that project
// behind the camera (z <= 0) are marked invalid with x = y = -1.
class CylindricalMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
u /= cscale;
float x_ = ::sinf(u);
float y_ = v / cscale;
float z_ = ::cosf(u);
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
};
// Backward map for spherical warping: lifts (u,v) onto a unit sphere
// direction (sin v sin u, -cos v, sin v cos u), applies ck_rinv, and divides
// by z.  Points behind the camera (z <= 0) are marked invalid with -1.
class SphericalMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
v /= cscale;
u /= cscale;
float sinv = ::sinf(v);
float x_ = sinv * ::sinf(u);
float y_ = -::cosf(v);
float z_ = sinv * ::cosf(u);
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
};
// One thread per output pixel (2D launch, bounds-checked).  For the tile
// whose top-left corner is (tl_u, tl_v), computes the backward map via
// Mapper::mapBackward and writes the source coordinates to map_x / map_y
// (presumably consumed by a remap step — confirm with the caller).
template <typename Mapper>
__global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows,
PtrStepf map_x, PtrStepf map_y)
{
int du = blockIdx.x * blockDim.x + threadIdx.x;
int dv = blockIdx.y * blockDim.y + threadIdx.y;
if (du < cols && dv < rows)
{
float u = tl_u + du;
float v = tl_v + dv;
float x, y;
Mapper::mapBackward(u, v, x, y);
map_x.ptr(dv)[du] = x;
map_y.ptr(dv)[du] = y;
}
}
// Host wrapper: uploads the plane-warp constants, launches the backward-map
// kernel over map_x/map_y with 32x8 thread blocks, and checks for launch
// errors.  NOTE(review): the kernel is always launched on the default
// stream; `stream` only controls whether the final device synchronization
// is performed — confirm this matches the caller's stream expectations.
void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], const float t[3],
float scale, cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ct, t, 3*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
buildWarpMapsKernel<PlaneMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper: same as buildWarpPlaneMaps but for the cylindrical mapper
// (no translation vector is needed).
void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
buildWarpMapsKernel<CylindricalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper: same as buildWarpPlaneMaps but for the spherical mapper
// (no translation vector is needed).
void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
buildWarpMapsKernel<SphericalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace cudev {
#endif /* CUDA_DISABLER */
|
13703c2c1daaf6338689e7599dc9a0e53e5184f5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
// ----------------------------------------------------------------------------
// Aborts the process if the most recent HIP call failed, printing the
// file, line, error string and active device id. (Comments cannot go
// inside the macro body: // before a backslash would splice away the
// continuation.)
#define checkLastError() { \
hipError_t error = hipGetLastError(); \
int id; \
hipGetDevice(&id); \
if(error != hipSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, hipGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
// ----------------------------------------------------------------------------
// Stages a dimx*dimy*dimz volume through shared memory and writes it back:
// each 8^3 thread block loads a 14^3 tile (its 8^3 region plus a 3-voxel
// halo on every side, 8 + 2*3 = 14) and stores only its own center voxel.
// Halo cells outside the volume are filled with -1.
// NOTE(review): despite the name, no convolution weights are applied yet —
// the kernel is an identity copy through the staged tile (main() verifies
// src == dst). Presumably a stencil is meant to go where the store happens.
__global__
void __convolution(float *src, float *dst, int dimx, int dimy, int dimz)
{
__shared__ float sharedMem[14][14][14];
int shared_index_1d, global_index_1d, index_1d;
// int2 shared_index_2d, global_index_2d, index_2d;
int3 shared_index_3d, global_index_3d, index_3d;
// Multi batch loading: 14^3 = 2744 tile cells vs 8^3 = 512 threads, so
// each thread loads up to ceil(2744/512) = 6 cells across the trials.
int trial;
for(trial=0; trial <6; trial++)
{
// Linear index of the tile cell this thread loads on this trial.
shared_index_1d = threadIdx.z * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x +
threadIdx.x +
blockDim.x * blockDim.y * blockDim.z * trial; // Next number of loading
// Unflatten into (x, y, z) within the 14x14x(z) tile.
shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*3) * (blockDim.x+2*3))) % (blockDim.x+2*3),
(shared_index_1d % ((blockDim.y+2*3) * (blockDim.x+2*3))) / (blockDim.x+2*3),
(shared_index_1d / ((blockDim.y+2*3) * (blockDim.x+2*3))) );
// Shift by the halo radius (3) to get the global voxel coordinate.
global_index_3d = make_int3(blockIdx.x * blockDim.x + shared_index_3d.x - 3,
blockIdx.y * blockDim.y + shared_index_3d.y - 3,
blockIdx.z * blockDim.z + shared_index_3d.z - 3);
global_index_1d = global_index_3d.z * dimy * dimx +
global_index_3d.y * dimx +
global_index_3d.x;
// The last trial overshoots in z; skip cells past the tile depth.
if (shared_index_3d.z < (blockDim.z + 2*3))
{
if (global_index_3d.z >= 0 && global_index_3d.z < dimz &&
global_index_3d.y >= 0 && global_index_3d.y < dimy &&
global_index_3d.x >= 0 && global_index_3d.x < dimx )
sharedMem[shared_index_3d.z][shared_index_3d.y][shared_index_3d.x] = src[global_index_1d];
else
sharedMem[shared_index_3d.z][shared_index_3d.y][shared_index_3d.x] = -1;
}
// All threads iterate all 6 trials, so this barrier is uniform.
__syncthreads();
}
// (Two manually unrolled "first/second batch loading" variants that the
// loop above supersedes were commented out here and have been removed.)
index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
// Store back: each thread writes its own voxel from the tile center
// (+3 skips the halo).
if (index_3d.z < dimz &&
index_3d.y < dimy &&
index_3d.x < dimx)
dst[index_1d] = sharedMem[threadIdx.z+3][threadIdx.y+3][threadIdx.x+3];
}
// Host wrapper: launches __convolution over the whole volume with a fixed
// 8x8x8 block shape, ceil-dividing each dimension to cover the remainder.
void convolution(float *src, float *dst, int dimx, int dimy, int dimz)
{
    const int bs = 8;
    dim3 numThreads(bs, bs, bs);
    dim3 numBlocks((dimx + bs - 1) / bs,
                   (dimy + bs - 1) / bs,
                   (dimz + bs - 1) / bs);
    hipLaunchKernelGGL(( __convolution), dim3(numBlocks), dim3(numThreads), 0, 0, src, dst, dimx, dimy, dimz);
}
// ----------------------------------------------------------------------------
// Driver: fills a 100^3 volume with random floats, round-trips it through
// the (identity) __convolution kernel and verifies output == input.
int main(int argc, char** argv)
{
srand(time(NULL)); // seed the random input volume
// Volume dimensions.
const int dimx = 100;
const int dimy = 100;
const int dimz = 100;
const int total = dimx*dimy*dimz;
// Host buffers (allocated with new[], released with delete[] below).
float *h_src = new float[total];
float *h_dst = new float[total];
// Device buffers.
float *d_src;
float *d_dst;
hipMalloc((void**)&d_src, total*sizeof(float)); checkLastError();
hipMalloc((void**)&d_dst, total*sizeof(float)); checkLastError();
// Initialize the image source.
for(int i = 0; i < total; i++)
{
h_src[i] = (float)rand();
}
// Upload, run, download.
hipMemcpy(d_src, h_src, total*sizeof(float), hipMemcpyHostToDevice); checkLastError();
convolution(d_src, d_dst, dimx, dimy, dimz);
hipMemcpy(h_dst, d_dst, total*sizeof(float), hipMemcpyDeviceToHost); checkLastError();
// Verify the result: the kernel is an identity copy, so every voxel
// must match exactly.
for(int z=0; z<dimz; z++)
{
for(int y=0; y<dimy; y++)
{
for(int x=0; x<dimx; x++)
{
if(h_src[z*dimy*dimx+y*dimx+x] != h_dst[z*dimy*dimx+y*dimx+x])
{
printf("Solution doesnot match at x: %d, y: %d, z: %d\n", x, y, z);
goto cleanup;
}
}
}
}
printf("Solution is correct.\n");
cleanup:
hipFree(d_src);
hipFree(d_dst);
// BUG FIX: these buffers came from new[]; the original released them
// with free(), which is undefined behavior. Use delete[].
delete[] h_src;
delete[] h_dst;
return 0;
}
| 13703c2c1daaf6338689e7599dc9a0e53e5184f5.cu | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
using namespace std;
// ----------------------------------------------------------------------------
// Aborts the process if the most recent CUDA call failed, printing the
// file, line, error string and active device id. (Comments cannot go
// inside the macro body: // before a backslash would splice away the
// continuation.)
#define checkLastError() { \
cudaError_t error = cudaGetLastError(); \
int id; \
cudaGetDevice(&id); \
if(error != cudaSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, cudaGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
// ----------------------------------------------------------------------------
// Stages a dimx*dimy*dimz volume through shared memory and writes it back:
// each 8^3 thread block loads a 14^3 tile (its 8^3 region plus a 3-voxel
// halo on every side, 8 + 2*3 = 14) and stores only its own center voxel.
// Halo cells outside the volume are filled with -1.
// NOTE(review): despite the name, no convolution weights are applied yet —
// the kernel is an identity copy through the staged tile (main() verifies
// src == dst). Presumably a stencil is meant to go where the store happens.
__global__
void __convolution(float *src, float *dst, int dimx, int dimy, int dimz)
{
__shared__ float sharedMem[14][14][14];
int shared_index_1d, global_index_1d, index_1d;
// int2 shared_index_2d, global_index_2d, index_2d;
int3 shared_index_3d, global_index_3d, index_3d;
// Multi batch loading: 14^3 = 2744 tile cells vs 8^3 = 512 threads, so
// each thread loads up to ceil(2744/512) = 6 cells across the trials.
int trial;
for(trial=0; trial <6; trial++)
{
// Linear index of the tile cell this thread loads on this trial.
shared_index_1d = threadIdx.z * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x +
threadIdx.x +
blockDim.x * blockDim.y * blockDim.z * trial; // Next number of loading
// Unflatten into (x, y, z) within the 14x14x(z) tile.
shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*3) * (blockDim.x+2*3))) % (blockDim.x+2*3),
(shared_index_1d % ((blockDim.y+2*3) * (blockDim.x+2*3))) / (blockDim.x+2*3),
(shared_index_1d / ((blockDim.y+2*3) * (blockDim.x+2*3))) );
// Shift by the halo radius (3) to get the global voxel coordinate.
global_index_3d = make_int3(blockIdx.x * blockDim.x + shared_index_3d.x - 3,
blockIdx.y * blockDim.y + shared_index_3d.y - 3,
blockIdx.z * blockDim.z + shared_index_3d.z - 3);
global_index_1d = global_index_3d.z * dimy * dimx +
global_index_3d.y * dimx +
global_index_3d.x;
// The last trial overshoots in z; skip cells past the tile depth.
if (shared_index_3d.z < (blockDim.z + 2*3))
{
if (global_index_3d.z >= 0 && global_index_3d.z < dimz &&
global_index_3d.y >= 0 && global_index_3d.y < dimy &&
global_index_3d.x >= 0 && global_index_3d.x < dimx )
sharedMem[shared_index_3d.z][shared_index_3d.y][shared_index_3d.x] = src[global_index_1d];
else
sharedMem[shared_index_3d.z][shared_index_3d.y][shared_index_3d.x] = -1;
}
// All threads iterate all 6 trials, so this barrier is uniform.
__syncthreads();
}
// (Two manually unrolled "first/second batch loading" variants that the
// loop above supersedes were commented out here and have been removed.)
index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
// Store back: each thread writes its own voxel from the tile center
// (+3 skips the halo).
if (index_3d.z < dimz &&
index_3d.y < dimy &&
index_3d.x < dimx)
dst[index_1d] = sharedMem[threadIdx.z+3][threadIdx.y+3][threadIdx.x+3];
}
// Host wrapper: launches __convolution over the whole volume with a fixed
// 8x8x8 block shape, ceil-dividing each dimension to cover the remainder.
void convolution(float *src, float *dst, int dimx, int dimy, int dimz)
{
    const int bs = 8;
    dim3 numThreads(bs, bs, bs);
    dim3 numBlocks((dimx + bs - 1) / bs,
                   (dimy + bs - 1) / bs,
                   (dimz + bs - 1) / bs);
    __convolution<<<numBlocks, numThreads>>>(src, dst, dimx, dimy, dimz);
}
// ----------------------------------------------------------------------------
// Driver: fills a 100^3 volume with random floats, round-trips it through
// the (identity) __convolution kernel and verifies output == input.
int main(int argc, char** argv)
{
srand(time(NULL)); // seed the random input volume
// Volume dimensions.
const int dimx = 100;
const int dimy = 100;
const int dimz = 100;
const int total = dimx*dimy*dimz;
// Host buffers (allocated with new[], released with delete[] below).
float *h_src = new float[total];
float *h_dst = new float[total];
// Device buffers.
float *d_src;
float *d_dst;
cudaMalloc((void**)&d_src, total*sizeof(float)); checkLastError();
cudaMalloc((void**)&d_dst, total*sizeof(float)); checkLastError();
// Initialize the image source.
for(int i = 0; i < total; i++)
{
h_src[i] = (float)rand();
}
// Upload, run, download.
cudaMemcpy(d_src, h_src, total*sizeof(float), cudaMemcpyHostToDevice); checkLastError();
convolution(d_src, d_dst, dimx, dimy, dimz);
cudaMemcpy(h_dst, d_dst, total*sizeof(float), cudaMemcpyDeviceToHost); checkLastError();
// Verify the result: the kernel is an identity copy, so every voxel
// must match exactly.
for(int z=0; z<dimz; z++)
{
for(int y=0; y<dimy; y++)
{
for(int x=0; x<dimx; x++)
{
if(h_src[z*dimy*dimx+y*dimx+x] != h_dst[z*dimy*dimx+y*dimx+x])
{
printf("Solution doesnot match at x: %d, y: %d, z: %d\n", x, y, z);
goto cleanup;
}
}
}
}
printf("Solution is correct.\n");
cleanup:
cudaFree(d_src);
cudaFree(d_dst);
// BUG FIX: these buffers came from new[]; the original released them
// with free(), which is undefined behavior. Use delete[].
delete[] h_src;
delete[] h_dst;
return 0;
}
|
e254f65aabfd75393f6c90eb734d895122391cfe.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
// includes CUDA Runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
/*
written by George Strauch on 4/21/2020
c++ program to sort an array with bubblesort on gpu
Execution syntax:
$ ./exec {int num of elements}
Example run:
$ nvcc gpu_bubble.cu -arch='sm_35' -rdc=true -lineinfo -lcudadevrt -o gpu_bs
$ time ./gpu_bs 10
$ time ./gpu_bs 20000
*/
__host__ // used for debug
void print_array (int *array, int n, int tag_index)
{
    // Dump the array to stdout, inserting a " > " marker just after
    // position tag_index to flag the first out-of-order pair.
    for (int i = 0; i < n; ++i) {
        if (i == tag_index + 1)
            std::cout << " > ";
        std::cout << array[i] << ' ';
    }
    std::cout << '\n';
}
__host__
int* allocate_shared_array(int n_elements)
{
    // Managed (unified) memory: the buffer is addressable from both the
    // host and the device.
    int *buffer = nullptr;
    hipMallocManaged(&buffer, n_elements * sizeof(int));
    return buffer;
}
__host__ // makes and returns unsorted array with random elements
int* make_unsorted_array(int n_elements)
{
    // Values are drawn from [0, 2*n_elements) so duplicates are likely.
    int *values = allocate_shared_array(n_elements);
    for (int j = 0; j < n_elements; ++j)
        values[j] = rand() % (2 * n_elements);
    return values;
}
__host__
// Returns true if any adjacent pair is out of order (i.e. another sorting
// pass is needed).
// BUG FIX: the original looped `size_t i < n-1`; for n == 0 the int n-1
// converted to SIZE_MAX and the loop scanned far out of bounds. Using a
// signed index with `i + 1 < n` handles n <= 1 safely.
bool go_again(int* array, int n)
{
    for (int i = 0; i + 1 < n; i++) {
        if (array[i] > array[i + 1])
        {
            return true;
        }
    }
    return false;
}
__global__
// One odd-even transposition step: each thread compare-and-swaps the pair
// (id, id+1), where `offset` (0/1) selects the even or odd phase and `k`
// shifts the window for multi-launch coverage of large arrays.
// BUG FIX: the original guard was `id >= n`, so the thread handling
// id == n-1 still read array[id+1] out of bounds; guard id+1 instead.
// The trailing __syncthreads() was removed: it sat after a divergent
// early return (undefined behavior) and a barrier at kernel end
// synchronizes nothing useful — each pair is touched by one thread only.
void sort(int* array, int n, int offset, int k)
{
    int id = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + offset + k;
    if (id + 1 >= n) {
        return;
    }
    if (array[id] > array[id + 1]) {
        int tmp = array[id + 1];
        array[id + 1] = array[id];
        array[id] = tmp;
    }
}
__host__
// Zeroes the first n entries of a.
void fill_array(int* a, int n) {
    int idx = 0;
    while (idx < n) {
        a[idx] = 0;
        ++idx;
    }
}
__host__ // returns element index if any element larger than i+1 element, else -1
// BUG FIX: the original looped `size_t i < n-1`; for n == 0 the int n-1
// converted to SIZE_MAX and the loop read far out of bounds. A signed
// index with `i + 1 < n` makes n <= 1 trivially in-order.
int verify_in_order(int* array, int n)
{
    for (int i = 0; i + 1 < n; i++) {
        if (array[i + 1] < array[i]) {
            return i;
        }
    }
    return -1;
}
__host__
// Drives repeated odd-even transposition passes over `array` until
// go_again() reports it sorted, chunking each pass into launches of
// 512x512 threads so arrays larger than one launch are covered.
void entry_point(int* array, int n)
{
    const int threads_per_block = 512;
    const int num_blocks = 512;
    const int launch_span = threads_per_block * num_blocks;
    dim3 threads(threads_per_block);
    dim3 blocks(num_blocks);
    int passes = 0;
    while (go_again(array, n)) {
        hipDeviceSynchronize();
        for (size_t chunk = 0; chunk < (n / (2 * launch_span)) + 1; ++chunk) {
            hipLaunchKernelGGL(( sort), dim3(blocks), dim3(threads), 0, 0, array, n, 0, chunk * launch_span);
            hipDeviceSynchronize();
            hipLaunchKernelGGL(( sort), dim3(blocks), dim3(threads), 0, 0, array, n, 1, chunk * launch_span);
        }
        hipDeviceSynchronize();
        // Safety valve: never loop more than ~1.5n passes.
        ++passes;
        if (passes > 1.5 * n) {
            break;
        }
    }
}
// Usage: ./gpu_bs {int num of elements}
int main(int argc, char const *argv[])
{
    // BUG FIX: the original read argv[1] unconditionally and crashed when
    // run without an argument.
    if (argc < 2) {
        std::cout << "usage: " << argv[0] << " {int num of elements}" << '\n';
        return 1;
    }
    int N = atoi(argv[1]);
    std::cout << "N = " << N << '\n';
    int* a = make_unsorted_array(N);
    hipProfilerStart();
    entry_point(a, N);
    hipProfilerStop();
    // verify_in_order() returns -1 when sorted, else the offending index.
    int order = verify_in_order(a, N);
    if (order == -1) {
        std::cout << "array is in order" << '\n';
    }
    else {
        std::cout << "not in order" << '\n';
        print_array(a, N, order);
    }
    hipFree(a);
    return 0;
}
//
| e254f65aabfd75393f6c90eb734d895122391cfe.cu | #include <iostream>
#include <cuda.h>
// includes CUDA Runtime
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
/*
written by George Strauch on 4/21/2020
c++ program to sort an array with bubblesort on gpu
Execution syntax:
$ ./exec {int num of elements}
Example run:
$ nvcc gpu_bubble.cu -arch='sm_35' -rdc=true -lineinfo -lcudadevrt -o gpu_bs
$ time ./gpu_bs 10
$ time ./gpu_bs 20000
*/
__host__ // used for debug
void print_array (int *array, int n, int tag_index)
{
    // Dump the array to stdout, inserting a " > " marker just after
    // position tag_index to flag the first out-of-order pair.
    for (int i = 0; i < n; ++i) {
        if (i == tag_index + 1)
            std::cout << " > ";
        std::cout << array[i] << ' ';
    }
    std::cout << '\n';
}
__host__
int* allocate_shared_array(int n_elements)
{
    // Managed (unified) memory: the buffer is addressable from both the
    // host and the device.
    int *buffer = nullptr;
    cudaMallocManaged(&buffer, n_elements * sizeof(int));
    return buffer;
}
__host__ // makes and returns unsorted array with random elements
int* make_unsorted_array(int n_elements)
{
    // Values are drawn from [0, 2*n_elements) so duplicates are likely.
    int *values = allocate_shared_array(n_elements);
    for (int j = 0; j < n_elements; ++j)
        values[j] = rand() % (2 * n_elements);
    return values;
}
__host__
// Returns true if any adjacent pair is out of order (i.e. another sorting
// pass is needed).
// BUG FIX: the original looped `size_t i < n-1`; for n == 0 the int n-1
// converted to SIZE_MAX and the loop scanned far out of bounds. Using a
// signed index with `i + 1 < n` handles n <= 1 safely.
bool go_again(int* array, int n)
{
    for (int i = 0; i + 1 < n; i++) {
        if (array[i] > array[i + 1])
        {
            return true;
        }
    }
    return false;
}
__global__
// One odd-even transposition step: each thread compare-and-swaps the pair
// (id, id+1), where `offset` (0/1) selects the even or odd phase and `k`
// shifts the window for multi-launch coverage of large arrays.
// BUG FIX: the original guard was `id >= n`, so the thread handling
// id == n-1 still read array[id+1] out of bounds; guard id+1 instead.
// The trailing __syncthreads() was removed: it sat after a divergent
// early return (undefined behavior) and a barrier at kernel end
// synchronizes nothing useful — each pair is touched by one thread only.
void sort(int* array, int n, int offset, int k)
{
    int id = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + offset + k;
    if (id + 1 >= n) {
        return;
    }
    if (array[id] > array[id + 1]) {
        int tmp = array[id + 1];
        array[id + 1] = array[id];
        array[id] = tmp;
    }
}
__host__
// Zeroes the first n entries of a.
void fill_array(int* a, int n) {
    int idx = 0;
    while (idx < n) {
        a[idx] = 0;
        ++idx;
    }
}
__host__ // returns element index if any element larger than i+1 element, else -1
// BUG FIX: the original looped `size_t i < n-1`; for n == 0 the int n-1
// converted to SIZE_MAX and the loop read far out of bounds. A signed
// index with `i + 1 < n` makes n <= 1 trivially in-order.
int verify_in_order(int* array, int n)
{
    for (int i = 0; i + 1 < n; i++) {
        if (array[i + 1] < array[i]) {
            return i;
        }
    }
    return -1;
}
__host__
// Drives repeated odd-even transposition passes over `array` until
// go_again() reports it sorted, chunking each pass into launches of
// 512x512 threads so arrays larger than one launch are covered.
void entry_point(int* array, int n)
{
    const int threads_per_block = 512;
    const int num_blocks = 512;
    const int launch_span = threads_per_block * num_blocks;
    dim3 threads(threads_per_block);
    dim3 blocks(num_blocks);
    int passes = 0;
    while (go_again(array, n)) {
        cudaDeviceSynchronize();
        for (size_t chunk = 0; chunk < (n / (2 * launch_span)) + 1; ++chunk) {
            sort<<<blocks, threads>>>(array, n, 0, chunk * launch_span);
            cudaDeviceSynchronize();
            sort<<<blocks, threads>>>(array, n, 1, chunk * launch_span);
        }
        cudaDeviceSynchronize();
        // Safety valve: never loop more than ~1.5n passes.
        ++passes;
        if (passes > 1.5 * n) {
            break;
        }
    }
}
// Usage: ./gpu_bs {int num of elements}
int main(int argc, char const *argv[])
{
    // BUG FIX: the original read argv[1] unconditionally and crashed when
    // run without an argument.
    if (argc < 2) {
        std::cout << "usage: " << argv[0] << " {int num of elements}" << '\n';
        return 1;
    }
    int N = atoi(argv[1]);
    std::cout << "N = " << N << '\n';
    int* a = make_unsorted_array(N);
    cudaProfilerStart();
    entry_point(a, N);
    cudaProfilerStop();
    // verify_in_order() returns -1 when sorted, else the offending index.
    int order = verify_in_order(a, N);
    if (order == -1) {
        std::cout << "array is in order" << '\n';
    }
    else {
        std::cout << "not in order" << '\n';
        print_array(a, N, order);
    }
    cudaFree(a);
    return 0;
}
|
5c2ecad0ec361081fbd32a367b712b28cafe3d3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
// Element-wise c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard — the launch must cover exactly
// the element count (here N is a multiple of THREADS_PER_BLOCK, so the
// grid divides evenly); confirm before reusing with other sizes.
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512
// Allocates N ints on host and device, sums them on the GPU, copies the
// result back, and releases everything. (Closing brace on the next line.)
int main(void) {
    int *a, *b, *c;          // host buffers
    int *d_a, *d_b, *d_c;    // device buffers
    int size = N * sizeof(int);
    // Allocate space for device copies of a, b, c.
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    // BUG FIX: the original passed (void **)*d_c — dereferencing an
    // uninitialized pointer — instead of taking its address.
    hipMalloc((void **)&d_c, size);
    // Allocate host buffers.
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    // Deterministic fill. BUG FIX: the original called random_ints(),
    // which was never declared or defined anywhere.
    for (int i = 0; i < N; ++i) {
        a[i] = i;
        b[i] = 2 * i;
    }
    // Copy inputs to device.
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    // Launch add() kernel on GPU, one thread per element.
    hipLaunchKernelGGL(( add), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);
    // Copy result back to host.
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    // Cleanup.
    free(a); free(b); free(c);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
} | 5c2ecad0ec361081fbd32a367b712b28cafe3d3e.cu | #include <cuda_runtime.h>
// Element-wise c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard — the launch must cover exactly
// the element count (here N is a multiple of THREADS_PER_BLOCK, so the
// grid divides evenly); confirm before reusing with other sizes.
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512
// Allocates N ints on host and device, sums them on the GPU, copies the
// result back, and releases everything. (Closing brace on the next line.)
int main(void) {
    int *a, *b, *c;          // host buffers
    int *d_a, *d_b, *d_c;    // device buffers
    int size = N * sizeof(int);
    // Allocate space for device copies of a, b, c.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    // BUG FIX: the original passed (void **)*d_c — dereferencing an
    // uninitialized pointer — instead of taking its address.
    cudaMalloc((void **)&d_c, size);
    // Allocate host buffers.
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    // Deterministic fill. BUG FIX: the original called random_ints(),
    // which was never declared or defined anywhere.
    for (int i = 0; i < N; ++i) {
        a[i] = i;
        b[i] = 2 * i;
    }
    // Copy inputs to device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch add() kernel on GPU, one thread per element.
    add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    // Copy result back to host.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Cleanup.
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
} |
47fd676b826f678cd381aaacede695b84a6a2f3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
static const int DIM = 128;
// L1-normalizes DIM (=128) values per thread: thread t of block b reads
// data[t + b*blockDim.x + i*N] for i in [0, DIM) — i.e. one column of a
// DIM x N row-major matrix — and writes each element divided by the
// column's L1 norm.
// NOTE(review): assumes the L1 norm is non-zero (an all-zero column would
// divide by 0), and that `abs` resolves to the device float overload
// rather than the int abs() from <cstdlib> — confirm both.
__global__ void Normalize128(float *data, const int N) {
float tmp[DIM];   // per-thread copy of the column
float norm1 = 0;  // running L1 norm
float *start = data + threadIdx.x + blockIdx.x*blockDim.x;
#pragma unroll
for (int i = 0; i < DIM; ++i) {
tmp[i] = *start;
norm1 += abs(tmp[i]);
start += N;       // row stride
}
// One divide, then DIM multiplies.
float norm1_inv = 1.0f / norm1;
start = data + threadIdx.x + blockIdx.x*blockDim.x;
#pragma unroll
for (int i = 0; i < DIM; ++i) {
// const int idx = i*N;
*start = (tmp[i]) * norm1_inv;
start += N;
}
}
// Smoke test: normalize 128 identical values (2.0f) laid out with stride
// N = 1 and print the first result. (Closing brace on the next line.)
int main(){
    const int kElems = 128;
    float *h_ran = (float *)malloc(sizeof(float) * kElems);
    for (int i = 0; i < kElems; ++i)
        h_ran[i] = 2;
    float *d_ran;
    hipMalloc(&d_ran, sizeof(float) * kElems);
    hipMemcpy(d_ran, h_ran, sizeof(float) * kElems, hipMemcpyHostToDevice);
    // Single thread: with N = 1 it walks all 128 consecutive elements.
    hipLaunchKernelGGL(( Normalize128), dim3(1), dim3(1), 0, 0, d_ran, 1);
    hipMemcpy(h_ran, d_ran, sizeof(float) * kElems, hipMemcpyDeviceToHost);
    printf("0: %f \n", h_ran[0]);
    // free memory
    free(h_ran);
    hipFree(d_ran);
    return 0;
} | 47fd676b826f678cd381aaacede695b84a6a2f3f.cu | #include <cstdio>
#include <cstdlib>
static const int DIM = 128;
// L1-normalizes DIM (=128) values per thread: thread t of block b reads
// data[t + b*blockDim.x + i*N] for i in [0, DIM) — i.e. one column of a
// DIM x N row-major matrix — and writes each element divided by the
// column's L1 norm.
// NOTE(review): assumes the L1 norm is non-zero (an all-zero column would
// divide by 0), and that `abs` resolves to the device float overload
// rather than the int abs() from <cstdlib> — confirm both.
__global__ void Normalize128(float *data, const int N) {
float tmp[DIM];   // per-thread copy of the column
float norm1 = 0;  // running L1 norm
float *start = data + threadIdx.x + blockIdx.x*blockDim.x;
#pragma unroll
for (int i = 0; i < DIM; ++i) {
tmp[i] = *start;
norm1 += abs(tmp[i]);
start += N;       // row stride
}
// One divide, then DIM multiplies.
float norm1_inv = 1.0f / norm1;
start = data + threadIdx.x + blockIdx.x*blockDim.x;
#pragma unroll
for (int i = 0; i < DIM; ++i) {
// const int idx = i*N;
*start = (tmp[i]) * norm1_inv;
start += N;
}
}
// Smoke test: normalize 128 identical values (2.0f) laid out with stride
// N = 1 and print the first result. (Closing brace on the next line.)
int main(){
    const int kElems = 128;
    float *h_ran = (float *)malloc(sizeof(float) * kElems);
    for (int i = 0; i < kElems; ++i)
        h_ran[i] = 2;
    float *d_ran;
    cudaMalloc(&d_ran, sizeof(float) * kElems);
    cudaMemcpy(d_ran, h_ran, sizeof(float) * kElems, cudaMemcpyHostToDevice);
    // Single thread: with N = 1 it walks all 128 consecutive elements.
    Normalize128<<<1, 1>>>(d_ran, 1);
    cudaMemcpy(h_ran, d_ran, sizeof(float) * kElems, cudaMemcpyDeviceToHost);
    printf("0: %f \n", h_ran[0]);
    // free memory
    free(h_ran);
    cudaFree(d_ran);
    return 0;
} |
14cfb71aa2ae4d0a1630cee3dc4f86267736a85a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// 1 block of threads --> 8 values, grid = 1
__global__ void unique_idx_calc_threadIdx(int * input)
{
    // With a single block, threadIdx.x alone indexes the data.
    int thread_id = threadIdx.x;
    printf("threadIdx : %d, value : %d \n", thread_id, input[thread_id]);
}
// 4 blocks, each block - 4 threads.
__global__ void unique_gid_calculation(int * input)
{
    // Global id = block offset (blockIdx.x * blockDim.x) + thread index.
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("blockIdx.x : %d, threadIdx.x : %d, blockDim.x : %d, gridDim.x: %d, value : %d \n", blockIdx.x, threadIdx.x, blockDim.x, gridDim.x, input[gid]);
}
// Demo: uploads 16 ints and has a 4-block x 4-thread grid print each
// element with its computed global id.
int main()
{
    int array_size = 16;
    int array_bite_size = sizeof(int) * array_size;
    int h_data[] = {23, 9, 4, 53, 64, 12, 1, 33, 22, 11, 9, 12, 13, 89, 90, 77};
    // Echo the host data first for comparison with the kernel output.
    for (int i=0; i < array_size; i++) {
        printf("%d ", h_data[i]);
    }
    printf ("\n \n");
    int * d_data;
    hipMalloc((void **)&d_data, array_bite_size);
    hipMemcpy(d_data, h_data, array_bite_size, hipMemcpyHostToDevice);
    dim3 block(4);
    dim3 grid(4);
    //unique_idx_calc_threadIdx<<<grid, block>>>(d_data);
    hipLaunchKernelGGL(( unique_gid_calculation), dim3(grid), dim3(block), 0, 0, d_data);
    hipDeviceSynchronize();
    // BUG FIX: release the device buffer explicitly instead of leaking it
    // until hipDeviceReset(), and return an explicit status.
    hipFree(d_data);
    hipDeviceReset();
    return 0;
}
| 14cfb71aa2ae4d0a1630cee3dc4f86267736a85a.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// 1 block of threads --> 8 values, grid = 1
__global__ void unique_idx_calc_threadIdx(int * input)
{
    // With a single block, threadIdx.x alone indexes the data.
    int thread_id = threadIdx.x;
    printf("threadIdx : %d, value : %d \n", thread_id, input[thread_id]);
}
// 4 blocks, each block - 4 threads.
__global__ void unique_gid_calculation(int * input)
{
    // Global id = block offset (blockIdx.x * blockDim.x) + thread index.
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("blockIdx.x : %d, threadIdx.x : %d, blockDim.x : %d, gridDim.x: %d, value : %d \n", blockIdx.x, threadIdx.x, blockDim.x, gridDim.x, input[gid]);
}
int main()
{
int array_size = 16;
int array_bite_size = sizeof(int) * array_size;
int h_data[] = {23, 9, 4, 53, 64, 12, 1, 33, 22, 11, 9, 12, 13, 89, 90, 77};
for (int i=0; i < array_size; i++) {
printf("%d ", h_data[i]);
}
printf ("\n \n");
int * d_data;
cudaMalloc((void **)&d_data, array_bite_size);
cudaMemcpy(d_data, h_data, array_bite_size, cudaMemcpyHostToDevice);
dim3 block(4);
dim3 grid(4);
//unique_idx_calc_threadIdx<<<grid, block>>>(d_data);
unique_gid_calculation<<<grid, block>>>(d_data);
cudaDeviceSynchronize();
cudaDeviceReset();
}
|
99acbb64bb6cb379220b42d58ace3cad4979c181.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #ifdef __cplusplus
// extern "C" {
// #endif
#include <stdio.h>
#include <vector>
#include <math.h>
#include <float.h>
#include "edge_detector_kernel.h"
// CUDA: grid-stride loop — thread t processes indices t, t + gridStride,
// t + 2*gridStride, ... so any launch configuration covers all (n) items.
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Marks "selected" edge pixels in `output`: a pixel qualifies when its
// edge response is in a band and it dominates enough of its 21x21
// neighborhood (window radius 10) by a contrast margin.
//   response in (25, 60): margin > 20, needs > 200 dominated neighbors
//   response >= 60:       margin > 30, needs > 200 dominated neighbors
// Grid-stride over all num_kernels (= height*width) pixels.
// NOTE(review): thresholds 25/60/20/30/200 are empirical — confirm
// against the calling pipeline. Only removed the unused `sharpCount`
// locals; the selection logic is unchanged.
__global__ void EdgeSelection_kernel(const int num_kernels, float* input_edge, float* output, int height, int width) {
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        int point_offset = index;
        int x = index % width;
        int y = index / width;
        float* input_edge_center = input_edge + point_offset;
        // Medium-strength edges: contrast margin > 20.
        if (*input_edge_center > 25 && *input_edge_center < 60)
        {
            int smoothCount = 0;
            int window_size = 10;
            for (int m = -window_size; m <= window_size; m++) {
                for (int n = -window_size; n <= window_size; n++) {
                    if (m == 0 && n == 0)
                        continue;
                    if (y+m < 0 || y+m >= height || x+n < 0 || x+n >= width)
                        continue;
                    int image_offset = (y + m) * width + x + n;
                    float* input_edge_offset = input_edge + image_offset;
                    if (*input_edge_center - *input_edge_offset > 20)
                        smoothCount++;
                }
            }
            if (smoothCount > 200)
                *(output + point_offset) = 1;
        }
        // Strong edges: larger contrast margin (> 30).
        if (*input_edge_center >= 60)
        {
            int smoothCount = 0;
            int window_size = 10;
            for (int m = -window_size; m <= window_size; m++) {
                for (int n = -window_size; n <= window_size; n++) {
                    if (m == 0 && n == 0)
                        continue;
                    if (y+m < 0 || y+m >= height || x+n < 0 || x+n >= width)
                        continue;
                    int image_offset = (y + m) * width + x + n;
                    float* input_edge_offset = input_edge + image_offset;
                    if (*input_edge_center - *input_edge_offset > 30)
                        smoothCount++;
                }
            }
            if (smoothCount > 200)
                *(output + point_offset) = 1;
        }
    }
}
// Prunes isolated marks from `output` (1 = selected edge pixel): for each
// marked pixel it traces the connected chain of marked pixels in four
// directions (down/up with +-1 lateral drift, right/left with +-1 drift,
// and both diagonals). If every traced chain is shorter than `thres`, the
// pixel is considered noise and cleared. Grid-stride over all pixels.
// NOTE(review): the section labels ("horizontal"/"vertical") describe the
// edge orientation being followed, not the scan axis — confirm intent.
// NOTE(review): the last direction uses `count <= thres` where the other
// three use `count < thres` — possibly an off-by-one; confirm.
__global__ void EdgeFiltering_kernel(const int num_kernels, float* input_edge, float* output, int height, int width) {
CUDA_KERNEL_LOOP(index, num_kernels)
{
int point_offset = index;
int x = index % width;
int y = index / width;
float* output_center = output + point_offset;
if(*output_center == 1)
{
int thres = 20;     // minimum chain length to keep the mark
int count = 0;      // chain length for the current direction
int countAll = 0;   // number of directions whose chain is too short
// horizontal
int window_size = 25;  // maximum trace length per side
int temp_x = x;
// Trace downward (increasing y), letting x drift by -1/0/+1 per row.
for (int m = 1; m <= window_size; m++)
{
if (y+m < 0 || y+m >= height)
continue;
int precount = count;
for (int n = -1; n <= 1; n++)
{
if (temp_x+n < 0 || temp_x+n >= width)
continue;
float* output_offset = output + (y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_x = temp_x + n;
count++;
break;
}
}
// Chain broken: no marked neighbor found on this row.
if(precount == count)
break;
}
temp_x = x;
// Trace upward (decreasing y), same drift rule.
for (int m = -1; m >= -window_size; m--)
{
if (y+m < 0 || y+m >= height)
continue;
int precount = count;
for (int n = -1; n <= 1; n++)
{
if(temp_x+n < 0 || temp_x+n >= width)
continue;
float* output_offset = output + (y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_x = temp_x + n;
count++;
break;
}
}
if(precount == count)
break;
}
if (count < thres)
countAll++;
//vertical
count = 0;
int temp_y = y;
// Trace rightward (increasing x), letting y drift by -1/0/+1 per column.
for (int n = 1; n <= window_size; n++)
{
if (x+n < 0 || x+n >= width)
continue;
int precount = count;
for (int m = -1; m <= 1; m++)
{
if(temp_y+m < 0 || temp_y+m >= height)
continue;
float* output_offset = output + (temp_y + m) * width + x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
count++;
break;
}
}
if(precount == count)
break;
}
temp_y = y;
// Trace leftward (decreasing x), same drift rule.
for (int n = -1; n >= -window_size; n--)
{
if (x+n < 0 || x+n >= width)
continue;
int precount = count;
for (int m = -1; m <= 1; m++)
{
if(temp_y+m < 0 || temp_y+m >= height)
continue;
float* output_offset = output + (temp_y + m) * width + x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
count++;
break;
}
}
if(precount == count)
break;
}
if (count < thres)
countAll++;
//diagonal
count = 0;
temp_x = x, temp_y = y;
// Trace toward lower-right: try (0,+1), then (+1,0), then (+1,+1).
for (int p = 1; p <= window_size; p++)
{
int m = 0, n = 1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
float* output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = 1, n = 0;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = 1, n = 1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
break;
}
temp_x = x, temp_y = y;
// Trace toward upper-left: try (0,-1), then (-1,0), then (-1,-1).
for (int p = 1; p <= window_size; p++)
{
int m = 0, n = -1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
float* output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = -1, n = 0;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = -1, n = -1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
break;
}
if (count < thres)
countAll++;
//diagonal -1
count = 0;
temp_x = x, temp_y = y;
// Trace toward upper-right: try (0,+1), then (-1,0), then (-1,+1).
for (int p = 1; p <= window_size; p++)
{
int m = 0, n = 1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
float* output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = -1, n = 0;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = -1, n = 1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
break;
}
temp_x = x, temp_y = y;
// Trace toward lower-left: try (0,-1), then (+1,0), then (+1,-1).
for (int p = 1; p <= window_size; p++)
{
int m = 0, n = -1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
float* output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = 1, n = 0;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
m = 1, n = -1;
if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
continue;
output_offset = output + (temp_y + m) * width + temp_x + n;
if (*output_offset == 1)
{
temp_y = temp_y + m;
temp_x = temp_x + n;
count++;
continue;
}
break;
}
// See NOTE(review) above: `<=` here vs `<` in the other directions.
if (count <= thres)
countAll++;
// All four traced chains were short: clear the mark as noise.
if (countAll == 4)
*output_center = 0;
}
}
}
// Texture/structure classification kernel (HIP build of the .cu version below).
// For each pixel whose edge response exceeds 25: estimate the local gradient
// direction with a 3x3 Sobel-like stencil, then sample 3x3 neighbourhood
// averages at two probe points placed `point_dis` pixels on either side of the
// gradient direction. When the two averages are similar (|avgA - avgB| < 50)
// the edge is assumed to be texture rather than structure and output is set
// to 1. All buffers are height*width row-major device images.
__global__ void EdgeTexture_kernel(const int num_kernels, float* input_image, float* input_edge, float* output, int height, int width) {
	CUDA_KERNEL_LOOP(index, num_kernels)
	{
		int point_offset = index;
		int x = index % width;   // column
		int y = index / width;   // row
		int image_length = height*width;  // NOTE(review): unused
		float* input_image_center = input_image + point_offset;  // NOTE(review): only referenced by commented-out code below
		float* input_edge_center = input_edge + point_offset;
		if (*input_edge_center > 25)
		{
			// Keep the 3x3 stencil fully inside the image.
			if (y-1 >= 0 && y+1 < height && x-1 >= 0 && x+1 < width)
			{
				// Sobel-style gradient components (y_off: vertical, x_off: horizontal).
				double y_off = 2 * (*(input_image + (y + 1) * width + x) - *(input_image + (y - 1) * width + x)) + (*(input_image + (y + 1) * width + x + 1) - *(input_image + (y - 1) * width + x + 1)) + (*(input_image + (y + 1) * width + x - 1) - *(input_image + (y - 1) * width + x - 1));
				double x_off = 2 * (*(input_image + y * width + x + 1) - *(input_image + y * width + x - 1)) + (*(input_image + (y+1) * width + x + 1) - *(input_image + (y+1) * width + x - 1)) + (*(input_image + (y-1) * width + x + 1) - *(input_image + (y-1) * width + x - 1));
				double angle = 0;
				// Axis-aligned gradients get explicit angles to avoid atan2 corner cases.
				if (x_off == 0){
					if (y_off > 0)
						angle = PI / 2;
					else if (y_off <= 0)
						angle = PI*1.5;
				}
				else if (y_off == 0){
					if (x_off > 0)
						angle = 0;
					else if (x_off <= 0)
						angle = PI;
				}
				else
					angle = atan2(y_off, x_off);
				if (angle < 0)
					angle += PI;  // fold into [0, PI)
				// Probe points A and B, point_dis pixels along +angle and angle+PI.
				int point_dis = 2;
				int a_x = x + point_dis*cos(angle);
				int a_y = y + point_dis*sin(angle);
				int b_x = x + point_dis*cos(angle + PI);
				int b_y = y + point_dis*sin(angle + PI);
				// Average the 3x3 neighbourhood (center excluded) around probe A.
				double averageA = 0;
				int countA = 0;
				int window_size = 1;
				for (int m = -window_size; m <= window_size; m++) {
					for (int n = -window_size; n <= window_size; n++) {
						if (m == 0 && n == 0)
							continue;
						if (a_y+m < 0 || a_y+m >= height || a_x+n < 0 || a_x+n >= width)
							continue;  // clip at image borders
						int image_offset = (a_y + m) * width + a_x + n;
						float* input_image_offset = input_image + image_offset;
						// if (fabs(*input_image_center - *input_image_offset) > 20)
						{
							averageA = averageA + *input_image_offset;
							countA++;
						}
					}
				}
				// NOTE(review): countA can be 0 when probe A lies fully outside the
				// image; 0/0 then yields NaN, which fails the < 50 test below.
				averageA = averageA / countA;
				// Same average around probe B.
				double averageB = 0;
				int countB = 0;
				for (int m = -window_size; m <= window_size; m++) {
					for (int n = -window_size; n <= window_size; n++) {
						if (m == 0 && n == 0)
							continue;
						if (b_y+m < 0 || b_y+m >= height || b_x+n < 0 || b_x+n >= width)
							continue;
						int image_offset = (b_y + m) * width + b_x + n;
						float* input_image_offset = input_image + image_offset;
						// if (fabs(*input_image_center - *input_image_offset) > 20)
						{
							averageB = averageB + *input_image_offset;
							countB++;
						}
					}
				}
				averageB = averageB / countB;
				// Similar intensity on both sides of the edge => texture, mark it.
				if (fabs(averageA-averageB) < 50)
					*(output + point_offset) = 1;
			}
		}
	}
}
// Host-side dispatcher (HIP build).
// isSmoothing == 1: structure extraction for edge-preserving smoothing
//                   (selection pass + filtering pass into output_preserve).
// otherwise:        structure extraction for texture removal
//                   (texture pass into output_eliminate).
// All pointers are device buffers of height*width floats; kernels are
// enqueued asynchronously on `stream`.
void EdgeDetector(hipStream_t stream, float* input_image, float* input_edge, float* output_preserve, float* output_eliminate, int height, int width, int isSmoothing)
{
	const int threadsPerBlock = 1024;
	const int totalPixels = height * width;
	const int numBlocks = (totalPixels + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
	if (isSmoothing == 1) {
		// structure extraction for edge-preserving smoothing
		hipLaunchKernelGGL(( EdgeSelection_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, totalPixels, input_edge, output_preserve, height, width);
		hipLaunchKernelGGL(( EdgeFiltering_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, totalPixels, input_edge, output_preserve, height, width);
		return;
	}
	// structure extraction for texture removal
	hipLaunchKernelGGL(( EdgeTexture_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, totalPixels, input_image, input_edge, output_eliminate, height, width);
}
// #ifdef __cplusplus
// }
// #endif
| 99acbb64bb6cb379220b42d58ace3cad4979c181.cu | // #ifdef __cplusplus
// extern "C" {
// #endif
#include <stdio.h>
#include <vector>
#include <math.h>
#include <float.h>
#include "edge_detector_kernel.h"
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Edge selection kernel: marks a pixel in `output` (set to 1) when its edge
// response dominates a large share of its 21x21 neighbourhood. Two response
// bands use different contrast thresholds:
//   25 < e < 60 : a neighbour counts when the center exceeds it by > 20
//   e >= 60     : a neighbour counts when the center exceeds it by > 30
// In both bands the pixel is selected when more than 200 neighbours pass.
// input_edge/output are height*width row-major device images.
// (Change vs. original: removed the `sharpCount` locals, which were never
// read or written.)
__global__ void EdgeSelection_kernel(const int num_kernels, float* input_edge, float* output, int height, int width) {
	CUDA_KERNEL_LOOP(index, num_kernels)
	{
		int point_offset = index;
		int x = index % width;  // column
		int y = index / width;  // row
		float* input_edge_center = input_edge + point_offset;
		if (*input_edge_center > 25 && *input_edge_center < 60)
		{
			int smoothCount = 0;
			int window_size = 10;  // 21x21 window
			for (int m = -window_size; m <= window_size; m++) {
				for (int n = -window_size; n <= window_size; n++) {
					if (m == 0 && n == 0)
						continue;
					if (y+m < 0 || y+m >= height || x+n < 0 || x+n >= width)
						continue;  // clip window at image borders
					int image_offset = (y + m) * width + x + n;
					float* input_edge_offset = input_edge + image_offset;
					if (*input_edge_center - *input_edge_offset > 20)
						smoothCount++;
				}
			}
			if (smoothCount > 200)
				*(output + point_offset) = 1;
		}
		if (*input_edge_center >= 60)
		{
			int smoothCount = 0;
			int window_size = 10;
			for (int m = -window_size; m <= window_size; m++) {
				for (int n = -window_size; n <= window_size; n++) {
					if (m == 0 && n == 0)
						continue;
					if (y+m < 0 || y+m >= height || x+n < 0 || x+n >= width)
						continue;
					int image_offset = (y + m) * width + x + n;
					float* input_edge_offset = input_edge + image_offset;
					if (*input_edge_center - *input_edge_offset > 30)
						smoothCount++;
				}
			}
			if (smoothCount > 200)
				*(output + point_offset) = 1;
		}
	}
}
// Edge filtering kernel: removes short, isolated edge fragments from the
// binary mask produced by EdgeSelection_kernel. For every marked pixel it
// traces connected marked pixels in four direction families (each traced
// both forward and backward, up to window_size steps per side):
//   1) near-vertical  (steps along y, drifting +/-1 in x)
//   2) near-horizontal (steps along x, drifting +/-1 in y)
//   3) diagonal (down-right forward, up-left backward)
//   4) anti-diagonal (up-right forward, down-left backward)
// If the traced run is shorter than `thres` in ALL four families
// (countAll == 4) the pixel is considered noise and cleared.
// output is a height*width row-major device image, modified in place.
__global__ void EdgeFiltering_kernel(const int num_kernels, float* input_edge, float* output, int height, int width) {
	CUDA_KERNEL_LOOP(index, num_kernels)
	{
		int point_offset = index;
		int x = index % width;  // column
		int y = index / width;  // row
		float* output_center = output + point_offset;
		if(*output_center == 1)
		{
			int thres = 20;     // minimum run length to keep the pixel
			int count = 0;      // connected-run length for the current family
			int countAll = 0;   // number of families whose run was too short
			// horizontal
			// NOTE(review): despite the label, this trace steps along y
			// (rows) with a +/-1 drift in x -- a near-vertical path.
			int window_size = 25;
			int temp_x = x;
			for (int m = 1; m <= window_size; m++)
			{
				if (y+m < 0 || y+m >= height)
					continue;
				int precount = count;  // detect whether this row extended the run
				for (int n = -1; n <= 1; n++)
				{
					if (temp_x+n < 0 || temp_x+n >= width)
						continue;
					float* output_offset = output + (y + m) * width + temp_x + n;
					if (*output_offset == 1)
					{
						temp_x = temp_x + n;  // follow the drift
						count++;
						break;
					}
				}
				if(precount == count)
					break;  // run ended
			}
			temp_x = x;
			// Backward direction of the same family.
			for (int m = -1; m >= -window_size; m--)
			{
				if (y+m < 0 || y+m >= height)
					continue;
				int precount = count;
				for (int n = -1; n <= 1; n++)
				{
					if(temp_x+n < 0 || temp_x+n >= width)
						continue;
					float* output_offset = output + (y + m) * width + temp_x + n;
					if (*output_offset == 1)
					{
						temp_x = temp_x + n;
						count++;
						break;
					}
				}
				if(precount == count)
					break;
			}
			if (count < thres)
				countAll++;
			//vertical
			// NOTE(review): steps along x with a +/-1 drift in y -- a
			// near-horizontal path (labels appear swapped vs. behaviour).
			count = 0;
			int temp_y = y;
			for (int n = 1; n <= window_size; n++)
			{
				if (x+n < 0 || x+n >= width)
					continue;
				int precount = count;
				for (int m = -1; m <= 1; m++)
				{
					if(temp_y+m < 0 || temp_y+m >= height)
						continue;
					float* output_offset = output + (temp_y + m) * width + x + n;
					if (*output_offset == 1)
					{
						temp_y = temp_y + m;
						count++;
						break;
					}
				}
				if(precount == count)
					break;
			}
			temp_y = y;
			for (int n = -1; n >= -window_size; n--)
			{
				if (x+n < 0 || x+n >= width)
					continue;
				int precount = count;
				for (int m = -1; m <= 1; m++)
				{
					if(temp_y+m < 0 || temp_y+m >= height)
						continue;
					float* output_offset = output + (temp_y + m) * width + x + n;
					if (*output_offset == 1)
					{
						temp_y = temp_y + m;
						count++;
						break;
					}
				}
				if(precount == count)
					break;
			}
			if (count < thres)
				countAll++;
			//diagonal
			// Forward: try right, down, down-right in that order; stop when
			// none of the three neighbours is marked.
			count = 0;
			temp_x = x, temp_y = y;
			for (int p = 1; p <= window_size; p++)
			{
				int m = 0, n = 1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				float* output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = 1, n = 0;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = 1, n = 1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				break;
			}
			// Backward: left, up, up-left.
			temp_x = x, temp_y = y;
			for (int p = 1; p <= window_size; p++)
			{
				int m = 0, n = -1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				float* output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = -1, n = 0;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = -1, n = -1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				break;
			}
			if (count < thres)
				countAll++;
			//diagonal -1
			// Forward: right, up, up-right.
			count = 0;
			temp_x = x, temp_y = y;
			for (int p = 1; p <= window_size; p++)
			{
				int m = 0, n = 1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				float* output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = -1, n = 0;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = -1, n = 1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				break;
			}
			// Backward: left, down, down-left.
			temp_x = x, temp_y = y;
			for (int p = 1; p <= window_size; p++)
			{
				int m = 0, n = -1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				float* output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = 1, n = 0;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				m = 1, n = -1;
				if(temp_y+m < 0 || temp_y+m >= height || temp_x+n < 0 || temp_x+n >= width)
					continue;
				output_offset = output + (temp_y + m) * width + temp_x + n;
				if (*output_offset == 1)
				{
					temp_y = temp_y + m;
					temp_x = temp_x + n;
					count++;
					continue;
				}
				break;
			}
			// NOTE(review): this family uses <= where the other three use < --
			// possibly unintended; confirm against the algorithm's spec.
			if (count <= thres)
				countAll++;
			if (countAll == 4)
				*output_center = 0;  // too short in every direction: clear it
		}
	}
}
// Texture/structure classification kernel. For each pixel whose edge response
// exceeds 25: estimate the local gradient direction with a 3x3 Sobel-like
// stencil, sample 3x3 neighbourhood averages at two probe points placed
// `point_dis` pixels on either side of the gradient direction, and set
// output to 1 when the two averages are similar (|avgA - avgB| < 50), i.e.
// the edge is likely texture rather than structure.
// All buffers are height*width row-major device images.
// Changes vs. original: removed unused locals (`image_length`,
// `input_image_center`); the averages are now only computed when both probe
// windows contain at least one in-bounds sample. The original performed a
// 0/0 division yielding NaN in that case, and NaN also failed the `< 50`
// comparison, so skipping preserves observable behaviour while avoiding NaN
// arithmetic.
__global__ void EdgeTexture_kernel(const int num_kernels, float* input_image, float* input_edge, float* output, int height, int width) {
	CUDA_KERNEL_LOOP(index, num_kernels)
	{
		int point_offset = index;
		int x = index % width;  // column
		int y = index / width;  // row
		float* input_edge_center = input_edge + point_offset;
		if (*input_edge_center > 25)
		{
			// Keep the 3x3 stencil fully inside the image.
			if (y-1 >= 0 && y+1 < height && x-1 >= 0 && x+1 < width)
			{
				// Sobel-style gradient components (y_off: vertical, x_off: horizontal).
				double y_off = 2 * (*(input_image + (y + 1) * width + x) - *(input_image + (y - 1) * width + x)) + (*(input_image + (y + 1) * width + x + 1) - *(input_image + (y - 1) * width + x + 1)) + (*(input_image + (y + 1) * width + x - 1) - *(input_image + (y - 1) * width + x - 1));
				double x_off = 2 * (*(input_image + y * width + x + 1) - *(input_image + y * width + x - 1)) + (*(input_image + (y+1) * width + x + 1) - *(input_image + (y+1) * width + x - 1)) + (*(input_image + (y-1) * width + x + 1) - *(input_image + (y-1) * width + x - 1));
				double angle = 0;
				// Axis-aligned gradients get explicit angles to avoid atan2 corner cases.
				if (x_off == 0){
					if (y_off > 0)
						angle = PI / 2;
					else if (y_off <= 0)
						angle = PI*1.5;
				}
				else if (y_off == 0){
					if (x_off > 0)
						angle = 0;
					else if (x_off <= 0)
						angle = PI;
				}
				else
					angle = atan2(y_off, x_off);
				if (angle < 0)
					angle += PI;  // fold into [0, PI)
				// Probe points A and B, point_dis pixels along +angle and angle+PI.
				int point_dis = 2;
				int a_x = x + point_dis*cos(angle);
				int a_y = y + point_dis*sin(angle);
				int b_x = x + point_dis*cos(angle + PI);
				int b_y = y + point_dis*sin(angle + PI);
				// Average the 3x3 neighbourhood (center excluded) around probe A.
				double averageA = 0;
				int countA = 0;
				int window_size = 1;
				for (int m = -window_size; m <= window_size; m++) {
					for (int n = -window_size; n <= window_size; n++) {
						if (m == 0 && n == 0)
							continue;
						if (a_y+m < 0 || a_y+m >= height || a_x+n < 0 || a_x+n >= width)
							continue;  // clip at image borders
						averageA = averageA + *(input_image + (a_y + m) * width + a_x + n);
						countA++;
					}
				}
				// Same average around probe B.
				double averageB = 0;
				int countB = 0;
				for (int m = -window_size; m <= window_size; m++) {
					for (int n = -window_size; n <= window_size; n++) {
						if (m == 0 && n == 0)
							continue;
						if (b_y+m < 0 || b_y+m >= height || b_x+n < 0 || b_x+n >= width)
							continue;
						averageB = averageB + *(input_image + (b_y + m) * width + b_x + n);
						countB++;
					}
				}
				// Both probe windows must have samples; otherwise leave output
				// untouched (matches the original NaN-propagation outcome).
				if (countA > 0 && countB > 0)
				{
					averageA = averageA / countA;
					averageB = averageB / countB;
					// Similar intensity on both sides of the edge => texture.
					if (fabs(averageA-averageB) < 50)
						*(output + point_offset) = 1;
				}
			}
		}
	}
}
// Host-side dispatcher.
// isSmoothing == 1: structure extraction for edge-preserving smoothing
//                   (selection pass + filtering pass into output_preserve).
// otherwise:        structure extraction for texture removal
//                   (texture pass into output_eliminate).
// All pointers are device buffers of height*width floats; kernels are
// enqueued asynchronously on `stream`.
void EdgeDetector(cudaStream_t stream, float* input_image, float* input_edge, float* output_preserve, float* output_eliminate, int height, int width, int isSmoothing)
{
	const int threadsPerBlock = 1024;
	const int totalPixels = height * width;
	const int numBlocks = (totalPixels + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
	if (isSmoothing == 1) {
		// structure extraction for edge-preserving smoothing
		EdgeSelection_kernel<<<numBlocks, threadsPerBlock, 0, stream>>>(totalPixels, input_edge, output_preserve, height, width);
		EdgeFiltering_kernel<<<numBlocks, threadsPerBlock, 0, stream>>>(totalPixels, input_edge, output_preserve, height, width);
		return;
	}
	// structure extraction for texture removal
	EdgeTexture_kernel<<<numBlocks, threadsPerBlock, 0, stream>>>(totalPixels, input_image, input_edge, output_eliminate, height, width);
}
// #ifdef __cplusplus
// }
// #endif
|
29b649bc646f3991225785dad5639e5452d612df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <layer_kernels.cuh>
//-------------------------------------------------------------
//EltwiseMax
//-------------------------------------------------------------
// Backward pass of element-wise max: gradient flows to an input element only
// where it equals the forward output (i.e. it was the winner of the max).
// With add == true the result accumulates into `target`, otherwise it
// overwrites it. Grid-stride loop over numElements; B_X is the block width.
template <int B_X, bool add>
__global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target,
                                const int numElements) {
    const int gridStride = B_X * gridDim.x;
    for (int idx = B_X * blockIdx.x + threadIdx.x; idx < numElements; idx += gridStride) {
        // Mask the incoming gradient by the "was this input the max" predicate.
        const float maskedGrad = actGrad[idx] * (output[idx] == input[idx]);
        if (add) {
            target[idx] += maskedGrad;
        } else {
            target[idx] = maskedGrad;
        }
    }
}
#include "tt.h"
#define CONST_AREA_SIZE 256
__device__ __constant__ float const_area[CONST_AREA_SIZE];
//-------------------------------------------------------------
//EltwiseFunc
//-------------------------------------------------------------
// Sign-based switch: +0.5 for s > 0, -0.5 for s < 0, 0 for s == 0 (and for
// NaN, where both comparisons are false). The scale parameter C is currently
// unused; a clamped-linear variant is kept in the comment for reference.
__device__ inline float Switch(float s, float C)
{
	//return fminf(fmaxf(s*C, -.5), .5);
	if (s > 0) return 0.5f;
	if (s < 0) return -0.5f;
	return 0.0f;
}
// Median of three values.
// Fix: the previous expression fmaxf(fminf(a,b), fmaxf(max(a,b),c)) reduces
// to max(a, b, c) -- the maximum, not the median. The standard median-of-3
// identity is max(min(a,b), min(max(a,b), c)).
// (Currently only referenced from commented-out code in this file.)
__device__ inline float Median3(float a, float b, float c)
{
	return fmaxf(fminf(a, b), fminf(fmaxf(a, b), c));
}
//#define MIX_F
// Forward pass of a learned element-wise function over groups of sizeIn input
// channels producing sizeOut output channels. Per pixel:
//   v_sw = sum of sign(val) over the group's inputs;
//   Sw   = Switch(v_sw + Bsw, Csw) in {-0.5, 0, +0.5};
//   each output = sum over inputs of a Sw-blended pair of
//                 linear + rectified-linear pieces whose coefficients live in
//                 the __constant__ buffer const_area, laid out per output as
//                 [param | paramM | paramB] x {branch 0, branch 1 (+sw_len)}.
// sizeArr is the compile-time upper bound for the per-thread input cache
// (must satisfy sizeArr >= sizeIn). Input/target are pitched (strideInp /
// strideTag) device buffers.
template <int sizeArr>
__global__ void kEltwiseFuncAct(const float* input, float* const target,
	const uint imgInPixels, const uint numCases,
	const uint strideInp, const uint strideTag,
	const int numPixelsPerChannel,
	const float Csw, const float Bsw,
	const uint sizeIn, const uint sizeOut) {
	const int numPixelsPerGroup = imgInPixels/sizeIn;
	// dim3 blocks(::min(NUM_BLOCKS_MAX, DIVUP(out_width, ELTWISE_THREADS_X)),
	// ::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
	const uint idxX = blockIdx.x * blockDim.x + threadIdx.x;
	const uint idxY = blockIdx.y * blockDim.y + threadIdx.y;
#ifdef MIX_F
	const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
	// Offset between the two coefficient branches inside const_area.
	const int sw_len = sizeIn*ELWISE_FUNC_SEC;
	// ix, iy == 0 almost always
	for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y*blockDim.y) {
#ifdef MIX_F
		const int hiID = y/numPixelsPerChannel;
#endif
		for (uint x = idxX; x < numCases; x += gridDim.x*blockDim.x) {
			float inpVal[sizeArr];//use shared instead?
			float v_sw =0;  // sum of signs over the group's inputs
			for (uint inp_i = 0; inp_i < sizeIn; inp_i++) {
#ifdef MIX_F
				// Channel-interleaved layout.
				int inp_off = hiID*sizeIn*numPixelsPerChannel*strideInp
					+ inp_i*numPixelsPerChannel*strideInp + pixelChannelID*strideInp + x;
#else
				// Group-contiguous layout.
				int inp_off = inp_i*numPixelsPerGroup*strideInp + y*strideInp + x;
#endif
				float val = input[inp_off];
				inpVal[inp_i] = val;
				v_sw += (val>0) - (val<0);
			}
			float Sw = Switch(v_sw + Bsw, Csw);
			//float v_sw = Median3(inpVal[0],inpVal[1],inpVal[2]);
			for (uint out_i = 0; out_i < sizeOut; out_i++) {
				// Base offset of this output's coefficient block in const_area.
				int out_par = out_i*EL_SWITCH*sizeIn*ELWISE_FUNC_SEC;
				float sum = 0;
				for (uint inp_i = 0; inp_i < sizeIn; inp_i++)
				{
					float val = inpVal[inp_i];
					// Branch 0: linear + ReLU(val + paramB).
					float param = const_area[out_par + inp_i];
					float paramM = const_area[out_par + sizeIn + inp_i];
					float paramB = const_area[out_par + 2*sizeIn + inp_i];
					float output = param*val + paramM*fmax(val+paramB, 0);
					// Branch 1: linear + ReLU(-val + paramB_1).
					float param_1 = const_area[out_par + inp_i+sw_len];
					float paramM_1 = const_area[out_par + sizeIn + inp_i+sw_len];
					float paramB_1 = const_area[out_par + 2*sizeIn + inp_i+sw_len];
					float output_1 = param_1*val + paramM_1*fmax(-val+paramB_1, 0);
					// Sw-weighted blend of the two branches.
					sum += Sw*(output - output_1) + .5*(output + output_1);
				}// inp_i
				int tag_off = out_i*numPixelsPerGroup*strideTag + y*strideTag + x;
				//target[tag_off] = Switch(v_sw+Bsw, Csw)*(output - output_1) + .5*(output + output_1);
				target[tag_off] = sum;
			}//out_i
		}
	}
}
// Backward pass matching kEltwiseFuncAct: propagates actGrad (sizeOut output
// channels) back to the sizeIn input channels of each group. For every input
// the gradient is the Sw-blended sum, over outputs, of the two branch
// derivatives (branch 0: param + paramM when val+paramB > 0; branch 1:
// param_1 - paramM_1 when -val+paramB_1 > 0), read from __constant__
// const_area with the same layout as the forward pass.
// sizeArr must be >= sizeOut (it sizes the per-thread gradient cache).
template <int sizeArr>
__global__ void kEltwiseFuncGrad(const float* actGrad, const float* input, float* const target,
	const uint imgInPixels, const uint numCases,
	const uint strideInp, const uint strideOut,
	const int numPixelsPerChannel,
	const float Csw, const float Bsw,
	const uint sizeIn, const uint sizeOut) {
	const int numPixelsPerGroup = imgInPixels/sizeIn;
	const int outStep = strideOut*numPixelsPerGroup;  // stride between output channels
	const uint idxX = blockIdx.x * blockDim.x + threadIdx.x;
	const uint idxY = blockIdx.y * blockDim.y + threadIdx.y;
#ifdef MIX_F
	const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
	// Offset between the two coefficient branches inside const_area.
	const int sw_len = sizeIn*ELWISE_FUNC_SEC;
	//with no N_SUM ix, iy == 0 almost always
	for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y*blockDim.y) {
#ifdef MIX_F
		const int hiID = y/numPixelsPerChannel;
#endif
		for (uint x = idxX; x < numCases; x += gridDim.x*blockDim.x) {
			float grad_next[sizeArr];  // incoming gradient, one entry per output
			int act_off = y*strideInp + x;
#ifdef MIX_F
			int inp_off = hiID*sizeIn*numPixelsPerChannel*strideInp
				+ pixelChannelID*strideInp + x;
#define strideInpStep strideInp*numPixelsPerChannel
#else
#define inp_off act_off
#define strideInpStep strideInp*numPixelsPerGroup
#endif
			for (uint out_i = 0; out_i < sizeOut; out_i++)
			{
				grad_next[out_i] = actGrad[act_off + outStep*out_i];
			}//out_i
			//debug
			// NOTE(review): inpArr is hard-coded to 3 entries but is indexed by
			// inp_i < sizeIn (a runtime value) -- out-of-bounds if sizeIn > 3.
			// Confirm the caller guarantees sizeIn <= 3.
			float inpArr[3];
			float v_sw =0;  // sum of signs over the group's inputs
			for (uint inp_i = 0; inp_i < sizeIn; inp_i++)
			{
				float val = input[inp_off + inp_i*strideInpStep];
				inpArr[inp_i] = val;
				v_sw += (val>0) - (val<0);
			}
			//float v_sw = Median3(inpArr[0],inpArr[1],inpArr[2]);
			float Sw = Switch(v_sw+Bsw, Csw);
			for (uint inp_i = 0; inp_i < sizeIn; inp_i++) {
				float val = inpArr[inp_i];
				float sum_grad = 0;
				for (uint out_i = 0; out_i < sizeOut; out_i++)
				{
					int out_par = out_i*EL_SWITCH*sizeIn*ELWISE_FUNC_SEC;
					// Branch derivative indicators: ReLU active or not.
					float vsign_0 = (val + const_area[out_par + 2*sizeIn + inp_i] > 0);
					float vsign_1 = (-val + const_area[out_par + sw_len + 2*sizeIn + inp_i] > 0);
					float c_0 = vsign_0*const_area[out_par + sizeIn + inp_i] + const_area[out_par + inp_i];
					float c_1 = -vsign_1*const_area[out_par + sw_len + sizeIn + inp_i] + const_area[out_par + sw_len + inp_i];
					// Same Sw blend as the forward pass.
					sum_grad += grad_next[out_i]*((Sw+.5)*c_0 + (.5-Sw)*c_1);
					//+(v_sw+Bsw > -InvCsw && v_sw+Bsw < InvCsw)*Csw*(c_0-c_1));
				}
#ifdef MIX_F
				target[inp_off + inp_i*numPixelsPerChannel*strideInp] = sum_grad;
#else
				target[inp_off + inp_i*strideInpStep] = sum_grad;
#endif
			}//inp_i
		}//ix
	}//iy
}
//---------------
// Gradient of the learned element-wise function w.r.t. its coefficients.
// For each output channel pout, every thread accumulates over its (y, x)
// tile the partial derivatives w.r.t. the six coefficient arrays (param,
// paramM, paramB for both Sw branches) and writes its partial sums into
// target[coeff_index][tagOffset] -- one slot per thread, to be reduced later
// on the host/another kernel.
// NOTE(review): the const_area reads below index with
// pout*sizeIn*ELWISE_FUNC_SEC while the final write-out (and the forward
// kernel) use pout*EL_SWITCH*sizeIn*ELWISE_FUNC_SEC -- a missing EL_SWITCH
// factor unless EL_SWITCH == 1 or sizeOut == 1. Confirm against the intended
// const_area layout.
template <int B_X, int B_Y, int sizeOut, int sizeIn>
__global__ void kEltwiseFuncParamWeightGrad(float* actGrad, float* input, float** target,
	const uint imgInPixels, const uint numCases,
	const uint stride, const uint strideTag,
	const uint numPixelsPerChannel,
	const float Csw, const float Bsw)
{
	const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
	const int groupStride = numPixelsPerChannel*stride;
#else
	const int groupStride = numPixelsPerGroup*stride;
#endif
	// Offset between the two coefficient branches inside const_area.
	const int sw_len = sizeIn*ELWISE_FUNC_SEC;
	const uint idxX = blockIdx.x * B_X + threadIdx.x;
	const uint idxY = blockIdx.y * B_Y + threadIdx.y;
	for(int pout = 0; pout < sizeOut; pout++)
	{
		//debug
		// Accumulators: [0, sizeIn) = branch 0, [sizeIn, 2*sizeIn) = branch 1.
		float sum[2*sizeIn];
		float sum_m[2*sizeIn];
		float sum_b[2*sizeIn];
		memset(sum, 0, sizeof(sum));
		memset(sum_m, 0, sizeof(sum_m));
		memset(sum_b, 0, sizeof(sum_b));
		for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
			const int hiID = y/numPixelsPerChannel;
			const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
			for (uint x = idxX; x < numCases; x += gridDim.x * B_X) {
				int offset_act = y * stride + x;
				float InArr[sizeIn];  // cached group inputs for this pixel
				float v_sw = 0;       // sum of signs over the group's inputs
				for(int pin = 0; pin < sizeIn; pin++)
				{
#ifdef MIX_F
					int offset_in = hiID*sizeIn*groupStride
						+ pin*groupStride + pixelChannelID*stride + x;
#else
					int offset_in = offset_act + pin*groupStride;
#endif
					float val = input[offset_in];
					InArr[pin] = val;
					v_sw += (val>0) - (val<0);
				}
				float Sw = Switch(v_sw+ Bsw, Csw);
				//float v_sw = Median3(InArr[0],InArr[1],InArr[2]);
				float grad_next = actGrad[offset_act + pout*groupStride];
				for(int pin = 0; pin < sizeIn; pin++)
				{
					float in_val = InArr[pin];
					// Branch 0 contributions (weighted by .5 + Sw).
					float val_m_0 = in_val + const_area[pout*sizeIn*ELWISE_FUNC_SEC + 2*sizeIn + pin];
					sum[pin] += (.5+Sw)*grad_next*in_val;
					sum_m[pin] += (.5+Sw)*grad_next*(val_m_0 > 0)*in_val;
					sum_b[pin] += (.5+Sw)*grad_next*const_area[pout*sizeIn*ELWISE_FUNC_SEC + sizeIn + pin]*(val_m_0 > 0);
					// Branch 1 contributions (weighted by .5 - Sw).
					float val_m_1 = in_val + const_area[pout*sizeIn*ELWISE_FUNC_SEC + 2*sizeIn + pin + sw_len];
					sum[pin + sizeIn] += (.5-Sw)*grad_next*in_val;
					sum_m[pin + sizeIn] += -(.5-Sw)*grad_next*(val_m_1 < 0)*in_val;
					sum_b[pin + sizeIn] += (.5-Sw)*grad_next*const_area[pout*sizeIn*ELWISE_FUNC_SEC + sizeIn + pin + sw_len]*(val_m_1 > 0);
				}
			}
		}
		// One partial-sum slot per thread in each coefficient's target buffer.
		const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
		for(int pin = 0; pin < sizeIn; pin++)
		{
			int out_par = pout*EL_SWITCH*sizeIn*ELWISE_FUNC_SEC;
			target[out_par + pin][tagOffset] = sum[pin];
			target[out_par + sizeIn + pin][tagOffset] = sum_m[pin];
			target[out_par + 2*sizeIn + pin][tagOffset] = sum_b[pin];
			target[out_par + pin + sw_len][tagOffset] = sum[pin + sizeIn];
			target[out_par + sizeIn + pin + sw_len][tagOffset] = sum_m[pin + sizeIn];
			target[out_par + 2*sizeIn + pin + sw_len][tagOffset] = sum_b[pin + sizeIn];
		}
	}
}
//---------------------------------------
// Gradient of the element-wise function w.r.t. the switch bias Bsw (written
// to tagB; the tagC write is commented out). The gradient is nonzero only
// where the pre-switch sum v_sw + Bsw lies inside (-Lim, Lim), in which case
// it is gradNext * (branch0_output - branch1_output).
// Note: unlike the forward kernel, v_sw here accumulates raw values rather
// than signs, and branch 1 uses fmax(val+paramB, 0) rather than
// fmax(-val+paramB, 0) -- presumably intentional for this derivative, but
// worth confirming against kEltwiseFuncAct.
template <int sizeIn>
__global__ void kEltwiseFuncBCWeightGrad(const float* input, const float* actGrad, float* const tagC, float* const tagB,
	const uint imgInPixels, const uint numCases,
	const uint strideInp, const uint strideTag,
	const int numPixelsPerChannel,
	const float Csw, const float Lim, const float Bsw,
	const uint sizeOut) {
	const int numPixelsPerGroup = imgInPixels/sizeIn;
	// dim3 blocks(::min(NUM_BLOCKS_MAX, DIVUP(out_width, ELTWISE_THREADS_X)),
	// ::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
	const uint idxX = blockIdx.x * blockDim.x + threadIdx.x;
	const uint idxY = blockIdx.y * blockDim.y + threadIdx.y;
#ifdef MIX_F
	const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
	// Offset between the two coefficient branches inside const_area.
	const int sw_len = sizeIn*ELWISE_FUNC_SEC;
	// ix, iy == 0 almost always
	for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y*blockDim.y) {
#ifdef MIX_F
		const int hiID = y/numPixelsPerChannel;
#endif
		for (uint x = idxX; x < numCases; x += gridDim.x*blockDim.x) {
			float inpVal[sizeIn];//use shared instead?
			float v_sw =0;  // raw sum of the group's inputs (not signs)
			for (uint inp_i = 0; inp_i < sizeIn; inp_i++) {
#ifdef MIX_F
				int inp_off = hiID*sizeIn*numPixelsPerChannel*strideInp
					+ inp_i*numPixelsPerChannel*strideInp + pixelChannelID*strideInp + x;
#else
				int inp_off = inp_i*numPixelsPerGroup*strideInp + y*strideInp + x;
#endif
				float val = input[inp_off];
				inpVal[inp_i] = val;
				v_sw += val;
			}
			//float v_sw = Median3(inpVal[0],inpVal[1],inpVal[2]);
			for (uint out_i = 0; out_i < sizeOut; out_i++) {
				int out_par = out_i*sizeIn*EL_SWITCH*ELWISE_FUNC_SEC;
				float output = 0;    // branch 0 activation
				float output_1 = 0;  // branch 1 activation
				float gradNext = actGrad[ y * strideInp + x + out_i*numPixelsPerGroup*strideInp];
				for (uint inp_i = 0; inp_i < sizeIn; inp_i++)
				{
					float val = inpVal[inp_i];
					{
						float param = const_area[out_par + inp_i];
						float paramM = const_area[out_par + sizeIn + inp_i];
						float paramB = const_area[out_par + 2*sizeIn + inp_i];
						output += param*val + paramM*fmax(val+paramB, 0);
					}
					{
						float param = const_area[out_par + inp_i+sw_len];
						float paramM = const_area[out_par + sizeIn + inp_i+sw_len];
						float paramB = const_area[out_par + 2*sizeIn + inp_i+sw_len];
						output_1 += param*val + paramM*fmax(val+paramB, 0);
					}
				}// inp_i
				int tag_off = out_i*numPixelsPerGroup*strideTag + y*strideTag + x;
				float v_b = v_sw + Bsw;//Csw*(v_sw + Bsw);
				// tagC[tag_off] = gradNext*(output - output_1)*(v_b > -invC && v_b < invC)*v_sw;
				//tagB[tag_off] = gradNext*(output - output_1)*(v_b > -.5 && v_b < .5);
				// Gradient only flows inside the switch's transition band.
				tagB[tag_off] = gradNext*(output - output_1)*(v_b > -Lim && v_b < Lim);
			}//out_i
		}
	}
}
//----------------------------
// Diagnostic reduction over input groups. For reference channel `cnttest`,
// each thread accumulates over its (y, x) tile:
//   sum[0]        : sum of |input_cnttest|
//   sum[1..sizeIn): sum of |input_pin - input_cnttest| for every other pin
// and stores its per-thread partials into target[i][tagOffset] (a later
// reduction over thread slots is assumed). actGrad is unused here.
template <int B_X, int B_Y, int sizeIn>
__global__ void kEltwiseFuncGroupTest(float* actGrad, float* input, float** target,
	const uint imgInPixels, const uint numCases,
	const uint stride, const uint strideTag,
	const uint numPixelsPerChannel,
	const uint cnttest)
{
	const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
	const int groupStride = numPixelsPerChannel*stride;
#else
	const int groupStride = numPixelsPerGroup*stride;
#endif
	const uint idxX = blockIdx.x * B_X + threadIdx.x;
	const uint idxY = blockIdx.y * B_Y + threadIdx.y;
	// One partial-sum slot per thread in each target buffer.
	const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
	float sum[sizeIn];
	memset(sum, 0, sizeof(sum));
	for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
		const int hiID = y/numPixelsPerChannel;
		const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
		for (uint x = idxX; x < numCases; x += gridDim.x * B_X) {
#ifdef MIX_F
			int offset_in = hiID*sizeIn*groupStride
				+ pixelChannelID*stride + x;
#else
			int offset_in = y * stride + x;
#endif
			// Reference channel magnitude.
			float v0 = input[offset_in + cnttest*groupStride];
			sum[0] += fabs(v0);
			// Absolute deviation of every other channel from the reference.
			int ks =0;
			for(int pin = 0; pin < sizeIn; pin++)
			{
				if(pin == cnttest)
					continue;
				float in_val = input[offset_in + pin*groupStride];
				sum[1+ks] += fabs(in_val -v0);
				ks++;
			}
		}
	}
	for(int pin = 0; pin < sizeIn; pin++)
	{
		target[pin][tagOffset] = sum[pin];
	}
}
// Accumulates per-group second-order statistics used for parameter
// normalization. Each thread reduces over its (y, x) tile and writes its
// partial sums at its own slot (tagOffset) in the pitched target buffers:
//   target[0 .. sizeIn)          : sum of squares of each input channel
//   target[sizeIn .. sizeIn + P) : sum of cross-products for each unordered
//                                  channel pair, P = sizeIn*(sizeIn-1)/2
// A later reduction over thread slots is assumed.
//
// Bug fixed: the pair loop previously iterated over all ORDERED pairs
// (sizeIn*(sizeIn-1) increments of ks), overflowing sum_pair[], which only
// holds sizeIn*(sizeIn-1)/2 entries -- the size the final write-out loop
// also expects. The loop now visits each unordered pair exactly once.
template <int B_X, int B_Y, int sizeIn>
__global__ void kNormalizeParam(float* input, float** target,
	const uint imgInPixels, const uint numCases,
	const uint stride, const uint strideTag,
	const uint numPixelsPerChannel)
{
	const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
	const int groupStride = numPixelsPerChannel*stride;
#else
	const int groupStride = numPixelsPerGroup*stride;
#endif
	const uint idxX = blockIdx.x * B_X + threadIdx.x;
	const uint idxY = blockIdx.y * B_Y + threadIdx.y;
	// One partial-sum slot per thread in each target buffer.
	const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
	float sum2[sizeIn];
	memset(sum2, 0, sizeof(sum2));
	float sum_pair[(sizeIn-1)*sizeIn/2];
	memset(sum_pair, 0, sizeof(sum_pair));
	for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
		const int hiID = y/numPixelsPerChannel;
		const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
		for (uint x = idxX; x < numCases; x += gridDim.x * B_X) {
#ifdef MIX_F
			int offset_in = hiID*sizeIn*groupStride
				+ pixelChannelID*stride + x;
#else
			int offset_in = y * stride + x;
#endif
			float inp[sizeIn];
			for(int pin_i = 0; pin_i < sizeIn; pin_i++)
			{
				inp[pin_i] = input[offset_in + pin_i*groupStride];
				sum2[pin_i] += inp[pin_i]*inp[pin_i];
			}
			// Each unordered pair (i, j), i < j, contributes exactly once.
			int ks = 0;
			for(int pin_i = 0; pin_i < sizeIn; pin_i++)
			{
				for(int pin_j = pin_i + 1; pin_j < sizeIn; pin_j++)
				{
					sum_pair[ks] += inp[pin_i]*inp[pin_j];
					ks++;
				}
			}
		}
	}
	for(int pin = 0; pin < sizeIn; pin++)
	{
		target[pin][tagOffset] = sum2[pin];
	}
	for(int ks = 0; ks < sizeIn*(sizeIn-1)/2; ks++)
	{
		target[sizeIn+ks][tagOffset] = sum_pair[ks];
	}
}
// Diagnostic reduction: for each of the group's sizeIn input channels,
// accumulate the plain sum of its values over this thread's (y, x) tile and
// store the per-thread partial into target[sizeIn + pin][tagOffset] (a later
// reduction over thread slots is assumed). actGrad and cnttest are accepted
// for signature parity with kEltwiseFuncGroupTest but are not used here.
template <int B_X, int B_Y, int sizeIn>
__global__ void kEltwiseFuncGroupTestS(float* actGrad, float* input, float** target,
	const uint imgInPixels, const uint numCases,
	const uint stride, const uint strideTag,
	const uint numPixelsPerChannel,
	const uint cnttest)
{
	const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
	const int groupStride = numPixelsPerChannel*stride;
#else
	const int groupStride = numPixelsPerGroup*stride;
#endif
	const uint startX = blockIdx.x * B_X + threadIdx.x;
	const uint startY = blockIdx.y * B_Y + threadIdx.y;
	// One partial-sum slot per thread in each target buffer.
	const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
	float channelSum[sizeIn];
	memset(channelSum, 0, sizeof(channelSum));
	for (uint y = startY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
		const int hiID = y/numPixelsPerChannel;
		const int pixelChannelID = startY%numPixelsPerChannel;
#endif
		for (uint x = startX; x < numCases; x += gridDim.x * B_X) {
#ifdef MIX_F
			int offset_in = hiID*sizeIn*groupStride + pixelChannelID*stride + x;
#else
			int offset_in = y * stride + x;
#endif
			for(int ch = 0; ch < sizeIn; ch++)
			{
				channelSum[ch] += input[offset_in + ch*groupStride];
			}
		}
	}
	for(int ch = 0; ch < sizeIn; ch++)
	{
		target[ch+sizeIn][tagOffset] = channelSum[ch];
	}
}
//-------------------------------------------------------------
//MicroConv
//-------------------------------------------------------------
// Index into the per-thread shared tile: (X, Y) inside a sharedY-wide tile
// located at sOffset (sOffset must be in scope at the expansion site).
#define SMEM(X, Y, sdata) sdata[(X)*sharedY+(Y) + sOffset]
// Cooperative tile load: each (sx, sy) thread stores its own element plus
// the LOBE-wide halo (edge threads load the borders, corner threads the
// corners), clamping coordinates to the image bounds. Expects sx, sy, bw,
// bh, sharedY, sOffset, imgSizeX, imgSizeY to be in scope; getVal(x, y, z)
// supplies the source element.
#define SHARED_MEM(x, y, z, LOBE, getVal, sdata) \
	SMEM((LOBE) + sx, (LOBE) + sy, sdata) = getVal(x, y, z);\
	if (sx < (LOBE)) {\
		SMEM(sx, (LOBE) + sy, sdata) = getVal(max(x - (LOBE), 0), y, z);\
		SMEM((LOBE) + bw + sx, (LOBE) + sy, sdata) = getVal(min(x + bw, imgSizeX-1), y, z);\
	}\
	if (sy < (LOBE)) {\
		SMEM((LOBE) + sx, sy, sdata) = getVal(x, max(y - (LOBE), 0), z);\
		SMEM((LOBE) + sx, (LOBE) + bh + sy, sdata) = getVal(x, min(y + bh, imgSizeY-1), z);\
	}\
	if ((sx < (LOBE)) && (sy < (LOBE))) {\
		SMEM(sx, sy, sdata) = getVal(max(x - (LOBE), 0), max(y - (LOBE), 0), z);\
		SMEM(sx, (LOBE) + bh + sy, sdata) = getVal(max(x - (LOBE), 0), min(y + bh, imgSizeY-1), z);\
		SMEM((LOBE) + bw + sx, sy, sdata) = getVal(min(x + bw, imgSizeX-1), max(y - (LOBE), 0), z);\
		SMEM((LOBE) + bw + sx, (LOBE) + bh + sy, sdata) = getVal(min(x + bw, imgSizeX-1), min(y + bh, imgSizeY-1), z);\
	}
// Element accessors used with SHARED_MEM (expect channelOffset/filterOffset,
// widthyz, widthz in scope at the expansion site).
#define getValInput(X, Y, Z) input[channelOffset + (X)*widthyz+(Y)*widthz + (Z)]
#define getValAct(X, Y, Z) actGrad[filterOffset + (X)*widthyz+(Y)*widthz + (Z)]
// Moving-average activation over a (2*LOBE+1)^2 spatial window, computed per
// channel and per case through a shared-memory tile with a LOBE-wide clamped
// halo (loaded via SHARED_MEM). Layout (order x > y > z, *not* y > x):
//   buffer[channel*imgPixels*numCases + x*imgSizeY*numCases + y*numCases + z]
// Each thread owns one (ix, iy) module for `casePerThread` cases; threads
// sharing threadIdx.x cooperate on one tile of sharedY*sharedY floats per
// channel, so dynamic shared memory must provide
// channels * blockDim.x * sharedY^2 floats.
//
// Changes vs. original: a __syncthreads() was added at the end of the
// per-case loop -- without it, a fast thread could start overwriting the
// shared tile for the next case while a neighbouring thread was still
// reading the current one. Unused locals (idx, idy) were removed.
template < int LOBE, int SIZE_CONV>
__global__ void kMAvgAct(const float* input, float* const target,
	const uint numCases, const uint channels, const uint casePerThread,
	const uint sharedY, const uint modulesPerBlockX, const uint modulesPerBlockY,
	const uint imgSizeX, const uint imgSizeY,
	const uint imgPixels, const float scale)
{
	extern __shared__ float sdata[];
	//order x>y>z, *not* y>x
	const int bsizeY = imgSizeY/modulesPerBlockY;
	const int startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
	const int startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
	const int bw = modulesPerBlockX;
	const int bh = modulesPerBlockY;
	// Decompose threadIdx.y into the (sx, sy) position inside the module tile.
	const int sx = threadIdx.y/modulesPerBlockY;
	const int sy = threadIdx.y - sx*modulesPerBlockY;
	const int ix = sx+startX;
	const int iy = sy+startY;
	const int widthz = numCases;
	const int widthyz = imgSizeY*numCases;
	const int sharedY2 = sharedY*sharedY;
	for(int zind = 0; zind < casePerThread; zind++)
	{
		const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
		// Stage this case's tiles (all channels) into shared memory.
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
			const int channelOffset = channelInd*imgPixels*numCases;
			if(z < numCases)
			{
				SHARED_MEM(ix, iy, z, LOBE, getValInput, sdata)
			}
		}
		__syncthreads();
		// Accumulate the (2*LOBE+1)^2 window from the shared tile (the halo
		// already holds border-clamped values).
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
			const int channelOffset = channelInd*imgPixels*numCases;
			if(z < numCases)
			{
				float sum = 0;
				for(int dsx = - LOBE; dsx < LOBE+1; dsx++)
					for(int dsy = - LOBE; dsy < LOBE+1; dsy++)
					{
						sum += sdata[(sx + dsx + LOBE)*sharedY+(sy + dsy + LOBE) + sOffset];
					}
				target[channelOffset + ix*widthyz + iy*widthz + z] = scale*sum;
			}//if
		}//channel
		// Barrier before the next iteration overwrites the shared tile
		// (fixes a read/write race across zind iterations).
		__syncthreads();
	}//zind
}
// Box-filter ("moving average") backward pass: the gradient of an average
// filter is itself a box filter, so this mirrors kMAvgAct but stages the
// incoming gradient (actGrad) instead of the input.
// Same layout/shared-memory contract as kMAvgAct:
// channels*sharedY*sharedY*blockDim.x floats of dynamic shared memory.
template < int LOBE, int SIZE_CONV>
__global__ void kMAvgGrad(const float* actGrad, float* const target,
						const uint numCases, const uint channels, const uint casePerThread,
						const uint sharedY, const uint modulesPerBlockX, const uint modulesPerBlockY,
						const uint imgSizeX, const uint imgSizeY,
						const uint imgPixels, const float scale)
{
	extern __shared__ float sdata[];
	//order x>y>z, *not* y>x
	const int bsizeY = imgSizeY/modulesPerBlockY;
	const int startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
	const int startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
	const int bw = modulesPerBlockX;
	const int bh = modulesPerBlockY;
	const int sx = threadIdx.y/modulesPerBlockY;
	const int sy = threadIdx.y - sx*modulesPerBlockY;
	const int ix = sx+startX;
	const int iy = sy+startY;
	const int widthz = numCases;
	const int widthyz = imgSizeY*numCases;
	const int sharedY2 = sharedY*sharedY;

	for(int zind = 0; zind < casePerThread; zind++)
	{
		const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
		// Stage the gradient tile + clamped apron for every channel.
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
			const int filterOffset = channelInd*imgPixels*numCases;
			if(z < numCases)
			{
				SHARED_MEM(ix, iy, z, LOBE, getValAct, sdata)
			}
		}
		__syncthreads();
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
			const int channelOffset = channelInd*imgPixels*numCases;
			if(z < numCases)
			{
				float sum = 0;
				// Clamping already applied at staging time (dead idx/idy
				// locals removed).
				for(int dsx = - LOBE; dsx < LOBE+1; dsx++)
				for(int dsy = - LOBE; dsy < LOBE+1; dsy++)
				{
					sum += sdata[(sx + dsx + LOBE)*sharedY+(sy + dsy + LOBE) + sOffset];
				}
				target[channelOffset + ix*widthyz + iy*widthz + z] = scale*sum;
			}//if
		}//channel
		// Bug fix: barrier before the next zind iteration restages the shared
		// tile; otherwise writes race with still-pending reads.
		__syncthreads();
	}//zind
}
// Micro-convolution forward pass: correlates the input with numFilters
// per-channel (SIZE_CONV x SIZE_CONV) kernels whose weights live in
// constant memory (const_area, indexed [channel][filter][row][col]).
// Output layout: [channel][filter][x][y][case]. Shared-memory contract and
// tiling are identical to kMAvgAct (channels*sharedY^2*blockDim.x floats).
template < int LOBE, int SIZE_CONV>
__global__ void kMicroConvFilterAct(const float* input, float* const target,
								const uint numCases, const uint channels, const uint numFilters, const uint casePerThread,
								const uint sharedY, const uint modulesPerBlockX, const uint modulesPerBlockY,
								const uint imgSizeX, const uint imgSizeY,
								const uint imgPixels)
{
	extern __shared__ float sdata[];
	//order x>y>z, *not* y>x
	const int bsizeY = imgSizeY/modulesPerBlockY;
	const int startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
	const int startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
	const int bw = modulesPerBlockX;
	const int bh = modulesPerBlockY;
	const int sx = threadIdx.y/modulesPerBlockY;
	const int sy = threadIdx.y - sx*modulesPerBlockY;
	const int ix = sx+startX;
	const int iy = sy+startY;
	const int widthz = numCases;
	const int widthyz = imgSizeY*numCases;
	const int sizeConv2 = SIZE_CONV*SIZE_CONV;
	const int sharedY2 = sharedY*sharedY;

	for(int zind = 0; zind < casePerThread; zind++)
	{
		const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
		// Stage the tile + clamped apron for every channel.
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
			const int channelOffset = channelInd*imgPixels*numCases;
			if(z < numCases)
			{
				SHARED_MEM(ix, iy, z, LOBE, getValInput, sdata)
			}
		}
		__syncthreads();
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
			const int channelOffset = channelInd*imgPixels*numCases;
			if(z < numCases)
			{
				for(int filterID = 0; filterID < numFilters; filterID++)
				{
					float sum = 0;
					// Border clamping already applied at staging time (dead
					// idx/idy locals removed).
					for(int dsx = - LOBE; dsx < LOBE+1; dsx++)
					for(int dsy = - LOBE; dsy < LOBE+1; dsy++)
					{
						float sd = sdata[(sx + dsx + LOBE)*sharedY+(sy + dsy + LOBE) + sOffset];
						sum += sd*const_area[channelInd*sizeConv2*numFilters + filterID*sizeConv2 + (dsy + LOBE)*SIZE_CONV +(dsx + LOBE)];
					}
					target[numFilters*channelOffset + filterID*imgPixels*numCases + ix*widthyz + iy*widthz + z] = sum;
				}//filter
			}//if
		}//channel
		// Bug fix: barrier before the shared tile is overwritten by the next
		// case iteration while other threads may still be reading it.
		__syncthreads();
	}//zind
}
// Micro-convolution backward pass w.r.t. the input: propagates actGrad
// through the spatially flipped filters (note the (-dsy, -dsx) indexing into
// const_area) and sums the contributions of all filters of a channel.
// Shared memory holds one sharedY^2 slice per (channel, filter, case-lane):
// channels*numFilters*sharedY^2*blockDim.x floats.
// Fixes vs. the original:
//  * the window read now applies the same per-(channel,filter,lane) sOffset
//    that the staging step used -- the original read slice 0 (channel 0,
//    filter 0, lane 0) for every thread;
//  * staging and the final store are guarded by z < numCases, matching the
//    sibling kernels, to avoid out-of-bounds accesses on the tail;
//  * a trailing __syncthreads() prevents the next case iteration from
//    overwriting shared memory that is still being read.
__global__ void kMicroConvActGrad(const float* actGrad, float* const target,
								const uint numCases, const uint channels, const uint numFilters, const uint casePerThread,
								const uint modulesPerBlockX, const uint modulesPerBlockY,
								const uint sharedY, const uint sizeModule, const uint lobe,
								const uint imgSizeX, const uint imgSizeY,
								const uint imgPixels)
{
	extern __shared__ float sdata[];
	//order x>y>z, *not* y>x
	const int bsizeY = imgSizeY/modulesPerBlockY;
	const int startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
	const int startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
	const int bw = modulesPerBlockX;
	const int bh = modulesPerBlockY;
	const int sx = threadIdx.y/modulesPerBlockY;
	const int sy = threadIdx.y - sx*modulesPerBlockY;
	const int ix = sx+startX;
	const int iy = sy+startY;
	const int widthz = numCases;
	const int widthyz = imgSizeY*numCases;
	const int sizeModule2 = sizeModule*sizeModule;
	const int sharedY2 = sharedY*sharedY;

	for(int zind = 0; zind < casePerThread; zind++)
	{
		const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
		// Stage one gradient tile per (channel, filter).
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int channelOffset = channelInd*imgPixels*numCases;
			for(int filterID = 0; filterID < numFilters; filterID++)
			{
				const int sOffset = channelInd*numFilters*sharedY2*blockDim.x + filterID*sharedY2*blockDim.x + threadIdx.x*sharedY2;
				const int filterOffset = numFilters*channelOffset + filterID*imgPixels*numCases;
				if(z < numCases)
				{
					SHARED_MEM(ix, iy, z, lobe, getValAct, sdata)
				}
			}
		}
		__syncthreads();
		for(int channelInd = 0; channelInd < channels; channelInd++)
		{
			const int channelOffset = channelInd*imgPixels*numCases;
			if(z < numCases)
			{
				float sum = 0;
				for(int filterID = 0; filterID < numFilters; filterID++)
				{
					// Same slice offset as the staging loop above.
					const int sOffset = channelInd*numFilters*sharedY2*blockDim.x + filterID*sharedY2*blockDim.x + threadIdx.x*sharedY2;
					for(int dsx = - lobe; dsx < lobe+1; dsx++)
					for(int dsy = - lobe; dsy < lobe+1; dsy++)
						sum += sdata[(sx + dsx + lobe)*sharedY+(sy + dsy + lobe) + sOffset]
							*const_area[filterID*sizeModule2 + (-dsy + lobe)*sizeModule +(-dsx + lobe)];
				}
				target[channelOffset + ix*widthyz + iy*widthz + z] = sum;
			}
		}
		// Barrier before the next case iteration restages shared memory.
		__syncthreads();
	}
}
// Micro-convolution gradient w.r.t. the filter weights.
// Each thread accumulates a private [numFilters x conv2] partial-gradient
// vector (sdataRes slice, unique per thread via res_off, so no barrier is
// needed around those accumulations) over its assigned cases, then writes
// one partial per (channel, filter, tap) into the per-coefficient target
// matrices at position (pixel, case-lane); the host reduces those matrices.
// Shared memory = per-lane input tiles (sizeSharedBlock*blockDim.x floats)
// followed by the per-thread accumulators.
// NOTE(review): unlike the sibling kernels there is no z < numCases guard,
// so numCases is presumably always a multiple of
// blockDim.x*gridDim.x*casePerThread -- confirm with callers.
// NOTE(review): SHARED_MEM here does not depend on filterID, so the tile is
// restaged (and the block re-synchronized) numFilters times per case; also
// there is no barrier between the reads of one filter iteration and the
// restage of the next -- verify this is race-free before reusing.
template <int lobe>
__global__ void kMicroConvWeightGrad(const float* actGrad, const float* input, float** const target,
									const uint target_size, const uint numCases, const uint casePerThread, const uint tagWidth,
									const uint channels, const uint numFilters,
									const uint modulesPerBlockX, const uint modulesPerBlockY,
									const uint imgSizeX, const uint imgSizeY, const uint imgPixels)
{
//order x>y>z, *not* y>x
	extern __shared__ float sdata[];
	//const int imgSize = imgSizeX*imgSizeY;
	const int sharedY = modulesPerBlockY + 2*lobe;
	const int sizeSharedBlock = sharedY*(modulesPerBlockX + 2*lobe);
	// sdata is split: per-lane image tiles first, per-thread accumulators after.
	float* sdataImg = sdata;
	float* sdataRes = sdata + sizeSharedBlock*blockDim.x;
	//const int bsizeX = imgSizeX/modulesPerBlockX;
	const int bsizeY = imgSizeY/modulesPerBlockY;
	const int startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
	const int startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
	const int bw = modulesPerBlockX;
	const int bh = modulesPerBlockY;
	const int sx = threadIdx.y/modulesPerBlockY;
	const int sy = threadIdx.y - sx*modulesPerBlockY;
	const int ix = sx+startX;
	const int iy = sy+startY;
	const int zoff = threadIdx.x + blockIdx.x*blockDim.x;
	const int widthz = numCases;
	const int widthyz = imgSizeY*numCases;
	const int sharedY2 = sharedY*sharedY;
	const int conv_size = 2*lobe+1;
	const int conv2 = conv_size*conv_size;
	// Each thread owns a [numFilters x conv2] accumulator slice.
	int resStride = numFilters*conv2;
	int res_off = resStride*(threadIdx.y*blockDim.x + threadIdx.x);
	const int sOffset = threadIdx.x*sharedY2;
	for(int channelInd = 0; channelInd < channels; channelInd++)
	{
		const int channelOffset = channelInd*imgPixels*numCases;
		// Zero this thread's accumulators for the new channel.
		memset(sdataRes + res_off, 0, resStride*sizeof(float));
		for(int zind = 0; zind < casePerThread; zind++)
		{
			const int z = zoff + zind*blockDim.x*gridDim.x;
			for(int filterID = 0; filterID < numFilters; filterID++)
			{
				SHARED_MEM(ix, iy, z, lobe, getValInput, sdataImg)
				__syncthreads();
				// Accumulate actGrad(center) * input(tap) for every tap.
				for(int dsx = - lobe; dsx < lobe+1; dsx++)
				for(int dsy = - lobe; dsy < lobe+1; dsy++)
				{
					// idx/idy are computed but unused (clamping is done at
					// staging time by SHARED_MEM).
					int idx = min(max(ix + dsx, 0), imgSizeX-1);
					int idy = min(max(iy + dsy, 0), imgSizeY-1);
					const int filterOffset = numFilters*channelOffset + filterID*imgPixels*numCases;
					float vact = actGrad[filterOffset + ix*widthyz + iy*widthz + z];
					float vimg = sdataImg[(sx + dsx + lobe)*sharedY+(sy + dsy + lobe) + sOffset];
					int ind_coeff = filterID*conv2 + (dsy + lobe)*conv_size +(dsx + lobe);
					sdataRes[res_off + ind_coeff] += vact*vimg;
				}//dsx
			}//filter
		}//z
		// Flush this channel's partials to the per-coefficient matrices.
		for(int isx = 0; isx < conv_size; isx++)
		for(int isy = 0; isy < conv_size; isy++)
		{
			for(int filterID = 0; filterID < numFilters; filterID++)
			{
				int ind_coeff = filterID*conv2 + isy*conv_size + isx;
				int ind_ch = ind_coeff + channelInd*numFilters*conv2;
				target[ind_ch][ix*imgSizeX*tagWidth + tagWidth*iy + zoff] = sdataRes[res_off + ind_coeff];
			}
		}
	}//channel
}
//-------------------------------------------------------------
//VectFunc
//-------------------------------------------------------------
#define SCALE_H 1.
// Vector-function activation. For each (pixel, case) position and each of
// numColors groups: loads the sizeV input channels, forms sizeH linear
// responses h_k = <w_k, v> with weights w_k read from constant memory
// (const_area, row-major [sizeH][sizeV]), then applies a winner-suppression
// nonlinearity
//     out_k = max(h_k - SCALE_H*(max_j h_j - h_k), 0)
// and writes the sizeH outputs to target.
// Input layout: [color][channel][pixel][case] with row stride strideInp;
// output uses strideTag. The responses are computed twice (once for the max,
// once for the outputs) to avoid a size-sizeH local array.
template <int sizeV>
__global__ void kVectFuncAct(const float* input, float* const target,
							   const uint numPixelsPerGroup, const uint numCases,
							   const uint strideInp, const uint strideTag, int numColors, int sizeH) {
// ix, iy == 0 almost always
	const int bd_off = (blockDim.y*blockIdx.y + threadIdx.y)*strideInp + blockDim.x*blockIdx.x + threadIdx.x;
	const int pix_stride = numPixelsPerGroup*strideInp;
	const int pix_tag_stride = numPixelsPerGroup*strideTag;
	// Grid-stride loops over pixels (iy) and cases (ix).
	for (uint iy = 0; iy < numPixelsPerGroup; iy += gridDim.y*blockDim.y)
	{
		for (uint ix = 0; ix < numCases; ix += gridDim.x*blockDim.x)
		{
			int xy_off = iy*strideInp + ix + bd_off;
			for (uint color = 0; color < numColors; color ++) {
				int color_off = color*pix_stride;
				// Cache the sizeV input values for this position.
				float inpVal[sizeV];//use shared instead?
#pragma unroll
				for (uint inp_i = 0; inp_i < sizeV; inp_i++) {
					int voff = color_off*sizeV + inp_i*pix_stride + xy_off;
					float val = input[voff];
					inpVal[inp_i] = val;
				}
				// First pass: the maximum response (sizeH is a runtime value,
				// so the unroll pragma is advisory only).
				float vmax= 0;
#pragma unroll
				for (uint out_i = 0; out_i < sizeH; out_i++) {
					int out_par = out_i*sizeV;
					float output = 0;
#pragma unroll
					for (uint inp_i = 0; inp_i < sizeV; inp_i++)
					{
						float param = const_area[out_par + inp_i];
						float val = inpVal[inp_i];
						output += param*val;
					}// inp_i
					//suppression filter
					//output = fmaxf(output, 0);
					vmax = fmaxf(output, vmax);
				}//out_i
				// Second pass: recompute responses, suppress, and store.
				for (uint out_i = 0; out_i < sizeH; out_i++) {
					int out_par = out_i*sizeV;
					float output = 0;
#pragma unroll
					for (uint inp_i = 0; inp_i < sizeV; inp_i++)
					{
						float param = const_area[out_par + inp_i];
						float val = inpVal[inp_i];
						output += param*val;
					}// inp_i
					//suppression filter
					output = fmaxf(output - SCALE_H*(vmax-output), 0);
					int toffset = color_off*sizeH + out_i*pix_tag_stride + xy_off;
					target[toffset] = output;
				}//out_i
			}//color
		}
	}
}
// Gradient of kVectFuncAct w.r.t. the inputs.
// Recomputes the sizeH linear responses, finds the winning output kmax, and
// for every output that survives the suppression (post-suppression value > 0)
// accumulates
//     grad_next * ((1+SCALE_H)*w_out[i] - SCALE_H*w_kmax[i])
// into each input channel i, matching d/dv of
// out = h_out - SCALE_H*(h_kmax - h_out). Overwrites target (no accumulate).
template <int sizeV>
__global__ void kVectFuncGrad(const float* actGrad, const float* input, float* const target,
							   const uint numPixelsPerGroup, const uint numCases,
							   const uint strideInp, const uint strideOut,
							   int numColors, int sizeH) {
	const int inStep = strideInp*numPixelsPerGroup;
	const int outStep = strideOut*numPixelsPerGroup;
	const int btx = blockDim.x*blockIdx.x + threadIdx.x;
	const int bty = blockDim.y*blockIdx.y + threadIdx.y;
	const int bd_off_in = bty*strideInp + btx;
	const int bd_off_out = bty*strideOut + btx;
//with no N_SUM ix, iy == 0 almost always
	for (uint iy = 0; iy < numPixelsPerGroup; iy += gridDim.y*blockDim.y) {
		for (uint ix = 0; ix < numCases; ix += gridDim.x*blockDim.x) {
			int xy_off_in = iy*strideInp + ix + bd_off_in;
			int xy_off_out = iy*strideOut + ix + bd_off_out;
			for (uint color = 0; color < numColors; color ++) { //optimize away
				int v_off = color*inStep*sizeV + xy_off_in;
				int out_off = color*outStep*sizeH + xy_off_out;
				// Pass 1: max response and its index (the "winner").
				float vmax = 0;
				int kmax = 0;
				for (uint out_i = 0; out_i < sizeH; out_i++)
				{
					float vsum = 0;
					for (uint inp_i = 0; inp_i < sizeV; inp_i++) {
						int inp_offset = v_off + inp_i*inStep;
						vsum += input[inp_offset]*const_area[out_i*sizeV + inp_i];
					}
					if(vsum > vmax)
					{
						vmax = vsum;
						kmax = out_i;
					}
				}
				// Pass 2: accumulate gradients of the surviving outputs.
				float vres[sizeV];
				memset(vres, 0, sizeof(vres));
				for (uint out_i = 0; out_i < sizeH; out_i++)
				{
					float output = 0;
					for (uint inp_i = 0; inp_i < sizeV; inp_i++) {
						int inp_offset = v_off + inp_i*inStep;
						output += input[inp_offset]*const_area[out_i*sizeV + inp_i];
					}
					output = fmaxf(output - SCALE_H*(vmax-output), 0);
					if(output > 0)
					{
						int out_offset = out_i*outStep + out_off;
						float grad_next = actGrad[out_offset];
						for (uint inp_i = 0; inp_i < sizeV; inp_i++)
							vres[inp_i] += grad_next*((1+SCALE_H)*const_area[out_i*sizeV + inp_i] - SCALE_H*const_area[kmax*sizeV + inp_i]);
					}
				}
				// Write the sizeV input gradients for this position.
				for (uint inp_i = 0; inp_i < sizeV; inp_i++)
				{
					int in_off = inp_i*inStep + v_off;
					target[in_off] = vres[inp_i];
				}
			}//color
		}//ix
	}//iy
}
// Gradient of kVectFuncAct w.r.t. the weight matrix const_area.
// Shared memory is split into a per-thread input cache (in_store,
// sizeV*blockDim.x*blockDim.y floats) followed by per-thread accumulators
// (resh, sizeV*sizeH floats each). Both slices are private to a thread
// (offsets derive from threadIdx), so no __syncthreads() is required.
// Each thread accumulates d(out)/d(w) over its grid-strided (pixel, case)
// positions: surviving outputs contribute (1+SCALE_H)*grad*v to their own
// row and -SCALE_H*grad*v to the winner's row kmax; the partials are then
// written to one target matrix per (pout, pin) coefficient at bd_off_tag
// for host-side reduction.
// NOTE(review): no z/ix bounds guard on the final target write -- assumes the
// target matrices are sized to blocks*threads exactly (the callers resize
// them that way); confirm before changing launch config.
template <int sizeV>
__global__ void kVectFuncParamWeightGrad( const float* actGrad, const float* input, float** const target,
										 const uint numColors,
										const uint target_size, const uint numPixelsPerGroup, const uint numCases,
										const uint strideInp, const uint strideOut, const uint strideTag, int sizeH)
{
	extern __shared__ float sh_mem[];
	const int xy_off = threadIdx.y*blockDim.x + threadIdx.x;
	const int res_off = xy_off*sizeV*sizeH;
	float* resh = sh_mem + sizeV*blockDim.x*blockDim.y + res_off;
	float* in_store = sh_mem;
	memset(resh, 0, sizeV*sizeH*sizeof(float));
	const int btx = blockDim.x*blockIdx.x + threadIdx.x;
	const int bty = blockDim.y*blockIdx.y + threadIdx.y;
	const int bd_off_in = bty*strideInp + btx;
	const int bd_off_out = bty*strideOut + btx;
	const int bd_off_tag = bty*strideTag + btx;
	const int pix_out_stride = numPixelsPerGroup*strideOut;
	const int pix_in_stride = numPixelsPerGroup*strideInp;
	for (uint iy = 0; iy < numPixelsPerGroup; iy += gridDim.y*blockDim.y) {
		for (uint ix = 0; ix < numCases; ix += gridDim.x*blockDim.x) {
			int xy_off_in = iy*strideInp + ix + bd_off_in;
			int xy_off_out = iy*strideOut + ix + bd_off_out;
			for (uint color = 0; color < numColors; color ++) {
				// Cache the sizeV inputs for this position in shared memory.
				float* inp_val = in_store + xy_off*sizeV;
				//float inp_val[sizeV];
				for (uint pin = 0; pin < sizeV; pin++)
				{
					int in_off = color*pix_in_stride*sizeV + pin*pix_in_stride + xy_off_in;
					inp_val[pin] = input[in_off];
				}
				// Pass 1: winning response index and value.
				int kmax= 0;
				float vmax = 0;
				for (uint pout = 0; pout < sizeH; pout++)
				{
					float vsum = 0;
#pragma unroll
					for (uint pin = 0; pin < sizeV; pin++)
					{
						vsum += inp_val[pin]*const_area[pout*sizeV + pin];
					}
					if(vsum > vmax)
					{
						vmax = vsum;
						kmax = pout;
					};
				}//pout
				// Pass 2: per-output contributions; the winner's extra term is
				// gathered separately and folded in afterwards.
				float vres_max[sizeV];
				memset(vres_max, 0, sizeof(vres_max));
				for (uint pout = 0; pout < sizeH; pout++)
				{
					float* vres = resh + sizeV*pout;
					int out_off = color*pix_out_stride*sizeH + pout*pix_out_stride + xy_off_out;
					float grad_next = actGrad[out_off];
					float output = 0;
#pragma unroll
					for (uint pin = 0; pin < sizeV; pin++)
					{
						output += inp_val[pin]*const_area[pout*sizeV + pin];
					}
					output = fmaxf(output - SCALE_H*(vmax-output), 0);
					if(output > 0)
					{
						for (uint pin = 0; pin < sizeV; pin++)
						{
							vres[pin] += grad_next*(1+SCALE_H)*inp_val[pin];
							vres_max[pin] += - SCALE_H*grad_next*inp_val[pin];
						}
					}//vsum
				}//pout
#pragma unroll
				for (uint pin = 0; pin < sizeV; pin++)
				{
					resh[kmax*sizeV + pin] += vres_max[pin];
				}
			}//color
		}//ix
	}//iy
	// Flush this thread's partials: one matrix per weight coefficient.
	for (uint pout = 0; pout < sizeH; pout++)
#pragma unroll
	for (uint pin = 0; pin < sizeV; pin++)
	{
		target[pout*sizeV+pin][bd_off_tag] = resh[pout*sizeV+pin];
	}
}
//*************************************************************************************
//-------------------------------------------------------------
//API EltwiseMax
//-------------------------------------------------------------
// Routes the gradient of an element-wise max: kEltwiseMaxGrad passes actGrad
// through where the corresponding input won the max, optionally accumulating
// into target (add == true, which requires target to be pre-sized).
void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) {
    assert(actGrad.isContiguous());
    assert(output.isContiguous());
    assert(input.isContiguous());
    assert(actGrad.isSameDims(input));
    assert(actGrad.isSameDims(output));

    const int numElems = actGrad.getNumElements();
    dim3 threads(128);
    dim3 blocks(DIVUP(numElems, 128));

    if (add) {
        // Accumulating variant: caller must have sized target already.
        assert(actGrad.isSameDims(target));
        hipFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kEltwiseMaxGrad<128, true>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), numElems);
    } else {
        // Overwriting variant: (re)size target to match before launching.
        target.resize(actGrad);
        hipFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kEltwiseMaxGrad<128, false>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), numElems);
    }
    cutilCheckMsg("computeEltwiseMaxGrad: Kernel execution failed");
}
//-------------------------------------------------------------
//API EltwiseFunc
//-------------------------------------------------------------
// Forward pass of the element-wise function layer.
// Uploads the layer parameters into constant memory (const_area), resizes
// target to (inp_height*size_out/size_in) x inp_width when needed, and
// dispatches kEltwiseFuncAct<size_in>, which maps groups of size_in input
// rows to size_out output rows. The last two entries of `param` are the
// "el switch" coefficients Csw/Bsw passed separately to the kernel.
void computeEltwiseFuncAct(NVMatrix& input, NVMatrix& target, vector<double>& param, int channels, int size_in, int size_out)
{
	assert(size_in <= 4);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	int out_width = inp_width;
	int out_height = (inp_height*size_out)/size_in;
	if (target.getNumCols() != out_width || target.getNumRows() != out_height) {
		target.resize(out_height, out_width);
	}
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	// Robustness: the last two slots are read below as switch coefficients.
	assert(param.size() >= 2);
	memset(temp, 0, sizeof(temp));
	for(size_t i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	hipMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);
	//el switch
	float Csw = param[param.size()-2];
	float Bsw = param[param.size()-1];
	int numPixelsPerGroup = inp_height/size_in;
	int numPixelsPerChannel = inp_height/channels;
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(out_width, threads.x)),
				::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
	// Dispatch on the compile-time group size.
#define ELT_ACT(SIZE_ARR) \
	if(size_in == SIZE_ARR){\
	hipFuncSetCacheConfig(kEltwiseFuncAct<SIZE_ARR>, hipFuncCachePreferL1);\
	hipLaunchKernelGGL(( kEltwiseFuncAct<SIZE_ARR>), dim3(blocks), dim3(threads), 0, 0, input.getDevData(),\
	target.getDevData(), inp_height, inp_width, input.getStride(), target.getStride(), numPixelsPerChannel,\
	Csw, Bsw, size_in, size_out);};
	ELT_ACT(1)
	ELT_ACT(2)
	ELT_ACT(3)
	ELT_ACT(4)
#undef ELT_ACT
	cutilCheckMsg("computeEltwiseFuncAct: Kernel execution failed");
}
// Backward pass of the element-wise function layer: computes the input
// gradient from actGrad via kEltwiseFuncGrad<size_out>. Parameters are
// uploaded to constant memory exactly as in computeEltwiseFuncAct; target is
// resized to the input's shape when needed.
void computeEltwiseFuncGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& target,
							vector<double>& param, int channels, int size_in, int size_out)
{
	assert(size_out <= 4);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	if (target.getNumCols() != inp_width || target.getNumRows() != inp_height) {
		target.resize(inp_height, inp_width);
	}
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	// Robustness: the last two slots are read below as switch coefficients.
	assert(param.size() >= 2);
	memset(temp, 0, sizeof(temp));
	for(size_t i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	hipMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);
	//el switch
	float Csw = param[param.size()-2];
	float Bsw = param[param.size()-1];
	int numPixelsPerGroup = inp_height/size_in;
	int numPixelsPerChannel = inp_height/channels;
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
	// Dispatch on the compile-time output-group size.
#define ELT_GRAD(SIZE_ARR) \
	if(size_out == SIZE_ARR){\
	hipFuncSetCacheConfig(kEltwiseFuncGrad<SIZE_ARR>, hipFuncCachePreferL1);\
	hipLaunchKernelGGL(( kEltwiseFuncGrad<SIZE_ARR>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(),\
	input.getDevData(), target.getDevData(), inp_height, inp_width,\
	input.getStride(), actGrad.getStride(), numPixelsPerChannel,\
	Csw, Bsw, size_in, size_out);};
	ELT_GRAD(1)
	ELT_GRAD(2)
	ELT_GRAD(3)
	ELT_GRAD(4)
#undef ELT_GRAD
	cutilCheckMsg("computeEltwiseFuncGrad: Kernel execution failed");
}
// Gradient of the element-wise function layer w.r.t. its parameters.
// Resizes/zeroes one temp matrix per parameter, copies the device-pointer
// table to arrayPtr, and launches kEltwiseFuncParamWeightGrad which writes
// per-thread partial gradients into the temp matrices (host reduces later).
// NOTE(review): only (size_out 1..4) x (size_in 2..4) combinations are
// dispatched; size_in == 1 silently launches nothing -- confirm callers never
// pass it. The B/C coefficient gradient path that used to be commented out
// here was dropped; see version history if it needs resurrecting.
void computeEltwiseFuncParamWeightGrad(NVMatrix& actGrad, NVMatrix& input,
									   void* arrayPtr, vector<NVMatrix>& tempMatrix,
									   NVMatrix& tempC, NVMatrix& tempB,
									   vector<double>& param, float lim,
									   int channels, int size_in, int size_out)
{
	assert(size_out <= 4 && size_in <= 4);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	assert(input.getStride() == actGrad.getStride());
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	// Robustness: the last two slots are read below as switch coefficients.
	assert(param.size() >= 2);
	memset(temp, 0, sizeof(temp));
	for(size_t i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	hipMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);
	//el switch
	float Csw = param[param.size()-2];
	float Bsw = param[param.size()-1];
	int numPixelsPerGroup = inp_height/size_in;
	int numPixelsPerChannel = inp_height/channels;
#define N_SUM 1
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
#undef N_SUM
	int tag_width = blocks.x*threads.x;
	int tag_height = blocks.y*threads.y;
	int tag_size = tag_width*tag_height;
	// Bug fix: guard the fixed-size host pointer table below against
	// overflow (the original wrote past it for oversized tempMatrix).
	assert(tempMatrix.size() <= CONST_AREA_SIZE);
	float* tempMatrixPtr[CONST_AREA_SIZE];
	for(size_t i = 0; i < tempMatrix.size(); i++)
	{
		if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
			tempMatrix[i].resize(tag_height, tag_width);
			hipMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
		}
		tempMatrixPtr[i] = tempMatrix[i].getDevData();
	}
	hipMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), hipMemcpyHostToDevice);
	// Dispatch on the (size_out, size_in) template pair.
#define ELT_W_GRAD(SIZE_ARR_OUT, SIZE_ARR_IN) \
	if(size_out == SIZE_ARR_OUT && size_in == SIZE_ARR_IN){\
	hipLaunchKernelGGL(( kEltwiseFuncParamWeightGrad<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, SIZE_ARR_OUT, SIZE_ARR_IN>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(),\
	input.getDevData(), (float**)arrayPtr,\
	inp_height, inp_width,\
	input.getStride(), tempMatrix[0].getStride(), numPixelsPerChannel, Csw, Bsw);};
	ELT_W_GRAD(1,2)
	ELT_W_GRAD(2,2)
	ELT_W_GRAD(3,2)
	ELT_W_GRAD(4,2)
	ELT_W_GRAD(1,3)
	ELT_W_GRAD(2,3)
	ELT_W_GRAD(3,3)
	ELT_W_GRAD(4,3)
	ELT_W_GRAD(1,4)
	ELT_W_GRAD(2,4)
	ELT_W_GRAD(3,4)
	ELT_W_GRAD(4,4)
#undef ELT_W_GRAD
}
// Diagnostic entry point: runs kEltwiseFuncGroupTest over the layer's groups
// and collects per-parameter statistics into the temp matrices (resized and
// zeroed here; the device-pointer table is copied to arrayPtr first).
void testGroupsEltwiseFunc(NVMatrix& actGrad, NVMatrix& input,
						   void* arrayPtr, vector<NVMatrix>& tempMatrix,
						   vector<double>& param,
						   int size_in, int size_out, int channels, int cnttest)
{
	assert(size_in <= 4);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	assert(input.getStride() == actGrad.getStride());
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	memset(temp, 0, sizeof(temp));
	for(size_t i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	hipMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);
	int numPixelsPerGroup = inp_height/size_in;
#define N_SUM 1
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
#undef N_SUM
	int tag_width = blocks.x*threads.x;
	int tag_height = blocks.y*threads.y;
	int tag_size = tag_width*tag_height;
	int numPixelsPerChannel = inp_height/channels;
	// Bug fix: guard the fixed-size host pointer table below against
	// overflow (the original wrote past it for oversized tempMatrix).
	assert(tempMatrix.size() <= CONST_AREA_SIZE);
	float* tempMatrixPtr[CONST_AREA_SIZE];
	for(size_t i = 0; i < tempMatrix.size(); i++)
	{
		if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
			tempMatrix[i].resize(tag_height, tag_width);
			hipMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
		}
		tempMatrixPtr[i] = tempMatrix[i].getDevData();
	}
	hipMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), hipMemcpyHostToDevice);
	// Dispatch on the compile-time group size.
#define ELT_T_GRAD(SIZE_ARR) \
	if(size_in == SIZE_ARR){\
	hipLaunchKernelGGL(( kEltwiseFuncGroupTest<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, SIZE_ARR>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(),\
	input.getDevData(), (float**)arrayPtr,\
	inp_height, inp_width,\
	input.getStride(), tempMatrix[0].getStride(),\
	numPixelsPerChannel, cnttest);};
	ELT_T_GRAD(1)
	ELT_T_GRAD(2)
	ELT_T_GRAD(3)
	ELT_T_GRAD(4)
#undef ELT_T_GRAD
}
// Normalizes the layer's parameter groups on-device via kNormalizeParam.
// Temp matrices are resized/zeroed and their device pointers copied to
// arrayPtr for the kernel to write through.
// NOTE(review): only size_in 2..4 are dispatched although the assert admits
// size_in == 1, which silently does nothing -- confirm callers never pass it.
void normalizeGroups(NVMatrix& input,
					 void* arrayPtr, vector<NVMatrix>& tempMatrix,
					 int size_in, int size_out, int channels)
{
	assert(size_in <= 4);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	int numPixelsPerGroup = inp_height/size_in;
#define N_SUM 1
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
#undef N_SUM
	int tag_width = blocks.x*threads.x;
	int tag_height = blocks.y*threads.y;
	int tag_size = tag_width*tag_height;
	int numPixelsPerChannel = inp_height/channels;
	// Guard the fixed-size host pointer table below.
	assert(tempMatrix.size() <= CONST_AREA_SIZE);
	float* tempMatrixPtr[CONST_AREA_SIZE];
	for(size_t i = 0; i < tempMatrix.size(); i++)
	{
		if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
			tempMatrix[i].resize(tag_height, tag_width);
			hipMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
		}
		tempMatrixPtr[i] = tempMatrix[i].getDevData();
	}
	hipMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), hipMemcpyHostToDevice);
	// Dispatch on the compile-time group size.
#define NORM_GROUP(SIZE_ARR) \
	if(size_in == SIZE_ARR){\
	hipLaunchKernelGGL(( kNormalizeParam<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, SIZE_ARR>), dim3(blocks), dim3(threads), 0, 0, \
	input.getDevData(), (float**)arrayPtr,\
	inp_height, inp_width,\
	input.getStride(), tempMatrix[0].getStride(),\
	numPixelsPerChannel);};
	NORM_GROUP(2)
	NORM_GROUP(3)
	NORM_GROUP(4)
// Bug fix: the original "#undef ELT_T_GRAD" undefined a macro from another
// function and leaked NORM_GROUP into the rest of the translation unit.
#undef NORM_GROUP
}
//void computeEltwiseFuncParamGradSingle(NVMatrix& actGrad, NVMatrix& input,
// NVMatrix& target, NVMatrix& target_m,
// int pin, int pout, int size_in, int size_out)
//{
//
// int inp_width = input.getNumCols();
// int inp_height = input.getNumRows();
//
//
// int numPixelsPerGroup = inp_height/size_in;
//// printf("inp_height %i numPixelsPerGroup %i \n", inp_height, numPixelsPerGroup);
//#define N_SUM 1
// dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
// dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
// ::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
//#undef N_SUM
//
// int sizeX = blocks.x*threads.x;
// int sizeY = blocks.y*threads.y;
//
// if (target.getNumCols() != sizeX || target.getNumRows() != sizeY) {
// //printf(" tresize %i %i \n", sizeX, sizeY);
// target.resize(sizeY, sizeX);// numRows, numCols !
// }
//
//
// if (!target_m.isSameDims(target)) {
// target_m.resize(target);
// }
//
//
//
// hipFuncSetCacheConfig(kEltwiseFuncParamGradSingle, hipFuncCachePreferL1);
//
//
// kEltwiseFuncParamGradSingle<<<blocks, threads>>>(actGrad.getDevData(),
// input.getDevData(), target.getDevData(), target_m.getDevData(),
// pin, pout, inp_height, inp_width,
// input.getStride(), actGrad.getStride(), target.getStride(),
// size_in, size_out);
//
//
// cutilCheckMsg("kEltwiseFuncParamGrad: Kernel execution failed");
//};
//-------------------------------------------------------------
//API MAvg
//-------------------------------------------------------------
#include "conv_debug.h"
#define SIZE_CONV 3
// Forward pass of the moving-average (box-filter) layer: each output pixel is
// the mean of its sizeModuleSide x sizeModuleSide neighbourhood (scale below).
// `target` is resized to input's geometry when needed.
// Launch layout: blocks.x tiles the cases (casePerThread cases per thread),
// blocks.y tiles the image in 8x8 patches; shared memory holds one haloed tile
// per channel per case-thread. Only SIZE_CONV == 3 is supported (asserted).
void computeMAvgAct(NVMatrix& input, NVMatrix& target, int sizeModuleSide, int channels,
                    int imgSize, int imgPixels)
{
    const int outCols = input.getNumCols();
    const int outRows = input.getNumRows();
    if (target.getNumCols() != outCols || target.getNumRows() != outRows) {
        target.resize(outRows, outCols);
    }

    const int numCases = outCols;
    const int imgSizeX = imgSize;
    const int imgSizeY = imgSize;

    // 8x8 image tile per block, 16 cases per thread, ~2 case-blocks.
    const int tileX = 8;
    const int tileY = 8;
    const int casePerThread = 16;
    const int approxCaseBlocks = 2;
    const int caseThreads = DIVUP(numCases, approxCaseBlocks*casePerThread);

    const int imgBlocksY = DIVUP(imgSizeY, tileX);
    const int imgBlocksX = DIVUP(imgSizeX, tileY);

    const int lobe = sizeModuleSide/2;
    const float scale = 1./(sizeModuleSide*sizeModuleSide);

    // Tile plus a halo of `lobe` pixels on each side, for every channel and
    // case-thread, lives in dynamic shared memory.
    const int sharedX = 2*lobe + tileX;
    const int sharedY = 2*lobe + tileY;
    const int shared_size = sharedX*sharedY*channels*caseThreads*sizeof(float);

    dim3 threads(caseThreads, tileX*tileY);
    dim3 blocks(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);

    assert(SIZE_CONV == 3);

    hipLaunchKernelGGL(( kMAvgAct<(SIZE_CONV-1)/2, SIZE_CONV>), dim3(blocks), dim3(threads), shared_size, 0,
        input.getDevData(), target.getDevData(),
        numCases, channels, casePerThread,
        sharedY, tileX, tileY,
        imgSizeX, imgSizeY,
        imgPixels, scale);

    cutilCheckMsg("computeMAvgAct: Kernel execution failed");
};
// Backward pass of the moving-average (box-filter) layer: spreads the incoming
// gradient `actGrad` uniformly (scale = 1/window-area) and writes the result to
// `target`, resized to actGrad's geometry if needed. Launch geometry mirrors
// computeMAvgAct; only SIZE_CONV == 3 is supported (asserted).
void computeMAvgGrad(NVMatrix& actGrad, NVMatrix& target, int sizeModuleSide, int channels,
                    int imgSize, int imgPixels)
{
    const int gradCols = actGrad.getNumCols();
    const int gradRows = actGrad.getNumRows();
    if (target.getNumCols() != gradCols || target.getNumRows() != gradRows) {
        target.resize(gradRows, gradCols);
    }

    const int numCases = gradCols;
    const int imgSizeX = imgSize;
    const int imgSizeY = imgSize;

    // 8x8 image tile per block, 16 cases per thread, ~2 case-blocks.
    const int tileX = 8;
    const int tileY = 8;
    const int casePerThread = 16;
    const int approxCaseBlocks = 2;
    const int caseThreads = DIVUP(numCases, approxCaseBlocks*casePerThread);

    const int imgBlocksY = DIVUP(imgSizeY, tileX);
    const int imgBlocksX = DIVUP(imgSizeX, tileY);

    const int lobe = sizeModuleSide/2;
    const float scale = 1./(sizeModuleSide*sizeModuleSide);

    // Haloed tile per channel per case-thread in dynamic shared memory.
    const int sharedX = 2*lobe + tileX;
    const int sharedY = 2*lobe + tileY;
    const int shared_size = sharedX*sharedY*channels*caseThreads*sizeof(float);

    dim3 threads(caseThreads, tileX*tileY);
    dim3 blocks(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);

    assert(SIZE_CONV == 3);

    hipLaunchKernelGGL(( kMAvgGrad<(SIZE_CONV-1)/2, SIZE_CONV>), dim3(blocks), dim3(threads), shared_size, 0,
        actGrad.getDevData(), target.getDevData(),
        numCases, channels, casePerThread,
        sharedY, tileX, tileY,
        imgSizeX, imgSizeY,
        imgPixels, scale);

    cutilCheckMsg("computeMAvgGrad: Kernel execution failed");
}
//-------------------------------------------------------------
//API MicroConv
//-------------------------------------------------------------
// Forward pass of the micro-convolution layer: applies numFilters small
// (SIZE_CONV x SIZE_CONV) filters, whose coefficients come from `param`
// (uploaded into the constant-memory array const_area). The output stacks the
// per-filter responses, so target has input.getNumRows()*numFilters rows.
// Only SIZE_CONV == 3 is supported (asserted).
void computeMicroConvAct(NVMatrix& input, NVMatrix& target, vector<double>& param, int sizeModuleSide, int channels,
                         int imgSize, int imgPixels, int numFilters)
{
    const int outCols = input.getNumCols();
    const int outRows = input.getNumRows()*numFilters;
    if (target.getNumCols() != outCols || target.getNumRows() != outRows) {
        target.resize(outRows, outCols);
    }

    const int numCases = outCols;
    const int imgSizeX = imgSize;
    const int imgSizeY = imgSize;

    // 8x8 image tile per block, 16 cases per thread, ~2 case-blocks.
    const int tileX = 8;
    const int tileY = 8;
    const int casePerThread = 16;
    const int approxCaseBlocks = 2;
    const int caseThreads = DIVUP(numCases, approxCaseBlocks*casePerThread);

    const int imgBlocksY = DIVUP(imgSizeY, tileX);
    const int imgBlocksX = DIVUP(imgSizeX, tileY);

    // Tile plus halo of `lobe` pixels per side kept in dynamic shared memory.
    const int lobe = sizeModuleSide/2;
    const int sharedX = 2*lobe + tileX;
    const int sharedY = 2*lobe + tileY;
    const int shared_size = sharedX*sharedY*channels*caseThreads*sizeof(float);

    dim3 threads(caseThreads, tileX*tileY);
    dim3 blocks(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);

    // Publish the filter coefficients to device constant memory.
    float hostParam[CONST_AREA_SIZE];
    assert(param.size() <= CONST_AREA_SIZE);
    memset(hostParam, 0, sizeof(hostParam));
    for(int i = 0; i < param.size(); i++)
        hostParam[i] = (float)param[i];
    hipMemcpyToSymbol(const_area, hostParam, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);

    assert(SIZE_CONV == 3);

    hipLaunchKernelGGL(( kMicroConvFilterAct<(SIZE_CONV-1)/2, SIZE_CONV>), dim3(blocks), dim3(threads), shared_size, 0,
        input.getDevData(), target.getDevData(),
        numCases, channels, numFilters, casePerThread,
        sharedY, tileX, tileY,
        imgSizeX, imgSizeY,
        imgPixels);

    cutilCheckMsg("computeMicroConvAct: Kernel execution failed");
};
// Backward (input) pass of the micro-convolution layer: routes the stacked
// per-filter output gradients in `actGrad` back into input space and writes the
// result to `target` (resized to input's geometry). Filter coefficients are
// refreshed in constant memory from `param` before the launch.
void computeMicroConvActGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& target,
                            vector<double>& param, int sizeModuleSide, int channels,
                            int imgSize, int imgPixels, int numFilters)
{
    int inp_width = input.getNumCols();
    int inp_height = input.getNumRows();

    if (target.getNumCols() != inp_width || target.getNumRows() != inp_height) {
        target.resize(inp_height, inp_width);
    }

    int numCases = inp_width;
    int imgSizeX = imgSize;
    int imgSizeY = imgSize;
    int img_threads_x = 8;
    int img_threads_y = 8;
    int casePerThread = 16;
    int nblocksx = 2;//~number of blocks x
    int case_threads = DIVUP(numCases, nblocksx*casePerThread);

    int lobe = sizeModuleSide/2;
    int sharedX = lobe*2 + img_threads_x;
    int sharedY = lobe*2 + img_threads_y;
    // Unlike the forward pass, the haloed tile is kept per filter as well.
    int shared_size = sharedX*sharedY*numFilters*channels*case_threads*sizeof(float);

    int imgBlocksY = DIVUP(imgSizeY,img_threads_x);
    int imgBlocksX = DIVUP(imgSizeX,img_threads_y);

    dim3 threads(case_threads, img_threads_x*img_threads_y);
    dim3 blocks = dim3(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);

    // Refresh the filter coefficients in device constant memory.
    float temp[CONST_AREA_SIZE];
    assert(param.size() <= CONST_AREA_SIZE);
    memset(temp, 0, sizeof(temp));
    for(int i = 0; i < param.size(); i++)
        temp[i] = (float)param[i];
    hipMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);

    // Fix: these launch-config printfs had been left enabled (every sibling
    // function keeps them commented out) and spammed stdout on each call.
    //printf("blocks.x %i blocks.y %i threads.x %i threads.y %i shared_size %i casePerThread %i\n",
    //    blocks.x, blocks.y, threads.x, threads.y, shared_size, casePerThread);
    //printf("sharedY %i img_threads_x %i img_threads_y %i sizeModuleSide %i imgSizeX %i imgSizeY %i imgPixels %i numFilters %i numCases %i lobe %i\n",
    //    sharedY,img_threads_x,img_threads_y,sizeModuleSide,imgSizeX,imgSizeY, imgPixels,numFilters,numCases,lobe);

    hipLaunchKernelGGL(( kMicroConvActGrad), dim3(blocks), dim3(threads), shared_size, 0, actGrad.getDevData(), target.getDevData(),
        numCases, channels, numFilters, casePerThread,
        img_threads_x, img_threads_y,
        sharedY, sizeModuleSide, lobe,
        imgSizeX, imgSizeY,
        imgPixels);

    cutilCheckMsg("kMicroConvGrad: Kernel execution failed");
}
// Accumulates per-coefficient weight gradients for the micro-convolution layer.
// actGrad    - gradient w.r.t. the layer output (numFilters stacked responses)
// input      - layer input
// tempMatrix - one partial-sum matrix per convolution coefficient; each is
//              (re)sized to (tag_height x tag_width) and zeroed on resize.
// arrayPtr   - device buffer receiving the table of tempMatrix device pointers.
// `param` is unused here (kept for interface compatibility with the other
// micro-conv entry points). The partial sums still need a host-side reduction.
void computeMicroConvWeightGrad(NVMatrix& actGrad, NVMatrix& input,
                                vector<NVMatrix>& tempMatrix, void* arrayPtr,
                                vector<double>& param, int sizeModuleSide, int channels,
                                int imgSize, int imgPixels, int numFilters)
{
    int numCases = input.getNumCols();
    int imgSizeX = imgSize;
    int imgSizeY = imgSize;
    int img_threads_x = 8;
    int img_threads_y = 8;
    int casePerThread = 16;
    int nblocksx = 2;//~number of blocks x
    int case_threads = DIVUP(numCases, nblocksx*casePerThread);
    int lobe = sizeModuleSide/2;
    int sharedX = lobe*2 + img_threads_x;
    int sharedY = lobe*2 + img_threads_y;
    int conv_size = (lobe*2 + 1);
    int conv_size2 = conv_size*conv_size;
    int imgBlocksY = DIVUP(imgSizeY,img_threads_x);
    int imgBlocksX = DIVUP(imgSizeX,img_threads_y);
//for optimization can change both block sizes!
    dim3 threads(case_threads, img_threads_x*img_threads_y);
    dim3 blocks = dim3(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);

    int sizeSharedBlock = sharedX*sharedY;
    // Shared memory: one input tile per case-thread plus one per-thread
    // accumulator for every (filter, coefficient) pair.
    int shared_size = (sizeSharedBlock*threads.x + threads.x*threads.y*numFilters*conv_size2)*sizeof(float);//looped out - case_threads*imgsPerThread;

    int tag_width = DIVUP(input.getNumCols(), casePerThread) ; //could be reduced
    int tag_height = blocks.y*threads.y;//could be reduced
    int tag_size = tag_width*tag_height;

    // Fix: tempMatrixPtr is a fixed-size stack array; guard against overflow
    // before filling it (the sibling entry points assert their bounds too).
    assert(tempMatrix.size() <= CONST_AREA_SIZE);
    float* tempMatrixPtr[CONST_AREA_SIZE];
    for(int i =0; i < tempMatrix.size(); i++)
    {
        if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
            tempMatrix[i].resize(tag_height, tag_width);
            hipMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
        }
        tempMatrixPtr[i] = tempMatrix[i].getDevData();
    }
    // Publish the pointer table to the device.
    hipMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( kMicroConvWeightGrad<SIZE_CONV/2>), dim3(blocks), dim3(threads), shared_size, 0,
        actGrad.getDevData(), input.getDevData(), (float**)arrayPtr,
        tag_size, numCases, casePerThread, tag_width,
        channels, numFilters,
        img_threads_x, img_threads_y,
        imgSizeX, imgSizeY, imgPixels);

    cutilCheckMsg("kMicroConvWeightGrad: Kernel execution failed");
}
//-------------------------------------------------------------
//API VectFunc
//-------------------------------------------------------------
// Forward pass of the vector function layer: output height is
// (input rows * sizeH) / sizeV, i.e. each group of sizeV input rows per color
// yields sizeH output rows. Function coefficients from `param` are uploaded to
// the constant-memory array const_area. Supported sizeV: 1..4, 6, 8, 12, 16.
void computeVectFuncAct(NVMatrix& input, NVMatrix& target, vector<double>& param, int sizeV, int sizeH, int channels)
{
    assert(sizeV <= 4 || sizeV == 6 || sizeV == 8 || sizeV == 12 || sizeV == 16);

    const int inpCols = input.getNumCols();
    const int inpRows = input.getNumRows();
    const int outCols = inpCols;
    const int outRows = (inpRows*sizeH)/sizeV;

    const int numCases = outCols;
    const int numPixelsPerGroup = inpRows/channels;
    const int numColors = channels/sizeV;

    if (target.getNumCols() != outCols || target.getNumRows() != outRows) {
        target.resize(outRows, outCols);
    }

    // Publish the function coefficients to device constant memory.
    float hostParam[CONST_AREA_SIZE];
    assert(param.size() <= CONST_AREA_SIZE);
    memset(hostParam, 0, sizeof(hostParam));
    for(int i = 0; i < param.size(); i++)
        hostParam[i] = (float)param[i];
    hipMemcpyToSymbol(const_area, hostParam, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);

    dim3 threads(min(ELTWISE_THREADS_X, inpCols), ELTWISE_THREADS_Y);
    dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inpCols, threads.x)),
                ::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));

    // Instantiate the kernel for the compile-time vector size.
#define ELT_ACT(SIZE_ARR) \
    if(sizeV == SIZE_ARR){\
    hipFuncSetCacheConfig(kVectFuncAct<SIZE_ARR>, hipFuncCachePreferL1);\
    hipLaunchKernelGGL(( kVectFuncAct<SIZE_ARR>), dim3(blocks), dim3(threads), 0, 0, input.getDevData(),\
    target.getDevData(), numPixelsPerGroup, numCases, input.getStride(), target.getStride(), numColors, sizeH);};

    ELT_ACT(1)
    ELT_ACT(2)
    ELT_ACT(3)
    ELT_ACT(4)
    ELT_ACT(6)
    ELT_ACT(8)
    ELT_ACT(12)
    ELT_ACT(16)
#undef ELT_ACT

    cutilCheckMsg("kVectFuncAct: Kernel execution failed");
}
// Backward (input) pass of the vector function layer: given the output gradient
// `actGrad` and the layer input, writes the gradient w.r.t. the input into
// `target` (resized to input's geometry). Function coefficients from `param`
// are uploaded to constant memory first. Supported sizeV: 1..4, 6, 8, 12, 16.
void computeVectFuncGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& target,
                         vector<double>& param, int sizeV, int sizeH, int channels)
{
    assert(sizeV <= 4 || sizeV == 6 || sizeV == 8 || sizeV == 12 || sizeV == 16);

    const int inpCols = input.getNumCols();
    const int inpRows = input.getNumRows();
    if (target.getNumCols() != inpCols || target.getNumRows() != inpRows) {
        target.resize(inpRows, inpCols);
    }

    const int numCases = inpCols;
    const int numPixelsPerGroup = inpRows/channels;
    const int numColors = channels/sizeV;

    // Publish the function coefficients to device constant memory.
    float hostParam[CONST_AREA_SIZE];
    assert(param.size() <= CONST_AREA_SIZE);
    memset(hostParam, 0, sizeof(hostParam));
    for(int i = 0; i < param.size(); i++)
        hostParam[i] = (float)param[i];
    hipMemcpyToSymbol(const_area, hostParam, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);

    dim3 threads(min(ELTWISE_THREADS_X, inpCols), ELTWISE_THREADS_Y);
    dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inpCols, threads.x)),
                ::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));

    // Instantiate the kernel for the compile-time vector size.
#define ELT_GRAD(SIZE_ARR) \
    if(sizeV == SIZE_ARR){\
    hipFuncSetCacheConfig(kVectFuncGrad<SIZE_ARR>, hipFuncCachePreferL1);\
    hipLaunchKernelGGL(( kVectFuncGrad<SIZE_ARR>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(),\
    input.getDevData(), target.getDevData(), numPixelsPerGroup, numCases,\
    input.getStride(), actGrad.getStride(), numColors, sizeH);};

    ELT_GRAD(1)
    ELT_GRAD(2)
    ELT_GRAD(3)
    ELT_GRAD(4)
    ELT_GRAD(6)
    ELT_GRAD(8)
    ELT_GRAD(12)
    ELT_GRAD(16)
#undef ELT_GRAD

    cutilCheckMsg("kVectFuncGrad: Kernel execution failed");
};
// Accumulates parameter (weight) gradients for the vector function layer.
// For each of the sizeV*(sizeH+1) coefficients, a partial-sum matrix in
// `tempMatrix` receives per-block sums; the device pointer table is uploaded
// through `arrayPtr` and the current coefficients through const_area.
// NOTE: the partial sums still need a final reduction by the caller.
void computeVectFuncWeightGrad(NVMatrix& actGrad, NVMatrix& input,
                               vector<NVMatrix>& tempMatrix,
                               void* arrayPtr,
                               vector<double>& param, int sizeV, int sizeH, int channels)
{
    assert(sizeV <= 4 || sizeV == 6 || sizeV == 8 || sizeV == 12 || sizeV == 16);

    int inp_width = input.getNumCols();
    int inp_height = input.getNumRows();

    int numCases = inp_width;
    int numPixelsPerGroup = inp_height/channels;
    int numColors = channels/sizeV;

#define N_SUM 1
    dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
    dim3 blocks(::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),//reduce
                ::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
#undef N_SUM

    // Per-thread accumulators for all sizeV*(sizeH+1) coefficients.
    int shared_size = sizeV*(sizeH+1)*threads.x*threads.y*sizeof(float);

    int tag_width = blocks.x*threads.x; //could be reduced
    int tag_height = blocks.y*threads.y;//could be reduced
    int tag_size = tag_width*tag_height;

    // Upload current parameters to constant memory.
    float temp[CONST_AREA_SIZE];
    assert(param.size() <= CONST_AREA_SIZE);
    memset(temp, 0, sizeof(temp));
    for(int i = 0; i < param.size(); i++)
        temp[i] = (float)param[i];
    hipMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, hipMemcpyHostToDevice);

    // Fix: tempMatrixPtr is a fixed-size stack array; guard against overflow
    // before filling it (mirrors the param.size() assert above).
    assert(tempMatrix.size() <= CONST_AREA_SIZE);
    float* tempMatrixPtr[CONST_AREA_SIZE];
    for(int i =0; i < tempMatrix.size(); i++)
    {
        if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
            tempMatrix[i].resize(tag_height, tag_width);
        }
        tempMatrixPtr[i] = tempMatrix[i].getDevData();
    }
    hipMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), hipMemcpyHostToDevice);

    // Dispatch on the compile-time vector size.
#define ELT_GRAD(SIZE_ARR) \
    if(sizeV == SIZE_ARR){\
    hipFuncSetCacheConfig(kVectFuncParamWeightGrad<SIZE_ARR>, hipFuncCachePreferL1);\
    hipLaunchKernelGGL(( kVectFuncParamWeightGrad<SIZE_ARR>), dim3(blocks), dim3(threads), shared_size, 0, actGrad.getDevData(),\
    input.getDevData(), (float**)arrayPtr, numColors, tag_size, numPixelsPerGroup, numCases,\
    input.getStride(), actGrad.getStride(), tempMatrix[0].getStride(), sizeH);};

    ELT_GRAD(1)
    ELT_GRAD(2)
    ELT_GRAD(3)
    ELT_GRAD(4)
    ELT_GRAD(6)
    ELT_GRAD(8)
    ELT_GRAD(12)
    ELT_GRAD(16)
#undef ELT_GRAD

    cutilCheckMsg("kVectFuncParamWeightGrad: Kernel execution failed");
} | 29b649bc646f3991225785dad5639e5452d612df.cu |
#include <assert.h>
#include <layer_kernels.cuh>
//-------------------------------------------------------------
//EltwiseMax
//-------------------------------------------------------------
// Gradient of element-wise max: routes actGrad[i] to this input wherever it
// produced the recorded output (output[i] == input[i]); non-matching positions
// receive 0. Template flag `add` selects accumulate-into vs overwrite target.
// Grid-stride loop over numElements; B_X is the (compile-time) block width.
template <int B_X, bool add>
__global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target,
                                const int numElements) {
    const int stride = B_X * gridDim.x;
    for (int idx = B_X * blockIdx.x + threadIdx.x; idx < numElements; idx += stride) {
        // (output == input) evaluates to 1.0f where this input won the max.
        const float g = actGrad[idx] * (output[idx] == input[idx]);
        if (add) {
            target[idx] += g;
        } else {
            target[idx] = g;
        }
    }
}
#include "tt.h"
#define CONST_AREA_SIZE 256
__device__ __constant__ float const_area[CONST_AREA_SIZE];
//-------------------------------------------------------------
//EltwiseFunc
//-------------------------------------------------------------
// Hard sign step scaled to +/-0.5 (0 at s == 0, and 0 for NaN since both
// comparisons are false). The C parameter is kept for interface
// compatibility with the older soft clamp (fminf(fmaxf(s*C,-.5),.5)) but is
// currently unused.
__device__ inline float Switch(float s, float C)
{
    if (s > 0) return .5f;
    if (s < 0) return -.5f;
    return 0.f;
}
// Returns the median of three floats.
// BUG FIX: the previous body was fmaxf(fminf(a,b), fmaxf(max(a,b),c)), which
// algebraically reduces to max(a,b,c) — not the median — and applied the
// integer `max` to floats. The median of three is
// max(min(a,b), min(max(a,b), c)).
// (Only referenced from commented-out call sites in this file at present.)
__device__ inline float Median3(float a, float b, float c)
{
    return fmaxf(fminf(a, b), fminf(fmaxf(a, b), c));
}
//#define MIX_F
// Forward pass of a switched piecewise-linear element-wise function.
// Input holds sizeIn stacked groups of numPixelsPerGroup pixels x numCases;
// for each output channel out_i the kernel mixes the sizeIn input values
// through two affine+ReLU branches (weights in __constant__ const_area) and
// blends the branches with Sw, a sign-vote switch over the inputs.
// sizeArr is the compile-time upper bound for sizeIn (local buffer size).
// NOTE(review): assumes sizeIn <= sizeArr and that const_area was filled by
// the host launcher — TODO confirm at the call site.
template <int sizeArr>
__global__ void kEltwiseFuncAct(const float* input, float* const target,
                                const uint imgInPixels, const uint numCases,
                                const uint strideInp, const uint strideTag,
                                const int numPixelsPerChannel,
                                const float Csw, const float Bsw,
                                const uint sizeIn, const uint sizeOut) {
    // Pixels per input group: the sizeIn inputs are stacked along the pixel axis.
    const int numPixelsPerGroup = imgInPixels/sizeIn;
//  dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(out_width, ELTWISE_THREADS_X)),
//              std::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
    const uint idxX = blockIdx.x * blockDim.x + threadIdx.x;
    const uint idxY = blockIdx.y * blockDim.y + threadIdx.y;
#ifdef MIX_F
    // Channel-interleaved layout: position of this pixel inside its channel.
    const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
    // Offset from branch 0 to branch 1 parameters inside const_area.
    const int sw_len = sizeIn*ELWISE_FUNC_SEC;
// ix, iy == 0 almost always (grid normally covers the whole domain)
    for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y*blockDim.y) {
#ifdef MIX_F
        const int hiID = y/numPixelsPerChannel;
#endif
        for (uint x = idxX; x < numCases; x += gridDim.x*blockDim.x) {
            float inpVal[sizeV];//use shared instead?
            float v_sw =0;
            // Gather the sizeIn inputs for this pixel/case and vote on their signs.
            for (uint inp_i = 0; inp_i < sizeIn; inp_i++) {
#ifdef MIX_F
                int inp_off = hiID*sizeIn*numPixelsPerChannel*strideInp
                    + inp_i*numPixelsPerChannel*strideInp + pixelChannelID*strideInp + x;
#else
                int inp_off = inp_i*numPixelsPerGroup*strideInp + y*strideInp + x;
#endif
                float val = input[inp_off];
                inpVal[inp_i] = val;
                v_sw += (val>0) - (val<0);   // sign vote: +1 / -1 / 0 per input
            }
            // Sw in {-0.5, 0, +0.5}: selects between the two parameter branches.
            float Sw = Switch(v_sw + Bsw, Csw);
            //float v_sw = Median3(inpVal[0],inpVal[1],inpVal[2]);
            for (uint out_i = 0; out_i < sizeOut; out_i++) {
                // Base offset of this output's parameter block in const_area.
                int out_par = out_i*EL_SWITCH*sizeIn*ELWISE_FUNC_SEC;
                float sum = 0;
                for (uint inp_i = 0; inp_i < sizeIn; inp_i++)
                {
                    float val = inpVal[inp_i];
                    // Branch 0: linear term + hinged (ReLU) term with bias paramB.
                    float param = const_area[out_par + inp_i];
                    float paramM = const_area[out_par + sizeIn + inp_i];
                    float paramB = const_area[out_par + 2*sizeIn + inp_i];
                    float output = param*val + paramM*fmax(val+paramB, 0);
                    // Branch 1: same structure but the hinge is on -val.
                    float param_1 = const_area[out_par + inp_i+sw_len];
                    float paramM_1 = const_area[out_par + sizeIn + inp_i+sw_len];
                    float paramB_1 = const_area[out_par + 2*sizeIn + inp_i+sw_len];
                    float output_1 = param_1*val + paramM_1*fmax(-val+paramB_1, 0);
                    // Blend: average of the branches plus Sw-weighted difference.
                    sum += Sw*(output - output_1) + .5*(output + output_1);
                }// inp_i
                int tag_off = out_i*numPixelsPerGroup*strideTag + y*strideTag + x;
                //target[tag_off] = Switch(v_sw+Bsw, Csw)*(output - output_1) + .5*(output + output_1);
                target[tag_off] = sum;
            }//out_i
        }
    }
}
// Backward pass of the switched piecewise-linear function (see kEltwiseFuncAct).
// For every input channel inp_i, accumulates over output channels the incoming
// gradient times the local derivative of the blended two-branch function.
// sizeArr is the compile-time upper bound for both sizeOut (grad_next buffer)
// and sizeIn (inpArr buffer).
// BUG FIX: the input buffer was declared `float inpArr[3]` even though sizeIn
// is a runtime value; any sizeIn > 3 overflowed the local array. It now uses
// the template bound sizeArr, matching inpVal[sizeArr] in kEltwiseFuncAct.
// NOTE(review): assumes sizeIn <= sizeArr and sizeOut <= sizeArr — TODO
// confirm at the launcher.
template <int sizeArr>
__global__ void kEltwiseFuncGrad(const float* actGrad, const float* input, float* const target,
                                const uint imgInPixels, const uint numCases,
                                const uint strideInp, const uint strideOut,
                                const int numPixelsPerChannel,
                                const float Csw, const float Bsw,
                                const uint sizeIn, const uint sizeOut) {
    const int numPixelsPerGroup = imgInPixels/sizeIn;
    // Distance between consecutive output channels in actGrad.
    const int outStep = strideOut*numPixelsPerGroup;
    const uint idxX = blockIdx.x * blockDim.x + threadIdx.x;
    const uint idxY = blockIdx.y * blockDim.y + threadIdx.y;
#ifdef MIX_F
    const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
    // Offset from branch-0 to branch-1 parameters in const_area.
    const int sw_len = sizeIn*ELWISE_FUNC_SEC;
    //with no N_SUM ix, iy == 0 almost always
    for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y*blockDim.y) {
#ifdef MIX_F
        const int hiID = y/numPixelsPerChannel;
#endif
        for (uint x = idxX; x < numCases; x += gridDim.x*blockDim.x) {
            float grad_next[sizeArr];
            int act_off = y*strideInp + x;
#ifdef MIX_F
            int inp_off = hiID*sizeIn*numPixelsPerChannel*strideInp
                + pixelChannelID*strideInp + x;
#define strideInpStep strideInp*numPixelsPerChannel
#else
#define inp_off act_off
#define strideInpStep strideInp*numPixelsPerGroup
#endif
            // Cache the incoming gradients for all output channels.
            for (uint out_i = 0; out_i < sizeOut; out_i++)
            {
                grad_next[out_i] = actGrad[act_off + outStep*out_i];
            }//out_i
            // Gather inputs and recompute the sign-vote switch (as in forward).
            float inpArr[sizeArr];
            float v_sw =0;
            for (uint inp_i = 0; inp_i < sizeIn; inp_i++)
            {
                float val = input[inp_off + inp_i*strideInpStep];
                inpArr[inp_i] = val;
                v_sw += (val>0) - (val<0);
            }
            //float v_sw = Median3(inpArr[0],inpArr[1],inpArr[2]);
            float Sw = Switch(v_sw+Bsw, Csw);
            for (uint inp_i = 0; inp_i < sizeIn; inp_i++) {
                float val = inpArr[inp_i];
                float sum_grad = 0;
                for (uint out_i = 0; out_i < sizeOut; out_i++)
                {
                    int out_par = out_i*EL_SWITCH*sizeIn*ELWISE_FUNC_SEC;
                    // Hinge activity indicators for the two branches.
                    float vsign_0 = (val + const_area[out_par + 2*sizeIn + inp_i] > 0);
                    float vsign_1 = (-val + const_area[out_par + sw_len + 2*sizeIn + inp_i] > 0);
                    // Per-branch slopes d(output)/d(val).
                    float c_0 = vsign_0*const_area[out_par + sizeIn + inp_i] + const_area[out_par + inp_i];
                    float c_1 = -vsign_1*const_area[out_par + sw_len + sizeIn + inp_i] + const_area[out_par + sw_len + inp_i];
                    // Same Sw-blend weights as the forward pass.
                    sum_grad += grad_next[out_i]*((Sw+.5)*c_0 + (.5-Sw)*c_1);
                    //+(v_sw+Bsw > -InvCsw && v_sw+Bsw < InvCsw)*Csw*(c_0-c_1));
                }
#ifdef MIX_F
                target[inp_off + inp_i*numPixelsPerChannel*strideInp] = sum_grad;
#else
                target[inp_off + inp_i*strideInpStep] = sum_grad;
#endif
            }//inp_i
        }//ix
    }//iy
}
//---------------
//---------------
// Parameter (weight) gradients for the switched piecewise-linear function.
// Each thread accumulates, over its strided share of pixels/cases, the partial
// derivatives w.r.t. the linear (sum), hinge-slope (sum_m) and hinge-bias
// (sum_b) parameters of both branches, then writes one partial per parameter
// into target[param][tagOffset] for a host-side reduction.
// NOTE(review): branch 1 mixes (val_m_1 < 0) for sum_m but (val_m_1 > 0) for
// sum_b, and val_m_1 uses +in_val where the forward pass hinges on -val —
// looks inconsistent with kEltwiseFuncAct; verify against the training code.
template <int B_X, int B_Y, int sizeOut, int sizeIn>
__global__ void kEltwiseFuncParamWeightGrad(float* actGrad, float* input, float** target,
                                    const uint imgInPixels, const uint numCases,
                                    const uint stride, const uint strideTag,
                                    const uint numPixelsPerChannel,
                                    const float Csw, const float Bsw)
{
    const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
    const int groupStride = numPixelsPerChannel*stride;
#else
    const int groupStride = numPixelsPerGroup*stride;
#endif
    // Offset from branch-0 to branch-1 parameters in const_area.
    const int sw_len = sizeIn*ELWISE_FUNC_SEC;
    const uint idxX = blockIdx.x * B_X + threadIdx.x;
    const uint idxY = blockIdx.y * B_Y + threadIdx.y;
    for(int pout = 0; pout < sizeOut; pout++)
    {
        //debug
        // Per-thread accumulators: [0..sizeIn) = branch 0, [sizeIn..2*sizeIn) = branch 1.
        float sum[2*sizeIn];
        float sum_m[2*sizeIn];
        float sum_b[2*sizeIn];
        memset(sum, 0, sizeof(sum));
        memset(sum_m, 0, sizeof(sum_m));
        memset(sum_b, 0, sizeof(sum_b));
        for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
            const int hiID = y/numPixelsPerChannel;
            const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
            for (uint x = idxX; x < numCases; x += gridDim.x * B_X) {
                int offset_act = y * stride + x;
                float InArr[sizeIn];
                float v_sw = 0;
                // Gather inputs and recompute the sign-vote switch.
                for(int pin = 0; pin < sizeIn; pin++)
                {
#ifdef MIX_F
                    int offset_in = hiID*sizeIn*groupStride
                        + pin*groupStride + pixelChannelID*stride + x;
#else
                    int offset_in = offset_act + pin*groupStride;
#endif
                    float val = input[offset_in];
                    InArr[pin] = val;
                    v_sw += (val>0) - (val<0);
                }
                float Sw = Switch(v_sw+ Bsw, Csw);
                //float v_sw = Median3(InArr[0],InArr[1],InArr[2]);
                float grad_next = actGrad[offset_act + pout*groupStride];
                for(int pin = 0; pin < sizeIn; pin++)
                {
                    float in_val = InArr[pin];
                    // Branch 0 hinge pre-activation and its three parameter grads.
                    float val_m_0 = in_val + const_area[pout*sizeIn*ELWISE_FUNC_SEC + 2*sizeIn + pin];
                    sum[pin] += (.5+Sw)*grad_next*in_val;
                    sum_m[pin] += (.5+Sw)*grad_next*(val_m_0 > 0)*in_val;
                    sum_b[pin] += (.5+Sw)*grad_next*const_area[pout*sizeIn*ELWISE_FUNC_SEC + sizeIn + pin]*(val_m_0 > 0);
                    // Branch 1 (weighted by .5-Sw); see NOTE(review) above.
                    float val_m_1 = in_val + const_area[pout*sizeIn*ELWISE_FUNC_SEC + 2*sizeIn + pin + sw_len];
                    sum[pin + sizeIn] += (.5-Sw)*grad_next*in_val;
                    sum_m[pin + sizeIn] += -(.5-Sw)*grad_next*(val_m_1 < 0)*in_val;
                    sum_b[pin + sizeIn] += (.5-Sw)*grad_next*const_area[pout*sizeIn*ELWISE_FUNC_SEC + sizeIn + pin + sw_len]*(val_m_1 > 0);
                }
            }
        }
        // One partial-sum slot per thread; host reduces over tag buffers.
        const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
        for(int pin = 0; pin < sizeIn; pin++)
        {
            int out_par = pout*EL_SWITCH*sizeIn*ELWISE_FUNC_SEC;
            target[out_par + pin][tagOffset] = sum[pin];
            target[out_par + sizeIn + pin][tagOffset] = sum_m[pin];
            target[out_par + 2*sizeIn + pin][tagOffset] = sum_b[pin];
            target[out_par + pin + sw_len][tagOffset] = sum[pin + sizeIn];
            target[out_par + sizeIn + pin + sw_len][tagOffset] = sum_m[pin + sizeIn];
            target[out_par + 2*sizeIn + pin + sw_len][tagOffset] = sum_b[pin + sizeIn];
        }
    }
}
//---------------------------------------
//---------------------------------------
// Gradient w.r.t. the switch bias (B): for each output it recomputes both
// branch activations and writes grad_next * (output - output_1) masked to the
// region |v_sw + Bsw| < Lim into tagB (a window around the switch threshold).
// NOTE(review): tagC is a parameter but never written (its update is commented
// out); v_sw here sums raw values, not signs as in kEltwiseFuncAct, and
// output_1 hinges on +val rather than -val — verify these against the
// forward pass before relying on this kernel.
template <int sizeIn>
__global__ void kEltwiseFuncBCWeightGrad(const float* input, const float* actGrad, float* const tagC, float* const tagB,
                                const uint imgInPixels, const uint numCases,
                                const uint strideInp, const uint strideTag,
                                const int numPixelsPerChannel,
                                const float Csw, const float Lim, const float Bsw,
                                const uint sizeOut) {
    const int numPixelsPerGroup = imgInPixels/sizeIn;
//  dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(out_width, ELTWISE_THREADS_X)),
//              std::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
    const uint idxX = blockIdx.x * blockDim.x + threadIdx.x;
    const uint idxY = blockIdx.y * blockDim.y + threadIdx.y;
#ifdef MIX_F
    const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
    const int sw_len = sizeIn*ELWISE_FUNC_SEC;
// ix, iy == 0 almost always
    for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y*blockDim.y) {
#ifdef MIX_F
        const int hiID = y/numPixelsPerChannel;
#endif
        for (uint x = idxX; x < numCases; x += gridDim.x*blockDim.x) {
            float inpVal[sizeIn];//use shared instead?
            float v_sw =0;
            // Gather inputs; v_sw accumulates the raw values (see NOTE above).
            for (uint inp_i = 0; inp_i < sizeIn; inp_i++) {
#ifdef MIX_F
                int inp_off = hiID*sizeIn*numPixelsPerChannel*strideInp
                    + inp_i*numPixelsPerChannel*strideInp + pixelChannelID*strideInp + x;
#else
                int inp_off = inp_i*numPixelsPerGroup*strideInp + y*strideInp + x;
#endif
                float val = input[inp_off];
                inpVal[inp_i] = val;
                v_sw += val;
            }
            //float v_sw = Median3(inpVal[0],inpVal[1],inpVal[2]);
            for (uint out_i = 0; out_i < sizeOut; out_i++) {
                int out_par = out_i*sizeIn*EL_SWITCH*ELWISE_FUNC_SEC;
                float output = 0;
                float output_1 = 0;
                float gradNext = actGrad[ y * strideInp + x + out_i*numPixelsPerGroup*strideInp];
                // Recompute both branch activations for this output channel.
                for (uint inp_i = 0; inp_i < sizeIn; inp_i++)
                {
                    float val = inpVal[inp_i];
                    {
                        float param = const_area[out_par + inp_i];
                        float paramM = const_area[out_par + sizeIn + inp_i];
                        float paramB = const_area[out_par + 2*sizeIn + inp_i];
                        output += param*val + paramM*fmax(val+paramB, 0);
                    }
                    {
                        float param = const_area[out_par + inp_i+sw_len];
                        float paramM = const_area[out_par + sizeIn + inp_i+sw_len];
                        float paramB = const_area[out_par + 2*sizeIn + inp_i+sw_len];
                        output_1 += param*val + paramM*fmax(val+paramB, 0);
                    }
                }// inp_i
                int tag_off = out_i*numPixelsPerGroup*strideTag + y*strideTag + x;
                float v_b = v_sw + Bsw;//Csw*(v_sw + Bsw);
            //  tagC[tag_off] = gradNext*(output - output_1)*(v_b > -invC && v_b < invC)*v_sw;
                //tagB[tag_off] = gradNext*(output - output_1)*(v_b > -.5 && v_b < .5);
                // Gradient masked to the switch's transition window (-Lim, Lim).
                tagB[tag_off] = gradNext*(output - output_1)*(v_b > -Lim && v_b < Lim);
            }//out_i
        }
    }
}
//----------------------------
//----------------------------
// Diagnostic kernel: per-thread partial sums of |v0| (the input channel
// selected by cnttest) and of |v_i - v0| for every other channel, written to
// target[0..sizeIn)[tagOffset] for host-side reduction. Used to measure how
// similar the grouped inputs are to one reference channel.
template <int B_X, int B_Y, int sizeIn>
__global__ void kEltwiseFuncGroupTest(float* actGrad, float* input, float** target,
                                    const uint imgInPixels, const uint numCases,
                                    const uint stride, const uint strideTag,
                                    const uint numPixelsPerChannel,
                                    const uint cnttest)
{
    const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
    const int groupStride = numPixelsPerChannel*stride;
#else
    const int groupStride = numPixelsPerGroup*stride;
#endif
    const uint idxX = blockIdx.x * B_X + threadIdx.x;
    const uint idxY = blockIdx.y * B_Y + threadIdx.y;
    // One partial-sum slot per thread.
    const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
    // sum[0] = sum|v0|; sum[1..sizeIn) = sum|v_i - v0| over the other channels.
    float sum[sizeIn];
    memset(sum, 0, sizeof(sum));
    for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
        const int hiID = y/numPixelsPerChannel;
        const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
        for (uint x = idxX; x < numCases; x += gridDim.x * B_X) {
#ifdef MIX_F
            int offset_in = hiID*sizeIn*groupStride
                + pixelChannelID*stride + x;
#else
            int offset_in = y * stride + x;
#endif
            // Reference channel value for this pixel/case.
            float v0 = input[offset_in + cnttest*groupStride];
            sum[0] += fabs(v0);
            int ks =0;
            for(int pin = 0; pin < sizeIn; pin++)
            {
                if(pin == cnttest)
                    continue;
                float in_val = input[offset_in + pin*groupStride];
                sum[1+ks] += fabs(in_val -v0);
                ks++;
            }
        }
    }
    for(int pin = 0; pin < sizeIn; pin++)
    {
        target[pin][tagOffset] = sum[pin];
    }
}
// Diagnostic/normalization statistics: per-thread partial sums of each
// channel's squared values (sum2) and of cross-products between distinct
// channel pairs (sum_pair), written to target[...] buffers for host reduction.
// target layout: [0..sizeIn) = sum of squares per channel;
// [sizeIn .. sizeIn + sizeIn*(sizeIn-1)/2) = one slot per unordered pair.
// BUG FIX: the pair loop previously iterated over all ORDERED pairs
// (pin_i != pin_j), incrementing ks sizeIn*(sizeIn-1) times while sum_pair
// only holds sizeIn*(sizeIn-1)/2 entries — an out-of-bounds local-array
// write. The output loop below writes exactly sizeIn*(sizeIn-1)/2 slots, so
// unordered pairs were intended; the inner loop now starts at pin_i + 1.
template <int B_X, int B_Y, int sizeIn>
__global__ void kNormalizeParam(float* input, float** target,
                            const uint imgInPixels, const uint numCases,
                            const uint stride, const uint strideTag,
                            const uint numPixelsPerChannel)
{
    const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
    const int groupStride = numPixelsPerChannel*stride;
#else
    const int groupStride = numPixelsPerGroup*stride;
#endif
    const uint idxX = blockIdx.x * B_X + threadIdx.x;
    const uint idxY = blockIdx.y * B_Y + threadIdx.y;
    // One partial-sum slot per thread.
    const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
    float sum2[sizeIn];
    memset(sum2, 0, sizeof(sum2));
    float sum_pair[(sizeIn-1)*sizeIn/2];
    memset(sum_pair, 0, sizeof(sum_pair));
    for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
        const int hiID = y/numPixelsPerChannel;
        const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
        for (uint x = idxX; x < numCases; x += gridDim.x * B_X) {
#ifdef MIX_F
            int offset_in = hiID*sizeIn*groupStride
                + pixelChannelID*stride + x;
#else
            int offset_in = y * stride + x;
#endif
            float inp[sizeIn];
            for(int pin_i = 0; pin_i < sizeIn; pin_i++)
            {
                inp[pin_i] = input[offset_in + pin_i*groupStride];
                sum2[pin_i] += inp[pin_i]*inp[pin_i];
            }
            // Unordered distinct pairs only: exactly sizeIn*(sizeIn-1)/2 slots.
            int ks = 0;
            for(int pin_i = 0; pin_i < sizeIn; pin_i++)
            {
                for(int pin_j = pin_i + 1; pin_j < sizeIn; pin_j++)
                {
                    sum_pair[ks] += inp[pin_i]*inp[pin_j];
                    ks++;
                }
            }
        }
    }
    for(int pin = 0; pin < sizeIn; pin++)
    {
        target[pin][tagOffset] = sum2[pin];
    }
    for(int ks = 0; ks < sizeIn*(sizeIn-1)/2; ks++)
    {
        target[sizeIn+ks][tagOffset] = sum_pair[ks];
    }
}
// Diagnostic kernel: per-thread partial plain sums of each input channel,
// written to target[sizeIn + pin][tagOffset] (the slots after the absolute
// sums produced by kEltwiseFuncGroupTest) for host-side reduction.
// NOTE(review): actGrad and cnttest are accepted but unused here — presumably
// kept for signature parity with kEltwiseFuncGroupTest; confirm with callers.
template <int B_X, int B_Y, int sizeIn>
__global__ void kEltwiseFuncGroupTestS(float* actGrad, float* input, float** target,
                                    const uint imgInPixels, const uint numCases,
                                    const uint stride, const uint strideTag,
                                    const uint numPixelsPerChannel,
                                    const uint cnttest)
{
    const int numPixelsPerGroup = imgInPixels/sizeIn;
#ifdef MIX_F
    const int groupStride = numPixelsPerChannel*stride;
#else
    const int groupStride = numPixelsPerGroup*stride;
#endif
    const uint idxX = blockIdx.x * B_X + threadIdx.x;
    const uint idxY = blockIdx.y * B_Y + threadIdx.y;
    // One partial-sum slot per thread.
    const int tagOffset = (threadIdx.x + blockIdx.x*blockDim.x) + (threadIdx.y + blockIdx.y*blockDim.y)*strideTag;
    float sum_1[sizeIn];
    memset(sum_1, 0, sizeof(sum_1));
    for (uint y = idxY; y < numPixelsPerGroup; y += gridDim.y * B_Y) {
#ifdef MIX_F
        const int hiID = y/numPixelsPerChannel;
        const int pixelChannelID = idxY%numPixelsPerChannel;
#endif
        for (uint x = idxX; x < numCases; x += gridDim.x * B_X) {
#ifdef MIX_F
            int offset_in = hiID*sizeIn*groupStride
                + pixelChannelID*stride + x;
#else
            int offset_in = y * stride + x;
#endif
            for(int pin = 0; pin < sizeIn; pin++)
            {
                float in_val = input[offset_in + pin*groupStride];
                sum_1[pin] += in_val;
            }
        }
    }
    for(int pin = 0; pin < sizeIn; pin++)
    {
        target[pin+sizeIn][tagOffset] = sum_1[pin];
    }
}
//-------------------------------------------------------------
//MicroConv
//-------------------------------------------------------------
// SMEM: 2-D access into the per-thread shared-memory tile. Relies on the
// surrounding scope providing `sharedY` (tile row stride) and `sOffset`
// (this thread's/channel's region base inside sdata).
#define SMEM(X, Y, sdata) sdata[(X)*sharedY+(Y) + sOffset]
// SHARED_MEM: cooperative load of a (bw+2*LOBE) x (bh+2*LOBE) tile around the
// block's image patch, including the halo of width LOBE. Border pixels are
// clamped (min/max against image bounds). Relies on scope variables
// sx, sy (position in block), bw, bh (block tile size), imgSizeX, imgSizeY,
// and a getVal(x, y, z) accessor. No __syncthreads() inside — callers must
// barrier after the load before reading the tile.
#define SHARED_MEM(x, y, z, LOBE, getVal, sdata) \
    SMEM((LOBE) + sx, (LOBE) + sy, sdata) = getVal(x, y, z);\
    if (sx < (LOBE)) {\
        SMEM(sx, (LOBE) + sy, sdata) = getVal(max(x - (LOBE), 0), y, z);\
        SMEM((LOBE) + bw + sx, (LOBE) + sy, sdata) = getVal(min(x + bw, imgSizeX-1), y, z);\
    }\
    if (sy < (LOBE)) {\
        SMEM((LOBE) + sx, sy, sdata) = getVal(x, max(y - (LOBE), 0), z);\
        SMEM((LOBE) + sx, (LOBE) + bh + sy, sdata) = getVal(x, min(y + bh, imgSizeY-1), z);\
    }\
    if ((sx < (LOBE)) && (sy < (LOBE))) {\
        SMEM(sx, sy, sdata) = getVal(max(x - (LOBE), 0), max(y - (LOBE), 0), z);\
        SMEM(sx, (LOBE) + bh + sy, sdata) = getVal(max(x - (LOBE), 0), min(y + bh, imgSizeY-1), z);\
        SMEM((LOBE) + bw + sx, sy, sdata) = getVal(min(x + bw, imgSizeX-1), max(y - (LOBE), 0), z);\
        SMEM((LOBE) + bw + sx, (LOBE) + bh + sy, sdata) = getVal(min(x + bw, imgSizeX-1), min(y + bh, imgSizeY-1), z);\
    }
// Global-memory accessors used with SHARED_MEM; rely on scope variables
// channelOffset/filterOffset, widthyz (x stride) and widthz (y stride).
#define getValInput(X, Y, Z) input[channelOffset + (X)*widthyz+(Y)*widthz + (Z)]
#define getValAct(X, Y, Z) actGrad[filterOffset + (X)*widthyz+(Y)*widthz + (Z)]
// Box (moving-average) filter forward pass over a (2*LOBE+1)^2 window,
// multiplied by `scale`. Data layout is x > y > z (cases innermost). Each
// block processes a modulesPerBlockX x modulesPerBlockY image patch; the
// patch plus halo is staged in dynamic shared memory per thread (sharedY^2
// floats each) via SHARED_MEM.
// NOTE(review): idx/idy below are computed but unused — border clamping is
// already applied when the tile is loaded; they look like leftovers.
// NOTE(review): no barrier between zind iterations; if casePerThread > 1 the
// next iteration's loads can race with this iteration's reads — verify.
template < int LOBE, int SIZE_CONV>
__global__ void kMAvgAct(const float* input, float* const target,
                        const uint numCases, const uint channels, const uint casePerThread,
                        const uint sharedY, const uint modulesPerBlockX, const uint modulesPerBlockY,
                        const uint imgSizeX, const uint imgSizeY,
                        const uint imgPixels, const float scale)
{
    extern __shared__ float sdata[];
//order x>y>z, *not* y>x
    //const int  bsizeX = imgSizeX/modulesPerBlockX;
    const int  bsizeY = imgSizeY/modulesPerBlockY;
    // Decode this block's patch origin from blockIdx.y.
    const int  startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
    const int  startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
    const int  bw = modulesPerBlockX;
    const int  bh = modulesPerBlockY;
    // (sx, sy): this thread's pixel inside the patch; (ix, iy): in the image.
    const int  sx = threadIdx.y/modulesPerBlockY;
    const int  sy = threadIdx.y - sx*modulesPerBlockY;
    const int  ix = sx+startX;
    const int  iy = sy+startY;
    const int  widthz = numCases;
    const int  widthyz = imgSizeY*numCases;
    //const int sizeConv2 = SIZE_CONV*SIZE_CONV;
    const int sharedY2 = sharedY*sharedY;
//put pragme unroll here
    for(int zind = 0; zind < casePerThread; zind++)
    {
        const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
        // Stage one halo-padded tile per (thread, channel) into shared memory.
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
            const int channelOffset = channelInd*imgPixels*numCases;
            if(z < numCases)
            {
                SHARED_MEM(ix, iy, z, LOBE, getValInput, sdata)
            }
        }
        __syncthreads();
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
            const int channelOffset = channelInd*imgPixels*numCases;
            if(z < numCases)
            {
                // Sum the (2*LOBE+1)^2 neighborhood from the staged tile.
                float sum = 0;
                for(int dsx = - LOBE; dsx < LOBE+1; dsx++)
                for(int dsy = - LOBE; dsy <  LOBE+1; dsy++)
                {
                    int idx = min(max(ix + dsx, 0), imgSizeX-1);
                    int idy = min(max(iy + dsy, 0), imgSizeY-1);
                    float sd = sdata[(sx + dsx + LOBE)*sharedY+(sy + dsy + LOBE) + sOffset];
                    sum += sd;
                }
                target[channelOffset + ix*widthyz + iy*widthz + z] = scale*sum;
            }//if
        }//channel
    }//zind
}
// Backward pass of the box filter: structurally identical to kMAvgAct but
// reads the incoming gradient (actGrad) instead of the input. A box filter is
// symmetric, so the gradient is the same scaled window sum over actGrad.
// NOTE(review): idx/idy are computed but unused (clamping happens at tile
// load); and there is no barrier between zind iterations — same caveats as
// kMAvgAct.
template < int LOBE, int SIZE_CONV>
__global__ void kMAvgGrad(const float* actGrad, float* const target,
                        const uint numCases, const uint channels, const uint casePerThread,
                        const uint sharedY, const uint modulesPerBlockX, const uint modulesPerBlockY,
                        const uint imgSizeX, const uint imgSizeY,
                        const uint imgPixels, const float scale)
{
    extern __shared__ float sdata[];
//order x>y>z, *not* y>x
    //const int  bsizeX = imgSizeX/modulesPerBlockX;
    const int  bsizeY = imgSizeY/modulesPerBlockY;
    // Decode this block's patch origin from blockIdx.y.
    const int  startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
    const int  startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
    const int  bw = modulesPerBlockX;
    const int  bh = modulesPerBlockY;
    const int  sx = threadIdx.y/modulesPerBlockY;
    const int  sy = threadIdx.y - sx*modulesPerBlockY;
    const int  ix = sx+startX;
    const int  iy = sy+startY;
    const int  widthz = numCases;
    const int  widthyz = imgSizeY*numCases;
    //const int sizeConv2 = SIZE_CONV*SIZE_CONV;
    const int sharedY2 = sharedY*sharedY;
//put pragme unroll here
    for(int zind = 0; zind < casePerThread; zind++)
    {
        const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
        // Stage one halo-padded gradient tile per (thread, channel).
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
            const int filterOffset = channelInd*imgPixels*numCases;
            if(z < numCases)
            {
                SHARED_MEM(ix, iy, z, LOBE, getValAct, sdata)
            }
        }
        __syncthreads();
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
            const int channelOffset = channelInd*imgPixels*numCases;
            if(z < numCases)
            {
                // Window sum of gradients, then scale.
                float sum = 0;
                for(int dsx = - LOBE; dsx < LOBE+1; dsx++)
                for(int dsy = - LOBE; dsy <  LOBE+1; dsy++)
                {
                    int idx = min(max(ix + dsx, 0), imgSizeX-1);
                    int idy = min(max(iy + dsy, 0), imgSizeY-1);
                    float sd = sdata[(sx + dsx + LOBE)*sharedY+(sy + dsy + LOBE) + sOffset];
                    sum += sd;
                }
                target[channelOffset + ix*widthyz + iy*widthz + z] = scale*sum;
            }//if
        }//channel
    }//zind
}
// Small-kernel ("micro") convolution forward pass: each channel is convolved
// with numFilters SIZE_CONV x SIZE_CONV kernels stored in __constant__
// const_area, producing channels*numFilters output maps. The image patch plus
// LOBE halo is staged per (thread, channel) in dynamic shared memory.
// NOTE(review): idx/idy are computed but unused (border clamping is applied
// when the tile is loaded); and there is no barrier between zind iterations —
// same caveat as kMAvgAct.
template < int LOBE, int SIZE_CONV>
__global__ void kMicroConvFilterAct(const float* input, float* const target,
                        const uint numCases, const uint channels, const uint numFilters, const uint casePerThread,
                        const uint sharedY, const uint modulesPerBlockX, const uint modulesPerBlockY,
                        const uint imgSizeX, const uint imgSizeY,
                        const uint imgPixels)
{
    extern __shared__ float sdata[];
//order x>y>z, *not* y>x
    //const int  bsizeX = imgSizeX/modulesPerBlockX;
    const int  bsizeY = imgSizeY/modulesPerBlockY;
    // Decode this block's patch origin from blockIdx.y.
    const int  startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
    const int  startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
    const int  bw = modulesPerBlockX;
    const int  bh = modulesPerBlockY;
    const int  sx = threadIdx.y/modulesPerBlockY;
    const int  sy = threadIdx.y - sx*modulesPerBlockY;
    const int  ix = sx+startX;
    const int  iy = sy+startY;
    const int  widthz = numCases;
    const int  widthyz = imgSizeY*numCases;
    const int sizeConv2 = SIZE_CONV*SIZE_CONV;
    const int sharedY2 = sharedY*sharedY;
//put pragme unroll here
    for(int zind = 0; zind < casePerThread; zind++)
    {
        const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
        // Stage one halo-padded input tile per (thread, channel).
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
            const int channelOffset = channelInd*imgPixels*numCases;
            if(z < numCases)
            {
                SHARED_MEM(ix, iy, z, LOBE, getValInput, sdata)
            }
        }
        __syncthreads();
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int sOffset = channelInd*sharedY2*blockDim.x + threadIdx.x*sharedY2;
            const int channelOffset = channelInd*imgPixels*numCases;
            if(z < numCases)
            {
                for(int filterID = 0; filterID < numFilters; filterID++)
                {
                    // Correlate the staged tile with this filter's weights.
                    float sum = 0;
                    for(int dsx = - LOBE; dsx < LOBE+1; dsx++)
                    for(int dsy = - LOBE; dsy <  LOBE+1; dsy++)
                    {
                        int idx = min(max(ix + dsx, 0), imgSizeX-1);
                        int idy = min(max(iy + dsy, 0), imgSizeY-1);
                        float sd = sdata[(sx + dsx + LOBE)*sharedY+(sy + dsy + LOBE) + sOffset];
                        sum += sd*const_area[channelInd*sizeConv2*numFilters + filterID*sizeConv2 + (dsy + LOBE)*SIZE_CONV +(dsx + LOBE)];
                    }
                    target[numFilters*channelOffset + filterID*imgPixels*numCases + ix*widthyz + iy*widthz + z] = sum;
                }//filter
            }//if
        }//channel
    }//zind
}
// Backward pass of the micro convolution: correlates incoming gradients with
// the flipped filter weights (note the -dsx/-dsy indexing into const_area)
// and sums over filters to produce the gradient w.r.t. each input channel.
// BUG FIXES vs previous revision:
//  * The read loop indexed sdata without the sOffset term that the load loop
//    uses, so every thread read thread 0's channel-0/filter-0 tile instead of
//    its own staged data. sOffset is now recomputed and applied on read.
//  * Added the `z < numCases` tail guard that all sibling kernels in this
//    file have, preventing out-of-bounds global reads/writes on the last
//    partial batch of cases.
//  * Added a trailing __syncthreads() so the next zind iteration's loads
//    cannot race with this iteration's reads.
// NOTE(review): const_area here is indexed per-filter only (no channelInd
// term), unlike kMicroConvFilterAct — confirm the intended weight layout.
__global__ void kMicroConvActGrad(const float* actGrad, float* const target,
                        const uint numCases, const uint channels, const uint numFilters, const uint casePerThread,
                        const uint modulesPerBlockX, const uint modulesPerBlockY,
                        const uint sharedY, const uint sizeModule, const uint lobe,
                        const uint imgSizeX, const uint imgSizeY,
                        const uint imgPixels)
{
    extern __shared__ float sdata[];
//order x>y>z, *not* y>x
    //const int  bsizeX = imgSizeX/modulesPerBlockX;
    const int  bsizeY = imgSizeY/modulesPerBlockY;
    // Decode this block's patch origin from blockIdx.y.
    const int  startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
    const int  startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
    const int  bw = modulesPerBlockX;
    const int  bh = modulesPerBlockY;
    const int  sx = threadIdx.y/modulesPerBlockY;
    const int  sy = threadIdx.y - sx*modulesPerBlockY;
    const int  ix = sx+startX;
    const int  iy = sy+startY;
    const int  widthz = numCases;
    const int  widthyz = imgSizeY*numCases;
    const int sizeModule2 = sizeModule*sizeModule;
    const int sharedY2 = sharedY*sharedY;
    for(int zind = 0; zind < casePerThread; zind++)
    {
        const int z = threadIdx.x + blockIdx.x*blockDim.x + zind*blockDim.x*gridDim.x;
        // Stage one halo-padded gradient tile per (thread, channel, filter).
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int channelOffset = channelInd*imgPixels*numCases;
            for(int filterID = 0; filterID < numFilters; filterID++)
            {
                const int sOffset = channelInd*numFilters*sharedY2*blockDim.x + filterID*sharedY2*blockDim.x + threadIdx.x*sharedY2;
                const int filterOffset = numFilters*channelOffset + filterID*imgPixels*numCases;
                if(z < numCases)
                {
                    SHARED_MEM(ix, iy, z, lobe, getValAct, sdata)
                }
            }
        }
        __syncthreads();
        for(int channelInd = 0; channelInd < channels; channelInd++)
        {
            const int channelOffset = channelInd*imgPixels*numCases;
            if(z < numCases)
            {
                float sum = 0;
                for(int filterID = 0; filterID < numFilters; filterID++)
                {
                    // Read from this thread's own staged tile for this filter.
                    const int sOffset = channelInd*numFilters*sharedY2*blockDim.x + filterID*sharedY2*blockDim.x + threadIdx.x*sharedY2;
                    for(int dsx = - lobe; dsx < lobe+1; dsx++)
                    for(int dsy = - lobe; dsy < lobe+1; dsy++)
                        sum += sdata[(sx + dsx + lobe)*sharedY+(sy + dsy + lobe) + sOffset]
                            *const_area[filterID*sizeModule2 + (-dsy + lobe)*sizeModule +(-dsx + lobe)];
                }
                target[channelOffset + ix*widthyz + iy*widthz + z] = sum;
            }
        }
        // Keep the next iteration's loads from overwriting tiles still in use.
        __syncthreads();
    }
}
// Weight gradients for the micro convolution. Shared memory is split into an
// image-tile region (sdataImg, one halo-padded tile per thread) and a result
// region (sdataRes, one conv-sized accumulator block per thread). For each
// channel, each thread accumulates grad*input products over its cases and
// then writes its per-coefficient partials to target[coeff][...] for a
// host-side reduction.
// NOTE(review): idx/idy in the inner loop are computed but unused (clamping is
// done at tile load). Also SHARED_MEM is re-executed per filterID with the
// same image data, and there is no barrier between a filter iteration's reads
// and the next iteration's tile stores — verify there is no race when
// numFilters > 1.
template <int lobe>
__global__ void kMicroConvWeightGrad(const float* actGrad, const float* input, float** const target,
                        const uint target_size, const uint numCases, const uint casePerThread, const uint tagWidth,
                        const uint channels, const uint numFilters,
                        const uint modulesPerBlockX, const uint modulesPerBlockY,
                        const uint imgSizeX, const uint imgSizeY, const uint imgPixels)
{
//order x>y>z, *not* y>x
    extern __shared__ float sdata[];
    //const int imgSize = imgSizeX*imgSizeY;
    const int sharedY = modulesPerBlockY + 2*lobe;
    const int sizeSharedBlock = sharedY*(modulesPerBlockX + 2*lobe);
    // Partition dynamic shared memory: image tiles first, then accumulators.
    float* sdataImg = sdata;
    float* sdataRes = sdata + sizeSharedBlock*blockDim.x;
    //const int  bsizeX = imgSizeX/modulesPerBlockX;
    const int  bsizeY = imgSizeY/modulesPerBlockY;
    const int  startX = (blockIdx.y/bsizeY)*modulesPerBlockX;
    const int  startY = (blockIdx.y%bsizeY)*modulesPerBlockY;
    const int  bw = modulesPerBlockX;
    const int  bh = modulesPerBlockY;
    const int  sx = threadIdx.y/modulesPerBlockY;
    const int  sy = threadIdx.y - sx*modulesPerBlockY;
    const int  ix = sx+startX;
    const int  iy = sy+startY;
    const int zoff = threadIdx.x + blockIdx.x*blockDim.x;
    const int  widthz = numCases;
    const int  widthyz = imgSizeY*numCases;
    const int sharedY2 = sharedY*sharedY;
    const int conv_size = 2*lobe+1;
    const int conv2 = conv_size*conv_size;
    // Per-thread accumulator block: numFilters * conv2 coefficients.
    int resStride = numFilters*conv2;
    int res_off = resStride*(threadIdx.y*blockDim.x + threadIdx.x);
    const int sOffset = threadIdx.x*sharedY2;
    for(int channelInd = 0; channelInd < channels; channelInd++)
    {
        const int channelOffset = channelInd*imgPixels*numCases;
        memset(sdataRes + res_off, 0, resStride*sizeof(float));
        for(int zind = 0; zind < casePerThread; zind++)
        {
            const int z = zoff + zind*blockDim.x*gridDim.x;
            for(int filterID = 0; filterID < numFilters; filterID++)
            {
                SHARED_MEM(ix, iy, z, lobe, getValInput, sdataImg)
                __syncthreads();
                for(int dsx = - lobe; dsx < lobe+1; dsx++)
                for(int dsy = - lobe; dsy < lobe+1; dsy++)
                {
                    int idx = min(max(ix + dsx, 0), imgSizeX-1);
                    int idy = min(max(iy + dsy, 0), imgSizeY-1);
                    const int filterOffset = numFilters*channelOffset + filterID*imgPixels*numCases;
                    // grad at center pixel times shifted input value.
                    float vact = actGrad[filterOffset + ix*widthyz + iy*widthz + z];
                    float vimg = sdataImg[(sx + dsx + lobe)*sharedY+(sy + dsy + lobe) + sOffset];
                    int ind_coeff = filterID*conv2 + (dsy + lobe)*conv_size +(dsx + lobe);
                    sdataRes[res_off + ind_coeff] += vact*vimg;
                }//dsx
            }//filter
        }//z
        // Publish this thread's per-coefficient partial sums.
        for(int isx = 0; isx < conv_size; isx++)
        for(int isy = 0; isy < conv_size; isy++)
        {
            for(int filterID = 0; filterID < numFilters; filterID++)
            {
                int ind_coeff = filterID*conv2 + isy*conv_size + isx;
                int ind_ch = ind_coeff + channelInd*numFilters*conv2;
                target[ind_ch][ix*imgSizeX*tagWidth + tagWidth*iy + zoff] = sdataRes[res_off + ind_coeff];
            }
        }
    }//channel
}
//-------------------------------------------------------------
//VectFunc
//-------------------------------------------------------------
// Competitive-suppression strength for the VectFunc kernels.
// Float literal (1.f, was 1.) so expressions like SCALE_H*(vmax-output) stay
// in single precision instead of being promoted to double in device code.
#define SCALE_H 1.f
// Forward pass of the vector function: per color group, projects the sizeV
// input channels onto sizeH directions (weights in const_area), finds the
// maximum response vmax, and emits each response with competitive suppression
// applied: out = max(out - SCALE_H*(vmax - out), 0). The winning direction is
// unchanged (out == vmax); weaker ones are suppressed or zeroed.
// NOTE(review): the projections are computed twice (once for vmax, once for
// output) rather than cached — presumably to save registers; confirm.
template <int sizeV>
__global__ void kVectFuncAct(const float* input, float* const target,
                                const uint numPixelsPerGroup, const uint numCases,
                                const uint strideInp, const uint strideTag, int numColors, int sizeH) {
// ix, iy == 0 almost always
    // Thread offset baked into bd_off, so the loops below start at 0.
    const int bd_off = (blockDim.y*blockIdx.y + threadIdx.y)*strideInp + blockDim.x*blockIdx.x + threadIdx.x;
    const int pix_stride = numPixelsPerGroup*strideInp;
    const int pix_tag_stride = numPixelsPerGroup*strideTag;
    for (uint iy = 0; iy < numPixelsPerGroup; iy += gridDim.y*blockDim.y)
    {
        for (uint ix = 0; ix < numCases; ix += gridDim.x*blockDim.x)
        {
            int xy_off = iy*strideInp + ix + bd_off;
            for (uint color = 0; color < numColors; color ++) {
                int color_off = color*pix_stride;
                float inpVal[sizeV];//use shared instead?
                // Gather the sizeV inputs for this pixel/case/color.
#pragma unroll
                for (uint inp_i = 0; inp_i < sizeV; inp_i++) {
                    int voff = color_off*sizeV + inp_i*pix_stride + xy_off;
                    float val = input[voff];
                    inpVal[inp_i] = val;
                }
                // First pass: maximum projection over all sizeH directions.
                float vmax= 0;
#pragma unroll
                for (uint out_i = 0; out_i < sizeH; out_i++) {
                    int out_par = out_i*sizeV;
                    float output = 0;
#pragma unroll
                    for (uint inp_i = 0; inp_i < sizeV; inp_i++)
                    {
                        float param = const_area[out_par + inp_i];
                        float val = inpVal[inp_i];
                        output += param*val;
                    }// inp_i
//suppression filter
                    //output = fmaxf(output, 0);
                    vmax = fmaxf(output, vmax);
                }//out_i
                // Second pass: recompute each projection, suppress, and store.
                for (uint out_i = 0; out_i < sizeH; out_i++) {
                    int out_par = out_i*sizeV;
                    float output = 0;
#pragma unroll
                    for (uint inp_i = 0; inp_i < sizeV; inp_i++)
                    {
                        float param = const_area[out_par + inp_i];
                        float val = inpVal[inp_i];
                        output += param*val;
                    }// inp_i
//suppression filter
                    output = fmaxf(output - SCALE_H*(vmax-output), 0);
                    int toffset = color_off*sizeH + out_i*pix_tag_stride + xy_off;
                    target[toffset] = output;
                }//out_i
            }//color
        }
    }
}
// Backward pass of the vector function (see kVectFuncAct). For each surviving
// (non-suppressed) output, the gradient w.r.t. input inp_i is
// grad_next * ((1+SCALE_H)*w[out][inp] - SCALE_H*w[kmax][inp]), where kmax is
// the winning direction; fully suppressed outputs contribute nothing.
// NOTE(review): treats vmax as depending only on w[kmax] (ignores ties and
// the vmax=0 floor) — consistent with the forward pass's piecewise linearity.
template <int sizeV>
__global__ void kVectFuncGrad(const float* actGrad, const float* input, float* const target,
                                const uint numPixelsPerGroup, const uint numCases,
                                const uint strideInp, const uint strideOut,
                                int numColors, int sizeH) {
    const int inStep = strideInp*numPixelsPerGroup;
    const int outStep = strideOut*numPixelsPerGroup;
    // Thread offsets baked into bd_off_*, so the loops below start at 0.
    const int btx = blockDim.x*blockIdx.x + threadIdx.x;
    const int bty = blockDim.y*blockIdx.y + threadIdx.y;
    const int bd_off_in = bty*strideInp + btx;
    const int bd_off_out = bty*strideOut + btx;
//with no N_SUM ix, iy == 0 almost always
    for (uint iy = 0; iy < numPixelsPerGroup; iy += gridDim.y*blockDim.y) {
        for (uint ix = 0; ix < numCases; ix += gridDim.x*blockDim.x) {
            int xy_off_in = iy*strideInp + ix + bd_off_in;
            int xy_off_out = iy*strideOut + ix + bd_off_out;
            for (uint color = 0; color < numColors; color ++) { //optimize away
                int v_off = color*inStep*sizeV + xy_off_in;
                int out_off = color*outStep*sizeH + xy_off_out;
                // First pass: find the winning direction kmax and its value.
                float vmax = 0;
                int kmax = 0;
                for (uint out_i = 0; out_i < sizeH; out_i++)
                {
                    float vsum = 0;
                    for (uint inp_i = 0; inp_i < sizeV; inp_i++) {
                        int inp_offset = v_off + inp_i*inStep;
                        vsum += input[inp_offset]*const_area[out_i*sizeV + inp_i];
                    }
                    if(vsum > vmax)
                    {
                        vmax = vsum;
                        kmax = out_i;
                    }
                }
                // Second pass: accumulate input gradients from surviving outputs.
                float vres[sizeV];
                memset(vres, 0, sizeof(vres));
                for (uint out_i = 0; out_i < sizeH; out_i++)
                {
                    float output = 0;
                    for (uint inp_i = 0; inp_i < sizeV; inp_i++) {
                        int inp_offset = v_off + inp_i*inStep;
                        output += input[inp_offset]*const_area[out_i*sizeV + inp_i];
                    }
                    // Same suppression as the forward pass; gradient only flows
                    // through outputs that remained positive.
                    output = fmaxf(output - SCALE_H*(vmax-output), 0);
                    if(output > 0)
                    {
                        int out_offset = out_i*outStep + out_off;
                        float grad_next = actGrad[out_offset];
                        for (uint inp_i = 0; inp_i < sizeV; inp_i++)
                            vres[inp_i] += grad_next*((1+SCALE_H)*const_area[out_i*sizeV + inp_i] - SCALE_H*const_area[kmax*sizeV + inp_i]);
                    }
                }
                for (uint inp_i = 0; inp_i < sizeV; inp_i++)
                {
                    int in_off = inp_i*inStep + v_off;
                    target[in_off] = vres[inp_i];
                }
            }//color
        }//ix
    }//iy
}
// Weight gradients for the vector function. Dynamic shared memory layout:
// first sizeV floats per thread for cached inputs (in_store), then
// sizeV*sizeH floats per thread for gradient accumulators (resh). For each
// pixel/case/color the kernel finds the winner kmax, and for every surviving
// output accumulates d/dw terms: (1+SCALE_H)*grad*in for the output's own
// weights and -SCALE_H*grad*in routed to the winner's weights. Each thread
// finally writes its sizeV*sizeH partials to target[param][bd_off_tag] for a
// host-side reduction.
// NOTE(review): shared memory here is purely per-thread scratch (offsets
// include threadIdx), so no __syncthreads() is needed — confirm the launcher
// sizes the dynamic allocation as (sizeV + sizeV*sizeH)*blockDim.x*blockDim.y.
template <int sizeV>
__global__ void kVectFuncParamWeightGrad( const float* actGrad, const float* input, float** const target,
                                    const uint numColors,
                                    const uint target_size, const uint numPixelsPerGroup, const uint numCases,
                                    const uint strideInp, const uint strideOut, const uint strideTag, int sizeH)
{
    extern __shared__ float sh_mem[];
    const int xy_off = threadIdx.y*blockDim.x + threadIdx.x;
    const int res_off = xy_off*sizeV*sizeH;
    // Accumulators live after the per-thread input cache region.
    float* resh = sh_mem + sizeV*blockDim.x*blockDim.y + res_off;
    float* in_store = sh_mem;
    memset(resh, 0, sizeV*sizeH*sizeof(float));
    const int btx = blockDim.x*blockIdx.x + threadIdx.x;
    const int bty = blockDim.y*blockIdx.y + threadIdx.y;
    const int bd_off_in = bty*strideInp + btx;
    const int bd_off_out = bty*strideOut + btx;
    const int bd_off_tag = bty*strideTag + btx;
    const int pix_out_stride = numPixelsPerGroup*strideOut;
    const int pix_in_stride = numPixelsPerGroup*strideInp;
    for (uint iy = 0; iy < numPixelsPerGroup; iy += gridDim.y*blockDim.y) {
        for (uint ix = 0; ix < numCases; ix += gridDim.x*blockDim.x) {
            int xy_off_in = iy*strideInp + ix + bd_off_in;
            int xy_off_out = iy*strideOut + ix + bd_off_out;
            for (uint color = 0; color < numColors; color ++) {
                // Cache this pixel's sizeV inputs in shared scratch.
                float* inp_val = in_store + xy_off*sizeV;
                //float inp_val[sizeV];
                for (uint pin = 0; pin < sizeV; pin++)
                {
                    int in_off = color*pix_in_stride*sizeV + pin*pix_in_stride + xy_off_in;
                    inp_val[pin] = input[in_off];
                }
                // Find the winning direction (same rule as the forward pass).
                int kmax= 0;
                float vmax = 0;
                for (uint pout = 0; pout < sizeH; pout++)
                {
                    float vsum = 0;
#pragma unroll
                    for (uint pin = 0; pin < sizeV; pin++)
                    {
                        vsum += inp_val[pin]*const_area[pout*sizeV + pin];
                    }
                    if(vsum > vmax)
                    {
                        vmax = vsum;
                        kmax = pout;
                    };
                }//pout
                // vres_max collects the -SCALE_H terms destined for kmax's weights.
                float vres_max[sizeV];
                memset(vres_max, 0, sizeof(vres_max));
                for (uint pout = 0; pout < sizeH; pout++)
                {
                    float* vres = resh + sizeV*pout;
                    int out_off = color*pix_out_stride*sizeH + pout*pix_out_stride + xy_off_out;
                    float grad_next = actGrad[out_off];
                    float output = 0;
#pragma unroll
                    for (uint pin = 0; pin < sizeV; pin++)
                    {
                        output += inp_val[pin]*const_area[pout*sizeV + pin];
                    }
                    // Gradient flows only through outputs surviving suppression.
                    output = fmaxf(output - SCALE_H*(vmax-output), 0);
                    if(output > 0)
                    {
                        for (uint pin = 0; pin < sizeV; pin++)
                        {
                            vres[pin] += grad_next*(1+SCALE_H)*inp_val[pin];
                            vres_max[pin] += - SCALE_H*grad_next*inp_val[pin];
                        }
                    }//vsum
                }//pout
#pragma unroll
                for (uint pin = 0; pin < sizeV; pin++)
                {
                    resh[kmax*sizeV + pin] += vres_max[pin];
                }
            }//color
        }//ix
    }//iy
    // Publish per-thread partials, one buffer per (pout, pin) parameter.
    for (uint pout = 0; pout < sizeH; pout++)
#pragma unroll
    for (uint pin = 0; pin < sizeV; pin++)
    {
        target[pout*sizeV+pin][bd_off_tag] = resh[pout*sizeV+pin];
    }
}
//*************************************************************************************
//-------------------------------------------------------------
//API EltwiseMax
//-------------------------------------------------------------
// Backpropagates through an element-wise max: launches kEltwiseMaxGrad over all
// elements of actGrad. When `add` is true the gradient is accumulated into an
// existing `target` (which must match actGrad's shape); otherwise `target` is
// resized and overwritten. All operands must be contiguous and equally shaped.
void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) {
    assert(actGrad.isContiguous());
    assert(output.isContiguous());
    assert(input.isContiguous());
    assert(actGrad.isSameDims(input));
    assert(actGrad.isSameDims(output));

    const int numElements = actGrad.getNumElements();
    dim3 grid(DIVUP(numElements, 128));
    dim3 block(128);

    if (add) {
        // Accumulate: target must already have the right shape.
        assert(actGrad.isSameDims(target));
        cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, cudaFuncCachePreferL1);
        kEltwiseMaxGrad<128, true><<<grid, block>>>(actGrad.getDevData(), input.getDevData(),
                                                    output.getDevData(), target.getDevData(), numElements);
    } else {
        // Overwrite: make sure target matches actGrad first.
        target.resize(actGrad);
        cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, cudaFuncCachePreferL1);
        kEltwiseMaxGrad<128, false><<<grid, block>>>(actGrad.getDevData(), input.getDevData(),
                                                     output.getDevData(), target.getDevData(), numElements);
    }
    cutilCheckMsg("computeEltwiseMaxGrad: Kernel execution failed");
}
//-------------------------------------------------------------
//API EltwiseFunc
//-------------------------------------------------------------
// Forward pass of the parameterized element-wise vector function.
// Maps every group of size_in input rows to size_out output rows, so the
// output has (inp_height*size_out)/size_in rows and the same column count.
// The function's weights (`param`) are uploaded to the const_area constant
// buffer; the last two entries are the switch coefficients Csw and Bsw,
// which are also passed to the kernel as plain arguments.
// Dispatches kEltwiseFuncAct<size_in> for size_in in {1,2,3,4} via ELT_ACT.
void computeEltwiseFuncAct(NVMatrix& input, NVMatrix& target, vector<double>& param, int channels, int size_in, int size_out)
{
	assert(size_in <= 4);
//int height = input.getFollowingDim(), width = input.getLeadingDim();
	//int numCases = input.getNumCols();
	//int numIn = input.getNumRows();
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	int out_width = inp_width;
	int out_height = (inp_height*size_out)/size_in;
//printf("computeEltwiseFuncAct inp_height %i inp_width %i tran %i \n",inp_height, inp_width, input.isTrans());
//printf(" size_in %i size_out %i \n", size_in, size_out);
//printf(" out_height %i out_width %i \n",out_height, out_width);
	// Resize the output lazily, only when its shape is stale.
	if (target.getNumCols() != out_width || target.getNumRows() != out_height) {
		target.resize(out_height, out_width);
		//printf("**resize out_height %i out_width %i \n",out_height, out_width);
	}
	// Zero-pad the parameters to CONST_AREA_SIZE and upload to constant memory.
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	memset(temp, 0, sizeof(temp));
	for(int i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	cudaMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);
//el switch
	// Switch coefficients: by convention the last two parameters.
	float Csw = param[param.size()-2];
	float Bsw = param[param.size()-1];
	int numPixelsPerGroup = inp_height/size_in;
	//int numChannelsPerGroup = channels/size_in;
	int numPixelsPerChannel = inp_height/channels;
//	printf(" size_in %i size_out %i inp_width %i numPixelsPerChannel  %i channels %i \n", size_in, size_out, inp_width, numPixelsPerChannel, channels);
//	int numChannelsPerThread = DIVUP(numChannelsPerGroup, ELTWISE_THREADS_Y);
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(out_width, threads.x)),
				std::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
//debug
//printf("kEltwiseFuncAct -------------\n");
//printf("temp %f %f %f %f %f %f \n", temp[0],temp[1],temp[2],temp[3],temp[4],temp[5]);
//input.nan2zero();
//float sum = input.sum();
//printf(" size_in %i size_out %i sum %f \n", size_in, size_out, sum);
//	const int numPixelsPerGroup1 = inp_height/size_in;
//	printf(" numPixelsPerGroup %i numPixelsPerGroup1 %i target.getNumRows %i \n", numPixelsPerGroup, numPixelsPerGroup1, target.getNumRows());
//	//cudaMemset(target.getDevData(), 0, target.getNumElements()*sizeof(float));
//printf(" target.getStride() %i  target.getNumRows() %i target.getNumCols() %i \n", target.getStride(), target.getNumRows(), target.getNumCols());
//kEltwiseFuncAct_t<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, 3><<<blocks, threads>>>(input.getDevData(),
//target.getDevData(), inp_height, inp_width, input.getStride(), target.getStride(), size_in, size_out);
//float sumt0 = target.sum();
//printf("kEltwiseFuncAct_t sumt_0 %f \n", sumt0);
// Dispatch on the compile-time input group size.
#define ELT_ACT(SIZE_ARR) \
	if(size_in == SIZE_ARR){\
	cudaFuncSetCacheConfig(kEltwiseFuncAct<SIZE_ARR>, cudaFuncCachePreferL1);\
	kEltwiseFuncAct<SIZE_ARR><<<blocks, threads>>>(input.getDevData(),\
	target.getDevData(), inp_height, inp_width, input.getStride(), target.getStride(), numPixelsPerChannel,\
	Csw, Bsw, size_in, size_out);};
	ELT_ACT(1)
	ELT_ACT(2)
	ELT_ACT(3)
	ELT_ACT(4)
	//ELT_ACT(6)
	//ELT_ACT(8)
	//ELT_ACT(12)
	//ELT_ACT(16)
#undef ELT_ACT
//float sumt = target.sum();
//	printf("kEltwiseFuncAct sumt %f \n", sumt);
	cutilCheckMsg("computeEltwiseFuncAct: Kernel execution failed");
}
// Backward pass of the parameterized element-wise vector function: computes the
// gradient with respect to the layer input. `target` is resized to the input's
// shape. Parameters go to the const_area constant buffer; the last two entries
// are the switch coefficients Csw/Bsw passed separately.
// Dispatches kEltwiseFuncGrad<size_out> for size_out in {1,2,3,4} via ELT_GRAD.
void computeEltwiseFuncGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& target,
							vector<double>& param, int channels, int size_in, int size_out)
{
	assert(size_out <= 4);
	//int height = input.getFollowingDim(), width = input.getLeadingDim();
	//int numCases = input.getNumCols();
	//int numIn = input.getNumRows();
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	// The input gradient has the same shape as the input.
	if (target.getNumCols() != inp_width || target.getNumRows() != inp_height) {
		target.resize(inp_height, inp_width);
	}
	// Zero-pad the parameters to CONST_AREA_SIZE and upload to constant memory.
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	memset(temp, 0, sizeof(temp));
	for(int i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	cudaMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);
//el switch
	// Switch coefficients: by convention the last two parameters.
	float Csw = param[param.size()-2];
	float Bsw = param[param.size()-1];
	int numPixelsPerGroup = inp_height/size_in;
	//int numChannelsPerGroup = channels/size_in;
	int numPixelsPerChannel = inp_height/channels;
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				std::min(NUM_BLOCKS_MAX, DIVUP(numPixelsPerGroup, ELTWISE_THREADS_Y)));
//printf("computeEltwiseFuncGrad numPixelsPerGroup %i --------------------\n", numPixelsPerGroup);
//float sumA = actGrad.sum();
//float sumI = input.sum();
//printf("sum actGrad %f input %f \n", sumA, sumI);
//printf(" size_in %i size_out %i tag size %i sumt %f \n", size_in, size_out, target.getNumElements());
//printf(" target.getStride() %i actGrad %i input %i \n", target.getStride(), actGrad.getNumRows(), input.getNumRows());
//kEltwiseFuncGrad_t<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, 3><<<blocks, threads>>>(actGrad.getDevData(),
//			input.getDevData(), target.getDevData(), inp_height, inp_width,
//			input.getStride(), actGrad.getStride(), size_in, size_out);
//float sumtt = target.sum();
//printf("sum_test_tag %f \n", sumtt);
// Dispatch on the compile-time output group size.
#define ELT_GRAD(SIZE_ARR) \
	if(size_out == SIZE_ARR){\
	cudaFuncSetCacheConfig(kEltwiseFuncGrad<SIZE_ARR>, cudaFuncCachePreferL1);\
	kEltwiseFuncGrad<SIZE_ARR><<<blocks, threads>>>(actGrad.getDevData(),\
			input.getDevData(), target.getDevData(), inp_height, inp_width,\
			input.getStride(), actGrad.getStride(), numPixelsPerChannel,\
			Csw, Bsw, size_in, size_out);};
	ELT_GRAD(1)
	ELT_GRAD(2)
	ELT_GRAD(3)
	ELT_GRAD(4)
	//ELT_GRAD(6)
	//ELT_GRAD(8)
	//ELT_GRAD(12)
	//ELT_GRAD(16)
#undef ELT_GRAD
//	float sumt = target.sum();
//	printf("FuncGrad sum_tag %f \n", sumt);
	cutilCheckMsg("computeEltwiseFuncGrad: Kernel execution failed");
};
// Computes per-parameter (weight) gradients of the element-wise vector
// function. Each weight gets its own partial-sum matrix in `tempMatrix`
// (resized/zeroed here to one cell per launched thread); their device
// pointers are copied into the device buffer `arrayPtr`, which the kernel
// indexes as float**. The host is expected to reduce each matrix afterwards.
// Dispatches kEltwiseFuncParamWeightGrad<size_out, size_in> over all
// combinations in {1..4}x{2..4} via ELT_W_GRAD.
// NOTE(review): `lim`, `tempC` and `tempB` are only used by the commented-out
// B/C-gradient path at the bottom — currently inert.
void computeEltwiseFuncParamWeightGrad(NVMatrix& actGrad, NVMatrix& input,
									   void* arrayPtr, vector<NVMatrix>& tempMatrix,
									   NVMatrix& tempC, NVMatrix& tempB,
									   vector<double>& param, float lim,
									   int channels, int size_in, int size_out)
{
	assert(size_out <= 4 && size_in <= 4);// || size_out == 12 || size_out == 16);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	assert(input.getStride() == actGrad.getStride());
	// Zero-pad the parameters to CONST_AREA_SIZE and upload to constant memory.
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	memset(temp, 0, sizeof(temp));
	for(int i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	cudaMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);
//el switch
	// Switch coefficients: by convention the last two parameters.
	float Csw = param[param.size()-2];
	float Bsw = param[param.size()-1];
	int numPixelsPerGroup = inp_height/size_in;
	//int numChannelsPerGroup = channels/size_in;
	int numPixelsPerChannel = inp_height/channels;
//	printf("inp_height %i numPixelsPerGroup %i %i\n", inp_height, numPixelsPerGroup, actGrad.getNumRows()/size_out);
#define N_SUM 1
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				std::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
#undef N_SUM
	// One partial-sum cell per launched thread.
	int tag_width = blocks.x*threads.x;
	int tag_height = blocks.y*threads.y;
	int tag_size = tag_width*tag_height;
	float* tempMatrixPtr[CONST_AREA_SIZE];
	for(int i =0; i < tempMatrix.size(); i++)
	{
		if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
			tempMatrix[i].resize(tag_height, tag_width);
			cudaMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
		}
		tempMatrixPtr[i] = tempMatrix[i].getDevData();
	}
	// Publish the per-weight output pointers to the device-side pointer array.
	cudaMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), cudaMemcpyHostToDevice);
// Dispatch on both compile-time group sizes.
#define ELT_W_GRAD(SIZE_ARR_OUT, SIZE_ARR_IN) \
	if(size_out == SIZE_ARR_OUT && size_in == SIZE_ARR_IN){\
	kEltwiseFuncParamWeightGrad<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, SIZE_ARR_OUT, SIZE_ARR_IN><<<blocks, threads>>>(actGrad.getDevData(),\
	input.getDevData(), (float**)arrayPtr,\
	inp_height, inp_width,\
	input.getStride(), tempMatrix[0].getStride(), numPixelsPerChannel, Csw, Bsw);};
	ELT_W_GRAD(1,2)
	ELT_W_GRAD(2,2)
	ELT_W_GRAD(3,2)
	ELT_W_GRAD(4,2)
	ELT_W_GRAD(1,3)
	ELT_W_GRAD(2,3)
	ELT_W_GRAD(3,3)
	ELT_W_GRAD(4,3)
	ELT_W_GRAD(1,4)
	ELT_W_GRAD(2,4)
	ELT_W_GRAD(3,4)
	ELT_W_GRAD(4,4)
#undef ELT_W_GRAD
//	printf("size_in %i size_out %i sum  %f act %f inp %f \n", size_in, size_out, tempMatrix[0].sum(), actGrad.sum(), input.sum());
//	int tagc_width = inp_width;
//	int tagc_height = inp_height*size_out/size_in;
//
//	//if (tempC.getNumCols() != tagc_width || tempC.getNumRows() != tagc_height) {
//	//	tempC.resize(tagc_height, tagc_width);
//	//}
//
//	if (tempB.getNumCols() != tagc_width || tempB.getNumRows() != tagc_height) {
//		tempB.resize(tagc_height, tagc_width);
//	}
////
//#define ELT_W_BCGRAD(SIZE_ARR_IN) \
//	if(size_in == SIZE_ARR_IN){\
//	kEltwiseFuncBCWeightGrad<SIZE_ARR_IN><<<blocks, threads>>>(input.getDevData(), actGrad.getDevData(), tempC.getDevData(), tempB.getDevData(),\
//	inp_height, inp_width,\
//	input.getStride(), tempB.getStride(),\
//	numPixelsPerChannel,\
//	param[param.size()-2], lim, param[param.size()-1],\
//	size_out);};
//	ELT_W_BCGRAD(1)
//	ELT_W_BCGRAD(2)
//	ELT_W_BCGRAD(3)
//	ELT_W_BCGRAD(4)
//#undef ELT_W_BCGRAD
//	float sum = tempC.sum();
//	printf("kEltwiseFuncBCWeightGrad sum %f \n", sum);
}
// Diagnostic/test entry point for the grouped element-wise function: launches
// kEltwiseFuncGroupTest with the uploaded parameters and writes per-thread
// results into the `tempMatrix` buffers (device pointers published via
// `arrayPtr`, indexed as float** by the kernel). `cnttest` selects which test
// quantity the kernel computes — semantics live in the kernel, defined elsewhere.
// Dispatches on size_in in {1,2,3,4} via ELT_T_GRAD.
void testGroupsEltwiseFunc(NVMatrix& actGrad, NVMatrix& input,
						   void* arrayPtr, vector<NVMatrix>& tempMatrix,
						   vector<double>& param,
						   int size_in, int size_out, int channels, int cnttest)
{
	assert(size_in <= 4);// || size_out == 12 || size_out == 16);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	assert(input.getStride() == actGrad.getStride());
	// Zero-pad the parameters to CONST_AREA_SIZE and upload to constant memory.
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	memset(temp, 0, sizeof(temp));
	for(int i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	cudaMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);
	int numPixelsPerGroup = inp_height/size_in;
//	printf("inp_height %i numPixelsPerGroup %i \n", inp_height, numPixelsPerGroup);
#define N_SUM 1
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				std::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
#undef N_SUM
	// One output cell per launched thread.
	int tag_width = blocks.x*threads.x;
	int tag_height = blocks.y*threads.y;
	int tag_size = tag_width*tag_height;
	int numPixelsPerChannel = inp_height/channels;
	float* tempMatrixPtr[CONST_AREA_SIZE];
	for(int i =0; i < tempMatrix.size(); i++)
	{
		if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
			tempMatrix[i].resize(tag_height, tag_width);
			cudaMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
		}
		tempMatrixPtr[i] = tempMatrix[i].getDevData();
	}
	// Publish the per-matrix output pointers to the device-side pointer array.
	cudaMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), cudaMemcpyHostToDevice);
// Dispatch on the compile-time input group size.
#define ELT_T_GRAD(SIZE_ARR) \
	if(size_in == SIZE_ARR){\
	kEltwiseFuncGroupTest<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, SIZE_ARR><<<blocks, threads>>>(actGrad.getDevData(),\
	input.getDevData(), (float**)arrayPtr,\
	inp_height, inp_width,\
	input.getStride(), tempMatrix[0].getStride(),\
	numPixelsPerChannel, cnttest);};
	ELT_T_GRAD(1)
	ELT_T_GRAD(2)
	ELT_T_GRAD(3)
	ELT_T_GRAD(4)
	//ELT_T_GRAD(5)
	//ELT_T_GRAD(6)
	//ELT_T_GRAD(7)
	//ELT_T_GRAD(8)
#undef ELT_T_GRAD
//kEltwiseFuncGroupTestS<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, 3><<<blocks, threads>>>(actGrad.getDevData(),\
//input.getDevData(), (float**)arrayPtr,\
//inp_height, inp_width,\
//input.getStride(), tempMatrix[0].getStride(),\
//numPixelsPerChannel, cnttest);
}
// Normalizes the grouped parameters of the element-wise vector function.
// Per-thread results are written into the `tempMatrix` buffers (resized and
// zeroed here to one cell per launched thread); their device pointers are
// published to the device buffer `arrayPtr`, which kNormalizeParam indexes as
// float**. Dispatches kNormalizeParam<size_in> for size_in in {2,3,4} —
// size_in == 1 intentionally launches nothing (a single-element group needs
// no normalization). `size_out` is currently unused here.
void normalizeGroups(NVMatrix& input,
					 void* arrayPtr, vector<NVMatrix>& tempMatrix,
					 int size_in, int size_out, int channels)
{
	assert(size_in <= 4);// || size_out == 12 || size_out == 16);
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	int numPixelsPerGroup = inp_height/size_in;
//	printf("inp_height %i numPixelsPerGroup %i \n", inp_height, numPixelsPerGroup);
#define N_SUM 1
	dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
	dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
				std::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
#undef N_SUM
	// One output cell per launched thread.
	int tag_width = blocks.x*threads.x;
	int tag_height = blocks.y*threads.y;
	int tag_size = tag_width*tag_height;
	int numPixelsPerChannel = inp_height/channels;
	float* tempMatrixPtr[CONST_AREA_SIZE];
	for(int i =0; i < tempMatrix.size(); i++)
	{
		if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
			tempMatrix[i].resize(tag_height, tag_width);
			cudaMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
		}
		tempMatrixPtr[i] = tempMatrix[i].getDevData();
	}
	// Publish the per-matrix output pointers to the device-side pointer array.
	cudaMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), cudaMemcpyHostToDevice);
// Dispatch on the compile-time group size.
#define NORM_GROUP(SIZE_ARR) \
	if(size_in == SIZE_ARR){\
	kNormalizeParam<ELTWISE_THREADS_X, ELTWISE_THREADS_Y, SIZE_ARR><<<blocks, threads>>>(\
	input.getDevData(), (float**)arrayPtr,\
	inp_height, inp_width,\
	input.getStride(), tempMatrix[0].getStride(),\
	numPixelsPerChannel);};
	NORM_GROUP(2)
	NORM_GROUP(3)
	NORM_GROUP(4)
// Fix: was "#undef ELT_T_GRAD", which left NORM_GROUP defined past this function.
#undef NORM_GROUP
	cutilCheckMsg("normalizeGroups: Kernel execution failed");
}
//void computeEltwiseFuncParamGradSingle(NVMatrix& actGrad, NVMatrix& input,
// NVMatrix& target, NVMatrix& target_m,
// int pin, int pout, int size_in, int size_out)
//{
//
// int inp_width = input.getNumCols();
// int inp_height = input.getNumRows();
//
//
// int numPixelsPerGroup = inp_height/size_in;
//// printf("inp_height %i numPixelsPerGroup %i \n", inp_height, numPixelsPerGroup);
//#define N_SUM 1
// dim3 threads(min(ELTWISE_THREADS_X, inp_width), ELTWISE_THREADS_Y);
// dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inp_width, threads.x)),
// std::min(NUM_BLOCKS_MAX, (int)DIVUP(numPixelsPerGroup/N_SUM, ELTWISE_THREADS_Y)));
//#undef N_SUM
//
// int sizeX = blocks.x*threads.x;
// int sizeY = blocks.y*threads.y;
//
// if (target.getNumCols() != sizeX || target.getNumRows() != sizeY) {
// //printf(" tresize %i %i \n", sizeX, sizeY);
// target.resize(sizeY, sizeX);// numRows, numCols !
// }
//
//
// if (!target_m.isSameDims(target)) {
// target_m.resize(target);
// }
//
//
//
// cudaFuncSetCacheConfig(kEltwiseFuncParamGradSingle, cudaFuncCachePreferL1);
//
//
// kEltwiseFuncParamGradSingle<<<blocks, threads>>>(actGrad.getDevData(),
// input.getDevData(), target.getDevData(), target_m.getDevData(),
// pin, pout, inp_height, inp_width,
// input.getStride(), actGrad.getStride(), target.getStride(),
// size_in, size_out);
//
//
// cutilCheckMsg("kEltwiseFuncParamGrad: Kernel execution failed");
//};
//-------------------------------------------------------------
//API MAvg
//-------------------------------------------------------------
#include "conv_debug.h"
#define SIZE_CONV 3
// Forward pass of the moving-average (box-filter) layer: launches kMAvgAct,
// which averages each pixel over a sizeModuleSide x sizeModuleSide window
// (scale = 1/side^2). Output has the same shape as the input; each column is
// one case. Hard-wired to SIZE_CONV == 3 via the template instantiation below.
// Shared memory holds a (lobe-padded) image tile per channel per case-thread.
void computeMAvgAct(NVMatrix& input, NVMatrix& target, int sizeModuleSide, int channels,
					int imgSize, int imgPixels)
{
	int out_width = input.getNumCols();
	int out_height = input.getNumRows();
	// Resize the output lazily, only when its shape is stale.
	if (target.getNumCols() != out_width || target.getNumRows() != out_height) {
		target.resize(out_height, out_width);
		//printf("**resize out_height %i out_width %i \n",out_height, out_width);
	}
	int numCases = out_width;
	int imgSizeX = imgSize;
	int imgSizeY = imgSize;
	// 8x8 threads cover one image tile; each thread also loops over casePerThread cases.
	int img_threads_x = 8;
	int img_threads_y = 8;
	int casePerThread = 16;
	int nblocksx = 2;//~number of blocks x
	int case_threads = DIVUP(numCases, nblocksx*casePerThread);
	int imgBlocksY = DIVUP(imgSizeY,img_threads_x);
	int imgBlocksX = DIVUP(imgSizeX,img_threads_y);
	// lobe = filter half-width; the shared tile is padded by lobe on each side.
	int lobe = sizeModuleSide/2;
	float scale = 1./(sizeModuleSide*sizeModuleSide);
	int sharedX = lobe*2 + img_threads_x;
	int sharedY = lobe*2 + img_threads_y;
	int shared_size = sharedX*sharedY*channels*case_threads*sizeof(float);
	dim3 threads(case_threads, img_threads_x*img_threads_y);
	dim3 blocks = dim3(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);
	//printf("blocks.x %i blocks.y %i threads.x %i threads.y %i shared_size %i casePerThread %i\n",
	//	blocks.x, blocks.y, threads.x, threads.y, shared_size, casePerThread);
	//printf("sharedY %i img_threads_x %i img_threads_y %i sizeModuleSide %i imgSizeX %i imgSizeY %i imgPixels %i numFilters %i numCases %i lobe %i\n",
	//	sharedY,img_threads_x,img_threads_y,sizeModuleSide,imgSizeX,imgSizeY, imgPixels,numFilters,numCases,lobe);
	assert(SIZE_CONV == 3);
	//singletonTempMem.allocFloatElement(input.getNumCols()*input.getNumRows());
	//singletonTempMem.allocFloatElement(out_height*out_width);
	//float* tempHostInput = singletonTempMem.getPtr(0);
	//float* tempHostTarget = singletonTempMem.getPtr(1);
	//int deltan = singletonTempMem._start[1]-singletonTempMem._start[0];
	//printf(" size inp %i singletonTempMem._size %i deltan %i \n",
	//	input.getNumCols()*input.getNumRows(),singletonTempMem._size, deltan);
	//cutilSafeCallNoSync( cudaMemcpy(tempHostInput, input.getDevData(), input.getNumCols()*input.getNumRows()*sizeof(float), cudaMemcpyDeviceToHost) );
	//double sum_host =0;
	//debugMicroConvFilterAct((SIZE_CONV-1)/2, SIZE_CONV, temp, tempHostInput, tempHostTarget,
	//						numCases, channels, numFilters,
	//						sharedY, img_threads_x, img_threads_y,
	//						imgSizeX, imgSizeY,
	//						imgPixels);
	// sum_host = Sum(tempHostTarget, out_height*out_width);
	//printf(" debugMicroConvFilterAct sum %f \n", sum_host);
	//emuMicroConvFilterAct(threads.x, threads.y, blocks.x, blocks.y,
	//					(SIZE_CONV-1)/2, SIZE_CONV,
	//					temp, tempHostInput, tempHostTarget,
	//					numCases, channels, numFilters, casePerThread,
	//					sharedY, img_threads_x, img_threads_y,
	//					imgSizeX, imgSizeY,
	//					imgPixels);
	//sum_host = Sum(tempHostTarget, out_height*out_width);
	//printf(" emuMicroConvFilterAct sum %f \n", sum_host);
	//singletonTempMem.reset();
	kMAvgAct<(SIZE_CONV-1)/2, SIZE_CONV><<<blocks, threads, shared_size>>>(input.getDevData(), target.getDevData(),
						numCases, channels, casePerThread,
						sharedY, img_threads_x, img_threads_y,
						imgSizeX, imgSizeY,
						imgPixels, scale);
	//debug
	//printf("kMicroConvAct4Channel end \n");
	//float sum = target.sum();
	//printf(" kMicroConvAct4Channel sum %f \n", sum);
	cutilCheckMsg("computeMAvgAct: Kernel execution failed");
};
// Backward pass of the moving-average (box-filter) layer: launches kMAvgGrad,
// spreading each output gradient uniformly over its sizeModuleSide x
// sizeModuleSide window (scale = 1/side^2). `target` is resized to actGrad's
// shape. Hard-wired to SIZE_CONV == 3 via the template instantiation below.
// Shared memory holds a (lobe-padded) gradient tile per channel per case-thread.
void computeMAvgGrad(NVMatrix& actGrad, NVMatrix& target, int sizeModuleSide, int channels,
					int imgSize, int imgPixels)
{
	int out_width = actGrad.getNumCols();
	int out_height = actGrad.getNumRows();
	// Resize the output lazily, only when its shape is stale.
	if (target.getNumCols() != out_width || target.getNumRows() != out_height) {
		target.resize(out_height, out_width);
		//printf("**resize out_height %i out_width %i \n",out_height, out_width);
	}
	int numCases = out_width;
	int imgSizeX = imgSize;
	int imgSizeY = imgSize;
	// 8x8 threads cover one image tile; each thread also loops over casePerThread cases.
	int img_threads_x = 8;
	int img_threads_y = 8;
	int casePerThread = 16;
	int nblocksx = 2;//~number of blocks x
	int case_threads = DIVUP(numCases, nblocksx*casePerThread);
	int imgBlocksY = DIVUP(imgSizeY,img_threads_x);
	int imgBlocksX = DIVUP(imgSizeX,img_threads_y);
	// lobe = filter half-width; the shared tile is padded by lobe on each side.
	int lobe = sizeModuleSide/2;
	// Fix: use a float literal — "1./" forced a double-precision division.
	float scale = 1.0f/(sizeModuleSide*sizeModuleSide);
	int sharedX = lobe*2 + img_threads_x;
	int sharedY = lobe*2 + img_threads_y;
	int shared_size = sharedX*sharedY*channels*case_threads*sizeof(float);
	dim3 threads(case_threads, img_threads_x*img_threads_y);
	dim3 blocks = dim3(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);
	//printf("blocks.x %i blocks.y %i threads.x %i threads.y %i shared_size %i casePerThread %i\n",
	//	blocks.x, blocks.y, threads.x, threads.y, shared_size, casePerThread);
	//printf("sharedY %i img_threads_x %i img_threads_y %i sizeModuleSide %i imgSizeX %i imgSizeY %i imgPixels %i numFilters %i numCases %i lobe %i\n",
	//	sharedY,img_threads_x,img_threads_y,sizeModuleSide,imgSizeX,imgSizeY, imgPixels,numFilters,numCases,lobe);
	assert(SIZE_CONV == 3);
	kMAvgGrad<(SIZE_CONV-1)/2, SIZE_CONV><<<blocks, threads, shared_size>>>(actGrad.getDevData(), target.getDevData(),
						numCases, channels, casePerThread,
						sharedY, img_threads_x, img_threads_y,
						imgSizeX, imgSizeY,
						imgPixels, scale);
	cutilCheckMsg("computeMAvgGrad: Kernel execution failed");
}
//-------------------------------------------------------------
//API MicroConv
//-------------------------------------------------------------
// Forward pass of the micro-convolution layer: launches kMicroConvFilterAct,
// applying numFilters small (sizeModuleSide x sizeModuleSide) filters whose
// coefficients come from `param` via the const_area constant buffer. The
// output has numFilters times as many rows as the input (one filtered copy per
// filter). Hard-wired to SIZE_CONV == 3 via the template instantiation below.
// Shared memory holds a (lobe-padded) image tile per channel per case-thread.
void computeMicroConvAct(NVMatrix& input, NVMatrix& target, vector<double>& param, int sizeModuleSide, int channels,
					 int imgSize, int imgPixels, int numFilters)
{
	int out_width = input.getNumCols();
	int out_height = input.getNumRows()*numFilters;
	// Resize the output lazily, only when its shape is stale.
	if (target.getNumCols() != out_width || target.getNumRows() != out_height) {
		target.resize(out_height, out_width);
		//printf("**resize out_height %i out_width %i \n",out_height, out_width);
	}
	int numCases = out_width;
	int imgSizeX = imgSize;
	int imgSizeY = imgSize;
	// 8x8 threads cover one image tile; each thread also loops over casePerThread cases.
	int img_threads_x = 8;
	int img_threads_y = 8;
	int casePerThread = 16;
	int nblocksx = 2;//~number of blocks x
	int case_threads = DIVUP(numCases, nblocksx*casePerThread);
	int imgBlocksY = DIVUP(imgSizeY,img_threads_x);
	int imgBlocksX = DIVUP(imgSizeX,img_threads_y);
	// lobe = filter half-width; the shared tile is padded by lobe on each side.
	int lobe = sizeModuleSide/2;
	int sharedX = lobe*2 + img_threads_x;
	int sharedY = lobe*2 + img_threads_y;
	int shared_size = sharedX*sharedY*channels*case_threads*sizeof(float);
	dim3 threads(case_threads, img_threads_x*img_threads_y);
	dim3 blocks = dim3(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);
	// Zero-pad the filter coefficients to CONST_AREA_SIZE and upload to constant memory.
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	memset(temp, 0, sizeof(temp));
	for(int i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	cudaMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);
	//printf("blocks.x %i blocks.y %i threads.x %i threads.y %i shared_size %i casePerThread %i\n",
	//	blocks.x, blocks.y, threads.x, threads.y, shared_size, casePerThread);
	//printf("sharedY %i img_threads_x %i img_threads_y %i sizeModuleSide %i imgSizeX %i imgSizeY %i imgPixels %i numFilters %i numCases %i lobe %i\n",
	//	sharedY,img_threads_x,img_threads_y,sizeModuleSide,imgSizeX,imgSizeY, imgPixels,numFilters,numCases,lobe);
	assert(SIZE_CONV == 3);
	//singletonTempMem.allocFloatElement(input.getNumCols()*input.getNumRows());
	//singletonTempMem.allocFloatElement(out_height*out_width);
	//float* tempHostInput = singletonTempMem.getPtr(0);
	//float* tempHostTarget = singletonTempMem.getPtr(1);
	//int deltan = singletonTempMem._start[1]-singletonTempMem._start[0];
	//printf(" size inp %i singletonTempMem._size %i deltan %i \n",
	//	input.getNumCols()*input.getNumRows(),singletonTempMem._size, deltan);
	//cutilSafeCallNoSync( cudaMemcpy(tempHostInput, input.getDevData(), input.getNumCols()*input.getNumRows()*sizeof(float), cudaMemcpyDeviceToHost) );
	//double sum_host =0;
	//debugMicroConvFilterAct((SIZE_CONV-1)/2, SIZE_CONV, temp, tempHostInput, tempHostTarget,
	//						numCases, channels, numFilters,
	//						sharedY, img_threads_x, img_threads_y,
	//						imgSizeX, imgSizeY,
	//						imgPixels);
	// sum_host = Sum(tempHostTarget, out_height*out_width);
	//printf(" debugMicroConvFilterAct sum %f \n", sum_host);
	//emuMicroConvFilterAct(threads.x, threads.y, blocks.x, blocks.y,
	//					(SIZE_CONV-1)/2, SIZE_CONV,
	//					temp, tempHostInput, tempHostTarget,
	//					numCases, channels, numFilters, casePerThread,
	//					sharedY, img_threads_x, img_threads_y,
	//					imgSizeX, imgSizeY,
	//					imgPixels);
	//sum_host = Sum(tempHostTarget, out_height*out_width);
	//printf(" emuMicroConvFilterAct sum %f \n", sum_host);
	//singletonTempMem.reset();
	kMicroConvFilterAct<(SIZE_CONV-1)/2, SIZE_CONV><<<blocks, threads, shared_size>>>(input.getDevData(), target.getDevData(),
						numCases, channels, numFilters, casePerThread,
						sharedY, img_threads_x, img_threads_y,
						imgSizeX, imgSizeY,
						imgPixels);
	//debug
	//printf("kMicroConvAct4Channel end \n");
	//float sum = target.sum();
	//printf(" kMicroConvAct4Channel sum %f \n", sum);
	cutilCheckMsg("computeMicroConvAct: Kernel execution failed");
};
// Backward pass of the micro-convolution layer: launches kMicroConvActGrad to
// compute the gradient with respect to the layer input. `target` is resized to
// the input's shape. Filter coefficients are uploaded to the const_area
// constant buffer. Unlike the forward pass, shared memory here is scaled by
// numFilters as well (one padded gradient tile per filter per channel).
void computeMicroConvActGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& target,
						  vector<double>& param, int sizeModuleSide, int channels,
						  int imgSize, int imgPixels, int numFilters)
{
	int inp_width = input.getNumCols();
	int inp_height = input.getNumRows();
	// The input gradient has the same shape as the input.
	if (target.getNumCols() != inp_width || target.getNumRows() != inp_height) {
		target.resize(inp_height, inp_width);
	}
	int numCases = inp_width;
	int imgSizeX = imgSize;
	int imgSizeY = imgSize;
	// 8x8 threads cover one image tile; each thread also loops over casePerThread cases.
	int img_threads_x = 8;
	int img_threads_y = 8;
	int casePerThread = 16;
	int nblocksx = 2;//~number of blocks x
	int case_threads = DIVUP(numCases, nblocksx*casePerThread);
	// lobe = filter half-width; the shared tile is padded by lobe on each side.
	int lobe = sizeModuleSide/2;
	int sharedX = lobe*2 + img_threads_x;
	int sharedY = lobe*2 + img_threads_y;
	int shared_size = sharedX*sharedY*numFilters*channels*case_threads*sizeof(float);
	int imgBlocksY = DIVUP(imgSizeY,img_threads_x);
	int imgBlocksX = DIVUP(imgSizeX,img_threads_y);
	dim3 threads(case_threads, img_threads_x*img_threads_y);
	dim3 blocks = dim3(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);
	// Zero-pad the filter coefficients to CONST_AREA_SIZE and upload to constant memory.
	float temp[CONST_AREA_SIZE];
	assert(param.size() <= CONST_AREA_SIZE);
	memset(temp, 0, sizeof(temp));
	for(int i = 0; i < param.size(); i++)
		temp[i] = (float)param[i];
	cudaMemcpyToSymbol(const_area, temp, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);
	// Fix: leftover debug printfs were left active here (siblings keep them
	// commented out); disabled to stop per-call stdout spam.
	//printf("blocks.x %i blocks.y %i threads.x %i threads.y %i shared_size %i casePerThread %i\n",
	//	blocks.x, blocks.y, threads.x, threads.y, shared_size, casePerThread);
	//printf("sharedY %i img_threads_x %i img_threads_y %i sizeModuleSide %i imgSizeX %i imgSizeY %i imgPixels %i numFilters %i numCases %i lobe %i\n",
	//	sharedY,img_threads_x,img_threads_y,sizeModuleSide,imgSizeX,imgSizeY, imgPixels,numFilters,numCases,lobe);
	//singletonTempMem.allocFloatElement(actGrad.getNumCols()*actGrad.getNumRows());
	//singletonTempMem.allocFloatElement(target.getNumCols()*target.getNumRows());
	//float* tempHostInput = singletonTempMem.getPtr(0);
	//float* tempHostTarget = singletonTempMem.getPtr(1);
	//cutilSafeCallNoSync( cudaMemcpy(tempHostInput, actGrad.getDevData(), actGrad.getNumCols()*actGrad.getNumRows()*sizeof(float),
	//	cudaMemcpyDeviceToHost) );
	//double sum_host =0;
	//debugMicroConvActGrad((SIZE_CONV-1)/2, SIZE_CONV, temp, tempHostInput, tempHostTarget,
	//						numCases, channels, numFilters, casePerThread,
	//						img_threads_x, img_threads_y,
	//						sharedY, sizeModuleSide, lobe,
	//						imgSizeX, imgSizeY,
	//						imgPixels);
	//sum_host = Sum(tempHostTarget, target.getNumCols()*target.getNumRows());
	//printf(" debugMicroConvFilterAct sum %f \n", sum_host);
	//singletonTempMem.reset();
	kMicroConvActGrad<<<blocks, threads, shared_size>>>(actGrad.getDevData(), target.getDevData(),
						numCases, channels, numFilters, casePerThread,
						img_threads_x, img_threads_y,
						sharedY, sizeModuleSide, lobe,
						imgSizeX, imgSizeY,
						imgPixels);
//	double sum = target.sum();
//	printf(" kMicroConvGrad sum %f \n", sum);
//	printf("kMicroConvGrad end \n");
	cutilCheckMsg("kMicroConvGrad: Kernel execution failed");
}
void computeMicroConvWeightGrad(NVMatrix& actGrad, NVMatrix& input,
vector<NVMatrix>& tempMatrix, void* arrayPtr,
vector<double>& param, int sizeModuleSide, int channels,
int imgSize, int imgPixels, int numFilters)
{
int numCases = input.getNumCols();
int imgSizeX = imgSize;
int imgSizeY = imgSize;
int img_threads_x = 8;
int img_threads_y = 8;
int casePerThread = 16;
int nblocksx = 2;//~number of blocks x
int case_threads = DIVUP(numCases, nblocksx*casePerThread);
int lobe = sizeModuleSide/2;
int sharedX = lobe*2 + img_threads_x;
int sharedY = lobe*2 + img_threads_y;
int conv_size = (lobe*2 + 1);
int conv_size2 = conv_size*conv_size;
int imgBlocksY = DIVUP(imgSizeY,img_threads_x);
int imgBlocksX = DIVUP(imgSizeX,img_threads_y);
//for optimization can change both block sizes!
dim3 threads(case_threads, img_threads_x*img_threads_y);
dim3 blocks = dim3(DIVUP(numCases, threads.x*casePerThread), imgBlocksY*imgBlocksX);
int sizeSharedBlock = sharedX*sharedY;
int shared_size = (sizeSharedBlock*threads.x + threads.x*threads.y*numFilters*conv_size2)*sizeof(float);//looped out - case_threads*imgsPerThread;
int tag_width = DIVUP(input.getNumCols(), casePerThread) ; //could be reduced
int tag_height = blocks.y*threads.y;//could be reduced
int tag_size = tag_width*tag_height;
float* tempMatrixPtr[CONST_AREA_SIZE];
for(int i =0; i < tempMatrix.size(); i++)
{
if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
tempMatrix[i].resize(tag_height, tag_width);
cudaMemset(tempMatrix[i].getDevData(), 0, tag_size*sizeof(float));
}
tempMatrixPtr[i] = tempMatrix[i].getDevData();
}
cudaMemcpy(arrayPtr, tempMatrixPtr, sizeof(float*)*tempMatrix.size(), cudaMemcpyHostToDevice);
// printf("kMicroConvWeightGrad *************** \n");
// printf("tag_width %i tag_height %i shared_size %i tempMatrix.size() %i conv_size %i casePerThread %i\n",
// tag_width, tag_height, shared_size, tempMatrix.size(), conv_size, casePerThread);
//
// printf("blocks.x %i blocks.y %i threads.x %i threads.y %i shared_size %i \n",
// blocks.x, blocks.y, threads.x, threads.y, shared_size);
// printf("sharedY %i img_threads_x %i img_threads_y %i sizeModuleSide %i imgSizeX %i imgSizeY %i imgPixels %i numFilters %i numCases %i lobe %i\n",
// sharedY,img_threads_x,img_threads_y,sizeModuleSide,imgSizeX,imgSizeY, imgPixels,numFilters,numCases,lobe);
//
//
//const int sizeConv2 = SIZE_CONV*SIZE_CONV;
//int filterID = 0;
//int dsy = 0;
//int dsx = 1;
//int channelID = 0;
//int ind_coeff = filterID*sizeConv2 + (dsy + lobe)*SIZE_CONV +(dsx + lobe);
//
// singletonTempMem.allocFloatElement(actGrad.getNumCols()*actGrad.getNumRows());
// singletonTempMem.allocFloatElement(input.getNumCols()*input.getNumRows());
// singletonTempMem.allocFloatElement(tag_height*tag_width);
// int out_width = input.getNumCols();
// int out_height = input.getNumRows()*numFilters;
// singletonTempMem.allocFloatElement(out_width*out_height);
//
// float* tempHostAct = singletonTempMem.getPtr(0);
// float* tempHostInp = singletonTempMem.getPtr(1);
// float* tempHostTag = singletonTempMem.getPtr(2);
// float* tempHostTagA = singletonTempMem.getPtr(3);
//
// cudaMemcpy(tempHostAct, actGrad.getDevData(), actGrad.getNumCols()*actGrad.getNumRows()*sizeof(float),
// cudaMemcpyDeviceToHost);
//
// cudaMemcpy(tempHostInp, input.getDevData(), input.getNumCols()*input.getNumRows()*sizeof(float),
// cudaMemcpyDeviceToHost);
// //memset(tempHostTagA, 0, tag_height*tag_width*sizeof(float));
// memset(tempHostTag, 0, tag_height*tag_width*sizeof(float));
//
// double sum_a = Sum(tempHostAct, actGrad.getNumCols()*actGrad.getNumRows());
// double sum_i = Sum(tempHostInp, input.getNumCols()*input.getNumRows());
// printf(" sum_a %f sum_i %f \n", sum_a, sum_i);
//
// float temp[CONST_AREA_SIZE];
// assert(param.size() <= CONST_AREA_SIZE);
// memset(temp, 0, sizeof(temp));
// for(int i = 0; i < param.size(); i++)
// temp[i] = (float)param[i];
//
//
// debugMicroConvLinApprox((SIZE_CONV-1)/2, SIZE_CONV, temp, tempHostInp, tempHostAct, tempHostTagA,
// numCases, channels, numFilters,
// sharedY, img_threads_x, img_threads_y,
// imgSizeX, imgSizeY,
// imgPixels);
// double sum_host0 = Sum(tempHostTagA, out_height*out_width);
// printf(" debugMicroConvFilterAct sum0 %f \n", sum_host0);
// double delta = 1e-3;
// temp[ind_coeff] += delta;
//
// debugMicroConvLinApprox((SIZE_CONV-1)/2, SIZE_CONV, temp, tempHostInp, tempHostAct, tempHostTagA,
// numCases, channels, numFilters,
// sharedY, img_threads_x, img_threads_y,
// imgSizeX, imgSizeY,
// imgPixels);
// double sum_host1 = Sum(tempHostTagA, out_height*out_width);
// printf(" debugMicroConvFilterAct sum1 %f \n", sum_host1);
//
// printf(" debugMicroConv grad %f \n", (sum_host1-sum_host0)/delta);
//
//
//memset(tempHostTag, 0, tag_height*tag_width*sizeof(float));
// debugMicroConvWeightGrad(lobe, SIZE_CONV, dsx, dsy, filterID, channelID, tempHostAct, tempHostInp, tempHostTag,
// tag_size, numCases,
// channels, numFilters,
// img_threads_x, img_threads_y, sharedY,
// lobe, sizeModuleSide, sizeSharedBlock,
// imgSizeX, imgSizeY, imgPixels);
//
// double sum_host = Sum(tempHostTag, tag_height*tag_width);
// printf(" debugMicroConvWeightGrad sum %f \n", sum_host);
//
//memset(tempHostTag, 0, tag_height*tag_width*sizeof(float));
//emuMicroConvWeightGrad(threads.x, threads.y, blocks.x, blocks.y,
// lobe, SIZE_CONV, dsx, dsy, filterID, channelID, tempHostAct, tempHostInp, tempHostTag,
// tag_size, numCases, casePerThread, tag_width,
// channels, numFilters,
// img_threads_x, img_threads_y, sharedY,
// sizeSharedBlock,
// imgSizeX, imgSizeY, imgPixels);
// double sum_host_emu = Sum(tempHostTag, tag_height*tag_width);
//printf(" emuMicroConvWeightGrad sum %f \n", sum_host_emu);
kMicroConvWeightGrad<SIZE_CONV/2><<<blocks, threads, shared_size>>>(actGrad.getDevData(), input.getDevData(), (float**)arrayPtr,
tag_size, numCases, casePerThread, tag_width,
channels, numFilters,
img_threads_x, img_threads_y,
imgSizeX, imgSizeY, imgPixels);
// double sum_ag = actGrad.sum();
// double sum_ig = input.sum();
//double sum = tempMatrix[ind_coeff].sum();
//printf(" kMicroConvWeightGrad sum %f \n", sum);
//printf(" kMicroConvWeightGrad sum %f sum_ag %f sum_ig %f \n", sum, sum_ag, sum_ig);
////debug
// printf("kMicroConvWeightGrad end \n");
cutilCheckMsg("kMicroConvWeightGrad: Kernel execution failed");
}
//-------------------------------------------------------------
//API VectFunc
//-------------------------------------------------------------
void computeVectFuncAct(NVMatrix& input, NVMatrix& target, vector<double>& param, int sizeV, int sizeH, int channels)
{
    // Host-side launcher for kVectFuncAct: applies the parameterized vector
    // function that maps every group of sizeV input channels to sizeH output
    // channels.  Only the template sizes instantiated below are supported.
    assert(sizeV <= 4 || sizeV == 6 || sizeV == 8 || sizeV == 12 || sizeV == 16);

    const int inWidth  = input.getNumCols();
    const int inHeight = input.getNumRows();
    const int outWidth  = inWidth;
    const int outHeight = (inHeight * sizeH) / sizeV;
    const int nCases = outWidth;                     // one case per column
    const int pixelsPerGroup = inHeight / channels;  // rows per channel
    const int nColors = channels / sizeV;            // number of channel groups

    if (target.getNumRows() != outHeight || target.getNumCols() != outWidth) {
        target.resize(outHeight, outWidth);
    }

    // Stage the (double) parameters into device constant memory as floats.
    assert(param.size() <= CONST_AREA_SIZE);
    float hostParam[CONST_AREA_SIZE];
    memset(hostParam, 0, sizeof(hostParam));
    for (int i = 0; i < param.size(); i++) {
        hostParam[i] = (float)param[i];
    }
    cudaMemcpyToSymbol(const_area, hostParam, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);

    dim3 threads(min(ELTWISE_THREADS_X, inWidth), ELTWISE_THREADS_Y);
    dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inWidth, threads.x)),
                std::min(NUM_BLOCKS_MAX, DIVUP(pixelsPerGroup, ELTWISE_THREADS_Y)));

    // Dispatch to the kVectFuncAct instantiation matching sizeV.
#define LAUNCH_VECT_ACT(SZ) \
    if (sizeV == SZ) { \
        cudaFuncSetCacheConfig(kVectFuncAct<SZ>, cudaFuncCachePreferL1); \
        kVectFuncAct<SZ><<<blocks, threads>>>(input.getDevData(), target.getDevData(), \
            pixelsPerGroup, nCases, input.getStride(), target.getStride(), nColors, sizeH); \
    }
    LAUNCH_VECT_ACT(1)
    LAUNCH_VECT_ACT(2)
    LAUNCH_VECT_ACT(3)
    LAUNCH_VECT_ACT(4)
    LAUNCH_VECT_ACT(6)
    LAUNCH_VECT_ACT(8)
    LAUNCH_VECT_ACT(12)
    LAUNCH_VECT_ACT(16)
#undef LAUNCH_VECT_ACT

    cutilCheckMsg("kVectFuncAct: Kernel execution failed");
}
void computeVectFuncGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& target,
    vector<double>& param, int sizeV, int sizeH, int channels)
{
    // Host-side launcher for kVectFuncGrad: back-propagates 'actGrad' through
    // the vector function, writing the gradient w.r.t. 'input' into 'target'.
    assert(sizeV <= 4 || sizeV == 6 || sizeV == 8 || sizeV == 12 || sizeV == 16);

    const int inWidth  = input.getNumCols();
    const int inHeight = input.getNumRows();
    if (target.getNumRows() != inHeight || target.getNumCols() != inWidth) {
        target.resize(inHeight, inWidth);  // gradient has the input's shape
    }

    const int nCases = inWidth;
    const int pixelsPerGroup = inHeight / channels;
    const int nColors = channels / sizeV;

    // Stage the (double) parameters into device constant memory as floats.
    assert(param.size() <= CONST_AREA_SIZE);
    float hostParam[CONST_AREA_SIZE];
    memset(hostParam, 0, sizeof(hostParam));
    for (int i = 0; i < param.size(); i++) {
        hostParam[i] = (float)param[i];
    }
    cudaMemcpyToSymbol(const_area, hostParam, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);

    dim3 threads(min(ELTWISE_THREADS_X, inWidth), ELTWISE_THREADS_Y);
    dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inWidth, threads.x)),
                std::min(NUM_BLOCKS_MAX, DIVUP(pixelsPerGroup, ELTWISE_THREADS_Y)));

    // Dispatch to the kVectFuncGrad instantiation matching sizeV.
#define LAUNCH_VECT_GRAD(SZ) \
    if (sizeV == SZ) { \
        cudaFuncSetCacheConfig(kVectFuncGrad<SZ>, cudaFuncCachePreferL1); \
        kVectFuncGrad<SZ><<<blocks, threads>>>(actGrad.getDevData(), \
            input.getDevData(), target.getDevData(), pixelsPerGroup, nCases, \
            input.getStride(), actGrad.getStride(), nColors, sizeH); \
    }
    LAUNCH_VECT_GRAD(1)
    LAUNCH_VECT_GRAD(2)
    LAUNCH_VECT_GRAD(3)
    LAUNCH_VECT_GRAD(4)
    LAUNCH_VECT_GRAD(6)
    LAUNCH_VECT_GRAD(8)
    LAUNCH_VECT_GRAD(12)
    LAUNCH_VECT_GRAD(16)
#undef LAUNCH_VECT_GRAD

    cutilCheckMsg("kVectFuncGrad: Kernel execution failed");
}
void computeVectFuncWeightGrad(NVMatrix& actGrad, NVMatrix& input,
    vector<NVMatrix>& tempMatrix,
    void* arrayPtr,
    vector<double>& param, int sizeV, int sizeH, int channels)
{
    // Host-side launcher for kVectFuncParamWeightGrad: accumulates per-thread
    // partial gradients w.r.t. the function parameters.  One tag matrix per
    // parameter is (re)sized to cover the launch grid, and the array of their
    // device pointers is shipped to the kernel through 'arrayPtr'.
    assert(sizeV <= 4 || sizeV == 6 || sizeV == 8 || sizeV == 12 || sizeV == 16);

    const int inWidth  = input.getNumCols();
    const int inHeight = input.getNumRows();
    const int nCases = inWidth;
    const int pixelsPerGroup = inHeight / channels;
    const int nColors = channels / sizeV;

    const int rowsPerThread = 1;  // replaces the old N_SUM macro
    dim3 threads(min(ELTWISE_THREADS_X, inWidth), ELTWISE_THREADS_Y);
    dim3 blocks(std::min(NUM_BLOCKS_MAX, (int)DIVUP(inWidth, threads.x)),
                std::min(NUM_BLOCKS_MAX, (int)DIVUP(pixelsPerGroup/rowsPerThread, ELTWISE_THREADS_Y)));

    // Per-block scratch: sizeV*(sizeH+1) floats for every thread.
    int shared_size = sizeV*(sizeH+1)*threads.x*threads.y*sizeof(float);

    // Each tag matrix holds one partial-sum cell per launched thread.
    int tag_width  = blocks.x*threads.x;
    int tag_height = blocks.y*threads.y;
    int tag_size   = tag_width*tag_height;

    // Stage the (double) parameters into device constant memory as floats.
    assert(param.size() <= CONST_AREA_SIZE);
    float hostParam[CONST_AREA_SIZE];
    memset(hostParam, 0, sizeof(hostParam));
    for (int i = 0; i < param.size(); i++) {
        hostParam[i] = (float)param[i];
    }
    cudaMemcpyToSymbol(const_area, hostParam, sizeof(float)*CONST_AREA_SIZE, 0, cudaMemcpyHostToDevice);

    // Resize the tag matrices to the grid and gather their device pointers.
    float* tagPtrs[CONST_AREA_SIZE];
    for (int i = 0; i < tempMatrix.size(); i++)
    {
        if (tempMatrix[i].getNumCols() != tag_width || tempMatrix[i].getNumRows() != tag_height) {
            tempMatrix[i].resize(tag_height, tag_width);
        }
        tagPtrs[i] = tempMatrix[i].getDevData();
    }
    cudaMemcpy(arrayPtr, tagPtrs, sizeof(float*)*tempMatrix.size(), cudaMemcpyHostToDevice);

    // Dispatch to the kVectFuncParamWeightGrad instantiation matching sizeV.
#define LAUNCH_VECT_WGRAD(SZ) \
    if (sizeV == SZ) { \
        cudaFuncSetCacheConfig(kVectFuncParamWeightGrad<SZ>, cudaFuncCachePreferL1); \
        kVectFuncParamWeightGrad<SZ><<<blocks, threads, shared_size>>>(actGrad.getDevData(), \
            input.getDevData(), (float**)arrayPtr, nColors, tag_size, pixelsPerGroup, nCases, \
            input.getStride(), actGrad.getStride(), tempMatrix[0].getStride(), sizeH); \
    }
    LAUNCH_VECT_WGRAD(1)
    LAUNCH_VECT_WGRAD(2)
    LAUNCH_VECT_WGRAD(3)
    LAUNCH_VECT_WGRAD(4)
    LAUNCH_VECT_WGRAD(6)
    LAUNCH_VECT_WGRAD(8)
    LAUNCH_VECT_WGRAD(12)
    LAUNCH_VECT_WGRAD(16)
#undef LAUNCH_VECT_WGRAD

    cutilCheckMsg("kVectFuncParamWeightGrad: Kernel execution failed");
}
2746c127999a22aeb4c8e07567908242265e0272.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************
*** ***
*** Source code generated by kernel2cu.pl ***
*** ***
*** Please do not edit ***
*** ***
*********************************************************************/
#include "vglImage.h"
#include "vglLoadShader.h"
#include "vglContext.h"
#include <iostream>
//kernels
/** vglCudaCopy
Copy of image in cuda context.
*/
// <<<input->getHeight(),384>>> (IO_PBO: VglImage* input, IO_PBO: VglImage* output)
// (input->cudaPtr, output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels)
template<typename T>
__global__ void global_Copy(T* input, T* output, int w, int h, int nChannels){
  // One block per image row; the block's threads stride across the row's
  // w*nChannels interleaved samples.
  const int rowOffset = blockIdx.x * nChannels * w;
  const int rowLen = nChannels * w;
  for (int i = threadIdx.x; i < rowLen; i += blockDim.x){
    output[rowOffset + i] = input[rowOffset + i];
  }
}
// Device-to-device copy of 'input' into 'output' (one block per image row).
// Both images are forced into the CUDA context first; a null image or a
// missing device buffer is treated as fatal (prints and exits).
void vglCudaCopy(VglImage* input, VglImage* output){
if (!input){
printf("vglCudaCopy: Error: input parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
// Make sure the input's current pixels live in the CUDA context.
vglCheckContext(input, VGL_CUDA_CONTEXT);
if (!input->cudaPtr){
printf("vglCudaCopy: Error: input->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
if (!output){
printf("vglCudaCopy: Error: output parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
vglCheckContext(output, VGL_CUDA_CONTEXT);
if (!output->cudaPtr){
printf("vglCudaCopy: Error: output->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
// Only 8-bit-per-channel images are supported by this generated wrapper.
switch (input->depth){
case (IPL_DEPTH_8U):
hipLaunchKernelGGL(( global_Copy), dim3(input->getHeight()),dim3(384), 0, 0, (unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels);
break;
default:
printf("vglCudaCopy: Error: unsupported img->depth = %d in file '%s' in line %i.\n",
input->depth, __FILE__, __LINE__);
exit(1);
}
// Mark both images as currently valid in the CUDA context.
vglSetContext(input, VGL_CUDA_CONTEXT);
vglSetContext(output, VGL_CUDA_CONTEXT);
}
/** vglCudaInvert
Inverts image stored in cuda context.
*/
// <<<input->getWidth(),384>>> (IN_PBO: VglImage* input, OUT_PBO: VglImage* output)
// (input->cudaPtr, output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels)
// Per-row "inversion" kernel: one block per row index; threads stride over
// the row's interleaved channels.
template<typename T>
__global__ void global_Invert(T* input, T* output, int w, int h, int nChannels){
int offset = blockIdx.x * nChannels * w;
T* array_in = input + offset;
T* array_out = output + offset;
for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){
// NOTE(review): for unsigned 8-bit samples, unary minus wraps mod 256
// (0 -> 0, 1 -> 255), which is NOT the photographic negative 255 - x.
// The generator emits this for all callers — confirm it is intended.
array_out[j] = -array_in[j];
}
}
// Writes the per-channel "inverted" pixels of 'input' (unary negation, i.e.
// mod-256 wrap for 8-bit data) into 'output'.  Preconditions: both images
// non-null with valid device buffers; violations are fatal (print + exit).
void vglCudaInvert(VglImage* input, VglImage* output){
  if (!input){
    printf("vglCudaInvert: Error: input parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  vglCheckContext(input, VGL_CUDA_CONTEXT);
  if (!input->cudaPtr){
    printf("vglCudaInvert: Error: input->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  if (!output){
    printf("vglCudaInvert: Error: output parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  vglCheckContextForOutput(output, VGL_CUDA_CONTEXT);
  if (!output->cudaPtr){
    printf("vglCudaInvert: Error: output->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  switch (input->depth){
  case (IPL_DEPTH_8U):
    // BUGFIX: global_Invert uses blockIdx.x as a ROW index (its offset is
    // blockIdx.x * nChannels * w), so the grid needs one block per row.
    // The generated code launched getWidth() blocks, which over- or
    // under-covers any non-square image; use getHeight() as in
    // vglCudaCopy and vglCudaInvertOnPlace.
    hipLaunchKernelGGL(( global_Invert), dim3(input->getHeight()),dim3(384), 0, 0, (unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels);
    break;
  default:
    printf("vglCudaInvert: Error: unsupported img->depth = %d in file '%s' in line %i.\n",
input->depth, __FILE__, __LINE__);
    exit(1);
  }
  vglSetContext(output, VGL_CUDA_CONTEXT);
}
/** vglCudaInvertOnPlace
Inverts image, stored in cuda context, on place.
*/
// <<<input->getHeight(),384>>> (IO_PBO: VglImage* input)
// (input->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels)
template<typename T>
__global__ void global_InvertOnPlace(T* input, int w, int h, int nChannels){
  // In-place per-row negation: block = row, threads stride across the row.
  T* row = input + blockIdx.x * nChannels * w;
  const int rowLen = nChannels * w;
  for (int i = threadIdx.x; i < rowLen; i += blockDim.x){
    row[i] = -row[i];
  }
}
// In-place per-channel negation of 'input' in the CUDA context (mod-256
// wrap for 8-bit data).  Null image or missing device buffer is fatal.
void vglCudaInvertOnPlace(VglImage* input){
if (!input){
printf("vglCudaInvertOnPlace: Error: input parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
vglCheckContext(input, VGL_CUDA_CONTEXT);
if (!input->cudaPtr){
printf("vglCudaInvertOnPlace: Error: input->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
// One block per image row; only 8-bit-per-channel images supported.
switch (input->depth){
case (IPL_DEPTH_8U):
hipLaunchKernelGGL(( global_InvertOnPlace), dim3(input->getHeight()),dim3(384), 0, 0, (unsigned char*)input->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels);
break;
default:
printf("vglCudaInvertOnPlace: Error: unsupported img->depth = %d in file '%s' in line %i.\n",
input->depth, __FILE__, __LINE__);
exit(1);
}
vglSetContext(input, VGL_CUDA_CONTEXT);
}
| 2746c127999a22aeb4c8e07567908242265e0272.cu |
/*********************************************************************
*** ***
*** Source code generated by kernel2cu.pl ***
*** ***
*** Please do not edit ***
*** ***
*********************************************************************/
#include "vglImage.h"
#include "vglLoadShader.h"
#include "vglContext.h"
#include <iostream>
//kernels
/** vglCudaCopy
Copy of image in cuda context.
*/
// <<<input->getHeight(),384>>> (IO_PBO: VglImage* input, IO_PBO: VglImage* output)
// (input->cudaPtr, output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels)
template<typename T>
__global__ void global_Copy(T* input, T* output, int w, int h, int nChannels){
  // One block per image row; the block's threads stride across the row's
  // w*nChannels interleaved samples.
  const int rowOffset = blockIdx.x * nChannels * w;
  const int rowLen = nChannels * w;
  for (int i = threadIdx.x; i < rowLen; i += blockDim.x){
    output[rowOffset + i] = input[rowOffset + i];
  }
}
// Device-to-device copy of 'input' into 'output' (one block per image row).
// Both images are forced into the CUDA context first; a null image or a
// missing device buffer is treated as fatal (prints and exits).
void vglCudaCopy(VglImage* input, VglImage* output){
if (!input){
printf("vglCudaCopy: Error: input parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
// Make sure the input's current pixels live in the CUDA context.
vglCheckContext(input, VGL_CUDA_CONTEXT);
if (!input->cudaPtr){
printf("vglCudaCopy: Error: input->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
if (!output){
printf("vglCudaCopy: Error: output parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
vglCheckContext(output, VGL_CUDA_CONTEXT);
if (!output->cudaPtr){
printf("vglCudaCopy: Error: output->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
// Only 8-bit-per-channel images are supported by this generated wrapper.
switch (input->depth){
case (IPL_DEPTH_8U):
global_Copy<<<input->getHeight(),384>>>((unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels);
break;
default:
printf("vglCudaCopy: Error: unsupported img->depth = %d in file '%s' in line %i.\n",
input->depth, __FILE__, __LINE__);
exit(1);
}
// Mark both images as currently valid in the CUDA context.
vglSetContext(input, VGL_CUDA_CONTEXT);
vglSetContext(output, VGL_CUDA_CONTEXT);
}
/** vglCudaInvert
Inverts image stored in cuda context.
*/
// <<<input->getWidth(),384>>> (IN_PBO: VglImage* input, OUT_PBO: VglImage* output)
// (input->cudaPtr, output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels)
// Per-row "inversion" kernel: one block per row index; threads stride over
// the row's interleaved channels.
template<typename T>
__global__ void global_Invert(T* input, T* output, int w, int h, int nChannels){
int offset = blockIdx.x * nChannels * w;
T* array_in = input + offset;
T* array_out = output + offset;
for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){
// NOTE(review): for unsigned 8-bit samples, unary minus wraps mod 256
// (0 -> 0, 1 -> 255), which is NOT the photographic negative 255 - x.
// The generator emits this for all callers — confirm it is intended.
array_out[j] = -array_in[j];
}
}
// Writes the per-channel "inverted" pixels of 'input' (unary negation, i.e.
// mod-256 wrap for 8-bit data) into 'output'.  Preconditions: both images
// non-null with valid device buffers; violations are fatal (print + exit).
void vglCudaInvert(VglImage* input, VglImage* output){
  if (!input){
    printf("vglCudaInvert: Error: input parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  vglCheckContext(input, VGL_CUDA_CONTEXT);
  if (!input->cudaPtr){
    printf("vglCudaInvert: Error: input->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  if (!output){
    printf("vglCudaInvert: Error: output parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  vglCheckContextForOutput(output, VGL_CUDA_CONTEXT);
  if (!output->cudaPtr){
    printf("vglCudaInvert: Error: output->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
    exit(1);
  }
  switch (input->depth){
  case (IPL_DEPTH_8U):
    // BUGFIX: global_Invert uses blockIdx.x as a ROW index (its offset is
    // blockIdx.x * nChannels * w), so the grid needs one block per row.
    // The generated code launched getWidth() blocks, which over- or
    // under-covers any non-square image; use getHeight() as in
    // vglCudaCopy and vglCudaInvertOnPlace.
    global_Invert<<<input->getHeight(),384>>>((unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels);
    break;
  default:
    printf("vglCudaInvert: Error: unsupported img->depth = %d in file '%s' in line %i.\n",
input->depth, __FILE__, __LINE__);
    exit(1);
  }
  vglSetContext(output, VGL_CUDA_CONTEXT);
}
/** vglCudaInvertOnPlace
Inverts image, stored in cuda context, on place.
*/
// <<<input->getHeight(),384>>> (IO_PBO: VglImage* input)
// (input->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels)
template<typename T>
__global__ void global_InvertOnPlace(T* input, int w, int h, int nChannels){
  // In-place per-row negation: block = row, threads stride across the row.
  T* row = input + blockIdx.x * nChannels * w;
  const int rowLen = nChannels * w;
  for (int i = threadIdx.x; i < rowLen; i += blockDim.x){
    row[i] = -row[i];
  }
}
// In-place per-channel negation of 'input' in the CUDA context (mod-256
// wrap for 8-bit data).  Null image or missing device buffer is fatal.
void vglCudaInvertOnPlace(VglImage* input){
if (!input){
printf("vglCudaInvertOnPlace: Error: input parameter is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
vglCheckContext(input, VGL_CUDA_CONTEXT);
if (!input->cudaPtr){
printf("vglCudaInvertOnPlace: Error: input->cudaPtr is null in file '%s' in line %i.\n",
__FILE__, __LINE__);
exit(1);
}
// One block per image row; only 8-bit-per-channel images supported.
switch (input->depth){
case (IPL_DEPTH_8U):
global_InvertOnPlace<<<input->getHeight(),384>>>((unsigned char*)input->cudaPtr, input->getWidth(), input->getHeight(), input->nChannels);
break;
default:
printf("vglCudaInvertOnPlace: Error: unsupported img->depth = %d in file '%s' in line %i.\n",
input->depth, __FILE__, __LINE__);
exit(1);
}
vglSetContext(input, VGL_CUDA_CONTEXT);
}
|
6c700866b6b73c4aa90e4e3716b74c7fdd5b6c08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Device-side logistic function: sigmoid(x) = 1 / (1 + exp(-x)).
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
// Device-side hyperbolic tangent via the identity tanh(x) = 2*sigmoid(2x) - 1.
// Note: this overload shadows ::tanh for Dtype arguments inside namespace
// caffe, so the kernels below use it implicitly.
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
template <typename Dtype>
__global__ void ClipAdd(const int nthreads, const int dim, int t,
    const Dtype* clip, const Dtype* add_vec, Dtype* data) {
  // data[i] += cont * add_vec[i], where cont is the sequence-continuation
  // indicator: clip[n] for stream n when a clip blob is supplied, otherwise
  // 1 for every timestep after the first (t > 0).
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int stream = index / dim;
    Dtype cont = Dtype(t > 0);
    if (clip) {
      cont = clip[stream];
    }
    data[index] += cont * add_vec[index];
  }
}
template <typename Dtype>
__global__ void ActivationForward(const int nthreads, const int H,
    const Dtype* pre_gate, Dtype* gate) {
  // Applies the gate nonlinearities: the first 3*H columns of each row
  // (input, forget, output gates) get a sigmoid, the last H columns
  // (candidate cell input g) get a tanh.
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int col = index % (4*H);
    if (col < 3*H) {
      gate[index] = sigmoid(pre_gate[index]);
    } else {
      gate[index] = tanh(pre_gate[index]);
    }
  }
}
// LSTM pointwise forward for timestep t:
//   c_t = cont * f_t * c_{t-1} + i_t * g_t
//   h_t = o_t * tanh(c_t)
// 'gate' is laid out per stream n as [i | f | o | g], each slice of width H.
// 'cont' gates the carry-over of the previous cell state: clip[n] when a
// clip blob exists, otherwise (t > 0).
template <typename Dtype>
__global__ void LSTMForward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* clip,
Dtype* c_t, Dtype* h_t) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;  // stream (batch item)
const int d = index % H;  // hidden unit
const Dtype* offset = gate + 4*H*n;
const Dtype i_t = offset[d];        // input gate
const Dtype f_t = offset[H + d];    // forget gate
const Dtype o_t = offset[2*H + d];  // output gate
const Dtype g_t = offset[3*H + d];  // candidate cell input
const Dtype c_t_1 = c_prev[index];
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t;
h_t[index] = o_t * tanh(c_t[index]);
}
}
// LSTM pointwise backward for timestep t.  Consumes dh_t and the incoming
// cell-state gradient dc_t, and produces the gradient w.r.t. the gate
// activations (gate_diff, laid out [i | f | o | g] per stream like 'gate')
// plus the cell-state gradient for the previous step (dc_prev).  The
// clip/(t > 0) indicator zeroes the recurrent terms at sequence starts.
template <typename Dtype>
__global__ void LSTMBackward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* c_t,
const Dtype* clip, Dtype* dc_t, const Dtype* dh_t,
Dtype* dc_prev, Dtype* gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;  // stream (batch item)
const int d = index % H;  // hidden unit
const Dtype* gate_t = gate + 4*H*n;
const Dtype i_t = gate_t[d];
const Dtype f_t = gate_t[H + d];
const Dtype o_t = gate_t[2*H + d];
const Dtype g_t = gate_t[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype c = c_t[index];
const Dtype tanh_c = tanh(c);
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
Dtype* dc_t_1 = dc_prev + index;
Dtype* gate_diff_t = gate_diff + 4*H*n;
Dtype* di_t = gate_diff_t + d;
Dtype* df_t = gate_diff_t + H + d;
Dtype* do_t = gate_diff_t + 2*H + d;
Dtype* dg_t = gate_diff_t + 3*H + d;
// Output gate : tanh(c(t)) * h_diff(t)
*do_t = dh_t[index] * tanh_c;
// Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1)
// (the f(t+1)*c_diff(t+1) term is what dc_t already holds on entry)
dc_t[index] += dh_t[index] * o_t * (Dtype(1) - tanh_c * tanh_c);
// c_diff(t-1) += f(t) * c_diff(t)
*dc_t_1 = clip_t * dc_t[index] * f_t;
// Forget gate : c(t-1) * c_diff(t)
*df_t = clip_t * dc_t[index] * c_t_1;
// Input gate : g(t) * c_diff(t)
*di_t = dc_t[index] * g_t;
// Input modulation gate : i(t) * c_diff(t)
*dg_t = dc_t[index] * i_t;
}
}
template <typename Dtype>
__global__ void ActivationBackward(const int nthreads, const int H,
    const Dtype clip_threshold, const Dtype* gate, const Dtype* gate_diff,
    Dtype* pre_gate_diff) {
  // Chain rule through the gate nonlinearities:
  //   sigmoid gates (first 3*H columns): dL/dz = dL/dy * y * (1 - y)
  //   tanh gate     (last H columns)   : dL/dz = dL/dy * (1 - y^2)
  // When clip_threshold > 0, the pre-activation gradient is clamped to
  // [-clip_threshold, clip_threshold] before being stored.
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int col = index % (4 * H);
    const Dtype y = gate[index];
    Dtype diff;
    if (col < 3 * H) {
      diff = gate_diff[index] * y * (Dtype(1) - y);
    } else {
      diff = gate_diff[index] * (Dtype(1) - y * y);
    }
    if (clip_threshold > Dtype(0)) {
      if (diff < -clip_threshold) {
        diff = -clip_threshold;
      } else if (diff > clip_threshold) {
        diff = clip_threshold;
      }
    }
    pre_gate_diff[index] = diff;
  }
}
// GPU forward pass of the LSTM layer.  From the gemm shapes: T_ timesteps,
// N_ streams per step, I_ input dims, H_ hidden units (4*H_ gate columns).
// bottom[0] is the input sequence; optional bottom[1] is a per-(t,n) clip
// marker that, when present, also triggers carrying c/h state over from the
// previous forward call (truncated BPTT).
template <typename Dtype>
void LstmLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_EQ(top[0]->gpu_data(), top_.gpu_data());
Dtype* top_data = top_.mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
// blobs_[0]: input-to-hidden weights, blobs_[1]: hidden-to-hidden weights,
// blobs_[2]: gate biases — TODO confirm against LayerSetUp.
const Dtype* weight_i = this->blobs_[0]->gpu_data();
const Dtype* weight_h = this->blobs_[1]->gpu_data();
const Dtype* bias = this->blobs_[2]->gpu_data();
Dtype* pre_gate_data = pre_gate_.mutable_gpu_data();
Dtype* gate_data = gate_.mutable_gpu_data();
Dtype* cell_data = cell_.mutable_gpu_data();
// Initialize previous state
if (clip) {
caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data());
caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data());
}
else {
caffe_gpu_set(c_0_.count(), Dtype(0.), c_0_.mutable_gpu_data());
caffe_gpu_set(h_0_.count(), Dtype(0.), h_0_.mutable_gpu_data());
}
// Compute input to hidden forward propagation
// (pre_gate = bottom * W_i^T, then broadcast-add the bias row)
caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, Dtype(1.),
bottom_data, weight_i, Dtype(0.), pre_gate_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, Dtype(1.),
bias_multiplier_.gpu_data(), bias, Dtype(1.), pre_gate_data);
// Compute recurrent forward propagation
for (int t = 0; t < T_; ++t) {
Dtype* h_t = top_data + top_.offset(t);
Dtype* c_t = cell_data + cell_.offset(t);
Dtype* pre_gate_t = pre_gate_data + pre_gate_.offset(t);
Dtype* gate_t = gate_data + gate_.offset(t);
const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Dtype* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data();
const Dtype* c_t_1 = t > 0 ? (c_t - cell_.offset(1)) : c_0_.gpu_data();
// Recurrent contribution: h_{t-1} * W_h^T, gated into pre_gate by ClipAdd.
caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, Dtype(1.),
h_t_1, weight_h, Dtype(0.), h_to_gate_.mutable_gpu_data());
hipLaunchKernelGGL(( ClipAdd<Dtype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t);
CUDA_POST_KERNEL_CHECK;
// Apply gate nonlinearities, then the pointwise LSTM update.
hipLaunchKernelGGL(( ActivationForward<Dtype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
4*N_*H_, H_, pre_gate_t, gate_t);
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( LSTMForward<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t);
CUDA_POST_KERNEL_CHECK;
}
// Preserve cell state and output value for truncated BPTT
caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data());
caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data());
}
template <typename Dtype>
void LstmLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // Truncated-BPTT backward pass: walk the sequence from t = T_-1 down to 0,
  // turning dh/dc into gate gradients, then accumulate all weight and bias
  // gradients with batched GEMM/GEMV calls over the whole sequence.
  const Dtype* top_data = top_.gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* clip = NULL;
  if (bottom.size() > 1) {
    clip = bottom[1]->gpu_data();
    CHECK_EQ(bottom[1]->num(), bottom[1]->count());
  }
  const Dtype* weight_i = this->blobs_[0]->gpu_data();
  const Dtype* weight_h = this->blobs_[1]->gpu_data();
  const Dtype* gate_data = gate_.gpu_data();
  const Dtype* cell_data = cell_.gpu_data();
  Dtype* top_diff = top_.mutable_gpu_diff();
  Dtype* pre_gate_diff = pre_gate_.mutable_gpu_diff();
  Dtype* gate_diff = gate_.mutable_gpu_diff();
  Dtype* cell_diff = cell_.mutable_gpu_diff();
  // Seed the last time step's cell gradient from the state saved for BPTT.
  caffe_copy(N_*H_, c_T_.gpu_diff(), cell_diff + cell_.offset(T_-1));
  for (int t = T_-1; t >= 0; --t) {
    Dtype* dh_t = top_diff + top_.offset(t);
    Dtype* dc_t = cell_diff + cell_.offset(t);
    Dtype* gate_diff_t = gate_diff + gate_.offset(t);
    Dtype* pre_gate_diff_t = pre_gate_diff + pre_gate_.offset(t);
    Dtype* dh_t_1 = t > 0 ? top_diff + top_.offset(t-1) : h_0_.mutable_gpu_diff();
    Dtype* dc_t_1 = t > 0 ? cell_diff + cell_.offset(t-1) : c_0_.mutable_gpu_diff();
    const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
    const Dtype* c_t = cell_data + cell_.offset(t);
    const Dtype* c_t_1 = t > 0 ? cell_data + cell_.offset(t-1) : c_0_.gpu_data();
    const Dtype* gate_t = gate_data + gate_.offset(t);
    // Cell-step backward: gate gradients and c_diff(t-1) from dh_t/dc_t.
    hipLaunchKernelGGL(( LSTMBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        N_*H_, H_, t, c_t_1, gate_t, c_t, clip_t, dc_t, dh_t, dc_t_1, gate_diff_t);
    CUDA_POST_KERNEL_CHECK;
    // Backprop through the gate nonlinearities (with optional clipping).
    hipLaunchKernelGGL(( ActivationBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        4*N_*H_, H_, clipping_threshold_, gate_t, gate_diff_t, pre_gate_diff_t);
    CUDA_POST_KERNEL_CHECK;
    // Backprop errors to the previous time step
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, N_, H_, 4*H_,
        Dtype(1.), pre_gate_diff_t, weight_h, Dtype(0.), h_to_h_.mutable_gpu_data());
    hipLaunchKernelGGL(( ClipAdd<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        N_*H_, H_, t, clip_t, h_to_h_.gpu_data(), dh_t_1);
    // Fix: this was the only kernel launch in this file not followed by a
    // post-launch check; a failure here would surface later, misattributed.
    CUDA_POST_KERNEL_CHECK;
  }
  if (this->param_propagate_down_[0]) {
    // Gradient w.r.t. input-to-hidden weight
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, I_, T_*N_, Dtype(1.),
        pre_gate_diff, bottom_data, Dtype(1.), this->blobs_[0]->mutable_gpu_diff());
  }
  if (this->param_propagate_down_[1]) {
    // Gradient w.r.t. hidden-to-hidden weight
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, (T_-1)*N_, Dtype(1.),
        pre_gate_diff + pre_gate_.offset(1), top_data,
        Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
    // Add Gradient from previous time-step
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, 1, Dtype(1.),
        pre_gate_diff, h_0_.gpu_data(),
        Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
  }
  if (this->param_propagate_down_[2]) {
    // Gradient w.r.t. bias
    caffe_gpu_gemv(CblasTrans, T_*N_, 4*H_, Dtype(1.), pre_gate_diff,
        bias_multiplier_.gpu_data(), Dtype(1.),
        this->blobs_[2]->mutable_gpu_diff());
  }
  if (propagate_down[0]) {
    // Gradient w.r.t. bottom data
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, I_, 4*H_, Dtype(1.),
        pre_gate_diff, weight_i, Dtype(0.), bottom[0]->mutable_gpu_diff());
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmLayer);
} // namespace caffe
| 6c700866b6b73c4aa90e4e3716b74c7fdd5b6c08.cu | #include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Logistic sigmoid: 1 / (1 + e^{-x}).
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
  const Dtype e = exp(-x);
  return Dtype(1) / (Dtype(1) + e);
}
// Hyperbolic tangent expressed via the sigmoid: tanh(x) = 2*sigmoid(2x) - 1.
// NOTE: shadows ::tanh for device code inside this namespace.
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
  const Dtype s = sigmoid(Dtype(2) * x);
  return Dtype(2) * s - Dtype(1);
}
// Elementwise data[i] += clip * add_vec[i], where the multiplier is the
// per-sample continuation flag of sample n = i / dim (explicit clip value
// when a clip blob is given, otherwise 1 for every step after the first).
template <typename Dtype>
__global__ void ClipAdd(const int nthreads, const int dim, int t,
const Dtype* clip, const Dtype* add_vec, Dtype* data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
// With no clip blob, the recurrent contribution is zeroed only at t == 0.
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
data[index] += clip_t * add_vec[index];
}
}
// Applies the LSTM gate nonlinearities to the pre-activations.
// Per-sample layout is [i | f | o | g], each of width H: the first 3*H
// entries (input/forget/output gates) get a sigmoid, the last H (the
// candidate input g) gets a tanh.
template <typename Dtype>
__global__ void ActivationForward(const int nthreads, const int H,
const Dtype* pre_gate, Dtype* gate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4*H);
gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]);
}
}
// One LSTM cell step, one thread per (sample, hidden unit):
//   c_t = clip * f_t * c_{t-1} + i_t * g_t
//   h_t = o_t * tanh(c_t)
// where clip is the per-sample continuation flag, so the forget path is
// cut at sequence starts.
template <typename Dtype>
__global__ void LSTMForward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* clip,
Dtype* c_t, Dtype* h_t) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
// Gates for this sample are packed as [i | f | o | g], each of width H.
const Dtype* offset = gate + 4*H*n;
const Dtype i_t = offset[d];
const Dtype f_t = offset[H + d];
const Dtype o_t = offset[2*H + d];
const Dtype g_t = offset[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t;
h_t[index] = o_t * tanh(c_t[index]);
}
}
// Backward of one LSTM cell step, one thread per (sample, hidden unit).
// Consumes dh_t and the running cell gradient dc_t and emits the four gate
// gradients (packed [i | f | o | g], width H each) plus the previous
// step's cell gradient. Each thread writes only its own slots.
template <typename Dtype>
__global__ void LSTMBackward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* c_t,
const Dtype* clip, Dtype* dc_t, const Dtype* dh_t,
Dtype* dc_prev, Dtype* gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* gate_t = gate + 4*H*n;
const Dtype i_t = gate_t[d];
const Dtype f_t = gate_t[H + d];
const Dtype o_t = gate_t[2*H + d];
const Dtype g_t = gate_t[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype c = c_t[index];
const Dtype tanh_c = tanh(c);
// Per-sample continuation flag, same convention as the forward pass.
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
Dtype* dc_t_1 = dc_prev + index;
Dtype* gate_diff_t = gate_diff + 4*H*n;
Dtype* di_t = gate_diff_t + d;
Dtype* df_t = gate_diff_t + H + d;
Dtype* do_t = gate_diff_t + 2*H + d;
Dtype* dg_t = gate_diff_t + 3*H + d;
// Output gate : tanh(c(t)) * h_diff(t)
*do_t = dh_t[index] * tanh_c;
// Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1)
dc_t[index] += dh_t[index] * o_t * (Dtype(1) - tanh_c * tanh_c);
// c_diff(t-1) += f(t) * c_diff(t)
*dc_t_1 = clip_t * dc_t[index] * f_t;
// Forget gate : c(t-1) * c_diff(t)
*df_t = clip_t * dc_t[index] * c_t_1;
// Input gate : g(t) * c_diff(t)
*di_t = dc_t[index] * g_t;
// Input modulation gate : i(t) * c_diff(t)
*dg_t = dc_t[index] * i_t;
}
}
// Backward of ActivationForward: sigmoid' = s*(1-s) for the first 3*H gate
// entries, tanh' = 1-g^2 for the candidate block, then optional clipping of
// the result to [-clip_threshold, clip_threshold].
template <typename Dtype>
__global__ void ActivationBackward(const int nthreads, const int H,
    const Dtype clip_threshold, const Dtype* gate, const Dtype* gate_diff,
    Dtype* pre_gate_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int d = index % (4 * H);
    const Dtype gate_val = gate[index];
    // Compute in a register and store once: the original wrote the value to
    // global memory and then read it back (up to twice) while clipping.
    Dtype diff;
    if (d < 3 * H) {
      diff = gate_diff[index] * gate_val * (Dtype(1) - gate_val);
    } else {
      diff = gate_diff[index] * (Dtype(1) - gate_val * gate_val);
    }
    if (clip_threshold > Dtype(0)) {
      if (diff < -clip_threshold) {
        diff = -clip_threshold;
      } else if (diff > clip_threshold) {
        diff = clip_threshold;
      }
    }
    pre_gate_diff[index] = diff;
  }
}
// Forward pass over the whole sequence.
// Phase 1: batched GEMMs compute the input->gate pre-activations (plus
// bias) for all T_ steps at once. Phase 2: a sequential loop over t adds
// the recurrent term, applies the gate nonlinearities and updates (c, h).
// The final (c, h) are saved in c_T_/h_T_ so a later call can resume
// (truncated BPTT) when a clip blob marks sequence continuation.
template <typename Dtype>
void LstmLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_EQ(top[0]->gpu_data(), top_.gpu_data());
Dtype* top_data = top_.mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
const Dtype* weight_i = this->blobs_[0]->gpu_data();
const Dtype* weight_h = this->blobs_[1]->gpu_data();
const Dtype* bias = this->blobs_[2]->gpu_data();
Dtype* pre_gate_data = pre_gate_.mutable_gpu_data();
Dtype* gate_data = gate_.mutable_gpu_data();
Dtype* cell_data = cell_.mutable_gpu_data();
// Initialize previous state
// With a clip blob, carry the state over from the previous call;
// otherwise start every sequence from zero.
if (clip) {
caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data());
caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data());
}
else {
caffe_gpu_set(c_0_.count(), Dtype(0.), c_0_.mutable_gpu_data());
caffe_gpu_set(h_0_.count(), Dtype(0.), h_0_.mutable_gpu_data());
}
// Compute input to hidden forward propagation
caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, Dtype(1.),
bottom_data, weight_i, Dtype(0.), pre_gate_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, Dtype(1.),
bias_multiplier_.gpu_data(), bias, Dtype(1.), pre_gate_data);
// Compute recurrent forward propagation
for (int t = 0; t < T_; ++t) {
Dtype* h_t = top_data + top_.offset(t);
Dtype* c_t = cell_data + cell_.offset(t);
Dtype* pre_gate_t = pre_gate_data + pre_gate_.offset(t);
Dtype* gate_t = gate_data + gate_.offset(t);
const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Dtype* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data();
const Dtype* c_t_1 = t > 0 ? (c_t - cell_.offset(1)) : c_0_.gpu_data();
// Recurrent contribution h_{t-1} * W_h, then gated add into pre-gates.
caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, Dtype(1.),
h_t_1, weight_h, Dtype(0.), h_to_gate_.mutable_gpu_data());
ClipAdd<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t);
CUDA_POST_KERNEL_CHECK;
ActivationForward<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
4*N_*H_, H_, pre_gate_t, gate_t);
CUDA_POST_KERNEL_CHECK;
LSTMForward<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t);
CUDA_POST_KERNEL_CHECK;
}
// Preserve cell state and output value for truncated BPTT
caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data());
caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data());
}
template <typename Dtype>
void LstmLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // Truncated-BPTT backward pass: walk the sequence from t = T_-1 down to 0,
  // turning dh/dc into gate gradients, then accumulate all weight and bias
  // gradients with batched GEMM/GEMV calls over the whole sequence.
  const Dtype* top_data = top_.gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* clip = NULL;
  if (bottom.size() > 1) {
    clip = bottom[1]->gpu_data();
    CHECK_EQ(bottom[1]->num(), bottom[1]->count());
  }
  const Dtype* weight_i = this->blobs_[0]->gpu_data();
  const Dtype* weight_h = this->blobs_[1]->gpu_data();
  const Dtype* gate_data = gate_.gpu_data();
  const Dtype* cell_data = cell_.gpu_data();
  Dtype* top_diff = top_.mutable_gpu_diff();
  Dtype* pre_gate_diff = pre_gate_.mutable_gpu_diff();
  Dtype* gate_diff = gate_.mutable_gpu_diff();
  Dtype* cell_diff = cell_.mutable_gpu_diff();
  // Seed the last time step's cell gradient from the state saved for BPTT.
  caffe_copy(N_*H_, c_T_.gpu_diff(), cell_diff + cell_.offset(T_-1));
  for (int t = T_-1; t >= 0; --t) {
    Dtype* dh_t = top_diff + top_.offset(t);
    Dtype* dc_t = cell_diff + cell_.offset(t);
    Dtype* gate_diff_t = gate_diff + gate_.offset(t);
    Dtype* pre_gate_diff_t = pre_gate_diff + pre_gate_.offset(t);
    Dtype* dh_t_1 = t > 0 ? top_diff + top_.offset(t-1) : h_0_.mutable_gpu_diff();
    Dtype* dc_t_1 = t > 0 ? cell_diff + cell_.offset(t-1) : c_0_.mutable_gpu_diff();
    const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
    const Dtype* c_t = cell_data + cell_.offset(t);
    const Dtype* c_t_1 = t > 0 ? cell_data + cell_.offset(t-1) : c_0_.gpu_data();
    const Dtype* gate_t = gate_data + gate_.offset(t);
    // Cell-step backward: gate gradients and c_diff(t-1) from dh_t/dc_t.
    LSTMBackward<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        N_*H_, H_, t, c_t_1, gate_t, c_t, clip_t, dc_t, dh_t, dc_t_1, gate_diff_t);
    CUDA_POST_KERNEL_CHECK;
    // Backprop through the gate nonlinearities (with optional clipping).
    ActivationBackward<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        4*N_*H_, H_, clipping_threshold_, gate_t, gate_diff_t, pre_gate_diff_t);
    CUDA_POST_KERNEL_CHECK;
    // Backprop errors to the previous time step
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, N_, H_, 4*H_,
        Dtype(1.), pre_gate_diff_t, weight_h, Dtype(0.), h_to_h_.mutable_gpu_data());
    ClipAdd<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        N_*H_, H_, t, clip_t, h_to_h_.gpu_data(), dh_t_1);
    // Fix: this was the only kernel launch in this file not followed by a
    // post-launch check; a failure here would surface later, misattributed.
    CUDA_POST_KERNEL_CHECK;
  }
  if (this->param_propagate_down_[0]) {
    // Gradient w.r.t. input-to-hidden weight
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, I_, T_*N_, Dtype(1.),
        pre_gate_diff, bottom_data, Dtype(1.), this->blobs_[0]->mutable_gpu_diff());
  }
  if (this->param_propagate_down_[1]) {
    // Gradient w.r.t. hidden-to-hidden weight
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, (T_-1)*N_, Dtype(1.),
        pre_gate_diff + pre_gate_.offset(1), top_data,
        Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
    // Add Gradient from previous time-step
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, 1, Dtype(1.),
        pre_gate_diff, h_0_.gpu_data(),
        Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
  }
  if (this->param_propagate_down_[2]) {
    // Gradient w.r.t. bias
    caffe_gpu_gemv(CblasTrans, T_*N_, 4*H_, Dtype(1.), pre_gate_diff,
        bias_multiplier_.gpu_data(), Dtype(1.),
        this->blobs_[2]->mutable_gpu_diff());
  }
  if (propagate_down[0]) {
    // Gradient w.r.t. bottom data
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, I_, 4*H_, Dtype(1.),
        pre_gate_diff, weight_i, Dtype(0.), bottom[0]->mutable_gpu_diff());
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmLayer);
} // namespace caffe
|
9f60b5777ce9a623b7eef16da0ab6512867248e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define T_PER_BLOCK 16
#define MINF __int_as_float(0xff800000)
// Nearest-neighbour resampling of a single-channel float image.
// One thread per output pixel (2-D launch), guarded against partial blocks.
__global__ void resampleFloat_Kernel(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const float* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
// Corner-aligned scaling. NOTE(review): divides by (output-1), so this
// assumes outputWidth/outputHeight > 1 -- confirm callers guarantee it.
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
// Round to the nearest source pixel.
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
//d_output[y*outputWidth + x] = bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, d_input, inputWidth, inputHeight);
}
}
} | 9f60b5777ce9a623b7eef16da0ab6512867248e1.cu | #include "includes.h"
#define T_PER_BLOCK 16
#define MINF __int_as_float(0xff800000)
// Nearest-neighbour resampling of a single-channel float image.
// One thread per output pixel (2-D launch), guarded against partial blocks.
__global__ void resampleFloat_Kernel(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const float* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
// Corner-aligned scaling. NOTE(review): divides by (output-1), so this
// assumes outputWidth/outputHeight > 1 -- confirm callers guarantee it.
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
// Round to the nearest source pixel.
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
//d_output[y*outputWidth + x] = bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, d_input, inputWidth, inputHeight);
}
}
} |
7a2d97bbb246ad898639192367f01f5d5d413772.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHTensor.hpp"
#include "linear_upsampling.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
// Nearest-neighbour 2-D upsampling over NCHW tensors. One thread per
// output pixel (index ranges over height2*width2); each thread copies its
// pixel for every (batch, channel) pair. Equal sizes take a straight-copy
// fast path.
template<typename Dtype, typename Acctype>
__global__ void nearest_neighbor_4d_kernel(
const int n,
const THCDeviceTensor<Dtype, 4> data1,
THCDeviceTensor<Dtype, 4> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int height1 = data1.getSize(2);
const int width1 = data1.getSize(3);
const int height2 = data2.getSize(2);
const int width2 = data2.getSize(3);
// Scale factors map output coordinates back onto the input grid.
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][h1][w1];
data2[n][c][h2][w2] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][h1][w1];
data2[n][c][h2][w2] = val;
}
}
}
}
// Backward operation
// Backward of nearest-neighbour upsampling: each output-pixel thread
// scatters its gradient back to the source pixel it was copied from.
// Several output pixels can share one source pixel, hence atomicAdd in the
// general path; the equal-size path is one-to-one and assigns directly.
template <typename Dtype, typename Acctype>
__global__ void nearest_neighbor_4d_kernel_backward(
const int n,
THCDeviceTensor<Dtype, 4> data1,
const THCDeviceTensor<Dtype, 4> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int height1 = data1.getSize(2);
const int width1 = data1.getSize(3);
const int height2 = data2.getSize(2);
const int width2 = data2.getSize(3);
// Scale factors map output coordinates back onto the input grid.
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][h2][w2];
data1[n][c][h1][w1] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][h2][w2];
atomicAdd(data1[n][c][h1][w1].data(), d2val);
}
}
}
}
#include "generic/SpatialUpSamplingNearest.cu"
#include "THHGenerateFloatTypes.h"
| 7a2d97bbb246ad898639192367f01f5d5d413772.cu | #include "THCUNN.h"
#include "common.h"
#include "THCTensor.hpp"
#include "linear_upsampling.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
// Nearest-neighbour 2-D upsampling over NCHW tensors. One thread per
// output pixel (index ranges over height2*width2); each thread copies its
// pixel for every (batch, channel) pair. Equal sizes take a straight-copy
// fast path.
template<typename Dtype, typename Acctype>
__global__ void nearest_neighbor_4d_kernel(
const int n,
const THCDeviceTensor<Dtype, 4> data1,
THCDeviceTensor<Dtype, 4> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int height1 = data1.getSize(2);
const int width1 = data1.getSize(3);
const int height2 = data2.getSize(2);
const int width2 = data2.getSize(3);
// Scale factors map output coordinates back onto the input grid.
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][h1][w1];
data2[n][c][h2][w2] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][h1][w1];
data2[n][c][h2][w2] = val;
}
}
}
}
// Backward operation
// Backward of nearest-neighbour upsampling: each output-pixel thread
// scatters its gradient back to the source pixel it was copied from.
// Several output pixels can share one source pixel, hence atomicAdd in the
// general path; the equal-size path is one-to-one and assigns directly.
template <typename Dtype, typename Acctype>
__global__ void nearest_neighbor_4d_kernel_backward(
const int n,
THCDeviceTensor<Dtype, 4> data1,
const THCDeviceTensor<Dtype, 4> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int height1 = data1.getSize(2);
const int width1 = data1.getSize(3);
const int height2 = data2.getSize(2);
const int width2 = data2.getSize(3);
// Scale factors map output coordinates back onto the input grid.
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][h2][w2];
data1[n][c][h1][w1] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][h2][w2];
atomicAdd(data1[n][c][h1][w1].data(), d2val);
}
}
}
}
#include "generic/SpatialUpSamplingNearest.cu"
#include "THCGenerateFloatTypes.h"
|
edf0f03eb86d567405275f2c347d60575a9d5c85.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes.
#include <stdio.h>
#include <iostream>
// STL.
#include <vector>
// CUDA runtime.
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA.
#include <helper_functions.h>
#include <helper_cuda.h>
typedef unsigned int uint;
using std::cout;
using std::endl;
using std::vector;
// Doubles the argument (device-side helper; not referenced elsewhere in
// this file).
__device__ float multiplyByTwo(float number)
{
    return 2.0f * number;
}
// Adds 10 to each of the first `size` elements of v.
// One thread per element; surplus threads in the last block fall through.
__global__ void someInternalKernel(float *v, uint size)
{
    const uint i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    v[i] += 10;
}
// Demo host wrapper: picks a device, fills a device buffer from a random
// host vector, launches someInternalKernel on it, and copies the result
// back. The downloaded hResultVector is not inspected (sample code).
// NOTE(review): checkCudaErrors() exits on failure rather than throwing,
// so the catch(...) below only intercepts C++ exceptions (e.g. from the
// vector allocations), not CUDA runtime errors -- confirm that is intended.
void internalFunctionLaunchingKernel(int argc, const char **argv)
{
try
{
int devID;
//hipError_t error;
// This will pick the best possible CUDA capable device.
devID = findCudaDevice(argc, (const char **)argv);
// Create host vector.
const uint kVectorSize = 1000;
vector<float> hVector(kVectorSize);
for (uint i = 0; i < kVectorSize; ++i)
{
hVector[i] = rand() / static_cast<float>(RAND_MAX);
}
// Create and populate device vector.
float *dVector;
checkCudaErrors(hipMalloc(&dVector, kVectorSize * sizeof(float)));
checkCudaErrors(hipMemcpy(dVector,
&hVector[0],
kVectorSize * sizeof(float),
hipMemcpyHostToDevice));
// Kernel configuration, where a one-dimensional
// grid and one-dimensional blocks are configured.
// A single 1024-thread block covers all 1000 elements; the kernel's
// bounds check absorbs the 24 spare threads.
const int nThreads = 1024;
const int nBlocks = 1;
dim3 dimGrid(nBlocks);
dim3 dimBlock(nThreads);
someInternalKernel << <dimGrid, dimBlock >> >
(dVector, kVectorSize);
checkCudaErrors(hipGetLastError());
// Download results.
vector<float> hResultVector(kVectorSize);
checkCudaErrors(hipMemcpy(&hResultVector[0],
dVector,
kVectorSize * sizeof(float),
hipMemcpyDeviceToHost));
// Free resources.
if (dVector) checkCudaErrors(hipFree(dVector));
}
catch (...)
{
cout << "Error occured, exiting..." << endl;
exit(EXIT_FAILURE);
}
}
| edf0f03eb86d567405275f2c347d60575a9d5c85.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes.
#include <stdio.h>
#include <iostream>
// STL.
#include <vector>
// CUDA runtime.
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA.
#include <helper_functions.h>
#include <helper_cuda.h>
typedef unsigned int uint;
using std::cout;
using std::endl;
using std::vector;
// Doubles the argument (device-side helper; not referenced elsewhere in
// this file).
__device__ float multiplyByTwo(float number)
{
    return 2.0f * number;
}
// Adds 10 to each of the first `size` elements of v.
// One thread per element; surplus threads in the last block fall through.
__global__ void someInternalKernel(float *v, uint size)
{
    const uint i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    v[i] += 10;
}
// Demo host wrapper: picks a device, fills a device buffer from a random
// host vector, launches someInternalKernel on it, and copies the result
// back. The downloaded hResultVector is not inspected (sample code).
// NOTE(review): checkCudaErrors() exits on failure rather than throwing,
// so the catch(...) below only intercepts C++ exceptions (e.g. from the
// vector allocations), not CUDA runtime errors -- confirm that is intended.
void internalFunctionLaunchingKernel(int argc, const char **argv)
{
try
{
int devID;
//cudaError_t error;
// This will pick the best possible CUDA capable device.
devID = findCudaDevice(argc, (const char **)argv);
// Create host vector.
const uint kVectorSize = 1000;
vector<float> hVector(kVectorSize);
for (uint i = 0; i < kVectorSize; ++i)
{
hVector[i] = rand() / static_cast<float>(RAND_MAX);
}
// Create and populate device vector.
float *dVector;
checkCudaErrors(cudaMalloc(&dVector, kVectorSize * sizeof(float)));
checkCudaErrors(cudaMemcpy(dVector,
&hVector[0],
kVectorSize * sizeof(float),
cudaMemcpyHostToDevice));
// Kernel configuration, where a one-dimensional
// grid and one-dimensional blocks are configured.
// A single 1024-thread block covers all 1000 elements; the kernel's
// bounds check absorbs the 24 spare threads.
const int nThreads = 1024;
const int nBlocks = 1;
dim3 dimGrid(nBlocks);
dim3 dimBlock(nThreads);
someInternalKernel << <dimGrid, dimBlock >> >
(dVector, kVectorSize);
checkCudaErrors(cudaGetLastError());
// Download results.
vector<float> hResultVector(kVectorSize);
checkCudaErrors(cudaMemcpy(&hResultVector[0],
dVector,
kVectorSize * sizeof(float),
cudaMemcpyDeviceToHost));
// Free resources.
if (dVector) checkCudaErrors(cudaFree(dVector));
}
catch (...)
{
cout << "Error occured, exiting..." << endl;
exit(EXIT_FAILURE);
}
}
|
081445bd425c20ade11e3a4e863a5f7da2500e65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
// Writes a unique marker into each element: slot (block*blockDim + thread)
// receives 1000*block + thread, which main() verifies after copy-back.
__global__ void myFirstKernel(int *d_a)
{
    const int element = blockIdx.x * blockDim.x + threadIdx.x;
    d_a[element] = blockIdx.x * 1000 + threadIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Exercise: launch an 8x8 grid/block of threads that each write a unique
// marker value, copy the results back, and assert every slot matches.
int main( int argc, char** argv)
{
// pointer for host memory
int *h_a;
// pointer for device memory
int *d_a;
// define grid and block size
int numBlocks = 8;
int numThreadsPerBlock = 8;
// Part 1 of 5: allocate host and device memory
// NOTE(review): neither malloc nor hipMalloc is checked; a failed
// allocation would only surface at the memcpy/assert below.
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
hipMalloc( (void **) &d_a, memSize );
// Part 2 of 5: launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
hipLaunchKernelGGL(( myFirstKernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_a );
// block until the device has completed
hipDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// Part 4 of 5: device to host copy
hipMemcpy( h_a, d_a, memSize, hipMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("hipMemcpy");
// Part 5 of 5: verify the data returned to the host is correct
for (int i = 0; i < numBlocks; i++)
{
for (int j = 0; j < numThreadsPerBlock; j++)
{
assert(h_a[i * numThreadsPerBlock + j] == 1000 * i + j);
}
}
// free device memory
hipFree(d_a);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
// Aborts the process with a diagnostic if the runtime has a pending error;
// `msg` labels the call site in the error output.
void checkCUDAError(const char *msg)
{
    const hipError_t err = hipGetLastError();
    if (hipSuccess == err)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
    exit(-1);
}
| 081445bd425c20ade11e3a4e863a5f7da2500e65.cu | /*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
// Writes a unique marker into each element: slot (block*blockDim + thread)
// receives 1000*block + thread, which main() verifies after copy-back.
__global__ void myFirstKernel(int *d_a)
{
    const int element = blockIdx.x * blockDim.x + threadIdx.x;
    d_a[element] = blockIdx.x * 1000 + threadIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Exercise: launch an 8x8 grid/block of threads that each write a unique
// marker value, copy the results back, and assert every slot matches.
int main( int argc, char** argv)
{
    // pointer for host memory
    int *h_a;
    // pointer for device memory
    int *d_a;
    // define grid and block size
    int numBlocks = 8;
    int numThreadsPerBlock = 8;
    // Part 1 of 5: allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc( (void **) &d_a, memSize );
    // Surface allocation failures immediately instead of at the memcpy.
    checkCUDAError("cudaMalloc");
    // Part 2 of 5: launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    myFirstKernel<<< dimGrid, dimBlock >>>( d_a );
    // block until the device has completed
    // (cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported replacement with the same semantics here)
    cudaDeviceSynchronize();
    // check if kernel execution generated an error
    checkCUDAError("kernel execution");
    // Part 4 of 5: device to host copy
    cudaMemcpy( h_a, d_a, memSize, cudaMemcpyDeviceToHost );
    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");
    // Part 5 of 5: verify the data returned to the host is correct
    for (int i = 0; i < numBlocks; i++)
    {
        for (int j = 0; j < numThreadsPerBlock; j++)
        {
            assert(h_a[i * numThreadsPerBlock + j] == 1000 * i + j);
        }
    }
    // free device memory
    cudaFree(d_a);
    // free host memory
    free(h_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");
    return 0;
}
// Report the most recent CUDA runtime error (if any) to stderr and abort.
// `msg` tags the call site so a failure can be attributed to a specific step.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err)
        return;   // nothing pending; continue normally
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(-1);
}
|
b159f2ac6636823b379462cd07405c1ce31a9cde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHNumerics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps an output index to the first input index of its (fractional) pooling
// window. `sample` in [0, 1) jitters window placement per (batch, plane);
// the final output index is pinned so its window ends at the input boundary.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
    int index, int inputSize, int outputSize, int poolSize) {
  accscalar_t step = static_cast<accscalar_t>(inputSize - poolSize) /
      static_cast<accscalar_t>(outputSize - 1);
  if (index != outputSize - 1) {
    return static_cast<int>((index + sample) * step) -
        static_cast<int>(sample * step);
  }
  // Last window: clamp so the window stays inside the input.
  return inputSize - poolSize;
}
// Forward kernel for 2-D fractional max pooling (HIP build).
// Launch layout: grid.x * block.x covers the flattened H*W output plane
// (one thread per output element), grid.y = plane (channel), grid.z = batch.
// `samples` holds per-(batch, plane) random offsets: [..][0] is used for the
// width windows, [..][1] for the height windows.
// Writes the window max into `output` and its flattened input offset
// (h * inputW + w) into `indices` for use by the backward pass.
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<scalar_t, 4> input,
PackedTensorAccessor<scalar_t, 3> samples,
int poolSizeH, int poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3)) {
int outputW = ourOutputPoint % output.size(3);
int outputH = ourOutputPoint / output.size(3);
// Window origin for this output element (fractional start positions).
int poolW = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputW, input.size(3), output.size(3), poolSizeW);
int poolH = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(2), output.size(2), poolSizeH);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int maxIndex = poolH * input.size(3) + poolW;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
// Two equivalent scans over the window row; the split on poolSizeW
// presumably steers compiler loop unrolling -- TODO confirm.
if (poolSizeW < 2 || poolSizeW > 7) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
} else {
for (int i = 0; i < poolSizeW; ++i) {
int w = i + poolW;
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
}
}
// Record the argmax offset and the max value for this output element.
indices[batch][plane][outputH][outputW] = maxIndex;
output[batch][plane][outputH][outputW] = maxVal;
}
}
// Backward kernel for 2-D fractional max pooling (HIP build).
// Same launch layout as the forward kernel: one thread per gradOutput
// element, grid.y = plane, grid.z = batch. Each thread reads the argmax
// offset stored by the forward pass and accumulates its gradient there.
// The atomic add is required because pooling windows may overlap, which also
// makes this kernel nondeterministic.
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
// Flattened input offset (h * inputW + w) recorded by the forward pass.
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
// Host-side driver for the fractional max pooling forward pass (HIP build).
// Validates shapes, resizes `output`/`indices` to the requested output size,
// views 3-D (unbatched) input as 4-D, and launches the forward kernel on the
// current stream. `randomSamples` is indexed as (batch, plane, 2) by the
// kernel, with values presumably in [0, 1) -- TODO confirm against callers.
void fractional_max_pool2d_out_cuda_template(
Tensor & output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int ndims = input.ndimension();
TORCH_CHECK(input.numel() > 0,
"fractional_max_pool2d(): expected input to have non-empty ",
"spatial dimensions.");
TORCH_CHECK((ndims == 3 || ndims == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
// For batched (4-D) input, shift the plane/height/width dims right by one.
if (ndims == 4) {
numBatch = input.size(0);
planeDim++;
dimh++;
dimw++;
}
/* sizes */
int numPlanes = input.size(planeDim);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
// Every pooling window must fit inside the input.
TORCH_CHECK(outputH + poolSizeH - 1 <= inputH,
"fractional_max_pool2d(): pool_size height ", poolSizeH,
" too large relative to input height ", inputH);
TORCH_CHECK(outputW + poolSizeW - 1 <= inputW,
"pool_size width ", poolSizeW,
" too large relative to input width ", inputW);
if (ndims == 3) {
/* resize output */
output.resize_({numPlanes, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputH, outputW});
} else {
output.resize_({numBatch, numPlanes, outputH, outputW});
indices.resize_({numBatch, numPlanes, outputH, outputW});
}
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
// Add a singleton batch dimension so the kernel always indexes 4-D tensors.
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor<scalar_t, 4>();
auto devOutput = output_.packed_accessor<scalar_t, 4>();
auto devIndices = indices_.packed_accessor<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
hipLaunchKernelGGL(( fractional_max_pool2d_out_cuda_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
}
);
// Surface launch-configuration errors from the asynchronous kernel launch.
AT_CUDA_CHECK(hipGetLastError());
}
// Host-side driver for the fractional max pooling backward pass (HIP build).
// Scatters each gradOutput value back to the input location recorded in
// `indices` during the forward pass. Nondeterministic when pooling windows
// overlap (atomic accumulation in the kernel).
void fractional_max_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices)
{
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
// For batched (4-D) input, shift the height/width dims right by one.
if (ndims == 4) {
dimh++;
dimw++;
}
/* sizes */
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
TORCH_CHECK(outputH == gradOutput.size(dimh),
"fractional_max_pool2d(): gradOutput height unexpected");
TORCH_CHECK(outputW == gradOutput.size(dimw),
"fractional_max_pool2d(): gradOutput width unexpected");
/* resize */
gradInput.resize_as_(input);
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
// Add a singleton batch dimension so the kernel always indexes 4-D tensors.
if(ndims == 3) {
gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
// BUG FIX: build the accessor from the reshaped `indices_`, not the raw
// `indices` -- for 3-D (unbatched) input `indices` is 3-D and a 4-D
// packed_accessor on it throws at runtime.
auto devIndices = indices_.packed_accessor<int64_t, 4>();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
"fractional_max_pool2d_backward_out_cuda_frame",
[&] {
auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
hipLaunchKernelGGL(( fractional_max_pool2d_backward_out_cuda_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devGradInput, devGradOutput, devIndices);
}
);
// Surface launch-configuration errors from the asynchronous kernel launch.
AT_CUDA_CHECK(hipGetLastError());
}
}// namespace
// Out-variant entry point: pools into the caller-provided `output` and
// records per-element argmax locations in `indices`, then returns both
// tensors by reference.
std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
  at::Tensor& output,
  at::Tensor& indices,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::forward_as_tuple(output, indices);
}
// Allocating entry point: creates empty result tensors (the template resizes
// them to the proper shape) and delegates to the shared implementation.
std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  auto output = at::empty({0}, input.options());
  auto indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(output, indices);
}
// Out-variant backward entry point: accumulates gradients into the
// caller-provided `gradInput` and returns it by reference.
Tensor& fractional_max_pool2d_backward_out_cuda(
  at::Tensor& gradInput,
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  // Atomic scatter-add in the kernel makes this op nondeterministic.
  globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda");
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
// Allocating backward entry point: creates the gradient tensor (resized by
// the shared implementation) and delegates to it.
Tensor fractional_max_pool2d_backward_cuda(
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  // Atomic scatter-add in the kernel makes this op nondeterministic.
  globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
  auto gradInput = at::empty({0}, input.options());
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
}// at::native
}// at
| b159f2ac6636823b379462cd07405c1ce31a9cde.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCNumerics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps an output index to the first input index of its (fractional) pooling
// window. `sample` in [0, 1) jitters window placement per (batch, plane);
// the final output index is pinned so its window ends at the input boundary.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
    int index, int inputSize, int outputSize, int poolSize) {
  accscalar_t step = static_cast<accscalar_t>(inputSize - poolSize) /
      static_cast<accscalar_t>(outputSize - 1);
  if (index != outputSize - 1) {
    return static_cast<int>((index + sample) * step) -
        static_cast<int>(sample * step);
  }
  // Last window: clamp so the window stays inside the input.
  return inputSize - poolSize;
}
// Forward kernel for 2-D fractional max pooling.
// Launch layout: grid.x * block.x covers the flattened H*W output plane
// (one thread per output element), grid.y = plane (channel), grid.z = batch.
// `samples` holds per-(batch, plane) random offsets: [..][0] is used for the
// width windows, [..][1] for the height windows.
// Writes the window max into `output` and its flattened input offset
// (h * inputW + w) into `indices` for use by the backward pass.
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<scalar_t, 4> input,
PackedTensorAccessor<scalar_t, 3> samples,
int poolSizeH, int poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3)) {
int outputW = ourOutputPoint % output.size(3);
int outputH = ourOutputPoint / output.size(3);
// Window origin for this output element (fractional start positions).
int poolW = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputW, input.size(3), output.size(3), poolSizeW);
int poolH = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(2), output.size(2), poolSizeH);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int maxIndex = poolH * input.size(3) + poolW;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
// Two equivalent scans over the window row; the split on poolSizeW
// presumably steers compiler loop unrolling -- TODO confirm.
if (poolSizeW < 2 || poolSizeW > 7) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
} else {
for (int i = 0; i < poolSizeW; ++i) {
int w = i + poolW;
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
}
}
// Record the argmax offset and the max value for this output element.
indices[batch][plane][outputH][outputW] = maxIndex;
output[batch][plane][outputH][outputW] = maxVal;
}
}
// Backward kernel for 2-D fractional max pooling.
// Same launch layout as the forward kernel: one thread per gradOutput
// element, grid.y = plane, grid.z = batch. Each thread reads the argmax
// offset stored by the forward pass and accumulates its gradient there.
// The atomic add is required because pooling windows may overlap, which also
// makes this kernel nondeterministic.
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
// Flattened input offset (h * inputW + w) recorded by the forward pass.
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
// Host-side driver for the fractional max pooling forward pass.
// Validates shapes, resizes `output`/`indices` to the requested output size,
// views 3-D (unbatched) input as 4-D, and launches the forward kernel on the
// current stream. `randomSamples` is indexed as (batch, plane, 2) by the
// kernel, with values presumably in [0, 1) -- TODO confirm against callers.
void fractional_max_pool2d_out_cuda_template(
Tensor & output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int ndims = input.ndimension();
TORCH_CHECK(input.numel() > 0,
"fractional_max_pool2d(): expected input to have non-empty ",
"spatial dimensions.");
TORCH_CHECK((ndims == 3 || ndims == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
// For batched (4-D) input, shift the plane/height/width dims right by one.
if (ndims == 4) {
numBatch = input.size(0);
planeDim++;
dimh++;
dimw++;
}
/* sizes */
int numPlanes = input.size(planeDim);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
// Every pooling window must fit inside the input.
TORCH_CHECK(outputH + poolSizeH - 1 <= inputH,
"fractional_max_pool2d(): pool_size height ", poolSizeH,
" too large relative to input height ", inputH);
TORCH_CHECK(outputW + poolSizeW - 1 <= inputW,
"pool_size width ", poolSizeW,
" too large relative to input width ", inputW);
if (ndims == 3) {
/* resize output */
output.resize_({numPlanes, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputH, outputW});
} else {
output.resize_({numBatch, numPlanes, outputH, outputW});
indices.resize_({numBatch, numPlanes, outputH, outputW});
}
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
// Add a singleton batch dimension so the kernel always indexes 4-D tensors.
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor<scalar_t, 4>();
auto devOutput = output_.packed_accessor<scalar_t, 4>();
auto devIndices = indices_.packed_accessor<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
fractional_max_pool2d_out_cuda_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
}
);
// Surface launch-configuration errors from the asynchronous kernel launch.
AT_CUDA_CHECK(cudaGetLastError());
}
// Host-side driver for the fractional max pooling backward pass.
// Scatters each gradOutput value back to the input location recorded in
// `indices` during the forward pass. Nondeterministic when pooling windows
// overlap (atomic accumulation in the kernel).
void fractional_max_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices)
{
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
// For batched (4-D) input, shift the height/width dims right by one.
if (ndims == 4) {
dimh++;
dimw++;
}
/* sizes */
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
TORCH_CHECK(outputH == gradOutput.size(dimh),
"fractional_max_pool2d(): gradOutput height unexpected");
TORCH_CHECK(outputW == gradOutput.size(dimw),
"fractional_max_pool2d(): gradOutput width unexpected");
/* resize */
gradInput.resize_as_(input);
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
// Add a singleton batch dimension so the kernel always indexes 4-D tensors.
if(ndims == 3) {
gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
// BUG FIX: build the accessor from the reshaped `indices_`, not the raw
// `indices` -- for 3-D (unbatched) input `indices` is 3-D and a 4-D
// packed_accessor on it throws at runtime.
auto devIndices = indices_.packed_accessor<int64_t, 4>();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
"fractional_max_pool2d_backward_out_cuda_frame",
[&] {
auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
fractional_max_pool2d_backward_out_cuda_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
devGradInput, devGradOutput, devIndices);
}
);
// Surface launch-configuration errors from the asynchronous kernel launch.
AT_CUDA_CHECK(cudaGetLastError());
}
}// namespace
// Out-variant entry point: pools into the caller-provided `output` and
// records per-element argmax locations in `indices`, then returns both
// tensors by reference.
std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
  at::Tensor& output,
  at::Tensor& indices,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::forward_as_tuple(output, indices);
}
// Allocating entry point: creates empty result tensors (the template resizes
// them to the proper shape) and delegates to the shared implementation.
std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  auto output = at::empty({0}, input.options());
  auto indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(output, indices);
}
// Out-variant backward entry point: accumulates gradients into the
// caller-provided `gradInput` and returns it by reference.
Tensor& fractional_max_pool2d_backward_out_cuda(
  at::Tensor& gradInput,
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  // Atomic scatter-add in the kernel makes this op nondeterministic.
  globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda");
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
// Allocating backward entry point: creates the gradient tensor (resized by
// the shared implementation) and delegates to it.
Tensor fractional_max_pool2d_backward_cuda(
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  // Atomic scatter-add in the kernel makes this op nondeterministic.
  globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
  auto gradInput = at::empty({0}, input.options());
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
}// at
|
b23530afe2ee31e6f3cea4bcc6605ff890ea4851.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <fstream>
#include <rocblas.h>
#include "cublas_batch_gemm.cuh"
//#define DOM_DEBUG
using namespace std;
// Print an I x J matrix stored column-major, one matrix row per output line.
void print_mat(int I, int J, double *mat_ptr) // number of rows, columns, pointer to the column-major buffer
{
    for (int i = 0; i < I; i++) {
        for (int j = 0; j < J; j++)
            printf("%lf ", mat_ptr[j * I + i]);   // element (i, j) of the column-major buffer
        printf("\n");
    }
}
// Print an I x J x K tensor as K column-major I x J slices, each labelled
// with its slice number.
void print_ten(int I, int J, int K, double *ten_ptr) // tensor
{
    for (int k = 0; k < K; k++) {
        printf("Slice %d:\n", k);
        for (int i = 0; i < I; i++) {
            for (int j = 0; j < J; j++)
                printf("%lf ", ten_ptr[k * I * J + j * I + i]);
            printf("\n");
        }
    }
}
// TIME_ME(NAME, CODE): timing harness used by main().
// Allocates fresh host/device buffers A (MxK), B (KxNxP), C (MxNxP), fills A
// and B (sequential values in debug mode, random otherwise), uploads them,
// then runs CODE 10 times bracketed by HIP events and stores the mean elapsed
// time in milliseconds in time_<NAME>. Relies on main()'s locals being in
// scope: M, N, P, K, sd, i, dom_debug, the start/stop events and the
// h_*/d_* pointers; all buffers are freed before the macro ends.
// NOTE(review): return codes of malloc/hipMalloc/hipblas calls are unchecked.
// (Comments cannot be placed on the continuation lines below without
// breaking the backslash line splices.)
#define TIME_ME(NAME,CODE) \
h_A = (double*)malloc(M*K*sd); h_B = (double*)malloc(K*N*P*sd); h_C = (double*)malloc(M*N*P*sd); \
hipMalloc((void **)&d_A, M*K*sd); hipMalloc((void **)&d_B, K*N*P*sd); hipMalloc((void **)&d_C, M*N*P*sd); \
for(i=0; i<M*K; ++i) if(dom_debug) h_A[i] = i+1; else h_A[i] = rand()/(double)RAND_MAX; \
for(i=0; i<K*N*P; ++i) if(dom_debug) h_B[i] = i+1; else h_B[i] = rand()/(double)RAND_MAX; \
for(i=0; i<M*N*P; ++i) h_C[i] = 0; \
hipblasSetVector(M*K,sd,h_A,1,d_A,1); hipblasSetVector(K*N*P,sd,h_B,1,d_B,1); hipblasSetVector(M*N*P,sd,h_C,1,d_C,1); \
hipEventCreate(&start); \
hipEventCreate(&stop); \
hipEventRecord(start); \
for(i=0;i<10;i++) \
CODE; \
hipEventRecord(stop); \
hipEventSynchronize(stop); \
hipEventElapsedTime(&time_##NAME, start, stop);time_##NAME /= 10; \
hipEventDestroy(start); \
hipEventDestroy(stop); \
if(dom_debug) {hipblasGetVector(M*N*P, sd, d_C, 1, h_C, 1); printf("C:\n"); print_ten(M,N,P,h_C);} \
hipFree(d_A); \
hipFree(d_B); \
hipFree(d_C); \
free(h_A); \
free(h_B); \
free(h_C);
// Benchmark driver: times many batched/flattened Dgemm variants (and one
// explicit-transpose Dgeam) over square problem sizes 5..55, appending mean
// per-call times and derived ratios to gpu_timing_cudaevent_test2.txt.
// In DOM_DEBUG builds a single tiny problem is run and results are printed.
int main(int argc, char **argv)
{
int M, N, P, K, t_size=2, i, dom_debug=1;
size_t sd = sizeof(double); // size of datatype used in gemm calls - single or double precision
double alpha = 1.0f, beta = 0.0f, *h_A, *h_B, *h_C, *d_A, *d_B, *d_C, *d_tmp_B;
float time_11_b_p=0, time_11_b_n=0, time_11_f=0, time_12_b_p=0, time_12_b_n=0, time_21_b_p=0, time_21_b_n=0, time_21_f=0, time_22_b_p=0, time_22_b_n=0, time_13_b_p=0, time_13_geam=0, time_15_b_p=0, time_15_f=0, time_51_b_n=0, time_51_f=0, time_61_b_n=0, time_61_f=0;
// struct timeval tv1, tv2;
ofstream fout("gpu_timing_cudaevent_test2.txt",ios::out|ios::app);
fout << "t_size" << '\t' << "time_11_b_p" << '\t' << "time_11_b_n" << '\t' << "time_11_f" << '\t' << "time_12_b_p" << '\t' << "time_12_b_n" << '\t' << "time_21_b_p" << '\t' << "time_21_b_n" << '\t' << "time_21_f" << '\t' << "time_22_b_p" << '\t' << "time_22_b_n" << '\t' << "time_13_b_p" << '\t' << "time_13_geam" << '\t' << "time_15_b_p" << '\t' << "time_15_f" << '\t' << "time_51_b_n" << '\t' << "time_51_f" << '\t' << "time_61_b_n" << '\t' << "time_61_f" << '\t';
fout << "time_11_pbyn" << '\t' << "time_11_nbyp" << '\t' << "time_12_pbyn" << '\t' << "time_12_nbyp" << '\t' << "time_21_pbyn" << '\t' << "time_21_nbyp" << '\t' << "time_22_pbyn" << '\t' << "time_22_nbyp" << '\t' << "time_11_fbyn" << '\t' << "time_11_nbyf" << '\t' << "time_11_fbyp" << '\t' << "time_11_pbyf" << '\t' << "time_21_fbyn" << '\t' << "time_21_nbyf" << '\t' << "time_21_fbyp" << '\t' << "time_21_pbyf" << '\t' << "time_15_fbyp" << '\t' << "time_15_pbyf" << '\t' << "time_51_fbyn" << '\t' << "time_51_nbyf" << '\t' << "time_61_fbyn" << '\t' << "time_61_nbyf" << '\t' << "time_13_trans_f" << '\t' << "time_13_trans_p" << '\t' << "time_13_trans_n" << '\n'; // time of fancy derivatives
hipblasHandle_t handle; hipblasCreate(&handle);
#ifndef DOM_DEBUG
dom_debug = 0;
for(t_size=5; t_size<56; t_size++)
#endif
{
if(dom_debug){M=3; N=4; P=5; K=2;} else{M=t_size; N=t_size; P=t_size; K=t_size;}
printf("M=%d, N=%d, P=%d, K=%d\n", M, N, P, K);
// BUG FIX: allocate the scratch operand used by the 13_geam timing below;
// d_tmp_B was previously passed to hipblasDgeam uninitialized.
hipMalloc((void **)&d_tmp_B, K*N*sd);
if(dom_debug) printf("case 1.1 batch in p\n");
hipEvent_t start, stop;
TIME_ME(11_b_p,
hipblasDgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
M, N, K, &alpha,
d_A, M, 0,
d_B, K, K*N, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 1.1 batch in n\n");
TIME_ME(11_b_n,
hipblasDgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
M, P, K, &alpha,
d_A, M, 0,
d_B, K*N, K, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 1.1 flat\n");
TIME_ME(11_f,
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
M, N*P, K, &alpha,
d_A, M,
d_B, K, &beta,
d_C, M)
);
if(dom_debug) printf("case 1.2 batch in p\n");
TIME_ME(12_b_p,
hipblasDgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
M, N, K, &alpha,
d_A, M, 0,
d_B, K*P, K, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 1.2 batch in n\n");
TIME_ME(12_b_n,
hipblasDgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
M, P, K, &alpha,
d_A, M, 0,
d_B, K, K*P, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 2.1 batch in p\n");
TIME_ME(21_b_p,
hipblasDgemmBatched(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
M, N, K, &alpha,
d_A, K, 0,
d_B, K, K*N, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 2.1 batch in n\n");
TIME_ME(21_b_n,
hipblasDgemmBatched(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
M, P, K, &alpha,
d_A, K, 0,
d_B, K*N, K, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 2.1 flat\n");
TIME_ME(21_f,
hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
M, N*P, K, &alpha,
d_A, K,
d_B, K, &beta,
d_C, M)
);
if(dom_debug) printf("case 2.2 batch in p\n");
TIME_ME(22_b_p,
hipblasDgemmBatched(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
M, N, K, &alpha,
d_A, K, 0,
d_B, K*P, K, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 2.2 batch in n\n");
TIME_ME(22_b_n,
hipblasDgemmBatched(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
M, P, K, &alpha,
d_A, K, 0,
d_B, K, K*P, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 1.3 batch in p\n");
TIME_ME(13_b_p,
hipblasDgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
M, N, K, &alpha,
d_A, M, 0,
d_B, N, N*K, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) {hipblasGetVector(M*N*P, sd, d_C, 1, h_C, 1); printf("C:\n"); print_ten(M,N,P,h_C);}
// NOTE(review): this geam transposes each slice of d_B in place (C aliases
// the transposed A operand), which cuBLAS documents as unsupported;
// acceptable only as a rough timing proxy -- confirm before reusing results.
TIME_ME(13_geam,
for(int p=0; p<P; p++) // timing explicit transpose: btas-like
hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, K, N, &alpha, d_B+(N*K*p), N, &beta, d_tmp_B, K, d_B+(N*K*p), K)
);
if(dom_debug) printf("case 1.5 batch in p\n");
TIME_ME(15_b_p,
hipblasDgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
M, N, K, &alpha,
d_A, M, 0,
d_B, N*P, N, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 1.5 flat\n");
TIME_ME(15_f,
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
M, N*P, K, &alpha,
d_A, M,
d_B, N*P, &beta,
d_C, M)
);
if(dom_debug) printf("case 5.1 batch in n\n");
TIME_ME(51_b_n,
hipblasDgemmBatched(handle, HIPBLAS_OP_T, HIPBLAS_OP_T,
M, P, K, &alpha,
d_B, K, K*M,
d_A, P, 0, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 5.1 flat\n");
TIME_ME(51_f,
hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T,
M*N, P, K, &alpha,
d_B, K,
d_A, P, &beta,
d_C, M*N)
);
if(dom_debug) printf("case 6.1 batch in n\n");
TIME_ME(61_b_n,
hipblasDgemmBatched(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
M, P, K, &alpha,
d_B, K, K*M,
d_A, K, 0, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 6.1 flat\n");
TIME_ME(61_f,
hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
M*N, P, K, &alpha,
d_B, K,
d_A, K, &beta,
d_C, M*N)
);
hipFree(d_tmp_B);   // release the per-size geam scratch buffer
fout << t_size << '\t' << time_11_b_p << '\t' << time_11_b_n << '\t' << time_11_f << '\t' << time_12_b_p << '\t' << time_12_b_n << '\t' << time_21_b_p << '\t' << time_21_b_n << '\t' << time_21_f << '\t' << time_22_b_p << '\t' << time_22_b_n << '\t' << time_13_b_p << '\t' << time_13_geam << '\t' << time_15_b_p << '\t' << time_15_f << '\t' << time_51_b_n << '\t' << time_51_f << '\t' << time_61_b_n << '\t' << time_61_f << '\t';
fout << time_11_b_p/time_11_b_n << '\t' << time_11_b_n/time_11_b_p << '\t' << time_12_b_p/time_12_b_n << '\t' << time_12_b_n/time_12_b_p << '\t' << time_21_b_p/time_21_b_n << '\t' << time_21_b_n/time_21_b_p << '\t' << time_22_b_p/time_22_b_n << '\t' << time_22_b_n/time_22_b_p << '\t' << time_11_f/time_11_b_n << '\t' << time_11_b_n/time_11_f << '\t' << time_11_f/time_11_b_p << '\t' << time_11_b_p/time_11_f << '\t' << time_21_f/time_21_b_n << '\t' << time_21_b_n/time_21_f << '\t' << time_21_f/time_21_b_p << '\t' << time_21_b_p/time_21_f << '\t' << time_15_f/time_15_b_p << '\t' << time_15_b_p/time_15_f << '\t' << time_51_f/time_51_b_n << '\t' << time_51_b_n/time_51_f << '\t' << time_61_f/time_61_b_n << '\t' << time_61_b_n/time_61_f << '\t' << time_13_geam+time_11_f << '\t' << time_13_geam+time_11_b_p << '\t' << time_13_geam+time_11_b_n << '\n'; // time of fancy derivatives
}
hipblasDestroy(handle);
fout.close();
return(0);
}
/* double error_norm = 0, ref_norm = 0; // check result against reference
for(i=0; i < M*N*P; ++i)
{
double diff = h_C[i]-h_Ctrue[i];
error_norm += diff*diff;
ref_norm += h_Cg[i]*h_Cb[i];
//printf("%d: %f \t %f\n", i, h_Cg[i], h_Cb[i]);
}
error_norm = sqrt(error_norm);
ref_norm = sqrt(ref_norm);
if(fabs(ref_norm) < 1e-7) printf("\n*** error ***\n");
if(error_norm / ref_norm < 1e-6f) printf("\npassed comparison\n");
else printf("\nfailed comparison\n");*/
| b23530afe2ee31e6f3cea4bcc6605ff890ea4851.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <fstream>
#include <cublas_v2.h>
#include "cublas_batch_gemm.cuh"
//#define DOM_DEBUG
using namespace std;
// Print an I x J matrix stored column-major, one matrix row per output line.
void print_mat(int I, int J, double *mat_ptr) // number of rows, columns, pointer to the column-major buffer
{
    for (int i = 0; i < I; i++) {
        for (int j = 0; j < J; j++)
            printf("%lf ", mat_ptr[j * I + i]);   // element (i, j) of the column-major buffer
        printf("\n");
    }
}
// Print an I x J x K tensor as K column-major I x J slices, each labelled
// with its slice number.
void print_ten(int I, int J, int K, double *ten_ptr) // tensor
{
    for (int k = 0; k < K; k++) {
        printf("Slice %d:\n", k);
        for (int i = 0; i < I; i++) {
            for (int j = 0; j < J; j++)
                printf("%lf ", ten_ptr[k * I * J + j * I + i]);
            printf("\n");
        }
    }
}
// TIME_ME(NAME, CODE): timing harness used by main().
// Allocates fresh host/device buffers A (MxK), B (KxNxP), C (MxNxP), fills A
// and B (sequential values in debug mode, random otherwise), uploads them,
// then runs CODE 10 times bracketed by CUDA events and stores the mean
// elapsed time in milliseconds in time_<NAME>. Relies on main()'s locals
// being in scope: M, N, P, K, sd, i, dom_debug, the start/stop events and
// the h_*/d_* pointers; all buffers are freed before the macro ends.
// NOTE(review): return codes of malloc/cudaMalloc/cublas calls are unchecked.
// (Comments cannot be placed on the continuation lines below without
// breaking the backslash line splices.)
#define TIME_ME(NAME,CODE) \
h_A = (double*)malloc(M*K*sd); h_B = (double*)malloc(K*N*P*sd); h_C = (double*)malloc(M*N*P*sd); \
cudaMalloc((void **)&d_A, M*K*sd); cudaMalloc((void **)&d_B, K*N*P*sd); cudaMalloc((void **)&d_C, M*N*P*sd); \
for(i=0; i<M*K; ++i) if(dom_debug) h_A[i] = i+1; else h_A[i] = rand()/(double)RAND_MAX; \
for(i=0; i<K*N*P; ++i) if(dom_debug) h_B[i] = i+1; else h_B[i] = rand()/(double)RAND_MAX; \
for(i=0; i<M*N*P; ++i) h_C[i] = 0; \
cublasSetVector(M*K,sd,h_A,1,d_A,1); cublasSetVector(K*N*P,sd,h_B,1,d_B,1); cublasSetVector(M*N*P,sd,h_C,1,d_C,1); \
cudaEventCreate(&start); \
cudaEventCreate(&stop); \
cudaEventRecord(start); \
for(i=0;i<10;i++) \
CODE; \
cudaEventRecord(stop); \
cudaEventSynchronize(stop); \
cudaEventElapsedTime(&time_##NAME, start, stop);time_##NAME /= 10; \
cudaEventDestroy(start); \
cudaEventDestroy(stop); \
if(dom_debug) {cublasGetVector(M*N*P, sd, d_C, 1, h_C, 1); printf("C:\n"); print_ten(M,N,P,h_C);} \
cudaFree(d_A); \
cudaFree(d_B); \
cudaFree(d_C); \
free(h_A); \
free(h_B); \
free(h_C);
/*
 * Benchmark driver: sweeps square problem sizes t_size = 5..55 (or a single
 * tiny fixed size when DOM_DEBUG is defined) and times many batched / flat
 * DGEMM contraction variants via the TIME_ME macro, appending the averaged
 * timings plus derived ratios to gpu_timing_cudaevent_test2.txt.
 *
 * NOTE(review): d_tmp_B is declared below but never cudaMalloc'd anywhere in
 * this function, yet it is used as the output of cublasDgeam in the 13_geam
 * trial — unless cublas_batch_gemm.cuh allocates it, that call writes through
 * an uninitialized device pointer.  Confirm against the included header.
 * NOTE(review): the cublasDgemmBatched calls here pass (pointer, ld, stride)
 * triples, which matches cublasDgemmStridedBatched rather than the stock
 * cuBLAS v2 cublasDgemmBatched (which takes arrays of pointers) — presumably
 * an overload supplied by cublas_batch_gemm.cuh; verify.
 */
int main(int argc, char **argv)
{
int M, N, P, K, t_size=2, i, dom_debug=1;
size_t sd = sizeof(double); // size of datatype used in gemm calls - single or double precision
double alpha = 1.0f, beta = 0.0f, *h_A, *h_B, *h_C, *d_A, *d_B, *d_C, *d_tmp_B;
// one averaged timing slot per contraction variant, filled by TIME_ME
float time_11_b_p=0, time_11_b_n=0, time_11_f=0, time_12_b_p=0, time_12_b_n=0, time_21_b_p=0, time_21_b_n=0, time_21_f=0, time_22_b_p=0, time_22_b_n=0, time_13_b_p=0, time_13_geam=0, time_15_b_p=0, time_15_f=0, time_51_b_n=0, time_51_f=0, time_61_b_n=0, time_61_f=0;
// struct timeval tv1, tv2;
// results are appended (ios::app), so reruns accumulate in the same file
ofstream fout("gpu_timing_cudaevent_test2.txt",ios::out|ios::app);
fout << "t_size" << '\t' << "time_11_b_p" << '\t' << "time_11_b_n" << '\t' << "time_11_f" << '\t' << "time_12_b_p" << '\t' << "time_12_b_n" << '\t' << "time_21_b_p" << '\t' << "time_21_b_n" << '\t' << "time_21_f" << '\t' << "time_22_b_p" << '\t' << "time_22_b_n" << '\t' << "time_13_b_p" << '\t' << "time_13_geam" << '\t' << "time_15_b_p" << '\t' << "time_15_f" << '\t' << "time_51_b_n" << '\t' << "time_51_f" << '\t' << "time_61_b_n" << '\t' << "time_61_f" << '\t';
fout << "time_11_pbyn" << '\t' << "time_11_nbyp" << '\t' << "time_12_pbyn" << '\t' << "time_12_nbyp" << '\t' << "time_21_pbyn" << '\t' << "time_21_nbyp" << '\t' << "time_22_pbyn" << '\t' << "time_22_nbyp" << '\t' << "time_11_fbyn" << '\t' << "time_11_nbyf" << '\t' << "time_11_fbyp" << '\t' << "time_11_pbyf" << '\t' << "time_21_fbyn" << '\t' << "time_21_nbyf" << '\t' << "time_21_fbyp" << '\t' << "time_21_pbyf" << '\t' << "time_15_fbyp" << '\t' << "time_15_pbyf" << '\t' << "time_51_fbyn" << '\t' << "time_51_nbyf" << '\t' << "time_61_fbyn" << '\t' << "time_61_nbyf" << '\t' << "time_13_trans_f" << '\t' << "time_13_trans_p" << '\t' << "time_13_trans_n" << '\n'; // time of fancy derivatives
cublasHandle_t handle; cublasCreate(&handle);
#ifndef DOM_DEBUG
dom_debug = 0;
for(t_size=5; t_size<56; t_size++)
#endif
{
if(dom_debug){M=3; N=4; P=5; K=2;} else{M=t_size; N=t_size; P=t_size; K=t_size;}
printf("M=%d, N=%d, P=%d, K=%d\n", M, N, P, K);
if(dom_debug) printf("case 1.1 batch in p\n");
cudaEvent_t start, stop;
TIME_ME(11_b_p,
cublasDgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
M, N, K, &alpha,
d_A, M, 0,
d_B, K, K*N, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 1.1 batch in n\n");
TIME_ME(11_b_n,
cublasDgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
M, P, K, &alpha,
d_A, M, 0,
d_B, K*N, K, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 1.1 flat\n");
TIME_ME(11_f,
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
M, N*P, K, &alpha,
d_A, M,
d_B, K, &beta,
d_C, M)
);
if(dom_debug) printf("case 1.2 batch in p\n");
TIME_ME(12_b_p,
cublasDgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
M, N, K, &alpha,
d_A, M, 0,
d_B, K*P, K, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 1.2 batch in n\n");
TIME_ME(12_b_n,
cublasDgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
M, P, K, &alpha,
d_A, M, 0,
d_B, K, K*P, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 2.1 batch in p\n");
TIME_ME(21_b_p,
cublasDgemmBatched(handle, CUBLAS_OP_T, CUBLAS_OP_N,
M, N, K, &alpha,
d_A, K, 0,
d_B, K, K*N, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 2.1 batch in n\n");
TIME_ME(21_b_n,
cublasDgemmBatched(handle, CUBLAS_OP_T, CUBLAS_OP_N,
M, P, K, &alpha,
d_A, K, 0,
d_B, K*N, K, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 2.1 flat\n");
TIME_ME(21_f,
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
M, N*P, K, &alpha,
d_A, K,
d_B, K, &beta,
d_C, M)
);
if(dom_debug) printf("case 2.2 batch in p\n");
TIME_ME(22_b_p,
cublasDgemmBatched(handle, CUBLAS_OP_T, CUBLAS_OP_N,
M, N, K, &alpha,
d_A, K, 0,
d_B, K*P, K, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 2.2 batch in n\n");
TIME_ME(22_b_n,
cublasDgemmBatched(handle, CUBLAS_OP_T, CUBLAS_OP_N,
M, P, K, &alpha,
d_A, K, 0,
d_B, K, K*P, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 1.3 batch in p\n");
TIME_ME(13_b_p,
cublasDgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_T,
M, N, K, &alpha,
d_A, M, 0,
d_B, N, N*K, &beta,
d_C, M, M*N,
P)
);
// NOTE(review): the TIME_ME expansion above already freed h_C and d_C; this
// debug dump reads both after free.  Harmless when dom_debug is 0 (the
// sweep), but a use-after-free in DOM_DEBUG builds — confirm and fix.
if(dom_debug) {cublasGetVector(M*N*P, sd, d_C, 1, h_C, 1); printf("C:\n"); print_ten(M,N,P,h_C);}
TIME_ME(13_geam,
for(int p=0; p<P; p++) // timing explicit transpose: btas-like
cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, K, N, &alpha, d_B+(N*K*p), N, &beta, d_tmp_B, K, d_B+(N*K*p), K)
);
if(dom_debug) printf("case 1.5 batch in p\n");
TIME_ME(15_b_p,
cublasDgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_T,
M, N, K, &alpha,
d_A, M, 0,
d_B, N*P, N, &beta,
d_C, M, M*N,
P)
);
if(dom_debug) printf("case 1.5 flat\n");
TIME_ME(15_f,
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T,
M, N*P, K, &alpha,
d_A, M,
d_B, N*P, &beta,
d_C, M)
);
if(dom_debug) printf("case 5.1 batch in n\n");
TIME_ME(51_b_n,
cublasDgemmBatched(handle, CUBLAS_OP_T, CUBLAS_OP_T,
M, P, K, &alpha,
d_B, K, K*M,
d_A, P, 0, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 5.1 flat\n");
TIME_ME(51_f,
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T,
M*N, P, K, &alpha,
d_B, K,
d_A, P, &beta,
d_C, M*N)
);
if(dom_debug) printf("case 6.1 batch in n\n");
TIME_ME(61_b_n,
cublasDgemmBatched(handle, CUBLAS_OP_T, CUBLAS_OP_N,
M, P, K, &alpha,
d_B, K, K*M,
d_A, K, 0, &beta,
d_C, M*N, M,
N)
);
if(dom_debug) printf("case 6.1 flat\n");
TIME_ME(61_f,
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
M*N, P, K, &alpha,
d_B, K,
d_A, K, &beta,
d_C, M*N)
);
// raw timings, then pairwise ratios / composed-transpose derived times
fout << t_size << '\t' << time_11_b_p << '\t' << time_11_b_n << '\t' << time_11_f << '\t' << time_12_b_p << '\t' << time_12_b_n << '\t' << time_21_b_p << '\t' << time_21_b_n << '\t' << time_21_f << '\t' << time_22_b_p << '\t' << time_22_b_n << '\t' << time_13_b_p << '\t' << time_13_geam << '\t' << time_15_b_p << '\t' << time_15_f << '\t' << time_51_b_n << '\t' << time_51_f << '\t' << time_61_b_n << '\t' << time_61_f << '\t';
fout << time_11_b_p/time_11_b_n << '\t' << time_11_b_n/time_11_b_p << '\t' << time_12_b_p/time_12_b_n << '\t' << time_12_b_n/time_12_b_p << '\t' << time_21_b_p/time_21_b_n << '\t' << time_21_b_n/time_21_b_p << '\t' << time_22_b_p/time_22_b_n << '\t' << time_22_b_n/time_22_b_p << '\t' << time_11_f/time_11_b_n << '\t' << time_11_b_n/time_11_f << '\t' << time_11_f/time_11_b_p << '\t' << time_11_b_p/time_11_f << '\t' << time_21_f/time_21_b_n << '\t' << time_21_b_n/time_21_f << '\t' << time_21_f/time_21_b_p << '\t' << time_21_b_p/time_21_f << '\t' << time_15_f/time_15_b_p << '\t' << time_15_b_p/time_15_f << '\t' << time_51_f/time_51_b_n << '\t' << time_51_b_n/time_51_f << '\t' << time_61_f/time_61_b_n << '\t' << time_61_b_n/time_61_f << '\t' << time_13_geam+time_11_f << '\t' << time_13_geam+time_11_b_p << '\t' << time_13_geam+time_11_b_n << '\n'; // time of fancy derivatives
}
cublasDestroy(handle);
fout.close();
return(0);
}
/* double error_norm = 0, ref_norm = 0; // check result against reference
for(i=0; i < M*N*P; ++i)
{
double diff = h_C[i]-h_Ctrue[i];
error_norm += diff*diff;
ref_norm += h_Cg[i]*h_Cb[i];
//printf("%d: %f \t %f\n", i, h_Cg[i], h_Cb[i]);
}
error_norm = sqrt(error_norm);
ref_norm = sqrt(ref_norm);
if(fabs(ref_norm) < 1e-7) printf("\n*** error ***\n");
if(error_norm / ref_norm < 1e-6f) printf("\npassed comparison\n");
else printf("\nfailed comparison\n");*/
|
add81286cada87096a4f83bdf092bf650b6fece7.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file proj_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/proj/proj_enactor.cuh>
#include <gunrock/app/proj/proj_test.cuh>
namespace gunrock {
namespace app {
namespace proj {
// Register all CLI parameters used by the proj app: the generic app-level
// flags plus the problem- and enactor-specific ones.  GUARD_CU (defined
// elsewhere in Gunrock) presumably checks each call's status and returns
// early on failure — confirm against gunrock's util headers.
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
return retval;
}
/**
* @brief Run proj tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::ValueT *ref_projections,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
bool quick = parameters.Get<bool>("quick");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("proj", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// Host buffer for the extracted result: nodes x nodes values.
// NOTE(review): GUARD_CU returns early on error below, which would leak this
// allocation — confirm GUARD_CU semantics; consider RAII if so.
ValueT *h_projections = new ValueT[graph.nodes * graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// Each run: reset state, enact, record per-run timing, optionally validate.
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target))
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_projections));
// num_errors is computed but not otherwise used here
SizeT num_errors =
Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
}
cpu_timer.Start();
if (validation == "last") {
GUARD_CU(problem.Extract(h_projections));
SizeT num_errors = Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
// compute running statistics
// TODO: change NULL to problem specific per-vertex visited marker, e.g.
// h_distances info.ComputeTraversalStats(enactor, (VertexT*)NULL);
// //Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// //Display_Performance_Profiling(enactor);
// #endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_projections;
h_projections = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace proj
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Excution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of veritces in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Template(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.CsrT::edge_values
// .SetPointer(edge_values, num_edges, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// // Leave this at the end of the file
// // Local Variables:
// // mode:c++
// // c-file-style: "NVIDIA"
// // End:
| add81286cada87096a4f83bdf092bf650b6fece7.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file proj_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/proj/proj_enactor.cuh>
#include <gunrock/app/proj/proj_test.cuh>
namespace gunrock {
namespace app {
namespace proj {
// Register all CLI parameters used by the proj app: the generic app-level
// flags plus the problem- and enactor-specific ones.  GUARD_CU (defined
// elsewhere in Gunrock) presumably checks each call's status and returns
// early on failure — confirm against gunrock's util headers.
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
return retval;
}
/**
* @brief Run proj tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::ValueT *ref_projections,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
bool quick = parameters.Get<bool>("quick");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("proj", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// Host buffer for the extracted result: nodes x nodes values.
// NOTE(review): GUARD_CU returns early on error below, which would leak this
// allocation — confirm GUARD_CU semantics; consider RAII if so.
ValueT *h_projections = new ValueT[graph.nodes * graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// Each run: reset state, enact, record per-run timing, optionally validate.
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_projections));
// num_errors is computed but not otherwise used here
SizeT num_errors =
Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
}
cpu_timer.Start();
if (validation == "last") {
GUARD_CU(problem.Extract(h_projections));
SizeT num_errors = Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
// compute running statistics
// TODO: change NULL to problem specific per-vertex visited marker, e.g.
// h_distances info.ComputeTraversalStats(enactor, (VertexT*)NULL);
// //Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// //Display_Performance_Profiling(enactor);
// #endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_projections;
h_projections = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace proj
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Excution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of veritces in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Template(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.CsrT::edge_values
// .SetPointer(edge_values, num_edges, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// // Leave this at the end of the file
// // Local Variables:
// // mode:c++
// // c-file-style: "NVIDIA"
// // End:
|
9c33e5c3fd20dfc02ebfa73dda28b295d9b3ebe6.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "pointcloud.h"
#include <hip/hip_runtime.h>
#define blockSize 128
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define ORANGE glm::vec3(1.0f, 0.5f, 0.1f)
#define GREEN glm::vec3(0.f, 0.9f, 0.2f)
// 32-bit integer mix (add/xor/shift avalanche); used below to derive a
// deterministic per-point RNG seed.  Bit-for-bit identical to the original
// operation sequence.
__host__ __device__ unsigned int hash(unsigned int a) {
    unsigned int x = a;
    x = (x + 0x7ed55d16) + (x << 12);
    x = (x ^ 0xc761c23c) ^ (x >> 19);
    x = (x + 0x165667b1) + (x << 5);
    x = (x + 0xd3a2646c) ^ (x << 9);
    x = (x + 0xfd7046c5) + (x << 3);
    x = (x ^ 0xb55a4f09) ^ (x >> 16);
    return x;
}
// Deterministic per-point jitter: seed a thrust RNG from hash(index) and draw
// three uniform floats in [0, 0.1).
// NOTE(review): the evaluation order of the three unitDistrib(rng) calls
// inside the glm::vec3 constructor is unspecified (pre-C++17), so which draw
// lands on which component is compiler-dependent — confirm this doesn't
// matter to callers before relying on exact components.
__host__ __device__ glm::vec3 generateRandomVec3(int index) {
thrust::default_random_engine rng(hash((int)(index)));
thrust::uniform_real_distribution<float> unitDistrib(0.f, 0.1f);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
 * In-place rigid transform of N points: rotate each point by rotationMat
 * (applied as a homogeneous 4x4 to (pos, 1)), then translate by t.
 * One thread per point; out-of-range threads exit early.
 */
__global__ void kernRotTrans(glm::vec3* pos, glm::mat4 rotationMat, glm::vec3 t, int N) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i >= N) {
        return;
    }
    pos[i] = glm::vec3(rotationMat * glm::vec4(pos[i], 1.0f)) + t;
}
/**
 * Fill the first N entries of rgb with a single solid color.
 * One thread per point; out-of-range threads exit early.
 */
__global__ void kernSetRGB(glm::vec3* rgb, glm::vec3 color, int N) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i >= N) {
        return;
    }
    // .r/.g/.b alias .x/.y/.z in glm, so this is a plain component copy
    rgb[i] = glm::vec3(color.r, color.g, color.b);
}
/**
* Generates Sinusoids for Target
*/
/**
 * Build the target pointcloud: point i sits on the 3D sinusoid
 * (0.7, i*y_interval, sin(i*y_interval)) plus a small per-point random
 * jitter, and is colored green (0, 0.9, 0.2).
 * Note: rotationMat is accepted but not applied — the target stays unrotated
 * (the signature mirrors kernBuildSrcSinusoid).
 */
__global__ void kernBuildTargetSinusoid(glm::vec3* pos, glm::vec3* rgb, glm::mat4 rotationMat, float y_interval, int N) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i >= N) {
        return;
    }
    float y = i * y_interval;
    glm::vec3 jitter = generateRandomVec3(i);
    pos[i] = glm::vec3(0.7f, y, sinf(y)) + jitter;
    rgb[i] = glm::vec3(0.f, 0.9f, 0.2f);
}
/**
* Generates Sinusoids for SRC
*/
/**
 * Build the source pointcloud: the same 3D sinusoid as the target, but each
 * point is rotated by rotationMat (homogeneous 4x4) and then offset by a
 * small per-point random jitter; colored orange (1, 0.5, 0.1).
 */
__global__ void kernBuildSrcSinusoid(glm::vec3* pos, glm::vec3* rgb, glm::mat4 rotationMat, float y_interval, int N) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i >= N) {
        return;
    }
    float y = i * y_interval;
    glm::vec3 jitter = generateRandomVec3(i);
    glm::vec3 base(0.7f, y, sinf(y));
    pos[i] = glm::vec3(rotationMat * glm::vec4(base, 1.0f)) + jitter;
    rgb[i] = glm::vec3(1.f, 0.5f, 0.1f);
}
/**
* Copy the Pointcloud Positions into the VBO so that they can be drawn by OpenGL.
*/
/**
 * Copy pointcloud positions into the OpenGL VBO, 4 floats per point
 * (x, y, z, w=1).  Positions are scaled by -1/s_scale with y shifted by -6
 * before scaling.
 * NOTE(review): `index` is bounds-checked against N *before* vbo_offset is
 * added, so a non-zero vbo_offset would index pos[]/vbo[] past N; the visible
 * call site always passes 0 — confirm before using a real offset.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale, int vbo_offset) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
index += vbo_offset;
vbo[4 * index + 0] = pos[index].x * c_scale - 0.f;
vbo[4 * index + 1] = (pos[index].y-6.f) * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Copy the Pointcloud RGB's into the VBO so that they can be drawn by OpenGL.
*/
/**
 * Copy pointcloud colors into the OpenGL VBO, 4 floats per point
 * (r, g, b, a=1).  Each channel is brightened by +0.3 (may exceed 1.0).
 * NOTE(review): same pattern as kernCopyPositionsToVBO — bounds check happens
 * before vbo_offset is added; safe only while the offset is 0.
 */
__global__ void kernCopyRGBToVBO(int N, glm::vec3 *rgb, float *vbo, float s_scale, int vbo_offset) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
index += vbo_offset;
vbo[4 * index + 0] = rgb[index].x + 0.3f;
vbo[4 * index + 1] = rgb[index].y + 0.3f;
vbo[4 * index + 2] = rgb[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
// Default-construct as a non-target cloud of 500 points.
pointcloud::pointcloud(): isTarget(false), N(500){
}
// Construct with an explicit target flag and point count.
pointcloud::pointcloud(bool target, int numPoints): isTarget(target), N(numPoints){
}
// Construct with target flag, point count, and GPU/CPU implementation switch.
pointcloud::pointcloud(bool target, int numPoints, bool gpu): isTarget(target), N(numPoints), isGPU(gpu){
}
/******************
* CPU Methods *
******************/
/**
* Initialize and fills dev_pos and dev_rgb array in CPU
*/
void pointcloud::initCPU() {
// Host-side heap buffers despite the dev_ prefix (this is the CPU path).
// NOTE(review): no delete[] of any prior allocation here — assumes initCPU
// is called at most once per instance; confirm.
dev_pos = new glm::vec3[N];
dev_matches = new glm::vec3[N];
dev_rgb = new glm::vec3[N];
// Populate positions/colors with the sinusoid test shape.
buildSinusoidCPU();
}
/**
* Populates dev_pos with a 3D Sinusoid (with or without Noise) on the CPU
*/
// CPU analogue of the GPU sinusoid builders: fills dev_pos/dev_rgb with a
// jittered 3D sinusoid.  Target points stay unrotated (green); source points
// are rotated by a fixed -PI rotation then jittered (orange).
// NOTE(review): unlike the GPU path (which seeds from hash(index)), this uses
// std::random_device, so the jitter is non-deterministic across runs.
// NOTE(review): the rotation axis (0.1, 0.2, 0) is not unit length; glm::rotate
// typically expects a normalized axis — confirm the intended rotation.
// NOTE(review): uses double-precision sin() here vs sinf() on the GPU — minor
// precision mismatch between the two paths.
void pointcloud::buildSinusoidCPU() {
float y_interval = (2.5 * PI) / N;
//RNG (Predetermine Rotation)
std::random_device rd;
std::mt19937 e2(rd());
std::uniform_real_distribution<float> u01(0, 0.1);
glm::vec3 r(0.1f, 0.2f, 0.0f);
float angle = -1.f * PI;
//float angle = 0.f;
glm::mat4 rotationMat = glm::rotate(angle, r);
for (int idx = 0; idx < N; idx++) {
glm::vec3 pos;
glm::vec3 rgb;
if (isTarget) { //Leave Original Pointcloud
pos = glm::vec3(0.7f, idx*y_interval, sin(idx*y_interval));
rgb = glm::vec3(0.f, 0.9f, 0.2f);
//Create & Apply Translation for Pointcloud Effect
glm::vec3 t(u01(e2), u01(e2), u01(e2));
//glm::vec3 t(0.0, 0.0, 0.0);
pos += t;
}
else { //Add Multiplicative Noise, Rotation, Translation to OG
pos = glm::vec3(0.7f, idx*y_interval, sin(idx*y_interval));
rgb = glm::vec3(1.0f, 0.5f, 0.1f);
//Create Translation and Rotation
//glm::vec3 t(0.1f, 0.f, 0.f);
glm::vec3 t(u01(e2), u01(e2), u01(e2));
//Apply Translation and Rotation
glm::vec3 rotated = glm::vec3(rotationMat * glm::vec4(pos, 1.0));
pos = rotated + t;
}
#if DEBUG
printf("IDX %d\n", idx);
utilityCore::printVec3(pos);
#endif
dev_pos[idx] = pos;
dev_rgb[idx] = rgb;
}
#if DEBUG
printf("=================================================\n");
#endif
}
/**
* Copies dev_pos and dev_rgb into the VBO in the CPU implementation
* This assumes that dev_pos is already filled but is on CPU
* REALLY WACK WAY TO DO IT
*/
// Push CPU-resident positions/colors into the OpenGL VBOs: allocate device
// staging buffers, upload, and launch the copy kernels.
// NOTE(review): vbo_offset is `isTarget ? 0 : 0` — always 0, so the ternary
// is dead; presumably a placeholder for rendering both clouds in one VBO.
// NOTE(review): each call allocates fresh device buffers and overwrites
// dev_tempcpupos/dev_tempcpurgb without freeing the previous ones — if this
// runs per frame, device memory leaks every call; confirm call frequency.
void pointcloud::pointCloudToVBOCPU(float *vbodptr_positions, float *vbodptr_rgb, float s_scale) {
glm::vec3* tempPos;
glm::vec3 * tempRGB;
int vbo_offset = isTarget ? 0 : 0;
//Malloc Temporary Buffers
hipMalloc((void**)&tempPos, N * sizeof(glm::vec3));
hipMalloc((void**)&tempRGB, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("hipMalloc Pointcloud failed!");
//Memcpy dev_pos and dev_rgb into temporary buffers
hipMemcpy(tempPos, dev_pos, N * sizeof(glm::vec3), hipMemcpyHostToDevice);
hipMemcpy(tempRGB, dev_rgb, N * sizeof(glm::vec3), hipMemcpyHostToDevice);
utilityCore::checkCUDAErrorWithLine("hipMemcpy Pointcloud failed!");
//Launching Kernels
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(N, tempPos, vbodptr_positions, s_scale, vbo_offset);
kernCopyRGBToVBO << <fullBlocksPerGrid, blockSize >> >(N, tempRGB, vbodptr_rgb, s_scale, vbo_offset);
utilityCore::checkCUDAErrorWithLine("copyPointCloudToVBO failed!");
hipDeviceSynchronize();
//Now Flipping original pointer to device so we don't crash on termination
dev_tempcpupos = tempPos;
dev_tempcpurgb = tempRGB;
}
/******************
* GPU Methods *
******************/
/**
* Initialize and fills dev_pos and dev_rgb array in CPU
*/
void pointcloud::initGPU() {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
//hipMalloc position, matches & rgb arrays
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("hipMalloc dev_pos failed");
hipMalloc((void**)&dev_matches, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("hipMalloc dev_matches failed");
hipMalloc((void**)&dev_rgb, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("hipMalloc dev_rgb failed");
buildSinusoidGPU();
}
void pointcloud::initGPU(std::vector<glm::vec3> coords) {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
//hipMalloc position, matches & rgb arrays
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("hipMalloc dev_pos failed");
hipMalloc((void**)&dev_matches, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("hipMalloc dev_matches failed");
hipMalloc((void**)&dev_rgb, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("hipMalloc dev_rgb failed");
printf("SIZE IS: %d \n", coords.size());
if (coords.size() > 0) {
buildCoordsGPU(coords);
}
else {
buildSinusoidGPU();
}
}
void pointcloud::buildCoordsGPU(std::vector<glm::vec3> coords) {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
glm::vec3* coordPos = &coords[0];
hipMemcpy(dev_pos, coordPos, N * sizeof(glm::vec3), hipMemcpyHostToDevice);
if (isTarget) {
hipLaunchKernelGGL(( kernSetRGB), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, dev_rgb, GREEN, N);
}
else {
hipLaunchKernelGGL(( kernSetRGB), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, dev_rgb, ORANGE, N);
float angle = 0.1 * PI;
//float angle = 0.f;
glm::vec3 axis(1.f, 0.f, 0.f);
glm::vec3 t(9.0f, 0.f, 0.f);
//glm::vec3 t(0.8f, 0.f, 0.f);
glm::mat4 rotationMatrix = glm::rotate(angle, axis);
kernRotTrans << <fullBlocksPerGrid, blockSize >> > (dev_pos, rotationMatrix, t, N);
}
}
/**
* Populates dev_pos with a 3D Sinusoid (with or without Noise) on the GPU
* Fills dev_pos & dev_rgb
*/
void pointcloud::buildSinusoidGPU() {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
float y_interval = (2.5 * PI) / N;
glm::vec3 r(0.f, 1.0f, 1.0f);
float angle = -0.7f * PI;
//float angle = 0.0f;
glm::mat4 rotationMat = glm::rotate(angle, r);
if (isTarget) {
hipLaunchKernelGGL(( kernBuildTargetSinusoid), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, dev_pos, dev_rgb, rotationMat, y_interval, N);
}
else {
hipLaunchKernelGGL(( kernBuildSrcSinusoid), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, dev_pos, dev_rgb, rotationMat, y_interval, N);
}
}
void pointcloud::pointCloudToVBOGPU(float *vbodptr_positions, float *vbodptr_rgb, float s_scale) {
int vbo_offset = isTarget ? 0 : 0;
//Launching Kernels
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernCopyPositionsToVBO), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, N, dev_pos, vbodptr_positions, s_scale, vbo_offset);
hipLaunchKernelGGL(( kernCopyRGBToVBO) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, N, dev_rgb, vbodptr_rgb, s_scale, vbo_offset);
utilityCore::checkCUDAErrorWithLine("copyPointCloudToVBO failed!");
hipDeviceSynchronize();
}
pointcloud::~pointcloud() {
if (isGPU) {
hipFree(dev_pos);
hipFree(dev_rgb);
}
else {
hipFree(dev_tempcpupos);
hipFree(dev_tempcpurgb);
}
}
| 9c33e5c3fd20dfc02ebfa73dda28b295d9b3ebe6.cu | #pragma once
#include "pointcloud.h"
#include <cuda.h>
#define blockSize 128
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define ORANGE glm::vec3(1.0f, 0.5f, 0.1f)
#define GREEN glm::vec3(0.f, 0.9f, 0.2f)
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
__host__ __device__ glm::vec3 generateRandomVec3(int index) {
thrust::default_random_engine rng(hash((int)(index)));
thrust::uniform_real_distribution<float> unitDistrib(0.f, 0.1f);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
__global__ void kernRotTrans(glm::vec3* pos, glm::mat4 rotationMat, glm::vec3 t, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < N) {
glm::vec3 rotated = glm::vec3(rotationMat * glm::vec4(pos[idx], 1.0f));
pos[idx] = rotated + t;
}
}
__global__ void kernSetRGB(glm::vec3* rgb, glm::vec3 color, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < N) {
rgb[idx].x = color.r;
rgb[idx].y = color.g;
rgb[idx].z = color.b;
}
}
/**
* Generates Sinusoids for Target
*/
__global__ void kernBuildTargetSinusoid(glm::vec3* pos, glm::vec3* rgb, glm::mat4 rotationMat, float y_interval, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < N) {
glm::vec3 t = generateRandomVec3(idx);
//glm::vec3 t(0.0f, 0.0f, 0.0f);
pos[idx].x = 0.7f;
pos[idx].y = idx * y_interval;
pos[idx].z = sinf(idx*y_interval);
pos[idx] = pos[idx] + t;
rgb[idx].x = 0.f;
rgb[idx].y = 0.9f;
rgb[idx].z = 0.2f;
}
}
/**
* Generates Sinusoids for SRC
*/
__global__ void kernBuildSrcSinusoid(glm::vec3* pos, glm::vec3* rgb, glm::mat4 rotationMat, float y_interval, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < N) {
glm::vec3 t = generateRandomVec3(idx);
//glm::vec3 t(0.1f, 0.0f, 0.0f);
pos[idx].x = 0.7f;
pos[idx].y = idx * y_interval;
pos[idx].z = sinf(idx*y_interval);
glm::vec3 rotated = glm::vec3(rotationMat * glm::vec4(pos[idx], 1.0f));
pos[idx] = rotated + t;
rgb[idx].x = 1.f;
rgb[idx].y = 0.5f;
rgb[idx].z = 0.1f;
}
}
/**
* Copy the Pointcloud Positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale, int vbo_offset) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
index += vbo_offset;
vbo[4 * index + 0] = pos[index].x * c_scale - 0.f;
vbo[4 * index + 1] = (pos[index].y-6.f) * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Copy the Pointcloud RGB's into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyRGBToVBO(int N, glm::vec3 *rgb, float *vbo, float s_scale, int vbo_offset) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
index += vbo_offset;
vbo[4 * index + 0] = rgb[index].x + 0.3f;
vbo[4 * index + 1] = rgb[index].y + 0.3f;
vbo[4 * index + 2] = rgb[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
pointcloud::pointcloud(): isTarget(false), N(500){
}
pointcloud::pointcloud(bool target, int numPoints): isTarget(target), N(numPoints){
}
pointcloud::pointcloud(bool target, int numPoints, bool gpu): isTarget(target), N(numPoints), isGPU(gpu){
}
/******************
* CPU Methods *
******************/
/**
* Initialize and fills dev_pos and dev_rgb array in CPU
*/
void pointcloud::initCPU() {
dev_pos = new glm::vec3[N];
dev_matches = new glm::vec3[N];
dev_rgb = new glm::vec3[N];
buildSinusoidCPU();
}
/**
* Populates dev_pos with a 3D Sinusoid (with or without Noise) on the CPU
*/
void pointcloud::buildSinusoidCPU() {
float y_interval = (2.5 * PI) / N;
//RNG (Predetermine Rotation)
std::random_device rd;
std::mt19937 e2(rd());
std::uniform_real_distribution<float> u01(0, 0.1);
glm::vec3 r(0.1f, 0.2f, 0.0f);
float angle = -1.f * PI;
//float angle = 0.f;
glm::mat4 rotationMat = glm::rotate(angle, r);
for (int idx = 0; idx < N; idx++) {
glm::vec3 pos;
glm::vec3 rgb;
if (isTarget) { //Leave Original Pointcloud
pos = glm::vec3(0.7f, idx*y_interval, sin(idx*y_interval));
rgb = glm::vec3(0.f, 0.9f, 0.2f);
//Create & Apply Translation for Pointcloud Effect
glm::vec3 t(u01(e2), u01(e2), u01(e2));
//glm::vec3 t(0.0, 0.0, 0.0);
pos += t;
}
else { //Add Multiplicative Noise, Rotation, Translation to OG
pos = glm::vec3(0.7f, idx*y_interval, sin(idx*y_interval));
rgb = glm::vec3(1.0f, 0.5f, 0.1f);
//Create Translation and Rotation
//glm::vec3 t(0.1f, 0.f, 0.f);
glm::vec3 t(u01(e2), u01(e2), u01(e2));
//Apply Translation and Rotation
glm::vec3 rotated = glm::vec3(rotationMat * glm::vec4(pos, 1.0));
pos = rotated + t;
}
#if DEBUG
printf("IDX %d\n", idx);
utilityCore::printVec3(pos);
#endif
dev_pos[idx] = pos;
dev_rgb[idx] = rgb;
}
#if DEBUG
printf("=================================================\n");
#endif
}
/**
* Copies dev_pos and dev_rgb into the VBO in the CPU implementation
* This assumes that dev_pos is already filled but is on CPU
* REALLY WACK WAY TO DO IT
*/
void pointcloud::pointCloudToVBOCPU(float *vbodptr_positions, float *vbodptr_rgb, float s_scale) {
glm::vec3* tempPos;
glm::vec3 * tempRGB;
int vbo_offset = isTarget ? 0 : 0;
//Malloc Temporary Buffers
cudaMalloc((void**)&tempPos, N * sizeof(glm::vec3));
cudaMalloc((void**)&tempRGB, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("cudaMalloc Pointcloud failed!");
//Memcpy dev_pos and dev_rgb into temporary buffers
cudaMemcpy(tempPos, dev_pos, N * sizeof(glm::vec3), cudaMemcpyHostToDevice);
cudaMemcpy(tempRGB, dev_rgb, N * sizeof(glm::vec3), cudaMemcpyHostToDevice);
utilityCore::checkCUDAErrorWithLine("cudaMemcpy Pointcloud failed!");
//Launching Kernels
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(N, tempPos, vbodptr_positions, s_scale, vbo_offset);
kernCopyRGBToVBO << <fullBlocksPerGrid, blockSize >> >(N, tempRGB, vbodptr_rgb, s_scale, vbo_offset);
utilityCore::checkCUDAErrorWithLine("copyPointCloudToVBO failed!");
cudaDeviceSynchronize();
//Now Flipping original pointer to device so we don't crash on termination
dev_tempcpupos = tempPos;
dev_tempcpurgb = tempRGB;
}
/******************
* GPU Methods *
******************/
/**
* Initialize and fills dev_pos and dev_rgb array in CPU
*/
void pointcloud::initGPU() {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
//cudaMalloc position, matches & rgb arrays
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("cudaMalloc dev_pos failed");
cudaMalloc((void**)&dev_matches, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("cudaMalloc dev_matches failed");
cudaMalloc((void**)&dev_rgb, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("cudaMalloc dev_rgb failed");
buildSinusoidGPU();
}
void pointcloud::initGPU(std::vector<glm::vec3> coords) {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
//cudaMalloc position, matches & rgb arrays
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("cudaMalloc dev_pos failed");
cudaMalloc((void**)&dev_matches, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("cudaMalloc dev_matches failed");
cudaMalloc((void**)&dev_rgb, N * sizeof(glm::vec3));
utilityCore::checkCUDAErrorWithLine("cudaMalloc dev_rgb failed");
printf("SIZE IS: %d \n", coords.size());
if (coords.size() > 0) {
buildCoordsGPU(coords);
}
else {
buildSinusoidGPU();
}
}
void pointcloud::buildCoordsGPU(std::vector<glm::vec3> coords) {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
glm::vec3* coordPos = &coords[0];
cudaMemcpy(dev_pos, coordPos, N * sizeof(glm::vec3), cudaMemcpyHostToDevice);
if (isTarget) {
kernSetRGB<<<fullBlocksPerGrid, blockSize>>>(dev_rgb, GREEN, N);
}
else {
kernSetRGB<<<fullBlocksPerGrid, blockSize>>>(dev_rgb, ORANGE, N);
float angle = 0.1 * PI;
//float angle = 0.f;
glm::vec3 axis(1.f, 0.f, 0.f);
glm::vec3 t(9.0f, 0.f, 0.f);
//glm::vec3 t(0.8f, 0.f, 0.f);
glm::mat4 rotationMatrix = glm::rotate(angle, axis);
kernRotTrans << <fullBlocksPerGrid, blockSize >> > (dev_pos, rotationMatrix, t, N);
}
}
/**
* Populates dev_pos with a 3D Sinusoid (with or without Noise) on the GPU
* Fills dev_pos & dev_rgb
*/
void pointcloud::buildSinusoidGPU() {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
float y_interval = (2.5 * PI) / N;
glm::vec3 r(0.f, 1.0f, 1.0f);
float angle = -0.7f * PI;
//float angle = 0.0f;
glm::mat4 rotationMat = glm::rotate(angle, r);
if (isTarget) {
kernBuildTargetSinusoid<<<fullBlocksPerGrid, blockSize>>>(dev_pos, dev_rgb, rotationMat, y_interval, N);
}
else {
kernBuildSrcSinusoid<<<fullBlocksPerGrid, blockSize>>>(dev_pos, dev_rgb, rotationMat, y_interval, N);
}
}
void pointcloud::pointCloudToVBOGPU(float *vbodptr_positions, float *vbodptr_rgb, float s_scale) {
int vbo_offset = isTarget ? 0 : 0;
//Launching Kernels
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernCopyPositionsToVBO<<<fullBlocksPerGrid, blockSize >>>(N, dev_pos, vbodptr_positions, s_scale, vbo_offset);
kernCopyRGBToVBO <<<fullBlocksPerGrid, blockSize >>>(N, dev_rgb, vbodptr_rgb, s_scale, vbo_offset);
utilityCore::checkCUDAErrorWithLine("copyPointCloudToVBO failed!");
cudaDeviceSynchronize();
}
pointcloud::~pointcloud() {
if (isGPU) {
cudaFree(dev_pos);
cudaFree(dev_rgb);
}
else {
cudaFree(dev_tempcpupos);
cudaFree(dev_tempcpurgb);
}
}
|
56164f9e7e63af5337cfcab5f1bb1230281f4baa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
MIT License
Copyright (c) 2015-2018 Ardavan Kanani
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "path_tracer_data.cuh"
__forceinline__ __device__
float4 pathtracer(
CudaRay& ray,
const CudaRNG& rng,
float4& lookAt,
uint tid
) {
float3 L = _constant_spec._black;
float3 beta = _constant_spec._white;
bool specular_bounce = false;
for ( uchar bounce = 0; bounce < _constant_spec._bounces; ++bounce ) {
CudaIntersectionRecord rec( tid );
if ( intersect( ray, rec ) ) {
CudaIntersection I( ray, rng, rec );
if ( bounce == 0 ) {
lookAt = make_float4( I.getP(), 1.f );
}
//if ( I.isEmitter()){
// if ( !I.isMeshLight() ) {
// L += beta * _material_manager.getEmmitance( I );
// break;
// } else if ( bounce == 0 || specular_bounce ) {
// L += beta * _light_manager.Le( ray.getDir(), I.getInsIdx() );
// break;
// }
//}
if ( I.isEmitter() && ( bounce == 0 || specular_bounce ) ) {
L += beta * (I.isMeshLight() ?
_light_manager.Le( ray.getDir(), I.getInsIdx() ) :
_material_manager.getEmmitance( I ));
break;
}
accumulate( I, ray, beta, L );
// check the outgoing direction is on the correct side
if ( !correct_sidedness( I ) ) break;
// Russian Roulette
if ( rr_terminate( I, bounce, beta ) ) break;
specular_bounce = I.isSpecular();
I.spawnRay( ray );
} else {
if ( _constant_spec.is_sky_light_enabled() &&
( bounce == 0 || specular_bounce ) ) {
L += beta*_skydome_manager.evaluate( normalize( ray.getDir() ), false );
}
break;
}
}
L = clamp( L, 0, 100.f );
return make_float4( L, 1.f );
}
__global__
void path_tracer_kernel( uint frame_number, uint gpu_offset ) {
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint y = blockIdx.y * blockDim.y + threadIdx.y;
// grid id of the thread
const uint gid = y * _constant_camera._w + x;
// block id of the thread
const uint tid = threadIdx.y * blockDim.x + threadIdx.x;
float4 lookAt = make_float4( 0.f );
//const CudaRNG rng( gid, frame_number + clock64() );
const CudaRNG rng( gid, frame_number );
CudaRay ray = generateRay( x, y + gpu_offset, rng );
const float4 new_color = pathtracer( ray, rng, lookAt, tid );
_framebuffer_manager.set( new_color, gid, frame_number );
if ( gid + gpu_offset*_constant_camera._w == _constant_camera._center ) {
_device_lookAt = lookAt;
}
}
__global__
void debug_skydome_kernel(
uint frame_number,
int width,
int height
) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
int id = y * _constant_camera._w + x;
float2 u = make_float2( x / (float)( _constant_camera._w ), y /
(float)( _constant_camera._h ) );
_framebuffer_manager.set( _skydome_manager.evaluate( u ), id );
if ( _constant_spec.is_mis_enabled() ) {
CudaRNG rng( id, frame_number );
u = _skydome_manager.importance_sample_uv( make_float2( rng(), rng() ) );
u.x *= _constant_camera._w;
u.y *= _constant_camera._h;
id = int( u.y ) * ( _constant_camera._w ) + int( u.x );
_framebuffer_manager.set( make_float4( 1.f, 0, 0, 1 ), id );
}
}
void debug_skydome( uint& frame_number, int width, int height ) {
static const dim3 block( THREAD_W, THREAD_H, 1 );
static const dim3 grid( width / block.x, height / block.y, 1 );
debug_skydome_kernel << < grid, block >> > ( frame_number, width, height );
_render_manager->update();
++frame_number;
}
void cuda_path_tracer( uint& frame_number ) {
static const dim3 block( THREAD_W, THREAD_H, 1 );
static const dim3 grid[] = {
dim3( _render_manager->_gpu[0]->_task._w / block.x,
_render_manager->_gpu[0]->_task._h / block.y, 1 ),
_render_manager->_num_gpus > 1 ?
dim3( _render_manager->_gpu[1]->_task._w / block.x,
_render_manager->_gpu[1]->_task._h / block.y, 1 )
: dim3( 0 )
};
static const size_t shmsize = _render_manager->_shmsize;
static const int offset = _render_manager->_gpu[0]->_task._h;
for ( int i = _render_manager->_num_gpus - 1; i >= 0; --i ) {
checkNoorErrors( hipSetDevice( i ) );
path_tracer_kernel << <
grid[i],
block,
shmsize,
_render_manager->getStream( i )
>> > ( frame_number, i*offset );
}
_render_manager->update();
++frame_number;
}
| 56164f9e7e63af5337cfcab5f1bb1230281f4baa.cu | /*
MIT License
Copyright (c) 2015-2018 Ardavan Kanani
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "path_tracer_data.cuh"
__forceinline__ __device__
float4 pathtracer(
CudaRay& ray,
const CudaRNG& rng,
float4& lookAt,
uint tid
) {
float3 L = _constant_spec._black;
float3 beta = _constant_spec._white;
bool specular_bounce = false;
for ( uchar bounce = 0; bounce < _constant_spec._bounces; ++bounce ) {
CudaIntersectionRecord rec( tid );
if ( intersect( ray, rec ) ) {
CudaIntersection I( ray, rng, rec );
if ( bounce == 0 ) {
lookAt = make_float4( I.getP(), 1.f );
}
//if ( I.isEmitter()){
// if ( !I.isMeshLight() ) {
// L += beta * _material_manager.getEmmitance( I );
// break;
// } else if ( bounce == 0 || specular_bounce ) {
// L += beta * _light_manager.Le( ray.getDir(), I.getInsIdx() );
// break;
// }
//}
if ( I.isEmitter() && ( bounce == 0 || specular_bounce ) ) {
L += beta * (I.isMeshLight() ?
_light_manager.Le( ray.getDir(), I.getInsIdx() ) :
_material_manager.getEmmitance( I ));
break;
}
accumulate( I, ray, beta, L );
// check the outgoing direction is on the correct side
if ( !correct_sidedness( I ) ) break;
// Russian Roulette
if ( rr_terminate( I, bounce, beta ) ) break;
specular_bounce = I.isSpecular();
I.spawnRay( ray );
} else {
if ( _constant_spec.is_sky_light_enabled() &&
( bounce == 0 || specular_bounce ) ) {
L += beta*_skydome_manager.evaluate( normalize( ray.getDir() ), false );
}
break;
}
}
L = clamp( L, 0, 100.f );
return make_float4( L, 1.f );
}
__global__
void path_tracer_kernel( uint frame_number, uint gpu_offset ) {
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint y = blockIdx.y * blockDim.y + threadIdx.y;
// grid id of the thread
const uint gid = y * _constant_camera._w + x;
// block id of the thread
const uint tid = threadIdx.y * blockDim.x + threadIdx.x;
float4 lookAt = make_float4( 0.f );
//const CudaRNG rng( gid, frame_number + clock64() );
const CudaRNG rng( gid, frame_number );
CudaRay ray = generateRay( x, y + gpu_offset, rng );
const float4 new_color = pathtracer( ray, rng, lookAt, tid );
_framebuffer_manager.set( new_color, gid, frame_number );
if ( gid + gpu_offset*_constant_camera._w == _constant_camera._center ) {
_device_lookAt = lookAt;
}
}
__global__
void debug_skydome_kernel(
uint frame_number,
int width,
int height
) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
int id = y * _constant_camera._w + x;
float2 u = make_float2( x / (float)( _constant_camera._w ), y /
(float)( _constant_camera._h ) );
_framebuffer_manager.set( _skydome_manager.evaluate( u ), id );
if ( _constant_spec.is_mis_enabled() ) {
CudaRNG rng( id, frame_number );
u = _skydome_manager.importance_sample_uv( make_float2( rng(), rng() ) );
u.x *= _constant_camera._w;
u.y *= _constant_camera._h;
id = int( u.y ) * ( _constant_camera._w ) + int( u.x );
_framebuffer_manager.set( make_float4( 1.f, 0, 0, 1 ), id );
}
}
void debug_skydome( uint& frame_number, int width, int height ) {
static const dim3 block( THREAD_W, THREAD_H, 1 );
static const dim3 grid( width / block.x, height / block.y, 1 );
debug_skydome_kernel << < grid, block >> > ( frame_number, width, height );
_render_manager->update();
++frame_number;
}
void cuda_path_tracer( uint& frame_number ) {
static const dim3 block( THREAD_W, THREAD_H, 1 );
static const dim3 grid[] = {
dim3( _render_manager->_gpu[0]->_task._w / block.x,
_render_manager->_gpu[0]->_task._h / block.y, 1 ),
_render_manager->_num_gpus > 1 ?
dim3( _render_manager->_gpu[1]->_task._w / block.x,
_render_manager->_gpu[1]->_task._h / block.y, 1 )
: dim3( 0 )
};
static const size_t shmsize = _render_manager->_shmsize;
static const int offset = _render_manager->_gpu[0]->_task._h;
for ( int i = _render_manager->_num_gpus - 1; i >= 0; --i ) {
checkNoorErrors( cudaSetDevice( i ) );
path_tracer_kernel << <
grid[i],
block,
shmsize,
_render_manager->getStream( i )
>> > ( frame_number, i*offset );
}
_render_manager->update();
++frame_number;
}
|
91f5a0c605fb4c1aebb3cbc8f4088a0e38e8e585.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hiprand/hiprand.h"
#include <iostream>
using namespace std;
__global__ void runningSum(int *d)
{
int tid = threadIdx.x;
int threads = blockDim.x;
//tc - total number of threads allowed.
for (int tc = threads, step = 1; tc > 0; step <<= 1)
{
//guardian
if (tid < tc)
{
d[tid+step] += d[tid];
}
tc -= step;
}
}
int main()
{
const int count = 32;
const size_t size = count * sizeof(int);
int h[count];
for (int i=0; i<count; ++i)
{
h[i] = i+1;
}
int *d;
hipMalloc(&d, size);
hipMemcpy(d, h, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( runningSum), dim3(1), dim3(count-1), 0, 0, d);
hipMemcpy(h, d, size, hipMemcpyDeviceToHost);
hipFree(d);
for (int i=0; i < count; ++i)
{
std::cout << h[i] << std::endl;
}
} | 91f5a0c605fb4c1aebb3cbc8f4088a0e38e8e585.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include <iostream>
using namespace std;
__global__ void runningSum(int *d)
{
int tid = threadIdx.x;
int threads = blockDim.x;
//tc - total number of threads allowed.
for (int tc = threads, step = 1; tc > 0; step <<= 1)
{
//guardian
if (tid < tc)
{
d[tid+step] += d[tid];
}
tc -= step;
}
}
int main()
{
const int count = 32;
const size_t size = count * sizeof(int);
int h[count];
for (int i=0; i<count; ++i)
{
h[i] = i+1;
}
int *d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
runningSum<<<1, count-1>>>(d);
cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
cudaFree(d);
for (int i=0; i < count; ++i)
{
std::cout << h[i] << std::endl;
}
} |
6ea78e952efc9bda2708a4b2470fcb3bcac7d327.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void applyForce(float4 *p, float4 *v, float4 *d, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
dt = 0.00001f;
if (i < n) {
d[i].x = dt; d[i].y = n;
float Fx = 0.0f; float Fy = 0.0f;
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float2 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float2(tpos.x, tpos.y);
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float distSqr = dx*dx + dy*dy + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
float strength = 1.0f;
Fx += dx * invDist3 * strength; Fy += dy * invDist3 * strength;
}
__syncthreads();
}
//v[i].x += 1.5f;
v[i].x += dt*Fx; v[i].y += dt*Fy;
p[i].x += v[i].x*dt; p[i].y += v[i].y*dt;
}
} | 6ea78e952efc9bda2708a4b2470fcb3bcac7d327.cu | #define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void applyForce(float4 *p, float4 *v, float4 *d, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
dt = 0.00001f;
if (i < n) {
d[i].x = dt; d[i].y = n;
float Fx = 0.0f; float Fy = 0.0f;
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float2 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float2(tpos.x, tpos.y);
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float distSqr = dx*dx + dy*dy + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
float strength = 1.0f;
Fx += dx * invDist3 * strength; Fy += dy * invDist3 * strength;
}
__syncthreads();
}
//v[i].x += 1.5f;
v[i].x += dt*Fx; v[i].y += dt*Fy;
p[i].x += v[i].x*dt; p[i].y += v[i].y*dt;
}
} |
687885632daa7dc7c7482659e275b7602d817479.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/system/hip/vector.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/pair.h>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <map>
#include <cassert>
#define start_index_s0 2*i*batch_size
#define start_index_s1 2*i*batch_size+batch_size
#define start_index_s2 2*i*batch_size-batch_size
#define merge_index_1 2*(i-1)*batch_size
#define merge_index_2 2*(i-1)*batch_size+batch_size
void PairMerge(uint64_t *key_array_1, uint64_t *key_array_2, uint64_t batch_size, int nthreads);
// cached_allocator: a simple allocator for caching allocation requests
class cached_allocator
{
public:
// just allocate bytes
typedef char value_type;
cached_allocator() {}
~cached_allocator()
{
// free all allocations when cached_allocator goes out of scope
free_all();
}
char *allocate(std::ptrdiff_t num_bytes)
{
char *result = 0;
// search the cache for a free block
free_blocks_type::iterator free_block = free_blocks.find(num_bytes);
if(free_block != free_blocks.end())
{
std::cout << "cached_allocator::allocator(): found a hit" << std::endl;
// get the pointer
result = free_block->second;
// erase from the free_blocks map
free_blocks.erase(free_block);
}
else
{
// no allocation of the right size exists
// create a new one with cuda::malloc
// throw if cuda::malloc can't satisfy the request
try
{
std::cout << "cached_allocator::allocator(): no free block found; calling cuda::malloc" << std::endl;
// allocate memory and convert cuda::pointer to raw pointer
result = thrust::hip::malloc<char>(num_bytes).get();
}
catch(std::runtime_error &e)
{
throw;
}
}
// insert the allocated pointer into the allocated_blocks map
allocated_blocks.insert(std::make_pair(result, num_bytes));
return result;
}
void deallocate(char *ptr, size_t n)
{
// erase the allocated block from the allocated blocks map
allocated_blocks_type::iterator iter = allocated_blocks.find(ptr);
std::ptrdiff_t num_bytes = iter->second;
allocated_blocks.erase(iter);
// insert the block into the free blocks map
free_blocks.insert(std::make_pair(num_bytes, ptr));
}
private:
typedef std::multimap<std::ptrdiff_t, char*> free_blocks_type;
typedef std::map<char *, std::ptrdiff_t> allocated_blocks_type;
free_blocks_type free_blocks;
allocated_blocks_type allocated_blocks;
void free_all()
{
std::cout << "cached_allocator::free_all(): cleaning up after ourselves..." << std::endl;
// deallocate all outstanding blocks in both lists
for(free_blocks_type::iterator i = free_blocks.begin();
i != free_blocks.end();
++i)
{
// transform the pointer to cuda::pointer before calling cuda::free
thrust::hip::free(thrust::hip::pointer<char>(i->second));
}
for(allocated_blocks_type::iterator i = allocated_blocks.begin();
i != allocated_blocks.end();
++i)
{
// transform the pointer to cuda::pointer before calling cuda::free
thrust::hip::free(thrust::hip::pointer<char>(i->first));
}
}
};
void ThrustSort(uint64_t *h_key_array, uint64_t *d_key_array[2], uint64_t number_of_elements, uint64_t batch_size, int nthreads)
{
cached_allocator alloc;
int number_of_batches = number_of_elements / batch_size;
uint64_t *pinned_M[2];
hipMalloc( (void**)&d_key_array[0], batch_size * sizeof(uint64_t) );
hipMalloc( (void**)&d_key_array[1], batch_size * sizeof(uint64_t) );
hipHostMalloc( (void**)&pinned_M[0], batch_size * sizeof(uint64_t), hipHostMallocDefault );
hipHostMalloc( (void**)&pinned_M[1], batch_size * sizeof(uint64_t), hipHostMallocDefault );
hipStream_t streams[2];
for (int s = 0; s < 2; s++) {
hipStreamCreate(&streams[s]);
}
thrust::device_ptr<uint64_t> th_key_array[2];
for (int s = 0; s < 2; s++) {
th_key_array[s] = thrust::device_pointer_cast(d_key_array[s]);
}
for (int i = 0; i < number_of_batches / 2; i++) {
for (int s = 0; s < 2; s++) {
if (i == 0 && s == 0) {
std::memcpy(pinned_M[0],
&h_key_array[start_index_s0],
batch_size*sizeof(uint64_t));
hipMemcpyAsync(d_key_array[0],
pinned_M[0],
batch_size*sizeof(uint64_t),
hipMemcpyHostToDevice,
streams[0]);
hipDeviceSynchronize();
thrust::sort(thrust::hip::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size);
hipDeviceSynchronize();
}
else if (i > 0 && s == 0)
{
std::memcpy(pinned_M[0],
&h_key_array[start_index_s0],
batch_size*sizeof(uint64_t));
hipMemcpyAsync(d_key_array[0],
pinned_M[0],
batch_size*sizeof(uint64_t),
hipMemcpyHostToDevice,
streams[0]);
hipMemcpyAsync(pinned_M[1],
d_key_array[1],
batch_size*sizeof(uint64_t),
hipMemcpyDeviceToHost,
streams[1]);
hipDeviceSynchronize();
std::memcpy(&h_key_array[start_index_s2],
pinned_M[1],
batch_size*sizeof(uint64_t));
thrust::sort(thrust::hip::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size);
PairMerge(&h_key_array[merge_index_1], &h_key_array[merge_index_2], batch_size, nthreads);
hipDeviceSynchronize();
}
else if (s == 1)
{
std::memcpy(pinned_M[1],
&h_key_array[start_index_s1],
batch_size*sizeof(uint64_t));
hipMemcpyAsync(d_key_array[1],
pinned_M[1],
batch_size*sizeof(uint64_t),
hipMemcpyHostToDevice,
streams[1]);
hipMemcpyAsync(pinned_M[0],
d_key_array[0],
batch_size*sizeof(uint64_t),
hipMemcpyDeviceToHost,
streams[0]);
hipDeviceSynchronize();
std::memcpy(&h_key_array[start_index_s0],
pinned_M[0],
batch_size*sizeof(uint64_t));
thrust::sort(thrust::hip::par(alloc).on(streams[1]), th_key_array[1], th_key_array[1]+batch_size);
hipDeviceSynchronize();
if (i == (number_of_batches / 2) - 1)
{
hipMemcpyAsync(pinned_M[1],
d_key_array[1],
batch_size*sizeof(uint64_t),
hipMemcpyDeviceToHost,
streams[1]);
hipDeviceSynchronize();
std::memcpy(&h_key_array[start_index_s1],
pinned_M[1],
batch_size*sizeof(uint64_t));
}
}
}
}
for (int s = 0; s < 2; s++) {
hipStreamDestroy(streams[s]);
}
return;
}
| 687885632daa7dc7c7482659e275b7602d817479.cu | #include <stdio.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/system/cuda/vector.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/pair.h>
#include <cuda_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <map>
#include <cassert>
#define start_index_s0 2*i*batch_size
#define start_index_s1 2*i*batch_size+batch_size
#define start_index_s2 2*i*batch_size-batch_size
#define merge_index_1 2*(i-1)*batch_size
#define merge_index_2 2*(i-1)*batch_size+batch_size
void PairMerge(uint64_t *key_array_1, uint64_t *key_array_2, uint64_t batch_size, int nthreads);
// cached_allocator: a simple allocator for caching allocation requests
class cached_allocator
{
public:
// just allocate bytes
typedef char value_type;
cached_allocator() {}
~cached_allocator()
{
// free all allocations when cached_allocator goes out of scope
free_all();
}
char *allocate(std::ptrdiff_t num_bytes)
{
char *result = 0;
// search the cache for a free block
free_blocks_type::iterator free_block = free_blocks.find(num_bytes);
if(free_block != free_blocks.end())
{
std::cout << "cached_allocator::allocator(): found a hit" << std::endl;
// get the pointer
result = free_block->second;
// erase from the free_blocks map
free_blocks.erase(free_block);
}
else
{
// no allocation of the right size exists
// create a new one with cuda::malloc
// throw if cuda::malloc can't satisfy the request
try
{
std::cout << "cached_allocator::allocator(): no free block found; calling cuda::malloc" << std::endl;
// allocate memory and convert cuda::pointer to raw pointer
result = thrust::cuda::malloc<char>(num_bytes).get();
}
catch(std::runtime_error &e)
{
throw;
}
}
// insert the allocated pointer into the allocated_blocks map
allocated_blocks.insert(std::make_pair(result, num_bytes));
return result;
}
void deallocate(char *ptr, size_t n)
{
// erase the allocated block from the allocated blocks map
allocated_blocks_type::iterator iter = allocated_blocks.find(ptr);
std::ptrdiff_t num_bytes = iter->second;
allocated_blocks.erase(iter);
// insert the block into the free blocks map
free_blocks.insert(std::make_pair(num_bytes, ptr));
}
private:
typedef std::multimap<std::ptrdiff_t, char*> free_blocks_type;
typedef std::map<char *, std::ptrdiff_t> allocated_blocks_type;
free_blocks_type free_blocks;
allocated_blocks_type allocated_blocks;
void free_all()
{
std::cout << "cached_allocator::free_all(): cleaning up after ourselves..." << std::endl;
// deallocate all outstanding blocks in both lists
for(free_blocks_type::iterator i = free_blocks.begin();
i != free_blocks.end();
++i)
{
// transform the pointer to cuda::pointer before calling cuda::free
thrust::cuda::free(thrust::cuda::pointer<char>(i->second));
}
for(allocated_blocks_type::iterator i = allocated_blocks.begin();
i != allocated_blocks.end();
++i)
{
// transform the pointer to cuda::pointer before calling cuda::free
thrust::cuda::free(thrust::cuda::pointer<char>(i->first));
}
}
};
void ThrustSort(uint64_t *h_key_array, uint64_t *d_key_array[2], uint64_t number_of_elements, uint64_t batch_size, int nthreads)
{
cached_allocator alloc;
int number_of_batches = number_of_elements / batch_size;
uint64_t *pinned_M[2];
cudaMalloc( (void**)&d_key_array[0], batch_size * sizeof(uint64_t) );
cudaMalloc( (void**)&d_key_array[1], batch_size * sizeof(uint64_t) );
cudaHostAlloc( (void**)&pinned_M[0], batch_size * sizeof(uint64_t), cudaHostAllocDefault );
cudaHostAlloc( (void**)&pinned_M[1], batch_size * sizeof(uint64_t), cudaHostAllocDefault );
cudaStream_t streams[2];
for (int s = 0; s < 2; s++) {
cudaStreamCreate(&streams[s]);
}
thrust::device_ptr<uint64_t> th_key_array[2];
for (int s = 0; s < 2; s++) {
th_key_array[s] = thrust::device_pointer_cast(d_key_array[s]);
}
for (int i = 0; i < number_of_batches / 2; i++) {
for (int s = 0; s < 2; s++) {
if (i == 0 && s == 0) {
std::memcpy(pinned_M[0],
&h_key_array[start_index_s0],
batch_size*sizeof(uint64_t));
cudaMemcpyAsync(d_key_array[0],
pinned_M[0],
batch_size*sizeof(uint64_t),
cudaMemcpyHostToDevice,
streams[0]);
cudaDeviceSynchronize();
thrust::sort(thrust::cuda::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size);
cudaDeviceSynchronize();
}
else if (i > 0 && s == 0)
{
std::memcpy(pinned_M[0],
&h_key_array[start_index_s0],
batch_size*sizeof(uint64_t));
cudaMemcpyAsync(d_key_array[0],
pinned_M[0],
batch_size*sizeof(uint64_t),
cudaMemcpyHostToDevice,
streams[0]);
cudaMemcpyAsync(pinned_M[1],
d_key_array[1],
batch_size*sizeof(uint64_t),
cudaMemcpyDeviceToHost,
streams[1]);
cudaDeviceSynchronize();
std::memcpy(&h_key_array[start_index_s2],
pinned_M[1],
batch_size*sizeof(uint64_t));
thrust::sort(thrust::cuda::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size);
PairMerge(&h_key_array[merge_index_1], &h_key_array[merge_index_2], batch_size, nthreads);
cudaDeviceSynchronize();
}
else if (s == 1)
{
std::memcpy(pinned_M[1],
&h_key_array[start_index_s1],
batch_size*sizeof(uint64_t));
cudaMemcpyAsync(d_key_array[1],
pinned_M[1],
batch_size*sizeof(uint64_t),
cudaMemcpyHostToDevice,
streams[1]);
cudaMemcpyAsync(pinned_M[0],
d_key_array[0],
batch_size*sizeof(uint64_t),
cudaMemcpyDeviceToHost,
streams[0]);
cudaDeviceSynchronize();
std::memcpy(&h_key_array[start_index_s0],
pinned_M[0],
batch_size*sizeof(uint64_t));
thrust::sort(thrust::cuda::par(alloc).on(streams[1]), th_key_array[1], th_key_array[1]+batch_size);
cudaDeviceSynchronize();
if (i == (number_of_batches / 2) - 1)
{
cudaMemcpyAsync(pinned_M[1],
d_key_array[1],
batch_size*sizeof(uint64_t),
cudaMemcpyDeviceToHost,
streams[1]);
cudaDeviceSynchronize();
std::memcpy(&h_key_array[start_index_s1],
pinned_M[1],
batch_size*sizeof(uint64_t));
}
}
}
}
for (int s = 0; s < 2; s++) {
cudaStreamDestroy(streams[s]);
}
return;
}
|
2fd89a59a843fd9a4b27c32efcfc452086ed14a4.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2017-2023, XGBoost contributors
*/
#include <gtest/gtest.h>
#include <xgboost/c_api.h>
#include <xgboost/learner.h>
#include <xgboost/logging.h>
#include <xgboost/predictor.h>
#include <string>
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/data/proxy_dmatrix.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../helpers.h"
#include "test_predictor.h"
namespace xgboost::predictor {
TEST(GPUPredictor, Basic) {
auto cpu_lparam = MakeCUDACtx(-1);
auto gpu_lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
auto ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(n_col, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
// Test predict batch
PredictionCacheEntry gpu_out_predictions;
PredictionCacheEntry cpu_out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &gpu_out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0);
cpu_predictor->InitOutPredictions(dmat->Info(), &cpu_out_predictions.predictions, model);
cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector();
float abs_tolerance = 0.001;
for (size_t j = 0; j < gpu_out_predictions.predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
}
}
namespace {
void VerifyBasicColumnSplit(std::array<std::vector<float>, 32> const& expected_result) {
auto const world_size = collective::GetWorldSize();
auto const rank = collective::GetRank();
auto ctx = MakeCUDACtx(rank);
std::unique_ptr<Predictor> predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &ctx));
predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
size_t n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
std::unique_ptr<DMatrix> sliced{dmat->SliceCol(world_size, rank)};
LearnerModelParam mparam{MakeMP(n_col, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
// Test predict batch
PredictionCacheEntry out_predictions;
predictor->InitOutPredictions(sliced->Info(), &out_predictions.predictions, model);
predictor->PredictBatch(sliced.get(), &out_predictions, model, 0);
std::vector<float>& out_predictions_h = out_predictions.predictions.HostVector();
EXPECT_EQ(out_predictions_h, expected_result[i - 1]);
}
}
} // anonymous namespace
TEST(GPUPredictor, MGPUBasicColumnSplit) {
auto const n_gpus = common::AllVisibleGPUs();
if (n_gpus <= 1) {
GTEST_SKIP() << "Skipping MGPUIBasicColumnSplit test with # GPUs = " << n_gpus;
}
auto ctx = MakeCUDACtx(0);
std::unique_ptr<Predictor> predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &ctx));
predictor->Configure({});
std::array<std::vector<float>, 32> result{};
for (size_t i = 1; i < 33; i *= 2) {
size_t n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
LearnerModelParam mparam{MakeMP(n_col, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
// Test predict batch
PredictionCacheEntry out_predictions;
predictor->InitOutPredictions(dmat->Info(), &out_predictions.predictions, model);
predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
std::vector<float>& out_predictions_h = out_predictions.predictions.HostVector();
result[i - 1] = out_predictions_h;
}
RunWithInMemoryCommunicator(n_gpus, VerifyBasicColumnSplit, result);
}
TEST(GPUPredictor, EllpackBasic) {
size_t constexpr kCols{8};
auto ctx = MakeCUDACtx(0);
for (size_t bins = 2; bins < 258; bins += 16) {
size_t rows = bins * 16;
auto p_m =
RandomDataGenerator{rows, kCols, 0.0}.Bins(bins).Device(0).GenerateDeviceDMatrix(false);
ASSERT_FALSE(p_m->PageExists<SparsePage>());
TestPredictionFromGradientIndex<EllpackPage>(&ctx, rows, kCols, p_m);
TestPredictionFromGradientIndex<EllpackPage>(&ctx, bins, kCols, p_m);
}
}
TEST(GPUPredictor, EllpackTraining) {
auto ctx = MakeCUDACtx(0);
size_t constexpr kRows{128}, kCols{16}, kBins{64};
auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
.Bins(kBins)
.Device(ctx.Ordinal())
.GenerateDeviceDMatrix(false);
HostDeviceVector<float> storage(kRows * kCols);
auto columnar =
RandomDataGenerator{kRows, kCols, 0.0}.Device(ctx.Ordinal()).GenerateArrayInterface(&storage);
auto adapter = data::CupyAdapter(columnar);
std::shared_ptr<DMatrix> p_full{
DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1)};
TestTrainingPrediction(&ctx, kRows, kBins, p_full, p_ellpack);
}
TEST(GPUPredictor, ExternalMemoryTest) {
auto lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
const int n_classes = 3;
Context ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(5, .5, n_classes, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx, n_classes);
std::vector<std::unique_ptr<DMatrix>> dmats;
dmats.push_back(CreateSparsePageDMatrix(400));
dmats.push_back(CreateSparsePageDMatrix(800));
dmats.push_back(CreateSparsePageDMatrix(8000));
for (const auto& dmat: dmats) {
dmat->Info().base_margin_ = decltype(dmat->Info().base_margin_){
{dmat->Info().num_row_, static_cast<size_t>(n_classes)}, 0};
dmat->Info().base_margin_.Data()->Fill(0.5);
PredictionCacheEntry out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector();
for (size_t i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 2.0);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.5);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.5);
}
}
}
TEST(GPUPredictor, InplacePredictCupy) {
auto ctx = MakeCUDACtx(0);
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(ctx.Ordinal());
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
std::shared_ptr<DMatrix> p_fmat{new data::DMatrixProxy};
dynamic_cast<data::DMatrixProxy*>(p_fmat.get())->SetCUDAArray(interface_str.c_str());
TestInplacePrediction(&ctx, p_fmat, kRows, kCols);
}
TEST(GPUPredictor, InplacePredictCuDF) {
auto ctx = MakeCUDACtx(0);
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(ctx.Ordinal());
std::vector<HostDeviceVector<float>> storage(kCols);
auto interface_str = gen.GenerateColumnarArrayInterface(&storage);
std::shared_ptr<DMatrix> p_fmat{new data::DMatrixProxy};
dynamic_cast<data::DMatrixProxy*>(p_fmat.get())->SetCUDAArray(interface_str.c_str());
TestInplacePrediction(&ctx, p_fmat, kRows, kCols);
}
TEST(GpuPredictor, LesserFeatures) {
auto ctx = MakeCUDACtx(0);
TestPredictionWithLesserFeatures(&ctx);
}
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
hipSetDevice(0);
auto ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(1, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model(&mparam, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::make_unique<RegTree>());
model.CommitModelGroup(std::move(trees), 0);
auto gpu_lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
HostDeviceVector<float> predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
auto& phis = predictions.HostVector();
auto base_score = mparam.BaseScore(Context::kCpuId)(0);
EXPECT_EQ(phis[0], 0.0);
EXPECT_EQ(phis[1], base_score);
EXPECT_EQ(phis[2], 0.0);
EXPECT_EQ(phis[3], base_score);
EXPECT_EQ(phis[4], 0.0);
EXPECT_EQ(phis[5], base_score);
}
TEST(GPUPredictor, Shap) {
auto ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(1, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model(&mparam, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::make_unique<RegTree>());
trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0);
model.CommitModelGroup(std::move(trees), 0);
auto gpu_lparam = MakeCUDACtx(0);
auto cpu_lparam = MakeCUDACtx(-1);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
HostDeviceVector<float> predictions;
HostDeviceVector<float> cpu_predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_predictions, model);
auto& phis = predictions.HostVector();
auto& cpu_phis = cpu_predictions.HostVector();
for (auto i = 0ull; i < phis.size(); i++) {
EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3);
}
}
TEST(GPUPredictor, IterationRange) {
auto ctx = MakeCUDACtx(0);
TestIterationRange(&ctx);
}
TEST(GPUPredictor, CategoricalPrediction) {
auto ctx = MakeCUDACtx(0);
TestCategoricalPrediction(&ctx, false);
}
TEST(GPUPredictor, CategoricalPredictLeaf) {
auto ctx = MakeCUDACtx(0);
TestCategoricalPredictLeaf(&ctx, false);
}
TEST(GPUPredictor, PredictLeafBasic) {
size_t constexpr kRows = 5, kCols = 5;
auto dmat = RandomDataGenerator(kRows, kCols, 0).Device(0).GenerateDMatrix();
auto lparam = MakeCUDACtx(GPUIDX);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam mparam{MakeMP(kCols, .0, 1)};
Context ctx;
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
HostDeviceVector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
auto const& h_leaf_out_predictions = leaf_out_predictions.ConstHostVector();
for (auto v : h_leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
}
TEST(GPUPredictor, Sparse) {
auto ctx = MakeCUDACtx(0);
TestSparsePrediction(&ctx, 0.2);
TestSparsePrediction(&ctx, 0.8);
}
} // namespace xgboost::predictor
| 2fd89a59a843fd9a4b27c32efcfc452086ed14a4.cu | /**
* Copyright 2017-2023, XGBoost contributors
*/
#include <gtest/gtest.h>
#include <xgboost/c_api.h>
#include <xgboost/learner.h>
#include <xgboost/logging.h>
#include <xgboost/predictor.h>
#include <string>
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/data/proxy_dmatrix.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../helpers.h"
#include "test_predictor.h"
namespace xgboost::predictor {
TEST(GPUPredictor, Basic) {
auto cpu_lparam = MakeCUDACtx(-1);
auto gpu_lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
auto ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(n_col, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
// Test predict batch
PredictionCacheEntry gpu_out_predictions;
PredictionCacheEntry cpu_out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &gpu_out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0);
cpu_predictor->InitOutPredictions(dmat->Info(), &cpu_out_predictions.predictions, model);
cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector();
float abs_tolerance = 0.001;
for (size_t j = 0; j < gpu_out_predictions.predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
}
}
namespace {
void VerifyBasicColumnSplit(std::array<std::vector<float>, 32> const& expected_result) {
auto const world_size = collective::GetWorldSize();
auto const rank = collective::GetRank();
auto ctx = MakeCUDACtx(rank);
std::unique_ptr<Predictor> predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &ctx));
predictor->Configure({});
for (size_t i = 1; i < 33; i *= 2) {
size_t n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
std::unique_ptr<DMatrix> sliced{dmat->SliceCol(world_size, rank)};
LearnerModelParam mparam{MakeMP(n_col, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
// Test predict batch
PredictionCacheEntry out_predictions;
predictor->InitOutPredictions(sliced->Info(), &out_predictions.predictions, model);
predictor->PredictBatch(sliced.get(), &out_predictions, model, 0);
std::vector<float>& out_predictions_h = out_predictions.predictions.HostVector();
EXPECT_EQ(out_predictions_h, expected_result[i - 1]);
}
}
} // anonymous namespace
TEST(GPUPredictor, MGPUBasicColumnSplit) {
auto const n_gpus = common::AllVisibleGPUs();
if (n_gpus <= 1) {
GTEST_SKIP() << "Skipping MGPUIBasicColumnSplit test with # GPUs = " << n_gpus;
}
auto ctx = MakeCUDACtx(0);
std::unique_ptr<Predictor> predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &ctx));
predictor->Configure({});
std::array<std::vector<float>, 32> result{};
for (size_t i = 1; i < 33; i *= 2) {
size_t n_row = i, n_col = i;
auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix();
LearnerModelParam mparam{MakeMP(n_col, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
// Test predict batch
PredictionCacheEntry out_predictions;
predictor->InitOutPredictions(dmat->Info(), &out_predictions.predictions, model);
predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
std::vector<float>& out_predictions_h = out_predictions.predictions.HostVector();
result[i - 1] = out_predictions_h;
}
RunWithInMemoryCommunicator(n_gpus, VerifyBasicColumnSplit, result);
}
TEST(GPUPredictor, EllpackBasic) {
size_t constexpr kCols{8};
auto ctx = MakeCUDACtx(0);
for (size_t bins = 2; bins < 258; bins += 16) {
size_t rows = bins * 16;
auto p_m =
RandomDataGenerator{rows, kCols, 0.0}.Bins(bins).Device(0).GenerateDeviceDMatrix(false);
ASSERT_FALSE(p_m->PageExists<SparsePage>());
TestPredictionFromGradientIndex<EllpackPage>(&ctx, rows, kCols, p_m);
TestPredictionFromGradientIndex<EllpackPage>(&ctx, bins, kCols, p_m);
}
}
TEST(GPUPredictor, EllpackTraining) {
auto ctx = MakeCUDACtx(0);
size_t constexpr kRows{128}, kCols{16}, kBins{64};
auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
.Bins(kBins)
.Device(ctx.Ordinal())
.GenerateDeviceDMatrix(false);
HostDeviceVector<float> storage(kRows * kCols);
auto columnar =
RandomDataGenerator{kRows, kCols, 0.0}.Device(ctx.Ordinal()).GenerateArrayInterface(&storage);
auto adapter = data::CupyAdapter(columnar);
std::shared_ptr<DMatrix> p_full{
DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1)};
TestTrainingPrediction(&ctx, kRows, kBins, p_full, p_ellpack);
}
TEST(GPUPredictor, ExternalMemoryTest) {
auto lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
const int n_classes = 3;
Context ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(5, .5, n_classes, ctx.Ordinal())};
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx, n_classes);
std::vector<std::unique_ptr<DMatrix>> dmats;
dmats.push_back(CreateSparsePageDMatrix(400));
dmats.push_back(CreateSparsePageDMatrix(800));
dmats.push_back(CreateSparsePageDMatrix(8000));
for (const auto& dmat: dmats) {
dmat->Info().base_margin_ = decltype(dmat->Info().base_margin_){
{dmat->Info().num_row_, static_cast<size_t>(n_classes)}, 0};
dmat->Info().base_margin_.Data()->Fill(0.5);
PredictionCacheEntry out_predictions;
gpu_predictor->InitOutPredictions(dmat->Info(), &out_predictions.predictions, model);
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector();
for (size_t i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 2.0);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.5);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.5);
}
}
}
TEST(GPUPredictor, InplacePredictCupy) {
auto ctx = MakeCUDACtx(0);
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(ctx.Ordinal());
HostDeviceVector<float> data;
std::string interface_str = gen.GenerateArrayInterface(&data);
std::shared_ptr<DMatrix> p_fmat{new data::DMatrixProxy};
dynamic_cast<data::DMatrixProxy*>(p_fmat.get())->SetCUDAArray(interface_str.c_str());
TestInplacePrediction(&ctx, p_fmat, kRows, kCols);
}
TEST(GPUPredictor, InplacePredictCuDF) {
auto ctx = MakeCUDACtx(0);
size_t constexpr kRows{128}, kCols{64};
RandomDataGenerator gen(kRows, kCols, 0.5);
gen.Device(ctx.Ordinal());
std::vector<HostDeviceVector<float>> storage(kCols);
auto interface_str = gen.GenerateColumnarArrayInterface(&storage);
std::shared_ptr<DMatrix> p_fmat{new data::DMatrixProxy};
dynamic_cast<data::DMatrixProxy*>(p_fmat.get())->SetCUDAArray(interface_str.c_str());
TestInplacePrediction(&ctx, p_fmat, kRows, kCols);
}
TEST(GpuPredictor, LesserFeatures) {
auto ctx = MakeCUDACtx(0);
TestPredictionWithLesserFeatures(&ctx);
}
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
cudaSetDevice(0);
auto ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(1, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model(&mparam, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::make_unique<RegTree>());
model.CommitModelGroup(std::move(trees), 0);
auto gpu_lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
HostDeviceVector<float> predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
auto& phis = predictions.HostVector();
auto base_score = mparam.BaseScore(Context::kCpuId)(0);
EXPECT_EQ(phis[0], 0.0);
EXPECT_EQ(phis[1], base_score);
EXPECT_EQ(phis[2], 0.0);
EXPECT_EQ(phis[3], base_score);
EXPECT_EQ(phis[4], 0.0);
EXPECT_EQ(phis[5], base_score);
}
TEST(GPUPredictor, Shap) {
auto ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(1, .5, 1, ctx.Ordinal())};
gbm::GBTreeModel model(&mparam, &ctx);
std::vector<std::unique_ptr<RegTree>> trees;
trees.push_back(std::make_unique<RegTree>());
trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0);
model.CommitModelGroup(std::move(trees), 0);
auto gpu_lparam = MakeCUDACtx(0);
auto cpu_lparam = MakeCUDACtx(-1);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
HostDeviceVector<float> predictions;
HostDeviceVector<float> cpu_predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_predictions, model);
auto& phis = predictions.HostVector();
auto& cpu_phis = cpu_predictions.HostVector();
for (auto i = 0ull; i < phis.size(); i++) {
EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3);
}
}
TEST(GPUPredictor, IterationRange) {
auto ctx = MakeCUDACtx(0);
TestIterationRange(&ctx);
}
TEST(GPUPredictor, CategoricalPrediction) {
auto ctx = MakeCUDACtx(0);
TestCategoricalPrediction(&ctx, false);
}
TEST(GPUPredictor, CategoricalPredictLeaf) {
auto ctx = MakeCUDACtx(0);
TestCategoricalPredictLeaf(&ctx, false);
}
TEST(GPUPredictor, PredictLeafBasic) {
size_t constexpr kRows = 5, kCols = 5;
auto dmat = RandomDataGenerator(kRows, kCols, 0).Device(0).GenerateDMatrix();
auto lparam = MakeCUDACtx(GPUIDX);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
LearnerModelParam mparam{MakeMP(kCols, .0, 1)};
Context ctx;
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
HostDeviceVector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
auto const& h_leaf_out_predictions = leaf_out_predictions.ConstHostVector();
for (auto v : h_leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
}
TEST(GPUPredictor, Sparse) {
auto ctx = MakeCUDACtx(0);
TestSparsePrediction(&ctx, 0.2);
TestSparsePrediction(&ctx, 0.8);
}
} // namespace xgboost::predictor
|
fa093ad88347a28f9f80d3431874bd13a20a8af2.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <mutex>
#include <type_traits>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <cuda_fp16.hpp>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/backend.h"
#include "chainerx/backend_util.h"
#include "chainerx/cuda/rocblas.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/cusolver.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/float16.h"
#include "chainerx/kernels/creation.h"
#include "chainerx/kernels/linalg.h"
#include "chainerx/kernels/misc.h"
#include "chainerx/macro.h"
#include "chainerx/native/native_device.h"
#include "chainerx/routines/arithmetic.h"
#include "chainerx/routines/creation.h"
#include "chainerx/routines/indexing.h"
#include "chainerx/routines/linalg.h"
namespace chainerx {
namespace cuda {
namespace {
// Primary templates for the hipSOLVER dense-LAPACK wrappers used in this
// file (getrf/getrs: LU solve; gesvd: SVD; geqrf/orgqr: QR; potrf: Cholesky).
// They are instantiated for any element type OTHER than float/double and
// reject the dtype at runtime; the explicit float/double specializations
// below forward to the typed hipSOLVER entry points. Parameter names are
// commented out because the fallbacks never touch them.
template <typename T>
cusolverStatus_t GetrfBuffersize(hipsolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"};
}
template <typename T>
cusolverStatus_t Getrf(
hipsolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, T* /*workspace*/, int* /*devipiv*/, int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"};
}
template <typename T>
cusolverStatus_t Getrs(
hipsolverDnHandle_t /*handle*/,
hipblasOperation_t /*trans*/,
int /*n*/,
int /*nrhs*/,
T* /*a*/,
int /*lda*/,
int* /*devipiv*/,
T* /*b*/,
int /*ldb*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by getrs (Solve)"};
}
template <typename T>
cusolverStatus_t GesvdBuffersize(hipsolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by gesvd (SVD)"};
}
template <typename T>
cusolverStatus_t Gesvd(
hipsolverDnHandle_t /*handle*/,
signed char /*jobu*/,
signed char /*jobvt*/,
int /*m*/,
int /*n*/,
T* /*a*/,
int /*lda*/,
T* /*s*/,
T* /*u*/,
int /*ldu*/,
T* /*vt*/,
int /*ldvt*/,
T* /*work*/,
int /*lwork*/,
T* /*rwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by gesvd (SVD)"};
}
template <typename T>
cusolverStatus_t GeqrfBufferSize(hipsolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by geqrf (QR)"};
}
template <typename T>
cusolverStatus_t Geqrf(
hipsolverDnHandle_t /*handle*/,
int /*m*/,
int /*n*/,
T* /*a*/,
int /*lda*/,
T* /*tau*/,
T* /*workspace*/,
int /*lwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by geqrf (QR)"};
}
template <typename T>
cusolverStatus_t OrgqrBufferSize(
hipsolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, int /*k*/, T* /*a*/, int /*lda*/, T* /*tau*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by orgqr (QR)"};
}
template <typename T>
cusolverStatus_t Orgqr(
hipsolverDnHandle_t /*handle*/,
int /*m*/,
int /*n*/,
int /*k*/,
T* /*a*/,
int /*lda*/,
T* /*tau*/,
T* /*work*/,
int /*lwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by orgqr (QR)"};
}
template <typename T>
cusolverStatus_t PotrfBuffersize(
hipsolverDnHandle_t /*handle*/, hipblasFillMode_t /*uplo*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by potrf (Cholesky)"};
}
template <typename T>
cusolverStatus_t Potrf(
hipsolverDnHandle_t /*handle*/,
hipblasFillMode_t /*uplo*/,
int /*n*/,
T* /*a*/,
int /*lda*/,
T* /*workspace*/,
int /*lwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by potrf (Cholesky)"};
}
// Explicit float/double specializations of the wrappers above. Each one
// forwards directly to the corresponding hipSOLVER routine (D* for double,
// S* for float) and returns its status unchanged; error handling is done by
// the callers after copying the device-side `devinfo` flag back to the host.
template <>
cusolverStatus_t GetrfBuffersize<double>(hipsolverDnHandle_t handle, int m, int n, double* a, int lda, int* lwork) {
return hipsolverDnDgetrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t GetrfBuffersize<float>(hipsolverDnHandle_t handle, int m, int n, float* a, int lda, int* lwork) {
return hipsolverDnSgetrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t Getrf<double>(hipsolverDnHandle_t handle, int m, int n, double* a, int lda, double* workspace, int* devipiv, int* devinfo) {
return hipsolverDnDgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo);
}
template <>
cusolverStatus_t Getrf<float>(hipsolverDnHandle_t handle, int m, int n, float* a, int lda, float* workspace, int* devipiv, int* devinfo) {
return hipsolverDnSgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo);
}
template <>
cusolverStatus_t Getrs<double>(
hipsolverDnHandle_t handle,
hipblasOperation_t trans,
int n,
int nrhs,
double* a,
int lda,
int* devipiv,
double* b,
int ldb,
int* devinfo) {
return hipsolverDnDgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo);
}
template <>
cusolverStatus_t Getrs<float>(
hipsolverDnHandle_t handle,
hipblasOperation_t trans,
int n,
int nrhs,
float* a,
int lda,
int* devipiv,
float* b,
int ldb,
int* devinfo) {
return hipsolverDnSgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo);
}
template <>
cusolverStatus_t GesvdBuffersize<double>(hipsolverDnHandle_t handle, int m, int n, int* lwork) {
return hipsolverDnDgesvd_bufferSize(handle, m, n, lwork);
}
template <>
cusolverStatus_t GesvdBuffersize<float>(hipsolverDnHandle_t handle, int m, int n, int* lwork) {
return hipsolverDnSgesvd_bufferSize(handle, m, n, lwork);
}
template <>
cusolverStatus_t Gesvd<double>(
hipsolverDnHandle_t handle,
signed char jobu,
signed char jobvt,
int m,
int n,
double* a,
int lda,
double* s,
double* u,
int ldu,
double* vt,
int ldvt,
double* work,
int lwork,
double* rwork,
int* devinfo) {
return hipsolverDnDgesvd(handle, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, devinfo);
}
template <>
cusolverStatus_t Gesvd<float>(
hipsolverDnHandle_t handle,
signed char jobu,
signed char jobvt,
int m,
int n,
float* a,
int lda,
float* s,
float* u,
int ldu,
float* vt,
int ldvt,
float* work,
int lwork,
float* rwork,
int* devinfo) {
return hipsolverDnSgesvd(handle, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, devinfo);
}
template <>
cusolverStatus_t GeqrfBufferSize<double>(hipsolverDnHandle_t handle, int m, int n, double* a, int lda, int* lwork) {
return hipsolverDnDgeqrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t GeqrfBufferSize<float>(hipsolverDnHandle_t handle, int m, int n, float* a, int lda, int* lwork) {
return hipsolverDnSgeqrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t Geqrf<double>(
hipsolverDnHandle_t handle, int m, int n, double* a, int lda, double* tau, double* workspace, int lwork, int* devinfo) {
return hipsolverDnDgeqrf(handle, m, n, a, lda, tau, workspace, lwork, devinfo);
}
template <>
cusolverStatus_t Geqrf<float>(
hipsolverDnHandle_t handle, int m, int n, float* a, int lda, float* tau, float* workspace, int lwork, int* devinfo) {
return hipsolverDnSgeqrf(handle, m, n, a, lda, tau, workspace, lwork, devinfo);
}
template <>
cusolverStatus_t OrgqrBufferSize<double>(hipsolverDnHandle_t handle, int m, int n, int k, double* a, int lda, double* tau, int* lwork) {
return hipsolverDnDorgqr_bufferSize(handle, m, n, k, a, lda, tau, lwork);
}
template <>
cusolverStatus_t OrgqrBufferSize<float>(hipsolverDnHandle_t handle, int m, int n, int k, float* a, int lda, float* tau, int* lwork) {
return hipsolverDnSorgqr_bufferSize(handle, m, n, k, a, lda, tau, lwork);
}
template <>
cusolverStatus_t Orgqr<double>(
hipsolverDnHandle_t handle, int m, int n, int k, double* a, int lda, double* tau, double* work, int lwork, int* devinfo) {
return hipsolverDnDorgqr(handle, m, n, k, a, lda, tau, work, lwork, devinfo);
}
template <>
cusolverStatus_t Orgqr<float>(
hipsolverDnHandle_t handle, int m, int n, int k, float* a, int lda, float* tau, float* work, int lwork, int* devinfo) {
return hipsolverDnSorgqr(handle, m, n, k, a, lda, tau, work, lwork, devinfo);
}
template <>
cusolverStatus_t PotrfBuffersize<double>(hipsolverDnHandle_t handle, hipblasFillMode_t uplo, int n, double* a, int lda, int* lwork) {
return hipsolverDnDpotrf_bufferSize(handle, uplo, n, a, lda, lwork);
}
template <>
cusolverStatus_t PotrfBuffersize<float>(hipsolverDnHandle_t handle, hipblasFillMode_t uplo, int n, float* a, int lda, int* lwork) {
return hipsolverDnSpotrf_bufferSize(handle, uplo, n, a, lda, lwork);
}
template <>
cusolverStatus_t Potrf<double>(
hipsolverDnHandle_t handle, hipblasFillMode_t uplo, int n, double* a, int lda, double* workspace, int lwork, int* devinfo) {
return hipsolverDnDpotrf(handle, uplo, n, a, lda, workspace, lwork, devinfo);
}
template <>
cusolverStatus_t Potrf<float>(
hipsolverDnHandle_t handle, hipblasFillMode_t uplo, int n, float* a, int lda, float* workspace, int lwork, int* devinfo) {
return hipsolverDnSpotrf(handle, uplo, n, a, lda, workspace, lwork, devinfo);
}
// Solves the linear system a * x = b on the GPU and writes x into `out`.
// Uses LU factorization (getrf) followed by a triangular solve (getrs).
// hipSOLVER expects column-major (Fortran-order) data, so `a` and `b` are
// transposed on the way in and the solution is transposed back on the way
// out. Throws ChainerxError when either LAPACK routine reports failure.
// `a` must be square (m x m); `b` may be 1-D (m) or 2-D (m x nrhs).
template <typename T>
void SolveImpl(const Array& a, const Array& b, const Array& out) {
    Device& device = a.device();
    Dtype dtype = a.dtype();
    cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
    // getrf factorizes in-place, so work on a column-major copy of `a`.
    Array lu_matrix = Empty(a.shape(), dtype, device);
    device.backend().CallKernel<CopyKernel>(a.Transpose(), lu_matrix);
    auto lu_ptr = static_cast<T*>(internal::GetRawOffsetData(lu_matrix));
    int64_t m = a.shape()[0];
    // LAPACK requires lda >= 1 even for zero-sized inputs.
    // Fixed hipify artifact: use std::max rather than relying on a
    // global-namespace ::max leaked by the HIP headers.
    int64_t lda = std::max(int64_t{1}, m);
    int64_t nrhs = 1;
    if (b.ndim() == 2) {
        nrhs = b.shape()[1];
    }
    // Pivot indices produced by getrf and consumed by getrs.
    Array ipiv = Empty(Shape{m}, Dtype::kInt32, device);
    auto ipiv_ptr = static_cast<int*>(internal::GetRawOffsetData(ipiv));
    int buffersize = 0;
    device_internals.cusolverdn_handle().Call(GetrfBuffersize<T>, m, m, lu_ptr, lda, &buffersize);
    Array work = Empty(Shape{buffersize}, dtype, device);
    work_ptr_decl:;
    auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
    std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
    device_internals.cusolverdn_handle().Call(Getrf<T>, m, m, lu_ptr, lda, work_ptr, ipiv_ptr, static_cast<int*>(devinfo.get()));
    // The LAPACK info flag lives on the device; copy it to the host to check.
    int devinfo_h = 0;
    Device& native_device = GetDefaultContext().GetDevice({"native", 0});
    device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
    if (devinfo_h != 0) {
        throw ChainerxError{"Unsuccessful getrf (LU) execution. Info = ", devinfo_h};
    }
    // getrs overwrites the right-hand side with the solution in-place.
    Array out_transposed = b.Transpose().Copy();
    auto out_ptr = static_cast<T*>(internal::GetRawOffsetData(out_transposed));
    device_internals.cusolverdn_handle().Call(
            Getrs<T>, HIPBLAS_OP_N, m, nrhs, lu_ptr, lda, ipiv_ptr, out_ptr, lda, static_cast<int*>(devinfo.get()));
    device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
    if (devinfo_h != 0) {
        throw ChainerxError{"Unsuccessful getrs (Solve) execution. Info = ", devinfo_h};
    }
    // Transpose the column-major solution back to row-major `out`.
    device.backend().CallKernel<CopyKernel>(out_transposed.Transpose(), out);
}
template <typename T>
void QrImpl(const Array& a, const Array& q, const Array& r, const Array& tau, QrMode mode) {
Device& device = a.device();
Dtype dtype = a.dtype();
int64_t m = a.shape()[0];
int64_t n = a.shape()[1];
int64_t k = ::min(m, n);
int64_t lda = ::max(int64_t{1}, m);
// cuSOLVER does not return correct result in this case and older versions of cuSOLVER (<10.1)
// might not work well with zero-sized arrays therefore it's better to return earlier
if (a.shape().GetTotalSize() == 0) {
if (mode == QrMode::kComplete) {
device.backend().CallKernel<IdentityKernel>(q);
}
return;
}
Array r_temp = a.Transpose().Copy(); // QR decomposition is done in-place
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
auto r_ptr = static_cast<T*>(internal::GetRawOffsetData(r_temp));
auto tau_ptr = static_cast<T*>(internal::GetRawOffsetData(tau));
std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
int buffersize_geqrf = 0;
device_internals.cusolverdn_handle().Call(GeqrfBufferSize<T>, m, n, r_ptr, lda, &buffersize_geqrf);
Array work = Empty(Shape{buffersize_geqrf}, dtype, device);
auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
device_internals.cusolverdn_handle().Call(
Geqrf<T>, m, n, r_ptr, lda, tau_ptr, work_ptr, buffersize_geqrf, static_cast<int*>(devinfo.get()));
int devinfo_h = 0;
Device& native_device = GetDefaultContext().GetDevice({"native", 0});
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful geqrf (QR) execution. Info = ", devinfo_h};
}
if (mode == QrMode::kR) {
r_temp = r_temp.At(std::vector<ArrayIndex>{Slice{}, Slice{0, k}}).Transpose(); // R = R[:, 0:k].T
r_temp = Triu(r_temp, 0);
device.backend().CallKernel<CopyKernel>(r_temp, r);
return;
}
if (mode == QrMode::kRaw) {
device.backend().CallKernel<CopyKernel>(r_temp, r);
return;
}
int64_t mc;
Shape q_shape{0};
if (mode == QrMode::kComplete && m > n) {
mc = m;
q_shape = Shape{m, m};
} else {
mc = k;
q_shape = Shape{n, m};
}
Array q_temp = Empty(q_shape, dtype, device);
device.backend().CallKernel<CopyKernel>(r_temp, q_temp.At(std::vector<ArrayIndex>{Slice{0, n}, Slice{}})); // Q[0:n, :] = R
auto q_ptr = static_cast<T*>(internal::GetRawOffsetData(q_temp));
int buffersize_orgqr = 0;
device_internals.cusolverdn_handle().Call(OrgqrBufferSize<T>, m, mc, k, q_ptr, lda, tau_ptr, &buffersize_orgqr);
Array work_orgqr = Empty(Shape{buffersize_orgqr}, dtype, device);
auto work_orgqr_ptr = static_cast<T*>(internal::GetRawOffsetData(work_orgqr));
device_internals.cusolverdn_handle().Call(
Orgqr<T>, m, mc, k, q_ptr, lda, tau_ptr, work_orgqr_ptr, buffersize_orgqr, static_cast<int*>(devinfo.get()));
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful orgqr (QR) execution. Info = ", devinfo_h};
}
q_temp = q_temp.At(std::vector<ArrayIndex>{Slice{0, mc}, Slice{}}).Transpose(); // Q = Q[0:mc, :].T
r_temp = r_temp.At(std::vector<ArrayIndex>{Slice{}, Slice{0, mc}}).Transpose(); // R = R[:, 0:mc].T
r_temp = Triu(r_temp, 0);
device.backend().CallKernel<CopyKernel>(q_temp, q);
device.backend().CallKernel<CopyKernel>(r_temp, r);
}
} // namespace
class CudaSolveKernel : public SolveKernel {
public:
void Call(const Array& a, const Array& b, const Array& out) override {
Device& device = a.device();
CudaSetDeviceScope scope{device.index()};
CHAINERX_ASSERT(a.ndim() == 2);
CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]);
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
SolveImpl<T>(a.dtype() == out.dtype() ? a : a.AsType(out.dtype()), b.dtype() == out.dtype() ? b : b.AsType(out.dtype()), out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SolveKernel, CudaSolveKernel);
class CudaInverseKernel : public InverseKernel {
public:
void Call(const Array& a, const Array& out) override {
Device& device = a.device();
Dtype dtype = a.dtype();
CudaSetDeviceScope scope{device.index()};
CHAINERX_ASSERT(a.ndim() == 2);
CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]);
// There is LAPACK routine ``getri`` for computing the inverse of an LU-factored matrix,
// but cuSOLVER does not have it implemented, therefore inverse is obtained with ``getrs``
// inv(A) == solve(A, Identity)
Array b = Identity(a.shape()[0], dtype, device);
device.backend().CallKernel<SolveKernel>(a, b, out);
}
};
CHAINERX_CUDA_REGISTER_KERNEL(InverseKernel, CudaInverseKernel);
class CudaSvdKernel : public SvdKernel {
public:
void Call(const Array& a, const Array& u, const Array& s, const Array& vt, bool full_matrices, bool compute_uv) override {
Device& device = a.device();
Dtype dtype = a.dtype();
CudaSetDeviceScope scope{device.index()};
CHAINERX_ASSERT(a.ndim() == 2);
if (a.shape().GetTotalSize() == 0) {
if (full_matrices && compute_uv) {
device.backend().CallKernel<IdentityKernel>(u);
device.backend().CallKernel<IdentityKernel>(vt);
}
// This kernel works correctly for zero-sized input also without early return
return;
}
// cuSOLVER assumes arrays are in column-major order.
// In order to avoid transposing the input matrix, matrix dimensions are swapped.
// Since the input is assumed to be transposed, it is necessary to
// swap the pointers to u and vt matrices when calling Gesvd.
int64_t n = a.shape()[0];
int64_t m = a.shape()[1];
int64_t k = ::min(m, n);
Array x = EmptyLike(a, device);
Array u_temp{};
Array vt_temp{};
bool trans_flag;
// Remark: gesvd only supports m>=n.
// See: https://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd
// Therefore for the case m<n we calculuate svd of transposed matrix,
// instead of calculating svd(A) = U S V^T, we compute svd(A^T) = V S U^T
if (m >= n) {
device.backend().CallKernel<CopyKernel>(a, x);
trans_flag = false;
} else {
m = a.shape()[0];
n = a.shape()[1];
x = x.Reshape(Shape{n, m});
device.backend().CallKernel<CopyKernel>(a.Transpose(), x);
trans_flag = true;
// Temporary arrays for u, vt are needed to store transposed results
Shape u_shape;
Shape vt_shape;
if (compute_uv) {
if (full_matrices) {
u_shape = Shape{m, m};
vt_shape = Shape{n, n};
} else {
u_shape = Shape{k, m};
vt_shape = Shape{n, k};
}
} else {
u_shape = Shape{0};
vt_shape = Shape{0};
}
u_temp = Empty(u_shape, dtype, device);
vt_temp = Empty(vt_shape, dtype, device);
}
int64_t lda = ::max(int64_t{1}, m);
int64_t ldu = ::max(int64_t{1}, m);
int64_t ldvt = full_matrices ? ::max(int64_t{1}, n) : ::max(int64_t{1}, k);
auto svd_impl = [&](auto pt) {
using T = typename decltype(pt)::type;
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
auto x_ptr = static_cast<T*>(internal::GetRawOffsetData(x));
auto s_ptr = static_cast<T*>(internal::GetRawOffsetData(s));
auto u_ptr = static_cast<T*>(internal::GetRawOffsetData(u));
auto vt_ptr = static_cast<T*>(internal::GetRawOffsetData(vt));
if (trans_flag) {
u_ptr = static_cast<T*>(internal::GetRawOffsetData(vt_temp));
vt_ptr = static_cast<T*>(internal::GetRawOffsetData(u_temp));
}
std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
int buffersize = 0;
device_internals.cusolverdn_handle().Call(GesvdBuffersize<T>, m, n, &buffersize);
Array work = Empty(Shape{buffersize}, dtype, device);
auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
signed char job;
if (compute_uv) {
job = full_matrices ? 'A' : 'S';
} else {
job = 'N';
}
// When calling Gesvd pointers to u and vt are swapped instead of transposing the input matrix.
device_internals.cusolverdn_handle().Call(
Gesvd<T>,
job,
job,
m,
n,
x_ptr,
lda,
s_ptr,
vt_ptr,
ldu,
u_ptr,
ldvt,
work_ptr,
buffersize,
nullptr,
static_cast<int*>(devinfo.get()));
int devinfo_h = 0;
Device& native_device = GetDefaultContext().GetDevice({"native", 0});
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful gesvd (SVD) execution. Info = ", devinfo_h};
}
if (trans_flag) {
device.backend().CallKernel<CopyKernel>(u_temp.Transpose(), u);
device.backend().CallKernel<CopyKernel>(vt_temp.Transpose(), vt);
}
};
VisitFloatingPointDtype(dtype, svd_impl);
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SvdKernel, CudaSvdKernel);
class CudaQrKernel : public QrKernel {
public:
void Call(const Array& a, const Array& q, const Array& r, const Array& tau, QrMode mode) override {
Device& device = a.device();
Dtype dtype = a.dtype();
CudaSetDeviceScope scope{device.index()};
CHAINERX_ASSERT(a.ndim() == 2);
VisitFloatingPointDtype(dtype, [&](auto pt) {
using T = typename decltype(pt)::type;
QrImpl<T>(a, q, r, tau, mode);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(QrKernel, CudaQrKernel);
// Computes the Cholesky factorization of the symmetric positive-definite
// matrix `a`, writing the lower triangular factor L (with L L^T = A) into
// `out`. Throws ChainerxError if potrf reports a nonzero info flag.
class CudaCholeskyKernel : public CholeskyKernel {
public:
    void Call(const Array& a, const Array& out) override {
        Device& device = a.device();
        device.CheckDevicesCompatible(a, out);
        Dtype dtype = a.dtype();
        CudaSetDeviceScope scope{device.index()};
        CHAINERX_ASSERT(a.ndim() == 2);
        CHAINERX_ASSERT(out.ndim() == 2);
        CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]);
        CHAINERX_ASSERT(out.IsContiguous());
        CHAINERX_ASSERT(a.dtype() == out.dtype());
        // cuSOLVER might not work well with zero-sized arrays for older versions of cuSOLVER (<10.1)
        // therefore it's better to return earlier
        if (a.shape().GetTotalSize() == 0) {
            return;
        }
        // potrf (cholesky) stores result in-place, therefore copy ``a`` to ``out`` and then pass ``out`` to the routine
        device.backend().CallKernel<CopyKernel>(Tril(a, 0), out);
        auto cholesky_impl = [&](auto pt) {
            using T = typename decltype(pt)::type;
            // Note that cuSOLVER uses Fortran order.
            // To compute a lower triangular matrix L = cholesky(A), we use cuSOLVER to compute an upper triangular matrix U = cholesky(A).
            hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
            cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
            // compute workspace size and prepare workspace
            auto out_ptr = static_cast<T*>(internal::GetRawOffsetData(out));
            int work_size = 0;
            int64_t n = a.shape()[0];
            // Fixed hipify artifact: std::max instead of global ::max.
            device_internals.cusolverdn_handle().Call(PotrfBuffersize<T>, uplo, n, out_ptr, std::max(int64_t{1}, n), &work_size);
            // POTRF execution
            Array work = Empty(Shape{work_size}, dtype, device);
            auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
            std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
            device_internals.cusolverdn_handle().Call(
                    Potrf<T>, uplo, n, out_ptr, std::max(int64_t{1}, n), work_ptr, work_size, static_cast<int*>(devinfo.get()));
            // Validate the device-side info flag on the host.
            int devinfo_h = 0;
            Device& native_device = GetDefaultContext().GetDevice({"native", 0});
            device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
            if (devinfo_h != 0) {
                throw ChainerxError{"Unsuccessful potrf (Cholesky) execution. Info = ", devinfo_h};
            }
        };
        VisitFloatingPointDtype(dtype, cholesky_impl);
    }
};
CHAINERX_CUDA_REGISTER_KERNEL(CholeskyKernel, CudaCholeskyKernel);
} // namespace cuda
} // namespace chainerx
| fa093ad88347a28f9f80d3431874bd13a20a8af2.cu | #include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <mutex>
#include <type_traits>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <cuda_fp16.hpp>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/backend.h"
#include "chainerx/backend_util.h"
#include "chainerx/cuda/cublas.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/cusolver.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/float16.h"
#include "chainerx/kernels/creation.h"
#include "chainerx/kernels/linalg.h"
#include "chainerx/kernels/misc.h"
#include "chainerx/macro.h"
#include "chainerx/native/native_device.h"
#include "chainerx/routines/arithmetic.h"
#include "chainerx/routines/creation.h"
#include "chainerx/routines/indexing.h"
#include "chainerx/routines/linalg.h"
namespace chainerx {
namespace cuda {
namespace {
// Primary templates for the cuSOLVER dense-LAPACK wrappers used in this file
// (getrf/getrs: LU solve; gesvd: SVD; geqrf/orgqr: QR; potrf: Cholesky).
// They are instantiated for any element type OTHER than float/double and
// reject the dtype at runtime. The explicit float/double specializations
// that follow forward to the typed cusolverDn entry points (D* for double,
// S* for float) and return their status unchanged; callers check the
// device-side `devinfo` flag afterwards.
template <typename T>
cusolverStatus_t GetrfBuffersize(cusolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"};
}
template <typename T>
cusolverStatus_t Getrf(
cusolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, T* /*workspace*/, int* /*devipiv*/, int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"};
}
template <typename T>
cusolverStatus_t Getrs(
cusolverDnHandle_t /*handle*/,
cublasOperation_t /*trans*/,
int /*n*/,
int /*nrhs*/,
T* /*a*/,
int /*lda*/,
int* /*devipiv*/,
T* /*b*/,
int /*ldb*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by getrs (Solve)"};
}
template <typename T>
cusolverStatus_t GesvdBuffersize(cusolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by gesvd (SVD)"};
}
template <typename T>
cusolverStatus_t Gesvd(
cusolverDnHandle_t /*handle*/,
signed char /*jobu*/,
signed char /*jobvt*/,
int /*m*/,
int /*n*/,
T* /*a*/,
int /*lda*/,
T* /*s*/,
T* /*u*/,
int /*ldu*/,
T* /*vt*/,
int /*ldvt*/,
T* /*work*/,
int /*lwork*/,
T* /*rwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by gesvd (SVD)"};
}
template <typename T>
cusolverStatus_t GeqrfBufferSize(cusolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by geqrf (QR)"};
}
template <typename T>
cusolverStatus_t Geqrf(
cusolverDnHandle_t /*handle*/,
int /*m*/,
int /*n*/,
T* /*a*/,
int /*lda*/,
T* /*tau*/,
T* /*workspace*/,
int /*lwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by geqrf (QR)"};
}
template <typename T>
cusolverStatus_t OrgqrBufferSize(
cusolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, int /*k*/, T* /*a*/, int /*lda*/, T* /*tau*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by orgqr (QR)"};
}
template <typename T>
cusolverStatus_t Orgqr(
cusolverDnHandle_t /*handle*/,
int /*m*/,
int /*n*/,
int /*k*/,
T* /*a*/,
int /*lda*/,
T* /*tau*/,
T* /*work*/,
int /*lwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by orgqr (QR)"};
}
template <typename T>
cusolverStatus_t PotrfBuffersize(
cusolverDnHandle_t /*handle*/, cublasFillMode_t /*uplo*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) {
throw DtypeError{"Only Arrays of float or double type are supported by potrf (Cholesky)"};
}
template <typename T>
cusolverStatus_t Potrf(
cusolverDnHandle_t /*handle*/,
cublasFillMode_t /*uplo*/,
int /*n*/,
T* /*a*/,
int /*lda*/,
T* /*workspace*/,
int /*lwork*/,
int* /*devinfo*/) {
throw DtypeError{"Only Arrays of float or double type are supported by potrf (Cholesky)"};
}
// Typed specializations forwarding to cuSOLVER.
template <>
cusolverStatus_t GetrfBuffersize<double>(cusolverDnHandle_t handle, int m, int n, double* a, int lda, int* lwork) {
return cusolverDnDgetrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t GetrfBuffersize<float>(cusolverDnHandle_t handle, int m, int n, float* a, int lda, int* lwork) {
return cusolverDnSgetrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t Getrf<double>(cusolverDnHandle_t handle, int m, int n, double* a, int lda, double* workspace, int* devipiv, int* devinfo) {
return cusolverDnDgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo);
}
template <>
cusolverStatus_t Getrf<float>(cusolverDnHandle_t handle, int m, int n, float* a, int lda, float* workspace, int* devipiv, int* devinfo) {
return cusolverDnSgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo);
}
template <>
cusolverStatus_t Getrs<double>(
cusolverDnHandle_t handle,
cublasOperation_t trans,
int n,
int nrhs,
double* a,
int lda,
int* devipiv,
double* b,
int ldb,
int* devinfo) {
return cusolverDnDgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo);
}
template <>
cusolverStatus_t Getrs<float>(
cusolverDnHandle_t handle,
cublasOperation_t trans,
int n,
int nrhs,
float* a,
int lda,
int* devipiv,
float* b,
int ldb,
int* devinfo) {
return cusolverDnSgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo);
}
template <>
cusolverStatus_t GesvdBuffersize<double>(cusolverDnHandle_t handle, int m, int n, int* lwork) {
return cusolverDnDgesvd_bufferSize(handle, m, n, lwork);
}
template <>
cusolverStatus_t GesvdBuffersize<float>(cusolverDnHandle_t handle, int m, int n, int* lwork) {
return cusolverDnSgesvd_bufferSize(handle, m, n, lwork);
}
template <>
cusolverStatus_t Gesvd<double>(
cusolverDnHandle_t handle,
signed char jobu,
signed char jobvt,
int m,
int n,
double* a,
int lda,
double* s,
double* u,
int ldu,
double* vt,
int ldvt,
double* work,
int lwork,
double* rwork,
int* devinfo) {
return cusolverDnDgesvd(handle, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, devinfo);
}
template <>
cusolverStatus_t Gesvd<float>(
cusolverDnHandle_t handle,
signed char jobu,
signed char jobvt,
int m,
int n,
float* a,
int lda,
float* s,
float* u,
int ldu,
float* vt,
int ldvt,
float* work,
int lwork,
float* rwork,
int* devinfo) {
return cusolverDnSgesvd(handle, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, devinfo);
}
template <>
cusolverStatus_t GeqrfBufferSize<double>(cusolverDnHandle_t handle, int m, int n, double* a, int lda, int* lwork) {
return cusolverDnDgeqrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t GeqrfBufferSize<float>(cusolverDnHandle_t handle, int m, int n, float* a, int lda, int* lwork) {
return cusolverDnSgeqrf_bufferSize(handle, m, n, a, lda, lwork);
}
template <>
cusolverStatus_t Geqrf<double>(
cusolverDnHandle_t handle, int m, int n, double* a, int lda, double* tau, double* workspace, int lwork, int* devinfo) {
return cusolverDnDgeqrf(handle, m, n, a, lda, tau, workspace, lwork, devinfo);
}
template <>
cusolverStatus_t Geqrf<float>(
cusolverDnHandle_t handle, int m, int n, float* a, int lda, float* tau, float* workspace, int lwork, int* devinfo) {
return cusolverDnSgeqrf(handle, m, n, a, lda, tau, workspace, lwork, devinfo);
}
template <>
cusolverStatus_t OrgqrBufferSize<double>(cusolverDnHandle_t handle, int m, int n, int k, double* a, int lda, double* tau, int* lwork) {
return cusolverDnDorgqr_bufferSize(handle, m, n, k, a, lda, tau, lwork);
}
template <>
cusolverStatus_t OrgqrBufferSize<float>(cusolverDnHandle_t handle, int m, int n, int k, float* a, int lda, float* tau, int* lwork) {
return cusolverDnSorgqr_bufferSize(handle, m, n, k, a, lda, tau, lwork);
}
template <>
cusolverStatus_t Orgqr<double>(
cusolverDnHandle_t handle, int m, int n, int k, double* a, int lda, double* tau, double* work, int lwork, int* devinfo) {
return cusolverDnDorgqr(handle, m, n, k, a, lda, tau, work, lwork, devinfo);
}
template <>
cusolverStatus_t Orgqr<float>(
cusolverDnHandle_t handle, int m, int n, int k, float* a, int lda, float* tau, float* work, int lwork, int* devinfo) {
return cusolverDnSorgqr(handle, m, n, k, a, lda, tau, work, lwork, devinfo);
}
template <>
cusolverStatus_t PotrfBuffersize<double>(cusolverDnHandle_t handle, cublasFillMode_t uplo, int n, double* a, int lda, int* lwork) {
return cusolverDnDpotrf_bufferSize(handle, uplo, n, a, lda, lwork);
}
template <>
cusolverStatus_t PotrfBuffersize<float>(cusolverDnHandle_t handle, cublasFillMode_t uplo, int n, float* a, int lda, int* lwork) {
return cusolverDnSpotrf_bufferSize(handle, uplo, n, a, lda, lwork);
}
template <>
cusolverStatus_t Potrf<double>(
cusolverDnHandle_t handle, cublasFillMode_t uplo, int n, double* a, int lda, double* workspace, int lwork, int* devinfo) {
return cusolverDnDpotrf(handle, uplo, n, a, lda, workspace, lwork, devinfo);
}
template <>
cusolverStatus_t Potrf<float>(
cusolverDnHandle_t handle, cublasFillMode_t uplo, int n, float* a, int lda, float* workspace, int lwork, int* devinfo) {
return cusolverDnSpotrf(handle, uplo, n, a, lda, workspace, lwork, devinfo);
}
// Solves the linear system a * x = b on the GPU and writes x into `out`.
// Uses LU factorization (getrf) followed by a triangular solve (getrs).
// cuSOLVER expects column-major (Fortran-order) data, so `a` and `b` are
// transposed on the way in and the solution is transposed back on the way
// out. Throws ChainerxError when either LAPACK routine reports failure.
template <typename T>
void SolveImpl(const Array& a, const Array& b, const Array& out) {
Device& device = a.device();
Dtype dtype = a.dtype();
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
// getrf factorizes in-place, so work on a column-major copy of `a`.
Array lu_matrix = Empty(a.shape(), dtype, device);
device.backend().CallKernel<CopyKernel>(a.Transpose(), lu_matrix);
auto lu_ptr = static_cast<T*>(internal::GetRawOffsetData(lu_matrix));
int64_t m = a.shape()[0];
// LAPACK requires lda >= 1 even for zero-sized inputs.
int64_t lda = std::max(int64_t{1}, m);
int64_t nrhs = 1;
if (b.ndim() == 2) {
nrhs = b.shape()[1];
}
// Pivot indices produced by getrf and consumed by getrs.
Array ipiv = Empty(Shape{m}, Dtype::kInt32, device);
auto ipiv_ptr = static_cast<int*>(internal::GetRawOffsetData(ipiv));
int buffersize = 0;
device_internals.cusolverdn_handle().Call(GetrfBuffersize<T>, m, m, lu_ptr, lda, &buffersize);
Array work = Empty(Shape{buffersize}, dtype, device);
auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
device_internals.cusolverdn_handle().Call(Getrf<T>, m, m, lu_ptr, lda, work_ptr, ipiv_ptr, static_cast<int*>(devinfo.get()));
// The LAPACK info flag lives on the device; copy it to the host to check.
int devinfo_h = 0;
Device& native_device = GetDefaultContext().GetDevice({"native", 0});
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful getrf (LU) execution. Info = ", devinfo_h};
}
// getrs overwrites the right-hand side with the solution in-place.
Array out_transposed = b.Transpose().Copy();
auto out_ptr = static_cast<T*>(internal::GetRawOffsetData(out_transposed));
device_internals.cusolverdn_handle().Call(
Getrs<T>, CUBLAS_OP_N, m, nrhs, lu_ptr, lda, ipiv_ptr, out_ptr, lda, static_cast<int*>(devinfo.get()));
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful getrs (Solve) execution. Info = ", devinfo_h};
}
// Transpose the column-major solution back to row-major `out`.
device.backend().CallKernel<CopyKernel>(out_transposed.Transpose(), out);
}
// Computes the QR decomposition of the two-dimensional array ``a`` with
// cuSOLVER: ``geqrf`` produces the factorization in packed form, and
// ``orgqr`` expands the Householder reflectors into an explicit Q.
// Results are written into the caller-allocated outputs ``q``, ``r`` and
// ``tau``; ``mode`` selects reduced / complete / R-only / raw output.
// NOTE(review): assumes ``q``/``r``/``tau`` were allocated by the caller with
// the shapes expected for ``mode`` -- confirm against the dispatching routine.
template <typename T>
void QrImpl(const Array& a, const Array& q, const Array& r, const Array& tau, QrMode mode) {
Device& device = a.device();
Dtype dtype = a.dtype();
int64_t m = a.shape()[0];
int64_t n = a.shape()[1];
int64_t k = std::min(m, n);
// Leading dimension for cuSOLVER; must be at least 1 even when m == 0.
int64_t lda = std::max(int64_t{1}, m);
// cuSOLVER does not return correct result in this case and older versions of cuSOLVER (<10.1)
// might not work well with zero-sized arrays therefore it's better to return earlier
if (a.shape().GetTotalSize() == 0) {
if (mode == QrMode::kComplete) {
device.backend().CallKernel<IdentityKernel>(q);
}
return;
}
// A transposed copy gives the column-major layout cuSOLVER expects.
Array r_temp = a.Transpose().Copy(); // QR decomposition is done in-place
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
auto r_ptr = static_cast<T*>(internal::GetRawOffsetData(r_temp));
auto tau_ptr = static_cast<T*>(internal::GetRawOffsetData(tau));
// Device-side status word written by each cuSOLVER routine.
std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
int buffersize_geqrf = 0;
// Query the workspace size required by geqrf, then allocate it.
device_internals.cusolverdn_handle().Call(GeqrfBufferSize<T>, m, n, r_ptr, lda, &buffersize_geqrf);
Array work = Empty(Shape{buffersize_geqrf}, dtype, device);
auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
device_internals.cusolverdn_handle().Call(
Geqrf<T>, m, n, r_ptr, lda, tau_ptr, work_ptr, buffersize_geqrf, static_cast<int*>(devinfo.get()));
// Copy the status flag back to the host to detect failures.
int devinfo_h = 0;
Device& native_device = GetDefaultContext().GetDevice({"native", 0});
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful geqrf (QR) execution. Info = ", devinfo_h};
}
if (mode == QrMode::kR) {
// Only R requested: slice, transpose back to row-major, and zero the
// strictly lower triangle.
r_temp = r_temp.At(std::vector<ArrayIndex>{Slice{}, Slice{0, k}}).Transpose(); // R = R[:, 0:k].T
r_temp = Triu(r_temp, 0);
device.backend().CallKernel<CopyKernel>(r_temp, r);
return;
}
if (mode == QrMode::kRaw) {
// Raw mode returns the packed geqrf output (reflectors + R) unmodified.
device.backend().CallKernel<CopyKernel>(r_temp, r);
return;
}
// Number of columns of Q to generate: all m in complete mode, k otherwise.
int64_t mc;
Shape q_shape{0};
if (mode == QrMode::kComplete && m > n) {
mc = m;
q_shape = Shape{m, m};
} else {
mc = k;
q_shape = Shape{n, m};
}
// Q is built transposed (column-major); only the first mc rows are kept below.
Array q_temp = Empty(q_shape, dtype, device);
device.backend().CallKernel<CopyKernel>(r_temp, q_temp.At(std::vector<ArrayIndex>{Slice{0, n}, Slice{}})); // Q[0:n, :] = R
auto q_ptr = static_cast<T*>(internal::GetRawOffsetData(q_temp));
int buffersize_orgqr = 0;
device_internals.cusolverdn_handle().Call(OrgqrBufferSize<T>, m, mc, k, q_ptr, lda, tau_ptr, &buffersize_orgqr);
Array work_orgqr = Empty(Shape{buffersize_orgqr}, dtype, device);
auto work_orgqr_ptr = static_cast<T*>(internal::GetRawOffsetData(work_orgqr));
device_internals.cusolverdn_handle().Call(
Orgqr<T>, m, mc, k, q_ptr, lda, tau_ptr, work_orgqr_ptr, buffersize_orgqr, static_cast<int*>(devinfo.get()));
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful orgqr (QR) execution. Info = ", devinfo_h};
}
q_temp = q_temp.At(std::vector<ArrayIndex>{Slice{0, mc}, Slice{}}).Transpose(); // Q = Q[0:mc, :].T
r_temp = r_temp.At(std::vector<ArrayIndex>{Slice{}, Slice{0, mc}}).Transpose(); // R = R[:, 0:mc].T
r_temp = Triu(r_temp, 0);
device.backend().CallKernel<CopyKernel>(q_temp, q);
device.backend().CallKernel<CopyKernel>(r_temp, r);
}
} // namespace
// Solves the linear system a @ x = b on the GPU by dispatching on the
// floating-point dtype of ``out`` and delegating to the cuSOLVER-backed
// SolveImpl. Inputs are promoted to the output dtype when they differ.
class CudaSolveKernel : public SolveKernel {
public:
    void Call(const Array& a, const Array& b, const Array& out) override {
        Device& device = a.device();
        CudaSetDeviceScope scope{device.index()};
        // Only square coefficient matrices are supported.
        CHAINERX_ASSERT(a.ndim() == 2);
        CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]);
        VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
            using T = typename decltype(pt)::type;
            // Promote both operands to the output dtype before dispatching.
            Array lhs = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
            Array rhs = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
            SolveImpl<T>(lhs, rhs, out);
        });
    }
};
CHAINERX_CUDA_REGISTER_KERNEL(SolveKernel, CudaSolveKernel);
// Computes the matrix inverse by solving A X = I.
class CudaInverseKernel : public InverseKernel {
public:
    void Call(const Array& a, const Array& out) override {
        Device& device = a.device();
        CudaSetDeviceScope scope{device.index()};
        // Only square matrices are invertible here.
        CHAINERX_ASSERT(a.ndim() == 2);
        CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]);
        // There is LAPACK routine ``getri`` for computing the inverse of an LU-factored matrix,
        // but cuSOLVER does not have it implemented, therefore inverse is obtained with ``getrs``
        // inv(A) == solve(A, Identity)
        Array identity = Identity(a.shape()[0], a.dtype(), device);
        device.backend().CallKernel<SolveKernel>(a, identity, out);
    }
};
CHAINERX_CUDA_REGISTER_KERNEL(InverseKernel, CudaInverseKernel);
// Singular value decomposition kernel backed by cuSOLVER ``gesvd``.
// Writes U, S and V^T into the caller-allocated ``u``, ``s`` and ``vt``;
// ``full_matrices`` / ``compute_uv`` mirror the usual NumPy svd options.
class CudaSvdKernel : public SvdKernel {
public:
void Call(const Array& a, const Array& u, const Array& s, const Array& vt, bool full_matrices, bool compute_uv) override {
Device& device = a.device();
Dtype dtype = a.dtype();
CudaSetDeviceScope scope{device.index()};
CHAINERX_ASSERT(a.ndim() == 2);
if (a.shape().GetTotalSize() == 0) {
if (full_matrices && compute_uv) {
device.backend().CallKernel<IdentityKernel>(u);
device.backend().CallKernel<IdentityKernel>(vt);
}
// This kernel works correctly for zero-sized input also without early return
return;
}
// cuSOLVER assumes arrays are in column-major order.
// In order to avoid transposing the input matrix, matrix dimensions are swapped.
// Since the input is assumed to be transposed, it is necessary to
// swap the pointers to u and vt matrices when calling Gesvd.
int64_t n = a.shape()[0];
int64_t m = a.shape()[1];
int64_t k = std::min(m, n);
Array x = EmptyLike(a, device);
Array u_temp{};
Array vt_temp{};
bool trans_flag;
// Remark: gesvd only supports m>=n.
// See: https://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd
// Therefore for the case m<n we calculuate svd of transposed matrix,
// instead of calculating svd(A) = U S V^T, we compute svd(A^T) = V S U^T
if (m >= n) {
device.backend().CallKernel<CopyKernel>(a, x);
trans_flag = false;
} else {
// Undo the implicit swap above so the problem handed to gesvd has m >= n.
m = a.shape()[0];
n = a.shape()[1];
x = x.Reshape(Shape{n, m});
device.backend().CallKernel<CopyKernel>(a.Transpose(), x);
trans_flag = true;
// Temporary arrays for u, vt are needed to store transposed results
Shape u_shape;
Shape vt_shape;
if (compute_uv) {
if (full_matrices) {
u_shape = Shape{m, m};
vt_shape = Shape{n, n};
} else {
u_shape = Shape{k, m};
vt_shape = Shape{n, k};
}
} else {
// gesvd is not asked for U/V^T in this case, so no storage is needed.
u_shape = Shape{0};
vt_shape = Shape{0};
}
u_temp = Empty(u_shape, dtype, device);
vt_temp = Empty(vt_shape, dtype, device);
}
// Leading dimensions; cuSOLVER requires them to be at least 1.
int64_t lda = std::max(int64_t{1}, m);
int64_t ldu = std::max(int64_t{1}, m);
int64_t ldvt = full_matrices ? std::max(int64_t{1}, n) : std::max(int64_t{1}, k);
auto svd_impl = [&](auto pt) {
using T = typename decltype(pt)::type;
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
auto x_ptr = static_cast<T*>(internal::GetRawOffsetData(x));
auto s_ptr = static_cast<T*>(internal::GetRawOffsetData(s));
auto u_ptr = static_cast<T*>(internal::GetRawOffsetData(u));
auto vt_ptr = static_cast<T*>(internal::GetRawOffsetData(vt));
if (trans_flag) {
// Transposed problem: gesvd's U is our V^T and vice versa, so results
// go into the temporaries and are transposed back afterwards.
u_ptr = static_cast<T*>(internal::GetRawOffsetData(vt_temp));
vt_ptr = static_cast<T*>(internal::GetRawOffsetData(u_temp));
}
// Device-side status word written by gesvd.
std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
int buffersize = 0;
device_internals.cusolverdn_handle().Call(GesvdBuffersize<T>, m, n, &buffersize);
Array work = Empty(Shape{buffersize}, dtype, device);
auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
// gesvd job codes: 'A' = all columns, 'S' = first min(m, n), 'N' = none.
signed char job;
if (compute_uv) {
job = full_matrices ? 'A' : 'S';
} else {
job = 'N';
}
// When calling Gesvd pointers to u and vt are swapped instead of transposing the input matrix.
device_internals.cusolverdn_handle().Call(
Gesvd<T>,
job,
job,
m,
n,
x_ptr,
lda,
s_ptr,
vt_ptr,
ldu,
u_ptr,
ldvt,
work_ptr,
buffersize,
nullptr,
static_cast<int*>(devinfo.get()));
// Copy the status flag back to the host to detect failures.
int devinfo_h = 0;
Device& native_device = GetDefaultContext().GetDevice({"native", 0});
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful gesvd (SVD) execution. Info = ", devinfo_h};
}
if (trans_flag) {
// Transpose the temporaries back into the caller's u and vt.
device.backend().CallKernel<CopyKernel>(u_temp.Transpose(), u);
device.backend().CallKernel<CopyKernel>(vt_temp.Transpose(), vt);
}
};
VisitFloatingPointDtype(dtype, svd_impl);
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SvdKernel, CudaSvdKernel);
// QR decomposition kernel: dispatches on the floating-point dtype of ``a``
// and forwards to the cuSOLVER-backed QrImpl.
class CudaQrKernel : public QrKernel {
public:
    void Call(const Array& a, const Array& q, const Array& r, const Array& tau, QrMode mode) override {
        Device& device = a.device();
        CudaSetDeviceScope scope{device.index()};
        CHAINERX_ASSERT(a.ndim() == 2);  // QR is defined here for matrices only
        auto dispatch = [&](auto pt) {
            using T = typename decltype(pt)::type;
            QrImpl<T>(a, q, r, tau, mode);
        };
        VisitFloatingPointDtype(a.dtype(), dispatch);
    }
};
CHAINERX_CUDA_REGISTER_KERNEL(QrKernel, CudaQrKernel);
// Cholesky factorization kernel backed by cuSOLVER ``potrf``.
// Writes the lower-triangular factor of ``a`` into ``out``.
class CudaCholeskyKernel : public CholeskyKernel {
public:
void Call(const Array& a, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, out);
Dtype dtype = a.dtype();
CudaSetDeviceScope scope{device.index()};
// Only square matrices with matching dtype and a contiguous output are supported.
CHAINERX_ASSERT(a.ndim() == 2);
CHAINERX_ASSERT(out.ndim() == 2);
CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]);
CHAINERX_ASSERT(out.IsContiguous());
CHAINERX_ASSERT(a.dtype() == out.dtype());
// cuSOLVER might not work well with zero-sized arrays for older versions of cuSOLVER (<10.1)
// therefore it's better to return earlier
if (a.shape().GetTotalSize() == 0) {
return;
}
// potrf (cholesky) stores result in-place, therefore copy ``a`` to ``out`` and then pass ``out`` to the routine
device.backend().CallKernel<CopyKernel>(Tril(a, 0), out);
auto cholesky_impl = [&](auto pt) {
using T = typename decltype(pt)::type;
// Note that cuSOLVER uses Fortran order.
// To compute a lower triangular matrix L = cholesky(A), we use cuSOLVER to compute an upper triangular matrix U = cholesky(A).
// (A row-major lower triangle is an upper triangle when viewed column-major.)
cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
// compute workspace size and prepare workspace
auto out_ptr = static_cast<T*>(internal::GetRawOffsetData(out));
int work_size = 0;
int64_t n = a.shape()[0];
device_internals.cusolverdn_handle().Call(PotrfBuffersize<T>, uplo, n, out_ptr, std::max(int64_t{1}, n), &work_size);
// POTRF execution
Array work = Empty(Shape{work_size}, dtype, device);
auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work));
// Device-side status word; non-zero signals failure (e.g. the matrix is
// not positive definite).
std::shared_ptr<void> devinfo = device.Allocate(sizeof(int));
device_internals.cusolverdn_handle().Call(
Potrf<T>, uplo, n, out_ptr, std::max(int64_t{1}, n), work_ptr, work_size, static_cast<int*>(devinfo.get()));
// Copy the status flag back to the host to detect failures.
int devinfo_h = 0;
Device& native_device = GetDefaultContext().GetDevice({"native", 0});
device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device);
if (devinfo_h != 0) {
throw ChainerxError{"Unsuccessful potrf (Cholesky) execution. Info = ", devinfo_h};
}
};
VisitFloatingPointDtype(dtype, cholesky_impl);
}
};
CHAINERX_CUDA_REGISTER_KERNEL(CholeskyKernel, CudaCholeskyKernel);
} // namespace cuda
} // namespace chainerx
|
52ee581626271954b3098b62ead13ec4e64ad6bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <cstddef>
#include <cstring>
#include <string>
using cudf::detail::device_span;
using cudf::detail::host_span;
// Asserts that two host spans are equivalent views, i.e. they have the same
// size and the same data pointer (element values are not compared).
// NOTE(review): "equivolent" is a typo for "equivalent"; kept as-is because
// call sites use the misspelled name.
template <typename T>
void expect_equivolent(host_span<T> a, host_span<T> b)
{
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.data(), b.data());
}
// Verifies that ``input`` contains ``expected_size`` elements and that they
// match, element by element, the sequence beginning at ``expected``.
template <typename Iterator1, typename T>
void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input)
{
  EXPECT_EQ(expected_size, input.size());
  auto actual = input.begin();
  for (size_t i = 0; i < expected_size; ++i, ++actual, ++expected) { EXPECT_EQ(*expected, *actual); }
}
// Convenience overload: compares ``input`` against the characters of ``expected``.
template <typename T>
void expect_match(std::string expected, host_span<T> input)
{
  expect_match(expected.begin(), expected.size(), input);
}
std::string const hello_wold_message = "hello world";
std::vector<char> create_hello_world_message()
{
return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end());
}
class SpanTest : public cudf::test::BaseFixture {
};
TEST(SpanTest, CanCreateFullSubspan)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_equivolent(message_span, message_span.subspan(0, message_span.size()));
}
TEST(SpanTest, CanTakeFirst)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello", message_span.first(5));
}
TEST(SpanTest, CanTakeLast)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("world", message_span.last(5));
}
TEST(SpanTest, CanTakeSubspanFull)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello world", message_span.subspan(0, 11));
}
TEST(SpanTest, CanTakeSubspanPartial)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("lo w", message_span.subspan(3, 4));
}
TEST(SpanTest, CanGetFront)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('h', message_span.front());
}
TEST(SpanTest, CanGetBack)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('d', message_span.back());
}
TEST(SpanTest, CanGetData)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ(message.data(), message_span.data());
}
TEST(SpanTest, CanDetermineEmptiness)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_FALSE(message_span.empty());
EXPECT_TRUE(empty_span.empty());
}
TEST(SpanTest, CanGetSize)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_EQ(static_cast<size_t>(11), message_span.size());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size());
}
TEST(SpanTest, CanGetSizeBytes)
{
auto doubles = std::vector<double>({6, 3, 2});
auto const doubles_span = host_span<double>(doubles.data(), doubles.size());
auto const empty_span = host_span<double>();
EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes());
}
TEST(SpanTest, CanCopySpan)
{
auto message = create_hello_world_message();
host_span<char> message_span_copy;
{
auto const message_span = host_span<char>(message.data(), message.size());
message_span_copy = message_span;
}
EXPECT_EQ(message.data(), message_span_copy.data());
EXPECT_EQ(message.size(), message_span_copy.size());
}
TEST(SpanTest, CanSubscriptRead)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('o', message_span[4]);
}
TEST(SpanTest, CanSubscriptWrite)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
message_span[4] = 'x';
EXPECT_EQ('x', message_span[4]);
}
TEST(SpanTest, CanConstructFromHostContainers)
{
auto std_vector = std::vector<int>(1);
auto h_vector = thrust::host_vector<int>(1);
(void)host_span<int>(std_vector);
(void)host_span<int>(h_vector);
auto const std_vector_c = std_vector;
auto const h_vector_c = h_vector;
(void)host_span<int const>(std_vector_c);
(void)host_span<int const>(h_vector_c);
}
TEST(SpanTest, CanConstructFromDeviceContainers)
{
auto d_thrust_vector = thrust::device_vector<int>(1);
auto d_vector = rmm::device_vector<int>(1);
auto d_uvector = rmm::device_uvector<int>(1, 0);
(void)device_span<int>(d_thrust_vector);
(void)device_span<int>(d_vector);
(void)device_span<int>(d_uvector);
auto const& d_thrust_vector_c = d_thrust_vector;
auto const& d_vector_c = d_vector;
auto const& d_uvector_c = d_uvector;
(void)device_span<int const>(d_thrust_vector_c);
(void)device_span<int const>(d_vector_c);
(void)device_span<int const>(d_uvector_c);
}
// Device kernel: sets result[0] to true so the host can verify that a
// device_span is usable inside a kernel.
__global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; }
// Round-trips a flag through a device_span: a kernel sets it, the host reads
// it back and asserts it changed.
TEST(SpanTest, CanUseDeviceSpan)
{
  rmm::device_vector<bool> d_message = std::vector<bool>({false});
  auto d_span = device_span<bool>(d_message.data().get(), d_message.size());
  hipLaunchKernelGGL(( simple_device_kernel), dim3(1), dim3(1), 0, 0, d_span);
  // Fail loudly if the launch or kernel execution went wrong instead of
  // silently reading back stale data (the status was previously ignored).
  ASSERT_EQ(hipSuccess, hipDeviceSynchronize());
  thrust::host_vector<bool> h_message = d_message;
  ASSERT_TRUE(h_message[0]);
}
CUDF_TEST_PROGRAM_MAIN()
| 52ee581626271954b3098b62ead13ec4e64ad6bf.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <cstddef>
#include <cstring>
#include <string>
using cudf::detail::device_span;
using cudf::detail::host_span;
// Asserts that two host spans are equivalent views, i.e. they have the same
// size and the same data pointer (element values are not compared).
// NOTE(review): "equivolent" is a typo for "equivalent"; kept as-is because
// call sites use the misspelled name.
template <typename T>
void expect_equivolent(host_span<T> a, host_span<T> b)
{
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.data(), b.data());
}
template <typename Iterator1, typename T>
void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input)
{
EXPECT_EQ(expected_size, input.size());
for (size_t i = 0; i < expected_size; i++) { EXPECT_EQ(*(expected + i), *(input.begin() + i)); }
}
template <typename T>
void expect_match(std::string expected, host_span<T> input)
{
return expect_match(expected.begin(), expected.size(), input);
}
std::string const hello_wold_message = "hello world";
std::vector<char> create_hello_world_message()
{
return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end());
}
class SpanTest : public cudf::test::BaseFixture {
};
TEST(SpanTest, CanCreateFullSubspan)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_equivolent(message_span, message_span.subspan(0, message_span.size()));
}
TEST(SpanTest, CanTakeFirst)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello", message_span.first(5));
}
TEST(SpanTest, CanTakeLast)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("world", message_span.last(5));
}
TEST(SpanTest, CanTakeSubspanFull)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello world", message_span.subspan(0, 11));
}
TEST(SpanTest, CanTakeSubspanPartial)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("lo w", message_span.subspan(3, 4));
}
TEST(SpanTest, CanGetFront)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('h', message_span.front());
}
TEST(SpanTest, CanGetBack)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('d', message_span.back());
}
TEST(SpanTest, CanGetData)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ(message.data(), message_span.data());
}
TEST(SpanTest, CanDetermineEmptiness)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_FALSE(message_span.empty());
EXPECT_TRUE(empty_span.empty());
}
TEST(SpanTest, CanGetSize)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_EQ(static_cast<size_t>(11), message_span.size());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size());
}
TEST(SpanTest, CanGetSizeBytes)
{
auto doubles = std::vector<double>({6, 3, 2});
auto const doubles_span = host_span<double>(doubles.data(), doubles.size());
auto const empty_span = host_span<double>();
EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes());
}
TEST(SpanTest, CanCopySpan)
{
auto message = create_hello_world_message();
host_span<char> message_span_copy;
{
auto const message_span = host_span<char>(message.data(), message.size());
message_span_copy = message_span;
}
EXPECT_EQ(message.data(), message_span_copy.data());
EXPECT_EQ(message.size(), message_span_copy.size());
}
TEST(SpanTest, CanSubscriptRead)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('o', message_span[4]);
}
TEST(SpanTest, CanSubscriptWrite)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
message_span[4] = 'x';
EXPECT_EQ('x', message_span[4]);
}
TEST(SpanTest, CanConstructFromHostContainers)
{
auto std_vector = std::vector<int>(1);
auto h_vector = thrust::host_vector<int>(1);
(void)host_span<int>(std_vector);
(void)host_span<int>(h_vector);
auto const std_vector_c = std_vector;
auto const h_vector_c = h_vector;
(void)host_span<int const>(std_vector_c);
(void)host_span<int const>(h_vector_c);
}
TEST(SpanTest, CanConstructFromDeviceContainers)
{
auto d_thrust_vector = thrust::device_vector<int>(1);
auto d_vector = rmm::device_vector<int>(1);
auto d_uvector = rmm::device_uvector<int>(1, 0);
(void)device_span<int>(d_thrust_vector);
(void)device_span<int>(d_vector);
(void)device_span<int>(d_uvector);
auto const& d_thrust_vector_c = d_thrust_vector;
auto const& d_vector_c = d_vector;
auto const& d_uvector_c = d_uvector;
(void)device_span<int const>(d_thrust_vector_c);
(void)device_span<int const>(d_vector_c);
(void)device_span<int const>(d_uvector_c);
}
// Device kernel: sets result[0] to true so the host can verify that a
// device_span is usable inside a kernel.
__global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; }
// Round-trips a flag through a device_span: a kernel sets it, the host reads
// it back and asserts it changed.
TEST(SpanTest, CanUseDeviceSpan)
{
  rmm::device_vector<bool> d_message = std::vector<bool>({false});
  auto d_span = device_span<bool>(d_message.data().get(), d_message.size());
  simple_device_kernel<<<1, 1>>>(d_span);
  // Fail loudly if the launch or kernel execution went wrong instead of
  // silently reading back stale data (the status was previously ignored).
  ASSERT_EQ(cudaSuccess, cudaDeviceSynchronize());
  thrust::host_vector<bool> h_message = d_message;
  ASSERT_TRUE(h_message[0]);
}
CUDF_TEST_PROGRAM_MAIN()
|
66732793900ec2ca00131dc68a5b74340651452a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Prints the launch geometry (grid/block dimensions and indices) plus the
// warp size from exactly one thread of the grid.
__global__ void kernelA(){
    // Giant conditional so that it only prints once; this would not be done in practice.
    // Use logical && (short-circuit) rather than bitwise & on the comparison results.
    if (blockIdx.x == 0 && blockIdx.y == 1 && blockIdx.z == 0 && threadIdx.x == 1 && threadIdx.y == 0 && threadIdx.z == 1) {
        printf("gridDim (%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
        printf("blockDim (%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
        printf("blockIdx (%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
        printf("threadIdx (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
        // minimum unit being executed by compute engine at the same time
        // called wave front in AMD. It is not set by the programmer
        printf("warpSize (%d)\n", warpSize);
    }
}
// Launches kernelA on device 0 with a 50x100x50 grid of 8x8x16-thread blocks,
// waits for completion, then resets the device.
// NOTE(review): return codes of all runtime calls are ignored, so a failed
// launch would go unnoticed.
int main()
{
hipSetDevice(0);
// dim3 is an integer vector type
dim3 blocks(50, 100, 50);
dim3 threads(8, 8, 16);
hipLaunchKernelGGL(( kernelA) , dim3(blocks),dim3(threads), 0, 0, );
// Block until the kernel has finished so its printf output is flushed.
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| 66732793900ec2ca00131dc68a5b74340651452a.cu | #include <stdio.h>
// Prints the launch geometry (grid/block dimensions and indices) plus the
// warp size from exactly one thread of the grid.
__global__ void kernelA(){
    // Giant conditional so that it only prints once; this would not be done in practice.
    // Use logical && (short-circuit) rather than bitwise & on the comparison results.
    if (blockIdx.x == 0 && blockIdx.y == 1 && blockIdx.z == 0 && threadIdx.x == 1 && threadIdx.y == 0 && threadIdx.z == 1) {
        printf("gridDim (%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
        printf("blockDim (%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
        printf("blockIdx (%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
        printf("threadIdx (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
        // minimum unit being executed by compute engine at the same time
        // called wave front in AMD. It is not set by the programmer
        printf("warpSize (%d)\n", warpSize);
    }
}
// Launches kernelA on device 0 with a 50x100x50 grid of 8x8x16-thread blocks,
// waits for completion, then resets the device.
// NOTE(review): return codes of all CUDA runtime calls are ignored, so a
// failed launch would go unnoticed.
int main()
{
cudaSetDevice(0);
// dim3 is an integer vector type
dim3 blocks(50, 100, 50);
dim3 threads(8, 8, 16);
kernelA <<<blocks,threads>>>();
// Block until the kernel has finished so its printf output is flushed.
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
f273217a81337d748451dfe77fd83fe3e75cb167.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************\
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
* cudart (c) 2008 Erik Entrich *
\*************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
struct vector3d
{
float x, y, z;
};
struct rgbcolor
{
float r, g, b;
};
struct object
{
int type;
struct vector3d pos;
float rad;
struct vector3d n;
struct rgbcolor c;
float e;
float d, g;
};
#define PI 3.14159265358979323846f
#define TYPE_SPHERE 1
#define TYPE_PLANE 2
#define THREADCOUNT 256
#define BLOCKCOUNT 16
//: !!!!!
#define TILE_WIDTH 20
#define TILE_HEIGHT 20
#define RAYTRACE_SAMPLES_PER_KERNEL_EXECUTION (THREADCOUNT * BLOCKCOUNT * 4)
struct object *objects;
struct object *device_objects;
int objectcount;
struct rgbcolor *device_imagedata;
unsigned int *device_randseeds;
vector3d *device_primary_ray_directions;
char envmap_filename[256] = "";
struct rgbcolor *envmap;
struct rgbcolor *device_envmap;
unsigned int envmap_width, envmap_height;
__device__ unsigned int device_envmap_width, device_envmap_height;
float envmap_offset;
__device__ float device_envmap_offset;
int width, height;
int numsamples;
int maxdepth;
char filename[256];
struct rgbcolor background;
__device__ int device_samples;
__device__ int device_width, device_height;
__device__ int device_maxdepth;
__device__ int device_objectcount;
__device__ struct rgbcolor device_background;
// Euclidean length of ``vec``. Uses the explicit single-precision sqrtf so
// no double-precision math is pulled in on the device.
__device__ float vec_length(struct vector3d vec)
{
    return sqrtf(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z);
}
// Returns ``vec`` scaled to unit length (undefined for a zero vector).
__device__ struct vector3d vec_normalize(struct vector3d vec)
{
    struct vector3d result;
    const float len = vec_length(vec);
    result.x = vec.x / len;
    result.y = vec.y / len;
    result.z = vec.z / len;
    return result;
}
// Dot product of two 3-component vectors.
__device__ float vec_dot(struct vector3d veca, struct vector3d vecb)
{
    const float xs = veca.x * vecb.x;
    const float ys = veca.y * vecb.y;
    const float zs = veca.z * vecb.z;
    return xs + ys + zs;
}
// Cross product veca x vecb (right-handed).
__device__ struct vector3d vec_cross(struct vector3d veca, struct vector3d vecb)
{
    struct vector3d out;
    out.x = veca.y * vecb.z - veca.z * vecb.y;
    out.y = veca.z * vecb.x - veca.x * vecb.z;
    out.z = veca.x * vecb.y - veca.y * vecb.x;
    return out;
}
__shared__ unsigned int randseed[THREADCOUNT];
// Returns a pseudo-random float in [0, 1) for thread ``tid`` and advances
// the generator state kept in the block-shared ``randseed`` array.
// Linear congruential generator: 1664525 / 1013904223 are the classic
// "Numerical Recipes" constants; the modulus 2^32 is implicit in the
// unsigned wrap-around.
__device__ float device_random(int tid)
{
#define MULTIPLIER ((unsigned int) 1664525)
#define OFFSET ((unsigned int) 1013904223)
#define MODULUS ((double) 4294967296.0)
#define MODULUS_INV ((float) (1.0 / MODULUS))
unsigned int sNew = randseed[tid] * MULTIPLIER + OFFSET;
randseed[tid] = sNew;
// Scale the new 32-bit state into [0, 1).
float res = sNew * MODULUS_INV;
return res;
}
/*
 * Writes ``imagedata`` (width x height RGB float triples) to ``filename``
 * as a binary PFM image: "PF" magic line, dimensions, then a negative scale
 * (the sign marks little-endian sample data, the magnitude records the
 * normalization factor so readers can undo it).
 * When ``normalize`` is non-zero every channel is divided by the global
 * maximum component value.
 * Returns 0 on success, 1 if the file could not be opened.
 * NOTE(review): fprintf/fwrite results are not checked, so a write error
 * (e.g. full disk) still returns 0. width/height are unsigned but printed
 * with %i -- fine below INT_MAX.
 */
int save_pfm(unsigned int width, unsigned int height, struct rgbcolor *imagedata, char filename[], int normalize)
{
FILE *pfm;
int x, y;
float r, g, b, max;
pfm = fopen(filename, "wb");
if (pfm != NULL)
{
/* max stays 1 when not normalizing, so the division below is a no-op. */
max = 1;
if (normalize)
{
/* Scan all pixels for the largest channel value. */
for (y = 0; y<height; y++)
for (x = 0; x<width; x++)
{
if (max < imagedata[x + y*width].r)
max = imagedata[x + y*width].r;
if (max < imagedata[x + y*width].g)
max = imagedata[x + y*width].g;
if (max < imagedata[x + y*width].b)
max = imagedata[x + y*width].b;
}
}
fprintf(pfm, "PF\n%i %i\n-%f\n", width, height, max);
for (y = 0; y<height; y++)
for (x = 0; x<width; x++)
{
r = imagedata[x + y*width].r / max;
g = imagedata[x + y*width].g / max;
b = imagedata[x + y*width].b / max;
fwrite(&r, sizeof(float), 1, pfm);
fwrite(&g, sizeof(float), 1, pfm);
fwrite(&b, sizeof(float), 1, pfm);
}
fclose(pfm);
return 0;
}
else
{
perror("fopen");
return 1;
}
}
/*
 * Loads a binary little-endian color PFM ("PF" magic, negative scale) from
 * ``filename``. On success returns a malloc'd width*height array of
 * rgbcolor with every channel multiplied by the header's scale magnitude,
 * and stores the dimensions through ``width``/``height``. Returns NULL on
 * any open/parse/read failure (and frees partial allocations).
 *
 * Fixes over the previous version: the results of fgets/fscanf/fread are
 * now checked (previously uninitialized buffer/width/height/scale/pixel
 * data could be used on short or malformed files), and the file is opened
 * read-only ("rb") instead of read-write ("rb+"), so write-protected files
 * can be loaded too.
 */
struct rgbcolor *load_pfm(unsigned int *width, unsigned int *height, char filename[])
{
    FILE *pfm;
    char buffer[256];
    unsigned int x, y;
    float r, g, b, scale;
    struct rgbcolor *imagedata;

    pfm = fopen(filename, "rb");
    if (pfm == NULL)
    {
        perror("fopen");
        return NULL;
    }
    /* The magic line must be exactly "PF\n" (color PFM). */
    if (fgets(buffer, sizeof(buffer) - 1, pfm) == NULL || strcmp(buffer, "PF\n") != 0)
    {
        fclose(pfm);
        return NULL;
    }
    if (fscanf(pfm, "%u %u\n", width, height) != 2)
    {
        fclose(pfm);
        return NULL;
    }
    /* A non-negative scale would mean big-endian data, which is not handled. */
    if (fscanf(pfm, "%f\n", &scale) != 1 || scale >= 0.0f)
    {
        fclose(pfm);
        return NULL;
    }
    scale *= -1.0f;
    imagedata = (struct rgbcolor*) malloc(sizeof(struct rgbcolor) * *width * *height);
    if (imagedata == NULL)
    {
        perror("malloc");
        fclose(pfm);
        return NULL;
    }
    for (y = 0; y < *height; y++)
        for (x = 0; x < *width; x++)
        {
            if (fread(&r, sizeof(float), 1, pfm) != 1 ||
                fread(&g, sizeof(float), 1, pfm) != 1 ||
                fread(&b, sizeof(float), 1, pfm) != 1)
            {
                /* Truncated pixel data. */
                free(imagedata);
                fclose(pfm);
                return NULL;
            }
            imagedata[x + y * *width].r = r * scale;
            imagedata[x + y * *width].g = g * scale;
            imagedata[x + y * *width].b = b * scale;
        }
    fclose(pfm);
    return imagedata;
}
// Parse the scene description file into the global object list plus the
// global render settings (width/height/numsamples/maxdepth, background
// color, output filename).  The format is: a settings line, a background
// RGB line, the output filename, then a sequence of "sphere"/"plane"
// records; '#' lines and blank lines are skipped.
// Returns 1 on success, 0 on any error (a message is printed to stderr).
int build_scene(char *scenefilename)
{
    const int maxobjects = 64;
    int linenum;
    char tempbuffer[64];
    FILE *scenefile;
    objectcount = 0;
    // NOTE: plain operator new throws on failure, so this NULL check is
    // effectively dead; kept for parity with the original error handling.
    objects = new object[maxobjects];
    if (objects == NULL)
    {
        perror("malloc");
        return 0;
    }
    scenefile = fopen(scenefilename, "r");
    if (scenefile == NULL)
    {
        perror("fopen");
        return 0;
    }
    // Header: render settings, background color, output filename.
    if (fscanf(scenefile, "%i %i %i %i\n", &width, &height, &numsamples, &maxdepth) != 4 ||
        fscanf(scenefile, "%f %f %f\n", &background.r, &background.g, &background.b) != 3 ||
        fgets(filename, 255, scenefile) == NULL)
    {
        fprintf(stderr, "Error in scene file header\n");
        fclose(scenefile);
        return 0;
    }
    // Strip the trailing newline only if it is present (the last line of a
    // file may not end with one).
    {
        size_t len = strlen(filename);
        if (len > 0 && filename[len - 1] == '\n')
            filename[len - 1] = '\0';
    }
    linenum = 3;
    while (fgets(tempbuffer, 64, scenefile) != NULL)
    {
        linenum++;
        if ((tempbuffer[0] == '#') || (tempbuffer[0] == '\n'))
            continue;
        // Guard against overflowing the fixed-size object array.
        if (objectcount >= maxobjects)
        {
            fprintf(stderr, "Too many objects in scene file (max %i)\n", maxobjects);
            fclose(scenefile);
            return 0;
        }
        if (strcmp(tempbuffer, "sphere\n") == 0)
        {
            // Sphere record: position+radius, color, emission/diffuse/gloss.
            objects[objectcount].type = TYPE_SPHERE;
            if (fscanf(scenefile, "%f %f %f %f\n", &objects[objectcount].pos.x, &objects[objectcount].pos.y, &objects[objectcount].pos.z, &objects[objectcount].rad) != 4 ||
                fscanf(scenefile, "%f %f %f\n", &objects[objectcount].c.r, &objects[objectcount].c.g, &objects[objectcount].c.b) != 3 ||
                fscanf(scenefile, "%f %f %f\n", &objects[objectcount].e, &objects[objectcount].d, &objects[objectcount].g) != 3)
            {
                fprintf(stderr, "Error in scene file on line %i\n", linenum);
                fclose(scenefile);
                return 0;
            }
        }
        else if (strcmp(tempbuffer, "plane\n") == 0)
        {
            // Plane record: point, normal, color, emission/diffuse/gloss.
            objects[objectcount].type = TYPE_PLANE;
            if (fscanf(scenefile, "%f %f %f\n", &objects[objectcount].pos.x, &objects[objectcount].pos.y, &objects[objectcount].pos.z) != 3 ||
                fscanf(scenefile, "%f %f %f\n", &objects[objectcount].n.x, &objects[objectcount].n.y, &objects[objectcount].n.z) != 3 ||
                fscanf(scenefile, "%f %f %f\n", &objects[objectcount].c.r, &objects[objectcount].c.g, &objects[objectcount].c.b) != 3 ||
                fscanf(scenefile, "%f %f %f\n", &objects[objectcount].e, &objects[objectcount].d, &objects[objectcount].g) != 3)
            {
                fprintf(stderr, "Error in scene file on line %i\n", linenum);
                fclose(scenefile);
                return 0;
            }
        }
        else
        {
            fprintf(stderr, "Error in scene file on line %i\n", linenum);
            fclose(scenefile);
            return 0;
        }
        objectcount++;
    }
    fclose(scenefile);
    return 1;
}
// Kernel: generate one jittered camera ray per sample and store it in
// primary_ray_directions (one vector3d per sample, tile-major pixel order).
// randseeds holds one RNG state per launched thread; it is staged through the
// block-shared "randseed" array while the kernel runs and written back at the
// end so the next kernel continues the same sequence.
// Expected launch: <<<BLOCKCOUNT, THREADCOUNT>>>; a grid-stride loop covers
// all width*height*samples samples.
__global__ void generate_primary_rays(unsigned int *randseeds, vector3d *primary_ray_directions) {
//initialize random state for this thread in shared memory
randseed[threadIdx.x] = randseeds[blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
const int threadidx_global = blockIdx.x * blockDim.x + threadIdx.x;
const int num_pixels_in_tile = TILE_WIDTH * TILE_HEIGHT;
const int num_tiles_in_x = device_width / TILE_WIDTH;
vector3d raydir;
for (int sample_id = threadidx_global; sample_id < device_width * device_height * device_samples; sample_id += gridDim.x * blockDim.x) {
// Map the flat sample index to a pixel (px, py) in tile-major order:
// sample -> pixel, pixel -> tile, tile -> base coordinates + local offset.
int pixel_id = sample_id / device_samples;
int tile_id = pixel_id / num_pixels_in_tile;
int local_pixel_id = pixel_id % num_pixels_in_tile;
int tile_base_x = (tile_id % num_tiles_in_x) * TILE_WIDTH;
int tile_base_y = (tile_id / num_tiles_in_x) * TILE_HEIGHT;
int local_pixel_base_x = local_pixel_id % TILE_WIDTH;
int local_pixel_base_y = local_pixel_id / TILE_WIDTH;
int px = tile_base_x + local_pixel_base_x;
int py = tile_base_y + local_pixel_base_y;
// Pinhole camera at the origin looking down +z; the device_random() terms
// jitter the ray within the pixel for anti-aliasing.
raydir.x = ((float)px / (float)device_width) - 0.5f + device_random(threadIdx.x) / (float)device_width;
raydir.y = (((float)py / (float)device_height) - 0.5f) * ((float)device_height / (float)device_width) + device_random(threadIdx.x) / (float)device_height;
raydir.z = 1;
raydir = vec_normalize(raydir);
primary_ray_directions[sample_id] = raydir;
}
//propagate (changed) random seed to next kernel execution
randseeds[blockIdx.x * blockDim.x + threadIdx.x] = randseed[threadIdx.x];
}
// need to modify this function
// Kernel: path-trace the samples in [offset, offset+count).  Each entry of
// primary_ray_directions is read as a camera-ray direction and overwritten
// IN PLACE with the traced RGB radiance for that sample (x=r, y=g, z=b);
// reconstruct() averages them into pixels afterwards.  The scene objects are
// staged into shared memory, and the per-thread RNG state round-trips through
// the block-shared "randseed" array.
// Expected launch: <<<BLOCKCOUNT, THREADCOUNT>>> (grid-stride loop over samples).
__global__ void raytrace(int offset, int count, struct object *globalobjects, unsigned int *randseeds, struct rgbcolor *envmap, vector3d *primary_ray_directions)
{
volatile int px, py, tx, ty, raycounter;
struct vector3d raydir, campos;
struct rgbcolor pcolor, contrib;
__shared__ struct object localobjects[64];
volatile float mindist;
volatile int obj, depth, counter;
volatile float t, v, x, y, z;
struct vector3d d, n, ir, o;
// Stage the object list from global into shared memory (thread 0 copies;
// the __syncthreads() below makes it visible to the whole block).
if (threadIdx.x == 0)
{
for (counter = 0; counter<device_objectcount; counter++)
{
localobjects[counter] = globalobjects[counter];
}
}
randseed[threadIdx.x] = randseeds[blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
const int threadidx_global = blockIdx.x * blockDim.x + threadIdx.x;
for (int sample_id = offset + threadidx_global; sample_id < offset + count; sample_id += gridDim.x * blockDim.x) {
pcolor.r = 0;
pcolor.g = 0;
pcolor.b = 0;
// ray origin: camera at the world origin
campos.x = 0;
campos.y = 0;
campos.z = 0;
// primary ray direction was precomputed by generate_primary_rays()
raydir = primary_ray_directions[sample_id];
contrib.r = 1.0f;
contrib.g = 1.0f;
contrib.b = 1.0f;
depth = 1;
// Bounce the ray until the maximum depth is reached or the remaining
// contribution is too small to change an 8-bit channel (< 1/255).
while ((depth <= device_maxdepth) && ((contrib.r * 255.0f > 1.0f) || (contrib.g * 255.0f > 1.0f) || (contrib.b * 255.0f > 1.0f))) {
mindist = 10000.0f;
obj = -1;
depth++;
// Find the nearest intersection of the current ray with the scene;
// on a hit, n is the surface normal and mindist the hit distance.
for (counter = 0; counter<device_objectcount; counter++)
{
if (localobjects[counter].type == TYPE_SPHERE)
{
d.x = localobjects[counter].pos.x - campos.x;
d.y = localobjects[counter].pos.y - campos.y;
d.z = localobjects[counter].pos.z - campos.z;
v = vec_dot(raydir, d);
if (v - localobjects[counter].rad > mindist)
continue;
//origin + direction * t = hit point
t = localobjects[counter].rad*localobjects[counter].rad + v*v - d.x*d.x - d.y*d.y - d.z*d.z;
if (t < 0)
continue;
t = v - sqrt(t);
if ((t > mindist) || (t < 0))
continue;
n.x = campos.x + t*raydir.x - localobjects[counter].pos.x;
n.y = campos.y + t*raydir.y - localobjects[counter].pos.y;
n.z = campos.z + t*raydir.z - localobjects[counter].pos.z;
n = vec_normalize(n);
mindist = t;
obj = counter;
}
else if (localobjects[counter].type == TYPE_PLANE)
{
v = vec_dot(localobjects[counter].n, raydir);
if (v >= 0)
continue;
d.x = localobjects[counter].pos.x - campos.x;
d.y = localobjects[counter].pos.y - campos.y;
d.z = localobjects[counter].pos.z - campos.z;
t = vec_dot(localobjects[counter].n, d) / v;
if ((t > mindist) || (t < 0))
continue;
n = localobjects[counter].n;
mindist = t;
obj = counter;
}
}
if (obj != -1)
{
// Hit: compute the hit point, the mirror reflection, then perturb it
// by a rejection-sampled hemisphere direction according to the
// material's diffuse/gloss parameters.
x = campos.x + mindist*raydir.x;
y = campos.y + mindist*raydir.y;
z = campos.z + mindist*raydir.z;
ir.x = -raydir.x;
ir.y = -raydir.y;
ir.z = -raydir.z;
t = 2 * vec_dot(ir, n);
raydir.x = t*n.x - ir.x;
raydir.y = t*n.y - ir.y;
raydir.z = t*n.z - ir.z;
raydir = vec_normalize(raydir);
// Rejection-sample a random direction in the unit hemisphere above n.
do
{
o.x = (2.0f*device_random(threadIdx.x)) - 1.0f;
o.y = (2.0f*device_random(threadIdx.x)) - 1.0f;
o.z = (2.0f*device_random(threadIdx.x)) - 1.0f;
} while ((o.x*o.x + o.y*o.y + o.z*o.z > 1) || (vec_dot(o, n) <= 0));
v = (localobjects[obj].d*device_random(threadIdx.x))*localobjects[obj].g + localobjects[obj].d*(1.0f - localobjects[obj].g);
raydir.x = o.x*v + raydir.x*(1.0f - v);
raydir.y = o.y*v + raydir.y*(1.0f - v);
raydir.z = o.z*v + raydir.z*(1.0f - v);
raydir = vec_normalize(raydir);
// Offset the new origin slightly along the ray to avoid self-intersection.
campos.x = x + 0.001f*raydir.x;
campos.y = y + 0.001f*raydir.y;
campos.z = z + 0.001f*raydir.z;
contrib.r *= localobjects[obj].c.r;
contrib.g *= localobjects[obj].c.g;
contrib.b *= localobjects[obj].c.b;
pcolor.r += localobjects[obj].e * contrib.r;
pcolor.g += localobjects[obj].e * contrib.g;
pcolor.b += localobjects[obj].e * contrib.b;
}
else // no object hit: terminate the path against the background
{
if (envmap == NULL) // no environment map: flat background color
{
pcolor.r += device_background.r * contrib.r;
pcolor.g += device_background.g * contrib.g;
pcolor.b += device_background.b * contrib.b;
}
else // sample the environment map
{
// Map the ray direction to equirectangular envmap coordinates.
tx = (int)(((atan2(raydir.x, raydir.z) / PI + 1.0f) * 0.5f) * device_envmap_width) % device_envmap_width;
ty = (atan2(raydir.y, sqrt(raydir.x*raydir.x + raydir.z*raydir.z)) / PI + 0.5f) / (1.0f - device_envmap_offset) * device_envmap_height;
if (ty >= device_envmap_height)
{
ty = device_envmap_height - 1;
}
// Accumulate the envmap radiance weighted by the path contribution.
pcolor.r += ((float)envmap[tx + ty*device_envmap_width].r) * contrib.r;
pcolor.g += ((float)envmap[tx + ty*device_envmap_width].g) * contrib.g;
pcolor.b += ((float)envmap[tx + ty*device_envmap_width].b) * contrib.b;
}
// Zeroing the contribution ends the while loop (path terminated).
contrib.r = 0.0f;
contrib.g = 0.0f;
contrib.b = 0.0f;
}
}
// Store the traced radiance back into the sample slot (reused as output).
primary_ray_directions[sample_id].x = pcolor.r;
primary_ray_directions[sample_id].y = pcolor.g;
primary_ray_directions[sample_id].z = pcolor.b;
}
//pcolor.r = pcolor.r / (float)device_samples;
//pcolor.g = pcolor.g / (float)device_samples;
//pcolor.b = pcolor.b / (float)device_samples;
///* imagedata[px+py*device_width].r = 1.0f - exp( -pcolor.r );
//imagedata[px+py*device_width].g = 1.0f - exp( -pcolor.g );
//imagedata[px+py*device_width].b = 1.0f - exp( -pcolor.b );*/
//imagedata[px + py*device_width].r = pcolor.r;
//imagedata[px + py*device_width].g = pcolor.g;
//imagedata[px + py*device_width].b = pcolor.b;
randseeds[blockIdx.x * blockDim.x + threadIdx.x] = randseed[threadIdx.x];
}
// Kernel: average the per-sample radiance values left in
// primary_ray_directions by raytrace() (x=r, y=g, z=b) into one rgbcolor per
// pixel, undoing the tile-major sample layout used by generate_primary_rays().
// Expected launch: <<<BLOCKCOUNT, THREADCOUNT>>> (grid-stride loop over pixels).
__global__ void reconstruct(struct rgbcolor *imagedata, vector3d *primary_ray_directions) {
const int threadidx_global = blockIdx.x * blockDim.x + threadIdx.x;
const int num_pixels_in_tile = TILE_WIDTH * TILE_HEIGHT;
const int num_tiles_in_x = device_width / TILE_WIDTH;
rgbcolor pcolor;
for (int pixel_id = threadidx_global; pixel_id < device_width * device_height; pixel_id += gridDim.x * blockDim.x) {
// Recover the (px, py) screen position from the tile-major pixel index.
int tile_id = pixel_id / num_pixels_in_tile;
int local_pixel_id = pixel_id % num_pixels_in_tile;
int tile_base_x = (tile_id % num_tiles_in_x) * TILE_WIDTH;
int tile_base_y = (tile_id / num_tiles_in_x) * TILE_HEIGHT;
int local_pixel_base_x = local_pixel_id % TILE_WIDTH;
int local_pixel_base_y = local_pixel_id / TILE_WIDTH;
int px = tile_base_x + local_pixel_base_x;
int py = tile_base_y + local_pixel_base_y;
pcolor.r = pcolor.g = pcolor.b = 0;
// Samples of a pixel are stored contiguously; sum then average them.
for (int sample_id = pixel_id * device_samples; sample_id < (pixel_id + 1) * device_samples; ++sample_id) {
pcolor.r += primary_ray_directions[sample_id].x;
pcolor.g += primary_ray_directions[sample_id].y;
pcolor.b += primary_ray_directions[sample_id].z;
}
imagedata[px + py*device_width].r = pcolor.r / device_samples;
imagedata[px + py*device_width].g = pcolor.g / device_samples;
imagedata[px + py*device_width].b = pcolor.b / device_samples;
}
}
//random seed
// Seed the per-thread device RNG states: fill a temporary host buffer with
// rand() values and copy all "count" of them to device_randseeds.
void initialize_randseeds(unsigned int *device_randseeds, int count)
{
    unsigned int *host_seeds = new unsigned int[count];
    for (int i = 0; i < count; ++i) {
        host_seeds[i] = rand();
    }
    // Push the seeds to the device; report (but do not abort on) failure.
    hipError_t status = hipMemcpy(device_randseeds, host_seeds, sizeof(unsigned int) * count, hipMemcpyHostToDevice);
    if (status != hipSuccess) {
        printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
    }
    delete[] host_seeds;
}
// Host driver: allocate the device buffers, upload the scene and settings,
// run the generate_primary_rays -> raytrace (in chunks) -> reconstruct
// pipeline, and copy the finished image back into imagedata.
// NOTE(review): the "samples" parameter only sizes the sample buffer, while
// the global "numsamples" is what gets copied to device_samples -- callers
// must pass samples == numsamples (main() does); confirm before reuse.
void render_image(int width, int height, int samples, rgbcolor *imagedata)
{
int starttime;
hipError_t error;
puts("Allocating memory on device");
if (hipMalloc((void **)&device_objects, sizeof(struct object) * objectcount) != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
if (hipMalloc((void **)&device_imagedata, sizeof(struct rgbcolor) * width * height) != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
if (hipMalloc((void **)&device_randseeds, sizeof(unsigned int)* BLOCKCOUNT * THREADCOUNT) != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
// One vector3d per sample: holds ray directions, later reused for radiance.
if (hipMalloc((void **)&device_primary_ray_directions, sizeof(vector3d)* width * height * samples) != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
if (envmap != NULL) {
if (hipMalloc((void **)&device_envmap, sizeof(struct rgbcolor) * envmap_width * envmap_height) != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
}
else {
device_envmap = NULL;
}
puts("Copying data to device");
if (hipMemcpy(device_objects, objects, sizeof(struct object) * objectcount, hipMemcpyHostToDevice) != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
if (hipMemcpy(device_imagedata, imagedata, sizeof(struct rgbcolor) * width * height, hipMemcpyHostToDevice) != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
if (envmap != NULL)
{
if (hipMemcpy(device_envmap, envmap, sizeof(struct rgbcolor) * envmap_width * envmap_height, hipMemcpyHostToDevice) != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
}
// Publish the render settings to the device-side __device__ globals.
hipMemcpyToSymbol(device_width, &width, sizeof(width), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_height, &height, sizeof(height), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_samples, &numsamples, sizeof(numsamples), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_maxdepth, &maxdepth, sizeof(maxdepth), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_objectcount, &objectcount, sizeof(objectcount), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_background, &background, sizeof(background), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_envmap_width, &envmap_width, sizeof(envmap_width), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_envmap_height, &envmap_height, sizeof(envmap_height), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(device_envmap_offset, &envmap_offset, sizeof(envmap_offset), 0, hipMemcpyHostToDevice);
starttime = time(NULL);
initialize_randseeds(device_randseeds, BLOCKCOUNT * THREADCOUNT);
//Step 0: Initialize device_primary_ray_directions
// NOTE(review): this launch is not checked with hipGetLastError().
puts("Generating primary rays");
generate_primary_rays << < BLOCKCOUNT, THREADCOUNT >> >(device_randseeds, device_primary_ray_directions);
int total_samples = width * height * numsamples;
// Trace the samples in fixed-size chunks so each kernel launch stays short.
for (int offset = 0; offset < total_samples; offset += RAYTRACE_SAMPLES_PER_KERNEL_EXECUTION)
{
int count = min(RAYTRACE_SAMPLES_PER_KERNEL_EXECUTION, total_samples - offset);
printf("Rendering sample %i ~ %i of %i\r", offset, offset + count - 1, total_samples); fflush(stdout);
raytrace << < BLOCKCOUNT, THREADCOUNT >> > (offset, count, device_objects, device_randseeds, device_envmap, device_primary_ray_directions);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
}
puts("Reconstructing");
// NOTE(review): this launch is not checked with hipGetLastError() either.
reconstruct << < BLOCKCOUNT, THREADCOUNT >> > (device_imagedata, device_primary_ray_directions);
puts("\nWaiting for threads to finish");
hipDeviceSynchronize();
printf("Time taken: %is\n", time(NULL) - starttime);
puts("Copying image data from device");
if (hipMemcpy(imagedata, device_imagedata, sizeof(struct rgbcolor) * width * height, hipMemcpyDeviceToHost) != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(hipGetLastError()));
}
hipFree(device_primary_ray_directions);
hipFree(device_objects);
hipFree(device_imagedata);
hipFree(device_randseeds);
if (envmap != NULL)
hipFree(device_envmap);
}
// Entry point: parse the scene file named on the command line, optionally
// load an environment map, render the image on the GPU, and save it as PFM.
// Returns 0 on success, 1 on usage or scene errors.
int main(int argc, char *argv[])
{
    puts("Simple CUDA Ray Tracer by 50m30n3, modified for KAIST CS610 project");
    if (argc != 2) {
        fputs("USAGE: cudart scenefile\n", stderr);
        return 1;
    }
    puts("Building scene");
    if (!build_scene(argv[1]))
    {
        puts("Cannot build scene!");
        return 1;
    }
    puts("Allocating Data");
    rgbcolor *imagedata = new rgbcolor[width * height];
    envmap = NULL;
    if (strlen(envmap_filename) > 0)
    {
        puts("Loading Envmap");
        envmap = load_pfm(&envmap_width, &envmap_height, envmap_filename);
        if (envmap != NULL)
        {
            // "%u" matches the unsigned envmap dimensions.
            printf("Envmap %s: %ux%u pixels\n", envmap_filename, envmap_width, envmap_height);
        }
    }
    printf("Rendering %s at %ix%i with %i samples\n", filename, width, height, numsamples);
    render_image(width, height, numsamples, imagedata);
    puts("Saving image");
    save_pfm(width, height, imagedata, filename, 0);
    // load_pfm() allocates the envmap with malloc(), so release it with free().
    if (envmap != NULL)
        free(envmap);
    delete[] objects;
    delete[] imagedata;
    return 0;
}
| f273217a81337d748451dfe77fd83fe3e75cb167.cu | /*************************************************************************\
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
* cudart (c) 2008 Erik Entrich *
\*************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
// A 3-component float vector, used for both positions and directions.
struct vector3d
{
float x, y, z;
};
// An RGB color triple with linear float channels.
struct rgbcolor
{
float r, g, b;
};
// One scene primitive: a sphere or an infinite plane, selected by "type".
struct object
{
int type; // TYPE_SPHERE or TYPE_PLANE
struct vector3d pos; // sphere center / a point on the plane
float rad; // sphere radius (unused for planes)
struct vector3d n; // plane normal (unused for spheres)
struct rgbcolor c; // surface color (multiplied into the path contribution per bounce)
float e; // emission strength (scales the contribution added to the pixel)
float d, g; // scattering parameters used by raytrace(); presumably diffuseness and glossiness mix -- TODO confirm
};
#define PI 3.14159265358979323846f
#define TYPE_SPHERE 1
#define TYPE_PLANE 2
#define THREADCOUNT 256
#define BLOCKCOUNT 16
//CAUTION: the image dimensions must be exactly divisible by the tile size!
#define TILE_WIDTH 20
#define TILE_HEIGHT 20
#define RAYTRACE_SAMPLES_PER_KERNEL_EXECUTION (THREADCOUNT * BLOCKCOUNT * 4)
struct object *objects;
struct object *device_objects;
int objectcount;
struct rgbcolor *device_imagedata;
unsigned int *device_randseeds;
vector3d *device_primary_ray_directions;
char envmap_filename[256] = "";
struct rgbcolor *envmap;
struct rgbcolor *device_envmap;
unsigned int envmap_width, envmap_height;
__device__ unsigned int device_envmap_width, device_envmap_height;
float envmap_offset;
__device__ float device_envmap_offset;
int width, height;
int numsamples;
int maxdepth;
char filename[256];
struct rgbcolor background;
__device__ int device_samples;
__device__ int device_width, device_height;
__device__ int device_maxdepth;
__device__ int device_objectcount;
__device__ struct rgbcolor device_background;
// Euclidean length of a 3D vector.
__device__ float vec_length(struct vector3d vec)
{
    float squared = vec.x * vec.x;
    squared += vec.y * vec.y;
    squared += vec.z * vec.z;
    return sqrt(squared);
}
// Return vec scaled to unit length (no guard for zero-length input).
__device__ struct vector3d vec_normalize(struct vector3d vec)
{
    struct vector3d unit;
    const float len = vec_length(vec);
    unit.x = vec.x / len;
    unit.y = vec.y / len;
    unit.z = vec.z / len;
    return unit;
}
// Dot product of two 3D vectors.
__device__ float vec_dot(struct vector3d veca, struct vector3d vecb)
{
    float sum = veca.x * vecb.x;
    sum += veca.y * vecb.y;
    sum += veca.z * vecb.z;
    return sum;
}
// Cross product veca x vecb (right-handed).
__device__ struct vector3d vec_cross(struct vector3d veca, struct vector3d vecb)
{
    struct vector3d result;
    result.x = veca.y * vecb.z - veca.z * vecb.y;
    result.y = veca.z * vecb.x - veca.x * vecb.z;
    result.z = veca.x * vecb.y - veca.y * vecb.x;
    return result;
}
__shared__ unsigned int randseed[THREADCOUNT];
// Per-thread linear congruential PRNG; the state lives in the block-shared
// "randseed" array, indexed by the caller-supplied thread id.
// Returns a float in [0, 1).
__device__ float device_random(int tid)
{
#define MULTIPLIER ((unsigned int) 1664525)
#define OFFSET ((unsigned int) 1013904223)
#define MODULUS ((double) 4294967296.0)
#define MODULUS_INV ((float) (1.0 / MODULUS))
unsigned int sNew = randseed[tid] * MULTIPLIER + OFFSET;
randseed[tid] = sNew;
// Scale the 32-bit state into [0, 1).
float res = sNew * MODULUS_INV;
return res;
}
// Write imagedata (width x height) to "filename" as a binary little-endian
// PFM file; when normalize is nonzero the pixels are rescaled so the largest
// channel value becomes 1.  Returns 0 on success, 1 if the file cannot be opened.
int save_pfm(unsigned int width, unsigned int height, struct rgbcolor *imagedata, char filename[], int normalize)
{
FILE *pfm;
int x, y;
float r, g, b, max;
pfm = fopen(filename, "wb");
if (pfm != NULL)
{
max = 1;
if (normalize)
{
// Find the largest channel value for scaling into [0, 1].
for (y = 0; y<height; y++)
for (x = 0; x<width; x++)
{
if (max < imagedata[x + y*width].r)
max = imagedata[x + y*width].r;
if (max < imagedata[x + y*width].g)
max = imagedata[x + y*width].g;
if (max < imagedata[x + y*width].b)
max = imagedata[x + y*width].b;
}
}
// PFM header; the negative scale marks the data as little-endian.
fprintf(pfm, "PF\n%i %i\n-%f\n", width, height, max);
for (y = 0; y<height; y++)
for (x = 0; x<width; x++)
{
r = imagedata[x + y*width].r / max;
g = imagedata[x + y*width].g / max;
b = imagedata[x + y*width].b / max;
fwrite(&r, sizeof(float), 1, pfm);
fwrite(&g, sizeof(float), 1, pfm);
fwrite(&b, sizeof(float), 1, pfm);
}
fclose(pfm);
return 0;
}
else
{
perror("fopen");
return 1;
}
}
// Read a color PFM file into a freshly malloc()ed rgbcolor array (the caller
// frees it); the dimensions are stored through *width / *height.  Only
// little-endian files (negative scale) are accepted.  Returns NULL on error.
struct rgbcolor *load_pfm(unsigned int *width, unsigned int *height, char filename[])
{
FILE *pfm;
char buffer[256];
unsigned int x, y;
float r, g, b, scale;
struct rgbcolor *imagedata;
pfm = fopen(filename, "rb+");
if (pfm != NULL)
{
// Magic number: "PF" marks a 3-channel (color) PFM file.
fgets(buffer, sizeof(buffer)-1, pfm);
if (strcmp(buffer, "PF\n") != 0)
{
fclose(pfm);
return NULL;
}
fscanf(pfm, "%u %u\n", width, height);
fscanf(pfm, "%f\n", &scale);
// The sign of the scale encodes endianness; only negative (little-endian)
// files are supported here.
if (scale >= 0.0f)
{
fclose(pfm);
return NULL;
}
scale *= -1.0f;
imagedata = (struct rgbcolor*) malloc(sizeof(struct rgbcolor) * *width * *height);
if (imagedata == NULL)
{
perror("malloc");
fclose(pfm);
return NULL;
}
for (y = 0; y<*height; y++)
for (x = 0; x<*width; x++)
{
fread(&r, sizeof(float), 1, pfm);
fread(&g, sizeof(float), 1, pfm);
fread(&b, sizeof(float), 1, pfm);
imagedata[x + y**width].r = r * scale;
imagedata[x + y**width].g = g * scale;
imagedata[x + y**width].b = b * scale;
}
fclose(pfm);
return imagedata;
}
else
{
perror("fopen");
return NULL;
}
}
// Parse the scene file into the global object list plus the global render
// settings (width/height/numsamples/maxdepth, background color, output
// filename).  Records are "sphere"/"plane" keyword lines followed by their
// parameter lines; '#' lines and blank lines are skipped.
// Returns 1 on success, 0 on error.
// NOTE(review): there is no bound check against maxobjects (64) -- a scene
// with more objects overruns the array; also the error paths leak scenefile.
int build_scene(char *scenefilename)
{
int maxobjects = 64;
int linenum;
char tempbuffer[64];
FILE *scenefile;
objectcount = 0;
objects = new object[maxobjects];
if (objects == NULL)
{
perror("malloc");
return 0;
}
scenefile = fopen(scenefilename, "r");
if (scenefile == NULL)
{
perror("fopen");
return 0;
}
// Header: render settings, background color, output filename.
fscanf(scenefile, "%i %i %i %i\n", &width, &height, &numsamples, &maxdepth);
fscanf(scenefile, "%f %f %f\n", &background.r, &background.g, &background.b);
fgets(filename, 255, scenefile);
filename[strlen(filename) - 1] = '\0';
linenum = 3;
while (fgets(tempbuffer, 64, scenefile) != NULL)
{
linenum++;
if ((tempbuffer[0] == '#') || (tempbuffer[0] == '\n'))
continue;
if (strcmp(tempbuffer, "sphere\n") == 0)
{
// Sphere record: position+radius, color, emission/diffuse/gloss.
objects[objectcount].type = TYPE_SPHERE;
if (fscanf(scenefile, "%f %f %f %f\n", &objects[objectcount].pos.x, &objects[objectcount].pos.y, &objects[objectcount].pos.z, &objects[objectcount].rad) != 4)
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
if (fscanf(scenefile, "%f %f %f\n", &objects[objectcount].c.r, &objects[objectcount].c.g, &objects[objectcount].c.b) != 3)
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
if (fscanf(scenefile, "%f %f %f\n", &objects[objectcount].e, &objects[objectcount].d, &objects[objectcount].g) != 3)
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
}
else
if (strcmp(tempbuffer, "plane\n") == 0)
{
// Plane record: point, normal, color, emission/diffuse/gloss.
objects[objectcount].type = TYPE_PLANE;
if (fscanf(scenefile, "%f %f %f\n", &objects[objectcount].pos.x, &objects[objectcount].pos.y, &objects[objectcount].pos.z) != 3)
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
if (fscanf(scenefile, "%f %f %f\n", &objects[objectcount].n.x, &objects[objectcount].n.y, &objects[objectcount].n.z) != 3)
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
if (fscanf(scenefile, "%f %f %f\n", &objects[objectcount].c.r, &objects[objectcount].c.g, &objects[objectcount].c.b) != 3)
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
if (fscanf(scenefile, "%f %f %f\n", &objects[objectcount].e, &objects[objectcount].d, &objects[objectcount].g) != 3)
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
}
else
{
fprintf(stderr, "Error in scnefile on line %i\n", linenum);
return 0;
}
objectcount++;
}
fclose(scenefile);
return 1;
}
// Kernel: generate one jittered camera ray per sample and store it in
// primary_ray_directions (one vector3d per sample, tile-major pixel order).
// randseeds holds one RNG state per launched thread; it is staged through the
// block-shared "randseed" array while the kernel runs and written back at the
// end so the next kernel continues the same sequence.
__global__ void generate_primary_rays(unsigned int *randseeds, vector3d *primary_ray_directions) {
//initialize random state for this thread in shared memory
randseed[threadIdx.x] = randseeds[blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
const int threadidx_global = blockIdx.x * blockDim.x + threadIdx.x;
const int num_pixels_in_tile = TILE_WIDTH * TILE_HEIGHT;
const int num_tiles_in_x = device_width / TILE_WIDTH;
vector3d raydir;
for (int sample_id = threadidx_global; sample_id < device_width * device_height * device_samples; sample_id += gridDim.x * blockDim.x) {
// Map the flat sample index to a pixel (px, py) in tile-major order.
int pixel_id = sample_id / device_samples;
int tile_id = pixel_id / num_pixels_in_tile;
int local_pixel_id = pixel_id % num_pixels_in_tile;
int tile_base_x = (tile_id % num_tiles_in_x) * TILE_WIDTH;
int tile_base_y = (tile_id / num_tiles_in_x) * TILE_HEIGHT;
int local_pixel_base_x = local_pixel_id % TILE_WIDTH;
int local_pixel_base_y = local_pixel_id / TILE_WIDTH;
int px = tile_base_x + local_pixel_base_x;
int py = tile_base_y + local_pixel_base_y;
// Pinhole camera at the origin looking down +z; device_random() jitters
// the ray within the pixel for anti-aliasing.
raydir.x = ((float)px / (float)device_width) - 0.5f + device_random(threadIdx.x) / (float)device_width;
raydir.y = (((float)py / (float)device_height) - 0.5f) * ((float)device_height / (float)device_width) + device_random(threadIdx.x) / (float)device_height;
raydir.z = 1;
raydir = vec_normalize(raydir);
primary_ray_directions[sample_id] = raydir;
}
//propagate (changed) random seed to next kernel execution
randseeds[blockIdx.x * blockDim.x + threadIdx.x] = randseed[threadIdx.x];
}
// need to modify this function
// Kernel: path-trace the samples in [offset, offset+count).  Each entry of
// primary_ray_directions is read as a camera-ray direction and overwritten
// IN PLACE with the traced RGB radiance for that sample (x=r, y=g, z=b);
// reconstruct() averages them into pixels afterwards.  The scene objects are
// staged into shared memory, and the per-thread RNG state round-trips through
// the block-shared "randseed" array.
__global__ void raytrace(int offset, int count, struct object *globalobjects, unsigned int *randseeds, struct rgbcolor *envmap, vector3d *primary_ray_directions)
{
volatile int px, py, tx, ty, raycounter;
struct vector3d raydir, campos;
struct rgbcolor pcolor, contrib;
__shared__ struct object localobjects[64];
volatile float mindist;
volatile int obj, depth, counter;
volatile float t, v, x, y, z;
struct vector3d d, n, ir, o;
// Stage the object list from global into shared memory (thread 0 copies;
// the __syncthreads() below makes it visible to the whole block).
if (threadIdx.x == 0)
{
for (counter = 0; counter<device_objectcount; counter++)
{
localobjects[counter] = globalobjects[counter];
}
}
randseed[threadIdx.x] = randseeds[blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
const int threadidx_global = blockIdx.x * blockDim.x + threadIdx.x;
for (int sample_id = offset + threadidx_global; sample_id < offset + count; sample_id += gridDim.x * blockDim.x) {
pcolor.r = 0;
pcolor.g = 0;
pcolor.b = 0;
// ray origin: camera at the world origin
campos.x = 0;
campos.y = 0;
campos.z = 0;
//ray direction calculation with pixel information.
raydir = primary_ray_directions[sample_id];
contrib.r = 1.0f;
contrib.g = 1.0f;
contrib.b = 1.0f;
depth = 1;
// Bounce the ray (one bounce per loop iteration) until the maximum depth
// is reached or the remaining contribution is too small to change an
// 8-bit channel (< 1/255).
while ((depth <= device_maxdepth) && ((contrib.r * 255.0f > 1.0f) || (contrib.g * 255.0f > 1.0f) || (contrib.b * 255.0f > 1.0f))) {
mindist = 10000.0f;
obj = -1;
depth++;
// Find the nearest intersection of the current ray with the scene;
// on a hit, n holds the surface normal and mindist the hit distance.
for (counter = 0; counter<device_objectcount; counter++)
{
if (localobjects[counter].type == TYPE_SPHERE)
{
d.x = localobjects[counter].pos.x - campos.x;
d.y = localobjects[counter].pos.y - campos.y;
d.z = localobjects[counter].pos.z - campos.z;
v = vec_dot(raydir, d);
if (v - localobjects[counter].rad > mindist)
continue;
//origin + direction * t = hit point
t = localobjects[counter].rad*localobjects[counter].rad + v*v - d.x*d.x - d.y*d.y - d.z*d.z;
if (t < 0)
continue;
t = v - sqrt(t);
if ((t > mindist) || (t < 0))
continue;
n.x = campos.x + t*raydir.x - localobjects[counter].pos.x;
n.y = campos.y + t*raydir.y - localobjects[counter].pos.y;
n.z = campos.z + t*raydir.z - localobjects[counter].pos.z;
n = vec_normalize(n);
mindist = t;
obj = counter;
}
else if (localobjects[counter].type == TYPE_PLANE)
{
v = vec_dot(localobjects[counter].n, raydir);
if (v >= 0)
continue;
d.x = localobjects[counter].pos.x - campos.x;
d.y = localobjects[counter].pos.y - campos.y;
d.z = localobjects[counter].pos.z - campos.z;
t = vec_dot(localobjects[counter].n, d) / v;
if ((t > mindist) || (t < 0))
continue;
n = localobjects[counter].n;
mindist = t;
obj = counter;
}
}
if (obj != -1)
{
// Hit: compute the hit point and the mirror reflection, then perturb
// the reflection by a rejection-sampled hemisphere direction according
// to the material's diffuse/gloss parameters.
x = campos.x + mindist*raydir.x;
y = campos.y + mindist*raydir.y;
z = campos.z + mindist*raydir.z;
ir.x = -raydir.x;
ir.y = -raydir.y;
ir.z = -raydir.z;
t = 2 * vec_dot(ir, n);
raydir.x = t*n.x - ir.x;
raydir.y = t*n.y - ir.y;
raydir.z = t*n.z - ir.z;
raydir = vec_normalize(raydir);
// Rejection-sample a random direction in the unit hemisphere above n.
do
{
o.x = (2.0f*device_random(threadIdx.x)) - 1.0f;
o.y = (2.0f*device_random(threadIdx.x)) - 1.0f;
o.z = (2.0f*device_random(threadIdx.x)) - 1.0f;
} while ((o.x*o.x + o.y*o.y + o.z*o.z > 1) || (vec_dot(o, n) <= 0));
v = (localobjects[obj].d*device_random(threadIdx.x))*localobjects[obj].g + localobjects[obj].d*(1.0f - localobjects[obj].g);
raydir.x = o.x*v + raydir.x*(1.0f - v);
raydir.y = o.y*v + raydir.y*(1.0f - v);
raydir.z = o.z*v + raydir.z*(1.0f - v);
raydir = vec_normalize(raydir);
// Offset the new origin slightly along the ray to avoid self-intersection.
campos.x = x + 0.001f*raydir.x;
campos.y = y + 0.001f*raydir.y;
campos.z = z + 0.001f*raydir.z;
contrib.r *= localobjects[obj].c.r;
contrib.g *= localobjects[obj].c.g;
contrib.b *= localobjects[obj].c.b;
pcolor.r += localobjects[obj].e * contrib.r;
pcolor.g += localobjects[obj].e * contrib.g;
pcolor.b += localobjects[obj].e * contrib.b;
}
else // no object hit: terminate the path against the background
{
if (envmap == NULL) // no environment map: flat background color
{
pcolor.r += device_background.r * contrib.r;
pcolor.g += device_background.g * contrib.g;
pcolor.b += device_background.b * contrib.b;
}
else // sample the environment map
{
// Map the ray direction to equirectangular envmap coordinates.
tx = (int)(((atan2(raydir.x, raydir.z) / PI + 1.0f) * 0.5f) * device_envmap_width) % device_envmap_width;
ty = (atan2(raydir.y, sqrt(raydir.x*raydir.x + raydir.z*raydir.z)) / PI + 0.5f) / (1.0f - device_envmap_offset) * device_envmap_height;
if (ty >= device_envmap_height)
{
ty = device_envmap_height - 1;
}
// Accumulate the envmap radiance weighted by the path contribution.
pcolor.r += ((float)envmap[tx + ty*device_envmap_width].r) * contrib.r;
pcolor.g += ((float)envmap[tx + ty*device_envmap_width].g) * contrib.g;
pcolor.b += ((float)envmap[tx + ty*device_envmap_width].b) * contrib.b;
}
// Zeroing the contribution ends the while loop (path terminated).
contrib.r = 0.0f;
contrib.g = 0.0f;
contrib.b = 0.0f;
}
}
// Store the traced radiance back into the sample slot (buffer is reused).
primary_ray_directions[sample_id].x = pcolor.r;
primary_ray_directions[sample_id].y = pcolor.g;
primary_ray_directions[sample_id].z = pcolor.b;
}
//pcolor.r = pcolor.r / (float)device_samples;
//pcolor.g = pcolor.g / (float)device_samples;
//pcolor.b = pcolor.b / (float)device_samples;
///* imagedata[px+py*device_width].r = 1.0f - exp( -pcolor.r );
//imagedata[px+py*device_width].g = 1.0f - exp( -pcolor.g );
//imagedata[px+py*device_width].b = 1.0f - exp( -pcolor.b );*/
//imagedata[px + py*device_width].r = pcolor.r;
//imagedata[px + py*device_width].g = pcolor.g;
//imagedata[px + py*device_width].b = pcolor.b;
randseeds[blockIdx.x * blockDim.x + threadIdx.x] = randseed[threadIdx.x];
}
// Kernel: average the per-sample radiance values left in
// primary_ray_directions by raytrace() (x=r, y=g, z=b) into one rgbcolor per
// pixel, undoing the tile-major sample layout used by generate_primary_rays().
__global__ void reconstruct(struct rgbcolor *imagedata, vector3d *primary_ray_directions) {
const int threadidx_global = blockIdx.x * blockDim.x + threadIdx.x;
const int num_pixels_in_tile = TILE_WIDTH * TILE_HEIGHT;
const int num_tiles_in_x = device_width / TILE_WIDTH;
rgbcolor pcolor;
for (int pixel_id = threadidx_global; pixel_id < device_width * device_height; pixel_id += gridDim.x * blockDim.x) {
// Recover the (px, py) screen position from the tile-major pixel index.
int tile_id = pixel_id / num_pixels_in_tile;
int local_pixel_id = pixel_id % num_pixels_in_tile;
int tile_base_x = (tile_id % num_tiles_in_x) * TILE_WIDTH;
int tile_base_y = (tile_id / num_tiles_in_x) * TILE_HEIGHT;
int local_pixel_base_x = local_pixel_id % TILE_WIDTH;
int local_pixel_base_y = local_pixel_id / TILE_WIDTH;
int px = tile_base_x + local_pixel_base_x;
int py = tile_base_y + local_pixel_base_y;
pcolor.r = pcolor.g = pcolor.b = 0;
// Samples of a pixel are stored contiguously; sum then average them.
for (int sample_id = pixel_id * device_samples; sample_id < (pixel_id + 1) * device_samples; ++sample_id) {
pcolor.r += primary_ray_directions[sample_id].x;
pcolor.g += primary_ray_directions[sample_id].y;
pcolor.b += primary_ray_directions[sample_id].z;
}
imagedata[px + py*device_width].r = pcolor.r / device_samples;
imagedata[px + py*device_width].g = pcolor.g / device_samples;
imagedata[px + py*device_width].b = pcolor.b / device_samples;
}
}
//random seed initialization split out into its own helper
// Seed the per-thread device RNG states: fill a temporary host buffer with
// rand() values and copy all "count" of them to device_randseeds.
void initialize_randseeds(unsigned int *device_randseeds, int count) {
unsigned int *randseeds = new unsigned int[count];
for (int counter = 0; counter < count; counter++)
randseeds[counter] = rand();
if (cudaMemcpy(device_randseeds, randseeds, sizeof(unsigned int)* count, cudaMemcpyHostToDevice) != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
delete[] randseeds;
}
// Renders the scene into `imagedata` (host buffer of width*height pixels).
// Uploads scene data and render parameters, generates `samples` primary rays
// per pixel, traces them in fixed-size batches, averages the samples back
// into the image and copies it to the host. All device buffers are freed
// before returning.
void render_image(int width, int height, int samples, rgbcolor *imagedata)
{
	int starttime;
	cudaError_t error;
	puts("Allocating memory on device");
	if (cudaMalloc((void **)&device_objects, sizeof(struct object) * objectcount) != cudaSuccess) {
		printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
	}
	if (cudaMalloc((void **)&device_imagedata, sizeof(struct rgbcolor) * width * height) != cudaSuccess) {
		printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
	}
	if (cudaMalloc((void **)&device_randseeds, sizeof(unsigned int) * BLOCKCOUNT * THREADCOUNT) != cudaSuccess) {
		printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
	}
	if (cudaMalloc((void **)&device_primary_ray_directions, sizeof(vector3d) * width * height * samples) != cudaSuccess) {
		printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
	}
	if (envmap != NULL) {
		if (cudaMalloc((void **)&device_envmap, sizeof(struct rgbcolor) * envmap_width * envmap_height) != cudaSuccess) {
			printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
		}
	}
	else {
		device_envmap = NULL;
	}
	puts("Copying data to device");
	if (cudaMemcpy(device_objects, objects, sizeof(struct object) * objectcount, cudaMemcpyHostToDevice) != cudaSuccess)
	{
		printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
	}
	if (cudaMemcpy(device_imagedata, imagedata, sizeof(struct rgbcolor) * width * height, cudaMemcpyHostToDevice) != cudaSuccess)
	{
		printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
	}
	if (envmap != NULL)
	{
		if (cudaMemcpy(device_envmap, envmap, sizeof(struct rgbcolor) * envmap_width * envmap_height, cudaMemcpyHostToDevice) != cudaSuccess)
		{
			printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
		}
	}
	// Render parameters live in device symbols. Use the `samples` parameter
	// (not the numsamples global, as the old code did) so this function
	// honors whatever count the caller asked for.
	cudaMemcpyToSymbol(device_width, &width, sizeof(width), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_height, &height, sizeof(height), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_samples, &samples, sizeof(samples), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_maxdepth, &maxdepth, sizeof(maxdepth), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_objectcount, &objectcount, sizeof(objectcount), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_background, &background, sizeof(background), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_envmap_width, &envmap_width, sizeof(envmap_width), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_envmap_height, &envmap_height, sizeof(envmap_height), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(device_envmap_offset, &envmap_offset, sizeof(envmap_offset), 0, cudaMemcpyHostToDevice);
	starttime = time(NULL);
	initialize_randseeds(device_randseeds, BLOCKCOUNT * THREADCOUNT);
	// Step 0: one primary ray direction per (pixel, sample).
	puts("Generating primary rays");
	generate_primary_rays << < BLOCKCOUNT, THREADCOUNT >> >(device_randseeds, device_primary_ray_directions);
	int total_samples = width * height * samples;
	// Trace in batches of RAYTRACE_SAMPLES_PER_KERNEL_EXECUTION samples so a
	// single kernel launch stays short.
	for (int offset = 0; offset < total_samples; offset += RAYTRACE_SAMPLES_PER_KERNEL_EXECUTION)
	{
		int count = min(RAYTRACE_SAMPLES_PER_KERNEL_EXECUTION, total_samples - offset);
		printf("Rendering sample %i ~ %i of %i\r", offset, offset + count - 1, total_samples); fflush(stdout);
		raytrace << < BLOCKCOUNT, THREADCOUNT >> > (offset, count, device_objects, device_randseeds, device_envmap, device_primary_ray_directions);
		error = cudaGetLastError();
		if (error != cudaSuccess)
		{
			printf("Error: %s\n", cudaGetErrorString(error));
		}
		cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
	}
	puts("Reconstructing");
	reconstruct << < BLOCKCOUNT, THREADCOUNT >> > (device_imagedata, device_primary_ray_directions);
	puts("\nWaiting for threads to finish");
	cudaDeviceSynchronize();
	// Cast: time_t - int is time_t (often 64-bit), which %i cannot print.
	printf("Time taken: %is\n", (int)(time(NULL) - starttime));
	puts("Copying image data from device");
	if (cudaMemcpy(imagedata, device_imagedata, sizeof(struct rgbcolor) * width * height, cudaMemcpyDeviceToHost) != cudaSuccess)
	{
		printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
	}
	cudaFree(device_primary_ray_directions);
	cudaFree(device_objects);
	cudaFree(device_imagedata);
	cudaFree(device_randseeds);
	if (envmap != NULL)
		cudaFree(device_envmap);
}
// Entry point: parse the scene file named on the command line, render it on
// the GPU and write the result to disk as a PFM image.
int main(int argc, char *argv[])
{
	puts("Simple CUDA Ray Tracer by 50m30n3, modified for KAIST CS610 project");
	if (argc != 2) {
		fputs("USAGE: cudart scenefile\n", stderr);
		return 1;
	}
	puts("Building scene");
	if (!build_scene(argv[1])) {
		puts("Cannot build scene!");
		return 1;
	}
	puts("Allocating Data");
	rgbcolor *image = new rgbcolor[width * height];
	envmap = NULL;
	// An empty filename means "no environment map".
	if (strlen(envmap_filename) > 0) {
		puts("Loading Envmap");
		envmap = load_pfm(&envmap_width, &envmap_height, envmap_filename);
		if (envmap != NULL)
			printf("Envmap %s: %ix%i pixels\n", envmap_filename, envmap_width, envmap_height);
	}
	printf("Rendering %s at %ix%i with %i samples\n", filename, width, height, numsamples);
	render_image(width, height, numsamples, image);
	puts("Saving image");
	save_pfm(width, height, image, filename, 0);
	delete[] objects;
	delete[] image;
	return 0;
}
|
d7490a8caf1b31a2e62ae797f200ec343e7fc9f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for the PReLU forward pass:
// out[i] = in[i] if in[i] > 0, else in[i] * slope[c], where c is the channel
// of element i divided by div_factor (div_factor == channels collapses every
// element onto the single shared slope).
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for the bottom-gradient backward pass:
// d(out)/d(in) is 1 for positive inputs and slope[c] otherwise.
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for the element-wise slope-parameter gradient:
// d(out)/d(slope) is in_data for non-positive inputs, 0 otherwise. The
// per-element products are reduced per channel by the caller.
template <typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
}
}
// Forward pass: applies PReLU to bottom[0], writing top[0]. Supports
// in-place operation (top == bottom) by stashing the input in
// bottom_memory_ so Backward_gpu can still see the original activations.
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
// div_factor == channels maps every channel to slope index 0 (shared slope).
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass: computes the slope-parameter gradient (accumulated over the
// batch) and/or the bottom gradient, depending on propagate_down flags.
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation: recover the pre-activation input saved by
// Forward_gpu, since bottom_data was overwritten by the output.
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
// compute element-wise diff, then reduce it: a dot with an all-ones
// multiplier when the slope is shared, or a per-channel gemv otherwise.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n),
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
}  // namespace caffe
| d7490a8caf1b31a2e62ae797f200ec343e7fc9f5.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for the PReLU forward pass:
// out[i] = in[i] if in[i] > 0, else in[i] * slope[c], where c is the channel
// of element i divided by div_factor (div_factor == channels collapses every
// element onto the single shared slope).
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for the bottom-gradient backward pass:
// d(out)/d(in) is 1 for positive inputs and slope[c] otherwise.
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for the element-wise slope-parameter gradient:
// d(out)/d(slope) is in_data for non-positive inputs, 0 otherwise. The
// per-element products are reduced per channel by the caller.
template <typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
}
}
// Forward pass: applies PReLU to bottom[0], writing top[0]. Supports
// in-place operation (top == bottom) by stashing the input in
// bottom_memory_ so Backward_gpu can still see the original activations.
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
// div_factor == channels maps every channel to slope index 0 (shared slope).
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass: computes the slope-parameter gradient (accumulated over the
// batch) and/or the bottom gradient, depending on propagate_down flags.
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation: recover the pre-activation input saved by
// Forward_gpu, since bottom_data was overwritten by the output.
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
// compute element-wise diff, then reduce it: a dot with an all-ones
// multiplier when the slope is shared, or a per-channel gemv otherwise.
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS>>>(
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n),
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
}  // namespace caffe
|
6d9cfe6f0e769aa2f5b0204bb9d6ff893b1afa08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define THREADS 256
#define BLOCKS 32
#define NUM THREADS*BLOCKS
int seed_var =1239;
// Exchanges the two ints that xp and yp point at (a no-op when xp == yp).
__device__ void swap(int *xp, int *yp)
{
	const int saved = *yp;
	*yp = *xp;
	*xp = saved;
}
// One compare-exchange step of a bitonic sort over d_pr, with d_bt permuted
// alongside as a payload array. (j, k) select the stage: k is the bitonic
// sequence size, j the current sub-step distance. Expects one thread per
// array element (no bounds check here -- the launch must cover the array
// exactly).
__global__ void bitonic_sort_step(int *d_pr, int *d_bt, int j, int k)
{
int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i)
{
if ((i&k)==0)
{
/* Sort ascending */
if (d_pr[i]>d_pr[ixj])
{
/* exchange(i,ixj); */
swap(&d_pr[i],&d_pr[ixj]);
swap(&d_bt[i],&d_bt[ixj]);
}
}
if ((i&k)!=0)
{
/* Sort descending */
if (d_pr[i]<d_pr[ixj])
{
/* exchange(i,ixj); */
swap(&d_pr[i], &d_pr[ixj]);
swap(&d_bt[i], &d_bt[ixj]);
}
}
}
} | 6d9cfe6f0e769aa2f5b0204bb9d6ff893b1afa08.cu | #include "includes.h"
#define THREADS 256
#define BLOCKS 32
#define NUM THREADS*BLOCKS
int seed_var =1239;
// Exchanges the two ints that xp and yp point at (a no-op when xp == yp).
__device__ void swap(int *xp, int *yp)
{
	const int saved = *yp;
	*yp = *xp;
	*xp = saved;
}
// One compare-exchange step of a bitonic sort over d_pr, with d_bt permuted
// alongside as a payload array. (j, k) select the stage: k is the bitonic
// sequence size, j the current sub-step distance. Expects one thread per
// array element (no bounds check here -- the launch must cover the array
// exactly).
__global__ void bitonic_sort_step(int *d_pr, int *d_bt, int j, int k)
{
int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i)
{
if ((i&k)==0)
{
/* Sort ascending */
if (d_pr[i]>d_pr[ixj])
{
/* exchange(i,ixj); */
swap(&d_pr[i],&d_pr[ixj]);
swap(&d_bt[i],&d_bt[ixj]);
}
}
if ((i&k)!=0)
{
/* Sort descending */
if (d_pr[i]<d_pr[ixj])
{
/* exchange(i,ixj); */
swap(&d_pr[i], &d_pr[ixj]);
swap(&d_bt[i], &d_bt[ixj]);
}
}
}
}
be873f2f00b4162b3935f813d519e4f38dce4d8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "book.h"
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#define CAMERA_PIXEL_SCALE 0.0000617 //meters at 1cm distance
#define CAMERA_DEPTH_UNIT 4.5 //cm
#define swap(a,b) a^=b;b^=a;a^=b
#define xyzlt(a,b) ((a[0]<b[0])||((a[0]==b[0])&&(a[1]<b[1]))||((a[0]==b[0])&&(a[1]==b[1])&&(a[2]<b[2])))
/*
*** bitmap reading code courtesy of @BeholderOf from http://www.vbforums.com/showthread.php?261522-C-C-Loading-Bitmap-Files-%28Manually%29
*** with modifications by @ollo from https://stackoverflow.com/questions/14279242/read-bitmap-file-into-structure
*/
// On-disk BMP file header (14 bytes). pack(1) is required so the struct
// matches the file layout exactly when fread() into directly.
#pragma pack(push, 1)
typedef struct tagBITMAPFILEHEADER
{
uint16_t bfType;  //specifies the file type; 0x4D42 ("BM") for bitmaps
uint32_t bfSize;  //specifies the size in bytes of the bitmap file
uint16_t bfReserved1;  //reserved; must be 0
uint16_t bfReserved2;  //reserved; must be 0
uint32_t bfOffBits;  //specifies the offset in bytes from the bitmapfileheader to the bitmap bits
}BITMAPFILEHEADER;
#pragma pack(pop)
// On-disk BMP info header (40 bytes, BITMAPINFOHEADER variant). pack(1)
// keeps the layout byte-exact for direct fread().
#pragma pack(push, 1)
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize;  //specifies the number of bytes required by the struct
int32_t biWidth;  //specifies width in pixels
int32_t biHeight;  //specifies height in pixels
uint16_t biPlanes;  //specifies the number of color planes, must be 1
uint16_t biBitCount;  //specifies the number of bits per pixel
uint32_t biCompression;  //specifies the type of compression
uint32_t biSizeImage;  //size of image in bytes
int32_t biXPelsPerMeter;  //number of pixels per meter in x axis
int32_t biYPelsPerMeter;  //number of pixels per meter in y axis
uint32_t biClrUsed;  //number of colors used by the bitmap
uint32_t biClrImportant;  //number of colors that are important
}BITMAPINFOHEADER;
#pragma pack(pop)
// Loads the pixel data of a .bmp file. On success returns a malloc()'d
// buffer of bitmapInfoHeader->biSizeImage bytes with the B/R channels
// swapped to RGB order, and fills *bitmapInfoHeader; the caller owns the
// buffer. Returns NULL on any failure (missing file, bad magic, short read,
// allocation failure) -- *bitmapInfoHeader is unspecified in that case.
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader)
{
	FILE *filePtr;                     //our file pointer
	BITMAPFILEHEADER bitmapFileHeader; //our bitmap file header
	unsigned char *bitmapImage;        //store image data
	uint32_t imageIdx = 0;             //image index counter (unsigned: compared against biSizeImage)
	unsigned char tempRGB;             //our swap variable

	//open filename in read binary mode
	filePtr = fopen(filename, "rb");
	if (filePtr == NULL)
		return NULL;

	//read the bitmap file header and verify the "BM" magic number.
	//(The old code never checked the fread result and would happily
	//continue with garbage on a truncated file.)
	if (fread(&bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, filePtr) != 1 ||
	    bitmapFileHeader.bfType != 0x4D42)
	{
		fclose(filePtr);
		return NULL;
	}

	//read the bitmap info header
	if (fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER), 1, filePtr) != 1)
	{
		fclose(filePtr);
		return NULL;
	}

	//move file pointer to the beginning of bitmap data
	fseek(filePtr, bitmapFileHeader.bfOffBits, SEEK_SET);

	//allocate enough memory for the bitmap image data
	bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage);
	if (!bitmapImage)
	{
		fclose(filePtr);
		return NULL;
	}

	//read in the bitmap image data; a short read means a corrupt file.
	//(Replaces the old dead `bitmapImage == NULL` check, which could never
	//fire after fread and leaked the buffer anyway.)
	if (fread(bitmapImage, bitmapInfoHeader->biSizeImage, 1, filePtr) != 1)
	{
		free(bitmapImage);
		fclose(filePtr);
		return NULL;
	}

	//swap the r and b values to get RGB (bitmap is BGR)
	//NOTE(review): assumes 24-bit pixels (3 bytes each) -- biBitCount is not
	//checked; confirm callers only feed 24bpp bitmaps.
	for (imageIdx = 0; imageIdx < bitmapInfoHeader->biSizeImage; imageIdx += 3)
	{
		tempRGB = bitmapImage[imageIdx];
		bitmapImage[imageIdx] = bitmapImage[imageIdx + 2];
		bitmapImage[imageIdx + 2] = tempRGB;
	}

	//close file and return bitmap image data
	fclose(filePtr);
	return bitmapImage;
}
/*
*** end of external code
*/
// A decoded depth image: dimensions plus one byte per pixel.
struct rawImageData{
int32_t width;
int32_t height;
unsigned char* image_data;
};
// data(n, dim): dim-th float field (x/y/z) of the struct that n points at.
#define data(n,dim) (*(&(n->x)+dim))
// NOTE(review): idx/idxa advance the pointer by (ix+dim) *elements of its
// declared type* before casting to float*. That is plain float indexing when
// p2 is a float*, but for a point3D* it reads the .x of element (ix+dim)
// rather than field `dim` of element `ix` -- confirm this is intended at the
// call sites that pass point3D*.
#define idx(p2,ix,dim) *((float*)(p2+ix+dim))
#define idxa(p2,ix,dim) ((float*)(p2+ix+dim))
#define idxn(n,ix,dim) *((float*)(((kdNode**) (&n+ix))+3)+dim)
// idxf/idxfa: row-major 2D indexing into a flat float array of row width w.
#define idxf(f,ix1,ix2,w) *(f+ix1*w+ix2)
#define idxfa(f,ix1,ix2,w) (f+ix1*w+ix2)
// A point in 3D space.
struct point3D{
float x;
float y;
float z;
};
// A kd-tree node: parent/child links plus the point it stores. The x field
// must stay first among the coordinates so data()/idxn() can address x/y/z
// by offset.
struct kdNode{
kdNode* parent;
kdNode* left;
kdNode* right;
float x;
float y;
float z;
};
// A 3D array of floats (length x width x height) with explicit dimensions.
struct superArray{
int length;
int width;
int height;
float* data;
};
// Row-major 3D indexing into a superArray.
#define indexSuperArray(a,i, j, k) *(a->data+i*a->width*a->height+j*a->height+k)
// Allocates a superArray holding length x width x height floats. The header
// and the element buffer are separate heap allocations; elements are left
// uninitialized. Caller frees both res->data and res.
__host__ __device__ superArray* allocSuperArray(int length, int width, int height){
	// sizeof(superArray) accounts for struct padding. The old hand-computed
	// size (3*sizeof(int)+sizeof(float*)) under-allocated on 64-bit targets,
	// so writing res->data overflowed the allocation.
	superArray* res = (superArray*)malloc(sizeof(superArray));
	res->length = length;
	res->width = width;
	res->height = height;
	res->data = (float*)malloc(sizeof(float) * length * width * height);
	return res;
}
size_t rawImageDataSize(int, int);
// Reads the bitmap `fname` and stores a single-channel copy of it in a
// freshly allocated rawImageData at *d (caller owns *d and its image_data).
// Sets *d to NULL if the bitmap cannot be loaded.
// NOTE(review): the 2*i source stride suggests 16-bit source pixels with
// only the low byte kept -- confirm against the capture format.
void loadImage(char* fname, rawImageData** d){
	BITMAPINFOHEADER bitmapInfoHeader;
	unsigned char* temp = LoadBitmapFile(fname, &bitmapInfoHeader);
	if (temp == NULL) {
		// Propagate the failure instead of dereferencing a NULL buffer.
		*d = NULL;
		return;
	}
	int width = bitmapInfoHeader.biWidth;
	int height = bitmapInfoHeader.biHeight;
	*d = (rawImageData*)malloc(sizeof(rawImageData));
	(**d).width = width;
	(**d).height = height;
	(**d).image_data = (unsigned char*)malloc(width * height * sizeof(char));
	for (int i = 0; i < width * height; i++) {
		(**d).image_data[i] = temp[2 * i];
	}
	// LoadBitmapFile hands us ownership of the decoded buffer; the old code
	// leaked it on every call.
	free(temp);
}
// Number of bytes needed to serialize a rawImageData for a width x height
// image: the two dimension ints plus one byte per pixel.
size_t rawImageDataSize(int width, int height){
	return 2 * sizeof(int) + width * height * sizeof(unsigned char);
}
// p3idx: flat index of coordinate `dim` of point `i` in a packed float array.
#define p3idx(i,dim) 3*i+dim
// Back-projects each depth pixel into a 3D point: a ray through the pixel on
// a virtual image plane 1cm from the camera is scaled so its length matches
// the recorded depth. Threads split the pixels into equal contiguous chunks.
// NOTE(review): `remainder` is computed but never used, so when the pixel
// count is not divisible by the thread count the trailing pixels are never
// written -- confirm launches always divide evenly.
// NOTE(review): y is derived as i - x*(*height); for row-major pixels one
// would expect i % (*width) -- verify for non-square images.
__global__ void get3DPoints(int* width, int* height, unsigned char* image_data, point3D* p){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
int dims_prod = (*width)*(*height);
int elems_per_thread = dims_prod/(blockDim.x*gridDim.x);
int base = tid*elems_per_thread;
int remainder = dims_prod%(blockDim.x*gridDim.x);
int end = base+elems_per_thread;
for(int i = base; i<end; i+=1){
//use reverse raytracing to figure out the location of the point in 3D space
unsigned char h = (image_data[i]);
int x = i/(*width);
int y = i-x*(*height);
float x_meters = x * CAMERA_PIXEL_SCALE;
float y_meters = y * CAMERA_PIXEL_SCALE;
// Length of the un-scaled ray on the 1cm image plane (0.0001 = 0.01^2).
float h_camera = sqrt(0.0001+x_meters*x_meters+y_meters*y_meters);
float h_real = h * CAMERA_DEPTH_UNIT;
float scale = h_real/h_camera;
idx(p,i,0) = x_meters * scale;
idx(p,i,1) = y_meters * scale;
idx(p,i,2) = 0.01 * scale;
}
}
// Applies the 4x4 homogeneous transform m (row-major) to each of the
// `length` points in a, writing the perspective-divided results to b.
// Points are split into contiguous per-thread chunks, with the remainder
// spread over the first threads.
__global__ void doBatchTransformation(point3D* a, int length, float* m, point3D* b){
	// Fix: the global thread id is threadIdx.x + blockIdx.x * blockDim.x;
	// the old `threadIdx.x + blockIdx.x + blockDim.x` made threads in
	// different blocks collide on the same chunks and left most points
	// untouched (compare the correct form in get3DPoints).
	int tid = threadIdx.x + blockIdx.x * blockDim.x;
	int elems_per_thread = length / (blockDim.x * gridDim.x);
	int base = tid * elems_per_thread;
	int remainder = length % (blockDim.x * gridDim.x);
	// The first `remainder` threads take one extra element each.
	if (tid < remainder) {
		elems_per_thread++;
		base += tid;
	}
	else {
		base += remainder;
	}
	int end = base + elems_per_thread;
	for (int i = base; i < end; i++) {
		float x = idx(a, i, 0);
		float y = idx(a, i, 1);
		float z = idx(a, i, 2);
		idx(b, i, 0) = x * idxf(m, 0, 0, 4) + y * idxf(m, 0, 1, 4) + z * idxf(m, 0, 2, 4) + idxf(m, 0, 3, 4);
		idx(b, i, 1) = x * idxf(m, 1, 0, 4) + y * idxf(m, 1, 1, 4) + z * idxf(m, 1, 2, 4) + idxf(m, 1, 3, 4);
		idx(b, i, 2) = x * idxf(m, 2, 0, 4) + y * idxf(m, 2, 1, 4) + z * idxf(m, 2, 2, 4) + idxf(m, 2, 3, 4);
		// Homogeneous divide by the resulting w component.
		float w = x * idxf(m, 3, 0, 4) + y * idxf(m, 3, 1, 4) + z * idxf(m, 3, 2, 4) + idxf(m, 3, 3, 4);
		idx(b, i, 0) /= w;
		idx(b, i, 1) /= w;
		idx(b, i, 2) /= w;
	}
}
// Intended: compute, per block, the mean of that block's partition of `a`
// (partitions given by length[]) and write it to sum[blockIdx.x..+2].
// NOTE(review): several suspected defects to confirm before relying on this:
//  - `tid` is computed with `+` instead of `blockIdx.x*blockDim.x` and is
//    then never used; every thread in a block scans the same range.
//  - the `base` loop adds length[blockIdx.x] each iteration where
//    length[i] looks intended.
//  - elems_per_thread divides by gridDim.x (block count), not the thread
//    count, and only thread 0's partial sum is written, so the "mean" covers
//    only a slice of the partition.
//  - consecutive blocks write overlapping float triples via idx(sum,...).
__global__ void getMean(point3D* a, int* length, float* sum){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int elems_per_thread = (length[blockIdx.x])/(gridDim.x);
if(elems_per_thread < 2){
return;
}
int base = 0;
for(int i = 0; i<blockIdx.x; i++){
base+=length[blockIdx.x];
}
int end = base+elems_per_thread;
float local_sum[3] = {0,0,0};
for(int i = base; i<end; i++){
for(int dim = 0; dim<3; dim++){
local_sum[dim] += idx(a,i,dim);
}
}
if(threadIdx.x==0){
idx(sum,blockIdx.x,0) = 0;
idx(sum,blockIdx.x,1) = 0;
idx(sum,blockIdx.x,2) = 0;
for(int dim = 0; dim<3; dim++){
idx(sum,blockIdx.x,dim) += local_sum[dim];
}
for(int dim = 0; dim<3; dim++){
idx(sum,blockIdx.x,dim) /= length[blockIdx.x];
}
}
}
// One kd-tree partition step: each block splits its partition of `a` around
// the pivot med[] on axis *dim, scattering points into b (values > pivot
// from the front via atomic l, the rest from the back via atomic r), and
// records the split point in div[] and the child sizes in length[2b]/[2b+1].
// NOTE(review): suspected defects to confirm:
//  - `tid` uses `+` instead of `blockIdx.x*blockDim.x` and is unused; every
//    thread loops over the same [base,end) range, so each point is emitted
//    blockDim.x times.
//  - the shared r/l pointers are set up by thread 0 with no __syncthreads()
//    before other threads use them.
//  - length[] is overwritten in place (indices 2b, 2b+1) while later blocks
//    still read the old per-partition values to compute `base`.
__global__ void doPartitionStep(point3D* a, int* length, int* dim, float* med, point3D* b, int *div){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int elems_per_thread = (int) (length[blockIdx.x])/(gridDim.x);
if(elems_per_thread<2){
div[blockIdx.x] = -1;
length[2*blockIdx.x] = 0;
length[2*blockIdx.x+1] = 0;
return;
}
int base = 0;
for(int i = 0; i<blockIdx.x; i++){
base+=length[i];
}
int end = base+elems_per_thread;
//__shared__ int* lsizes;
//__shared__ int* rsizes;
unsigned int curr;
__shared__ unsigned int* r;
__shared__ unsigned int* l;
if(threadIdx.x==0){
r = (unsigned int*)malloc(sizeof(int));
l = (unsigned int*)malloc(sizeof(int));
*r = end-1;
*l = base;
}
for(int i = base; i<end; i++){
if(idx(a,i,*dim)>*med){
curr = atomicAdd(l,1U);
}
else{
curr = atomicSub(r,1U);
}
b[curr] = a[i];
}
if(threadIdx.x==0){
div[blockIdx.x] = *r;
length[blockIdx.x*2+1] = *l;
length[blockIdx.x*2] = length[blockIdx.x]-(*l);
free(r);
free(l);
}
}
// Host-side pass that inserts the leftover points of each unfinished
// partition (lengths[i] > 1) into the kd-tree one by one, walking from the
// root and cycling the split axis per level.
// NOTE(review): the attach sides look inverted relative to the descent
// sides -- when curr's coordinate is greater the walk descends left but, at
// a leaf, attaches the point as curr->right (and vice versa). Confirm the
// intended left/right convention against doKNN's traversal.
void finishKDTree( int* lengths, int llength, kdNode* root, kdNode* tree){
int loc = 0;
for(int i = 0; i<llength; i++){
// Partitions of size 0 or 1 were fully placed by the GPU passes.
if(lengths[i]<=1){
loc+=lengths[i];
continue;
}
for(int j = 0; j<lengths[i]; j++){
kdNode* point = tree+loc+j;
kdNode* curr = root;
int dim = 0;
while(true){
if(data(curr,dim)>data(point,dim)){
if(curr->left!=NULL){
curr = curr->left;
}
else{
curr->right = point;
break;
}
}
else{
if(curr->right!=NULL){
curr = curr->right;
}
else{
curr->left = point;
break;
}
}
// Next tree level splits on the next axis (x -> y -> z -> x ...).
dim++;
dim%=3;
}
}
loc+=lengths[i];
}
}
// Builds a kd-tree over `length` points by repeated GPU mean/partition
// passes (doubling the block count each level until there are enough
// partitions), then reconstructs the node links on the host and inserts any
// leftover points with finishKDTree. *head receives the root node.
// NOTE(review): suspected defects to confirm:
//  - the input `a` is never copied into the device buffer `curr`, so the
//    first getMean/doPartitionStep round runs on uninitialized memory.
//  - the hipMemcpy of node data into host_tree is commented out, so node
//    coordinates are never populated from the device.
//  - `p` is read (host_tree+p) on the first loop iteration before it is
//    ever assigned.
void buildKDTree(point3D* a, int length, kdNode* b, int nthreads, kdNode* head){
int blocks = 1;
int max_blocks = length/nthreads; //number of blocks to use before we have a thread for every block
point3D* curr;
point3D* next;
hipMalloc((void**)&curr,length*sizeof(float)*3);
hipMalloc((void**)&next,length*sizeof(float)*3);
point3D* temp;
int* tree_struct;
hipMalloc((void**)&tree_struct,length*sizeof(int));
float* means;
hipMalloc((void**)&means,max_blocks*sizeof(float));
int* dev_length;
hipMalloc((void**)&dev_length,sizeof(int)*max_blocks);
hipMemcpy(dev_length, &length, 1*sizeof(int), hipMemcpyHostToDevice);
int dim = 0;
int tree_ptr;
int* dev_dim;
hipMalloc((void**)&dev_dim, sizeof(int));
// One mean + partition round per tree level; each round doubles the number
// of partitions and cycles the split axis.
for(;blocks<max_blocks;blocks++){
hipDeviceSynchronize();
hipLaunchKernelGGL(( getMean), dim3(blocks),dim3(nthreads), 0, 0, curr,dev_length,means);
hipMemcpy(dev_dim,&dim,1*sizeof(int),hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( doPartitionStep), dim3(blocks),dim3(nthreads), 0, 0, curr,dev_length,dev_dim,means,next,(tree_struct));
dim++;
dim%=3;
// Ping-pong the point buffers between rounds.
temp = curr;
curr = next;
next = temp;
}
kdNode* host_tree;
host_tree = (kdNode*) malloc(length*sizeof(kdNode));
// hipMemcpy(host_tree, b, length,hipMemcpyDeviceToHost);
int* host_tree_struct = (int*) malloc(length*sizeof(int));
hipMemcpy(host_tree_struct, tree_struct, length*sizeof(int), hipMemcpyDeviceToHost);
int c;
int p;
int r;
int l;
// Rebuild parent/child links from the implicit heap layout recorded in
// tree_struct (children of slot i live at 2(i+1) and 2(i+1)+1).
for(int i = 0; i<length; i++){
c = host_tree_struct[i];
if(c<0||c>length)continue;
l = host_tree_struct[2*(i+1)];
r = host_tree_struct[2*(i+1)+1];
host_tree[c].right = host_tree+r;
host_tree[c].left = host_tree+l;
if(i==0){
host_tree[c].parent = NULL;
*head = host_tree[c];
}
else{
host_tree[c].parent = host_tree+p;
}
p = (i+1)/2-1;
}
int* lengths = (int*)malloc(max_blocks*sizeof(int));
hipMemcpy(lengths, dev_length, max_blocks*sizeof(int), hipMemcpyDeviceToHost);
finishKDTree(lengths,max_blocks,host_tree,head);
hipFree(curr);
hipFree(next);
hipFree(tree_struct);
hipFree(means);
hipFree(dev_length);
hipFree(dev_dim);
}
// k for the k-nearest-neighbour search below.
#define k 4
// Squared Euclidean distance between two structs exposing .x/.y/.z.
#define dist(f1, f2) ((f1.x-f2.x)*(f1.x-f2.x)+(f1.y-f2.y)*(f1.y-f2.y)+(f1.z-f2.z)*(f1.z-f2.z))
// For each node of kdt, finds (approximately) its k nearest neighbours in
// the tree rooted at ktRoot by descending to a leaf and then backtracking
// with a direction state machine, writing the winners to ktg[i][0..k-1].
// NOTE(review): suspected defects to confirm:
//  - `tid` uses `+` instead of `blockIdx.x*blockDim.x`.
//  - dists[0] is never initialized (only the leaf node is stored at slot 0),
//    yet the min-scan loop at the bottom reads it.
//  - no pruning by splitting-plane distance, so results are not guaranteed
//    to be the true k nearest.
__global__ void doKNN(kdNode* kdt, kdNode* ktRoot, int* size, kdNode** ktg){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int elems_per_thread = (*size)/(blockDim.x*gridDim.x);
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
kdNode buffer[k];
float dists[k];
int head = 0;
kdNode* curr;
kdNode* point;
int dim;
for(int i = base; i<end; i++){
curr = ktRoot;
point = kdt+i;
dim = 0;
// Phase 1: descend to the leaf whose cell contains the query point.
while(true){
if(data(curr,dim)>data(point,dim)){
if(curr->left!=NULL){
curr = curr->left;
}
else{break;}
}
else{
if(curr->right!=NULL){
curr = curr->right;
}
else{break;}
}
dim++;
dim%=3;
}
buffer[head] = *curr;
for(int j = 1; j<k; j++){
dists[j] = INFINITY;
}
// Phase 2: walk back up/down the tree; `dir` encodes where we came from
// (0/1: descended left/right; 2/3: returned from left/right child).
int mi = 1;
int dir = 0; //top right, top left, bottom left, bottom right
while(true){
if(dist((*curr),(*point))<dists[mi]){
// Current node beats the worst candidate: replace it and keep
// exploring downward where possible.
buffer[mi] = *curr;
if(dir==2){
dir = 0;
curr = curr->left;
}
else if(dir==3){
dir = 1;
curr = curr->right;
}
else{
if(data(curr,dim)>data(point,dim)){
if(curr->left!=NULL){
dir = 0;
curr = curr->left;
dim++;
dim%=3;
}
else{
dir += 2;
curr = curr->parent;
dim--;
dim%=3;
}
}
else{
if(curr->right!=NULL){
dir = 1;
curr = curr->right;
dim++;
dim%=3;
}
else{
dir+=2;
curr = curr->parent;
dim--;
dim%=3;
}
}
}
}
else if( curr->parent != NULL){
// Not an improvement: back up one level, remembering which child we
// returned from.
if(curr->parent->left==curr){
dir = 2;
}
else{
dir = 3;
}
curr = curr->parent;
dim--;
dim%=3;
}
else{break;}
// Re-select the current worst candidate slot.
for(int j = 0; j<k; j++){
if(dists[j]>dists[mi]){
mi = j;
}
}
}
for(int j = 0; j<k; j++){
ktg[i][j] = buffer[j];
}
}
}
// Intended: sum all points in pts and divide to get their center of mass in
// com[0..2] (thread partials -> block partials -> global atomics).
// NOTE(review): suspected defects to confirm before use:
//  - `tid` uses `+` instead of `blockIdx.x*blockDim.x`.
//  - local_sum is a single point3D allocation but is indexed by threadIdx.x
//    via idx(), writing out of bounds for threadIdx.x > 0.
//  - block_sum is a __shared__ pointer yet every thread mallocs and zeroes
//    it (race + leak), with no __syncthreads() anywhere.
//  - the final division uses the thread count, not *length, and com is
//    never zeroed before the atomic accumulation.
__global__ void getCenterOfMass(point3D* pts, int* length, float* com){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int elems_per_thread = (*length)/(blockDim.x*gridDim.x);
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
float* local_sum;
local_sum = (float*) malloc(sizeof(point3D));
idx(local_sum,threadIdx.x,0) = 0;
idx(local_sum,threadIdx.x,1) = 0;
idx(local_sum,threadIdx.x,2) = 0;
__shared__ float* block_sum;
block_sum = (float*) malloc(sizeof(point3D));
block_sum[0] = 0;
block_sum[1] = 0;
block_sum[2] = 0;
for(int i = base; i<end; i++){
idx(local_sum,threadIdx.x,0) += idx(pts,i,0);
idx(local_sum,threadIdx.x,1) += idx(pts,i,1);
idx(local_sum,threadIdx.x,2) += idx(pts,i,2);
}
atomicAdd(block_sum+0, local_sum[0]);
atomicAdd(block_sum+1, local_sum[1]);
atomicAdd(block_sum+2, local_sum[2]);
if(blockIdx.x==0){
atomicAdd(com+0, block_sum[0]);
atomicAdd(com+1, block_sum[1]);
atomicAdd(com+2, block_sum[2]);
}
if(tid==0){
com[0] /= blockDim.x*gridDim.x;
com[1] /= blockDim.x*gridDim.x;
com[2] /= blockDim.x*gridDim.x;
}
free(local_sum);
free(block_sum);
}
#define dot3(a,b) (a[0]*b[0]+a[1]*b[1]+a[2]*b[2])
// Orthonormalizes the rows of A into Q, i.e. produces the orthogonal factor
// of a QR decomposition with the rows of A treated as vectors (row-wise
// modified Gram-Schmidt). Precondition: the rows of A are linearly
// independent, so no intermediate norm is zero.
// (The previous version indexed Q[3] -- one past the array -- and subtracted
// the projections only after normalizing, so they never affected the
// output.)
static inline void getQRDecomposition(float A[3][3], float Q[3][3]){
	float u[3];
	for (int i = 0; i < 3; i++) {
		// Start from row i of A.
		for (int c = 0; c < 3; c++)
			u[c] = A[i][c];
		// Remove the components along the already-produced orthonormal rows.
		for (int j = 0; j < i; j++) {
			float proj = u[0] * Q[j][0] + u[1] * Q[j][1] + u[2] * Q[j][2];
			for (int c = 0; c < 3; c++)
				u[c] -= proj * Q[j][c];
		}
		// Normalize what is left to get orthonormal row i.
		float norm = sqrtf(u[0] * u[0] + u[1] * u[1] + u[2] * u[2]);
		for (int c = 0; c < 3; c++)
			Q[i][c] = u[c] / norm;
	}
}
// RQ decomposition via QR of the reversed matrix: mirror A about its
// anti-diagonal into B, take QR of B, and mirror the resulting orthogonal
// factor back into Q. Only the orthogonal factor is produced.
static inline void getRQDecomposition(float A[3][3], float Q[3][3]){
	float B[3][3];
	float S[3][3];
	for (int i = 0; i < 3; i++) {
		for (int j = 0; j < 3; j++) {
			// Valid indices are 0..2, so the anti-diagonal mirror is
			// [2-j][2-i]; the old [3-j][3-i] wrote one element past the
			// array bounds.
			B[2 - j][2 - i] = A[i][j];
		}
	}
	getQRDecomposition(B, S);
	for (int i = 0; i < 3; i++) {
		for (int j = 0; j < 3; j++) {
			Q[2 - j][2 - i] = S[i][j];
		}
	}
}
// Intended: accumulate the 3x3 cross-covariance W = sum_i x_i * p_i^T over
// `length` point pairs, via per-thread partials in a superArray followed by
// a tree reduction and global atomics.
// NOTE(review): suspected defects to confirm:
//  - `tid` uses `+` instead of `blockIdx.x*blockDim.x`.
//  - sum_t is a __shared__ pointer but allocSuperArray is called by every
//    thread (race + per-thread leak), and partials are assigned with `=`
//    inside the element loop, overwriting rather than accumulating.
//  - no __syncthreads() between tree-reduction rounds, and the reduction
//    indexes the superArray's first dimension by threadIdx.x while it was
//    sized gridDim.x.
//  - free(sum_t) releases the struct but not sum_t->data.
__global__ void consolodate(point3D* x, point3D* p, int* length, float* W){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int threads =(blockDim.x*gridDim.x);
int elems_per_thread = (*length)/threads;
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
__shared__ superArray* sum_t;
//change this to proper alloc
sum_t = allocSuperArray(gridDim.x,3,3);
for(int i = base; i<end; i++){
for(int m = 0; m<3; m++){
for(int n = 0; n<3; n++){
indexSuperArray(sum_t,threadIdx.x,m,n) = idx(x,i,m)*idx(p,i,n);
}
}
}
// Pairwise tree reduction of the per-thread partial matrices.
for(int d = 1; d<threads; d*=2){
if(threadIdx.x%d==0&&threadIdx.x!=0){
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
indexSuperArray(sum_t,threadIdx.x-d,i,j)+=indexSuperArray(sum_t,threadIdx.x,i,j);
}
}
}
}
// Fold this block's result into the global 3x3 accumulator.
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
atomicAdd(&idx(W,i,j),indexSuperArray(sum_t,threadIdx.x,i,j));
}
}
free(sum_t);
}
// Scratch buffers shared across blocks for selectSubset's final reduction.
__device__ point3D* p2blockbuffer;
__device__ point3D* f2blockbuffer;
__device__ point3D* distsblockbuffer;
// k was a macro for doKNN; below it is reused as a loop variable.
#undef k
// Intended: for every point in p, find its closest point in the kd-tree pt,
// then keep the *newlength best (closest) correspondence pairs overall,
// writing the tree-side points to p2 and the query-side points to f2.
// Reduction happens thread->block->grid through the shared/global buffers
// above.
// NOTE(review): this kernel looks unfinished; suspected defects to confirm:
//  - `tid` uses `+` instead of `blockIdx.x*blockDim.x`.
//  - the shared buffers are allocated by thread 0 but written by all
//    threads with no __syncthreads(); p2buffer/f2buffer are sized as
//    pointer arrays yet indexed as point3D arrays.
//  - both branches of each distance comparison copy the same source
//    element, and f2buffer is read in the final copy without ever being
//    filled.
//  - several memcpy calls mix point3D and float element sizes.
__global__ void selectSubset(kdNode* pt, kdNode* pthead, point3D* p, int* plength, int* flength, int* newlength, point3D* p2, point3D* f2){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int threads =(blockDim.x*gridDim.x);
int elems_per_thread = (*plength)/threads;
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
point3D* buffer;
buffer = (point3D*) malloc(elems_per_thread*sizeof(point3D));
point3D* buffer2;
buffer2 = (point3D*) malloc(elems_per_thread*sizeof(point3D));
float* dists;
dists = (float*)malloc(elems_per_thread*sizeof(float));
for(int i = base; i<end; i++){
kdNode curr = *pthead;
int dim = 0;
//get closest point in pt
int stop = false;
while(!stop){
if(data((&curr),dim)>idx(p,i,dim)){
if(curr.left!=NULL){
curr = *curr.left;
}
else{stop = true;}
}
else{
if(curr.right!=NULL){
curr = *curr.right;
}
else{stop = true;}
}
dim++;
dim%=3;
}
// Record the leaf reached, the query point, and their squared distance.
memcpy((float*)(buffer+i-base), &curr.x,3*sizeof(float));
buffer2[i-base] = p[i];
dists[i-base] = dist(curr,p[i]);
}
__shared__ point3D* p2buffer;
__shared__ point3D* f2buffer;
__shared__ point3D* distsbuffer;
point3D* temp;
point3D* temp2;
temp = (point3D*)malloc( (*newlength)*sizeof(point3D));
temp2 = (point3D*)malloc( (*newlength)*sizeof(point3D));
if(threadIdx.x==0){
p2buffer = (point3D*)malloc(sizeof(point3D*)*gridDim.x);
f2buffer = (point3D*)malloc(sizeof(point3D*)*gridDim.x);
distsbuffer = (point3D*) malloc(gridDim.x*elems_per_thread*sizeof(point3D));
}
memcpy(distsbuffer+threadIdx.x, dists, (*newlength)*sizeof(point3D));
memcpy(temp, buffer, (*newlength)*sizeof(point3D));
memcpy(temp+(*newlength), buffer2, (*newlength)*sizeof(point3D));
memcpy(p2buffer+threadIdx.x ,temp, (*newlength)*2*sizeof(point3D));
//reduce by threads
for(int d = 1; d<threads; d*=2){
if(threadIdx.x%d==0&&threadIdx.x!=0){
int j = 0;
int k = 0;
for(int i = 0; i<(*newlength); i++){
if(idx(distsbuffer,threadIdx.x,j)>idx(distsbuffer,threadIdx.x-d,k)){
memcpy(idxa(p2buffer,threadIdx.x,i), idxa(p2buffer,threadIdx.x,k), 3*sizeof(float));
}
else{
memcpy(idxa(p2buffer,threadIdx.x,i), idxa(p2buffer,threadIdx.x,k), 3*sizeof(float));
}
}
}
}
free(buffer);
// Block-level stage: block 0's thread 0 sets up the global buffers, then
// blocks merge pairwise the same way the threads did.
if(threadIdx.x==0){
p2blockbuffer = (point3D*)malloc(sizeof(point3D*)*blockDim.x);
f2blockbuffer = (point3D*)malloc(sizeof(point3D*)*blockDim.x);
distsblockbuffer = (point3D*)malloc(blockDim.x*(*newlength)*sizeof(point3D));
memcpy(distsblockbuffer+blockIdx.x,distsbuffer,(*newlength)*sizeof(point3D));
memcpy(p2blockbuffer+blockIdx.x,p2buffer,(*newlength)*sizeof(point3D));
memcpy(f2blockbuffer+blockIdx.x,f2buffer,(*newlength)*sizeof(point3D));
}
for(int d = 1; d<threads; d*=2){
if(blockIdx.x%d==0&&blockIdx.x!=0){
int j = 0;
int k = 0;
for(int i = 0; i<(*newlength); i++){
if(idx(distsblockbuffer,blockIdx.x,j)>idx(distsblockbuffer,blockIdx.x-d,k)){
memcpy(idxa(p2blockbuffer,blockIdx.x,i), idxa(p2blockbuffer,blockIdx.x,k), 3*sizeof(float));
memcpy(idxa(f2blockbuffer,blockIdx.x,i), idxa(f2blockbuffer,blockIdx.x,k), 3*sizeof(float));
}
else{
memcpy(idxa(p2blockbuffer,blockIdx.x,i), idxa(p2blockbuffer,blockIdx.x,k), 3*sizeof(float));
memcpy(idxa(f2blockbuffer,blockIdx.x,i), idxa(f2blockbuffer,blockIdx.x,k), 3*sizeof(float));
}
}
}
}
// Block 0 publishes the winning subset to the output arrays.
if(blockIdx.x == 0){
memcpy(p2, p2blockbuffer, (*newlength)*sizeof(point3D));
memcpy(f2, f2blockbuffer, (*newlength)*sizeof(point3D));
}
free(temp);
free(temp2);
if(threadIdx.x==0){
free(p2buffer);
free(f2buffer);
free(distsbuffer);
}
if(blockIdx.x==0){
free(p2blockbuffer);
free(f2blockbuffer);
free(distsblockbuffer);
}
free(buffer2);
free(dists);
}
#define fl2size 3*sizeof(float*)+3*3*sizeof(float)
/*
 * Runs 100 ICP iterations (HIP) registering the moving cloud f against the
 * fixed cloud p (kd-trees ft/pt, ft rooted at ftHead). Each iteration:
 *   1. selectSubset picks *p2length correspondence pairs into (f2, p2);
 *   2. consolodate accumulates the 3x3 cross-covariance W on the device;
 *   3. the host factors W and forms an orthonormal rotation R = U*V;
 *   4. a 4x4 rigid transform T (rotation + centre-of-mass translation) is
 *      uploaded and applied to f in place.
 *
 * Fixed in this revision:
 *  - flength was copied into plength_dev (clobbering it), leaving
 *    flength_dev uninitialised;
 *  - W was read back with fl2size (60 bytes) from a 36-byte device buffer;
 *  - T was "uploaded" with hipMemcpy(&T, TDev, ..., DeviceToHost), i.e. the
 *    wrong direction with swapped arguments and the wrong size;
 *  - WDev/com are built up by the kernels with atomicAdd, so they are now
 *    zeroed before each pass;
 *  - com_host was leaked.
 */
void doICP(point3D* f, point3D* p, kdNode* ft, kdNode* pt, kdNode* ftHead, int plength, int flength){
float* WDev;
float* TDev;
hipMalloc((void**)&WDev, 9*sizeof(float));
hipMalloc((void**)&TDev, 16*sizeof(float));
point3D* p2;
hipMalloc((void**)&p2, plength*sizeof(point3D));
point3D* f2;
hipMalloc((void**)&f2, flength*sizeof(point3D));
float* com;
float* com_host;
hipMalloc((void**)&com,sizeof(float)*3);
com_host = (float*)malloc(sizeof(float)*3);
float U[3][3];
float V[3][3];
float W[3][3];
float T[4][4];
float R[3][3];
int* p2length;
int p2length_host = flength/8; // keep the closest 1/8th of the correspondences
hipMalloc((void**) &p2length, sizeof(int));
hipMemcpy(p2length,&p2length_host, 1*sizeof(int), hipMemcpyHostToDevice);
int* plength_dev;
hipMalloc((void**) &plength_dev, sizeof(int));
hipMemcpy(plength_dev,&plength, 1*sizeof(int), hipMemcpyHostToDevice);
int* flength_dev;
hipMalloc((void**) &flength_dev, sizeof(int));
// was: copied flength into plength_dev, clobbering it and leaving
// flength_dev uninitialised
hipMemcpy(flength_dev,&flength, 1*sizeof(int), hipMemcpyHostToDevice);
for(int n = 0; n<100; n++){
// the accumulation kernels add into these with atomicAdd: start from zero
hipMemset(WDev, 0, 9*sizeof(float));
hipMemset(com, 0, 3*sizeof(float));
hipDeviceSynchronize();
hipLaunchKernelGGL(( selectSubset), dim3(128),dim3(128), 0, 0, ft,ftHead,p,plength_dev,flength_dev,p2length,f2,p2);
hipDeviceSynchronize();
hipLaunchKernelGGL(( consolodate), dim3(128),dim3(128), 0, 0, f2,p2,p2length,WDev);
hipDeviceSynchronize();
// W is a 3x3 of floats: 9 floats (fl2size over-read the buffer)
hipMemcpy(W, WDev, 9*sizeof(float), hipMemcpyDeviceToHost);
// orthonormal factors of the cross-covariance; R = U*V approximates the
// aligning rotation
getQRDecomposition(W,U);
getRQDecomposition(W,V);
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
R[i][j] = 0;
}
}
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
for(int k = 0; k<3; k++){
R[i][k] += U[i][j]*V[j][k];
}
}
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( getCenterOfMass), dim3(128),dim3(128), 0, 0, f,flength_dev,com);
hipDeviceSynchronize();
hipMemcpy(com_host, com, 3*sizeof(float),hipMemcpyDeviceToHost);
/*
** [R, R, R, c]
** [R, R, R, c]
** [R, R, R, c]
** [0, 0, 0, 1]
*/
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
T[i][j] = R[i][j];
}
}
for(int i = 0; i<3; i++){
T[i][3] = com_host[i];
}
for(int i = 0; i<3; i++){
T[3][i] = 0;
}
T[3][3] = 1;
// upload T to the device (previous code copied device->host with the
// arguments reversed and an incorrect byte count)
hipMemcpy(TDev, T, 16*sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( doBatchTransformation), dim3(128),dim3(128), 0, 0, f, flength, TDev, f);
}
hipDeviceSynchronize();
free(com_host); // was leaked
hipFree(WDev);
hipFree(TDev);
hipFree(p2);
hipFree(f2);
hipFree(com);
hipFree(p2length);
hipFree(plength_dev);
hipFree(flength_dev);
}
/*
 * Pipeline entry point (HIP): loads five depth snapshots, back-projects each
 * into a 3D cloud on the device, registers each new cloud onto the running
 * cloud with ICP, and writes the merged cloud to result.bin.
 *
 * Fixed in this revision: the loop always reloaded filesToLoad[0] instead of
 * filesToLoad[i]; result.bin was opened unchecked, in text mode, and never
 * closed; host and device allocations were leaked on exit.
 * NOTE(review): all snapshots are assumed to have the dimensions of the
 * first one (`length` is computed once) -- confirm against the inputs.
 */
int main( void ) {
char* filesToLoad[5] = {"snapshot0.bmp","snapshot1.bmp","snapshot2.bmp","snapshot3.bmp","snapshot4.bmp"};
int numfiles = 5;
rawImageData* img;
int* width_dev;
int* height_dev;
unsigned char* data_dev;
point3D* p1_dev;
point3D* p2_dev;
point3D* temp;
kdNode* pt1;
kdNode* pt2;
loadImage(filesToLoad[0], &img);
int length = img->width*img->height;
int p1length = length;
hipMalloc((void**)&width_dev, sizeof(int));
hipMalloc((void**)&height_dev, sizeof(int));
hipMalloc((void**)&data_dev, length*sizeof(char));
hipMalloc((void**)&p1_dev, length*sizeof(point3D));
hipMalloc((void**)&p2_dev, length*sizeof(point3D));
hipMalloc((void**)&pt1, length*sizeof(kdNode));
hipMalloc((void**)&pt2, length*sizeof(kdNode));
hipMemcpy(data_dev,img->image_data,length*sizeof(char),hipMemcpyHostToDevice);
hipMemcpy(width_dev,&img->width,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(height_dev,&img->height,sizeof(int),hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( get3DPoints), dim3(128),dim3(128), 0, 0, width_dev,height_dev,data_dev, p1_dev);
kdNode* head = (kdNode*)malloc(sizeof(kdNode));
kdNode* trash = (kdNode*)malloc(sizeof(kdNode));
buildKDTree(p1_dev, length, pt1, 300, head);
for(int i = 1; i<numfiles; i++){
free(img->image_data); // loadImage allocates a fresh buffer per call
free(img);
// was filesToLoad[0]: every iteration re-registered the first snapshot
loadImage(filesToLoad[i], &img);
hipMemcpy(data_dev,img->image_data,length*sizeof(char),hipMemcpyHostToDevice);
hipMemcpy(width_dev,&img->width,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(height_dev,&img->height,sizeof(int),hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( get3DPoints), dim3(128),dim3(128), 0, 0, width_dev,height_dev,data_dev, p2_dev);
buildKDTree(p2_dev, length, pt2, 300, trash);
doICP(p1_dev,p2_dev,pt1,pt2,head,p1length,length);
// append the newly-registered cloud to the accumulated cloud
hipMalloc((void**)&temp, (p1length+length)*sizeof(point3D));
hipMemcpy(temp,p1_dev, p1length*sizeof(point3D),hipMemcpyDeviceToDevice);
hipMemcpy(temp+p1length,p2_dev,length*sizeof(point3D), hipMemcpyDeviceToDevice);
p1length += length;
hipDeviceSynchronize();
hipFree(p1_dev);
p1_dev = temp;
}
//read the data back and write it to a file
point3D* p1;
p1 = (point3D*)malloc(p1length*sizeof(point3D));
hipMemcpy(p1,p1_dev,p1length*sizeof(point3D),hipMemcpyDeviceToHost);
hipDeviceSynchronize();
FILE* fp;
fp = fopen("result.bin","wb"); // binary data: was text mode "w"
if(fp != NULL){
fwrite(p1,sizeof(point3D),p1length,fp);
fclose(fp); // was never closed, so the tail of the file could be lost
}
printf("%d",p1length);
free(p1);
free(img->image_data);
free(img);
free(head);
free(trash);
hipFree(width_dev);
hipFree(height_dev);
hipFree(data_dev);
hipFree(p1_dev);
hipFree(p2_dev);
hipFree(pt1);
hipFree(pt2);
return 0;
}
| be873f2f00b4162b3935f813d519e4f38dce4d8f.cu | #include "book.h"
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#define CAMERA_PIXEL_SCALE 0.0000617 //meters at 1cm distance
#define CAMERA_DEPTH_UNIT 4.5 //cm
#define swap(a,b) a^=b;b^=a;a^=b
#define xyzlt(a,b) ((a[0]<b[0])||((a[0]==b[0])&&(a[1]<b[1]))||((a[0]==b[0])&&(a[1]==b[1])&&(a[2]<b[2])))
/*
*** bitmap reading code courtesy of @BeholderOf from http://www.vbforums.com/showthread.php?261522-C-C-Loading-Bitmap-Files-%28Manually%29
*** with modifications by @ollo from https://stackoverflow.com/questions/14279242/read-bitmap-file-into-structure
*/
// On-disk BMP file header (14 bytes). pack(1) makes the struct byte-exact
// with the file layout so it can be fread() directly.
#pragma pack(push, 1)
typedef struct tagBITMAPFILEHEADER
{
uint16_t bfType; //specifies the file type ("BM" == 0x4D42 for bitmaps)
uint32_t bfSize; //specifies the size in bytes of the bitmap file
uint16_t bfReserved1; //reserved; must be 0
uint16_t bfReserved2; //reserved; must be 0
uint32_t bfOffBits; //specifies the offset in bytes from the bitmapfileheader to the bitmap bits
}BITMAPFILEHEADER;
#pragma pack(pop)
// On-disk BMP DIB header (BITMAPINFOHEADER, 40 bytes), also read verbatim.
#pragma pack(push, 1)
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize; //specifies the number of bytes required by the struct
int32_t biWidth; //specifies width in pixels
int32_t biHeight; //specifies height in pixels
uint16_t biPlanes; //specifies the number of color planes, must be 1
uint16_t biBitCount; //specifies the number of bits per pixel
uint32_t biCompression;//specifies the type of compression
uint32_t biSizeImage; //size of image in bytes
int32_t biXPelsPerMeter; //number of pixels per meter in x axis
int32_t biYPelsPerMeter; //number of pixels per meter in y axis
uint32_t biClrUsed; //number of colors used by the bitmap
uint32_t biClrImportant; //number of colors that are important
}BITMAPINFOHEADER;
#pragma pack(pop)
/*
 * Reads a BMP file: validates the "BM" magic, fills *bitmapInfoHeader, and
 * returns a malloc'd buffer of biSizeImage pixel bytes with the R and B
 * channels swapped (BMP stores BGR). Returns NULL on any failure; the
 * caller owns (and must free) the returned buffer.
 *
 * NOTE(review, external code): the fread() return values are unchecked, the
 * "bitmapImage == NULL" test after fread can never fire (malloc already
 * succeeded), and imageIdx (int) is compared against biSizeImage (uint32_t).
 */
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader)
{
FILE *filePtr; //our file pointer
BITMAPFILEHEADER bitmapFileHeader; //our bitmap file header
unsigned char *bitmapImage; //store image data
int imageIdx=0; //image index counter
unsigned char tempRGB; //our swap variable
//open filename in read binary mode
filePtr = fopen(filename,"rb");
if (filePtr == NULL)
return NULL;
//read the bitmap file header
fread(&bitmapFileHeader, sizeof(BITMAPFILEHEADER),1,filePtr);
//verify that this is a bmp file by check bitmap id
if (bitmapFileHeader.bfType !=0x4D42)
{
fclose(filePtr);
return NULL;
}
//read the bitmap info header
fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER),1,filePtr); // small edit. forgot to add the closing bracket at sizeof
//move file point to the begging of bitmap data
fseek(filePtr, bitmapFileHeader.bfOffBits, SEEK_SET);
//allocate enough memory for the bitmap image data
bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage);
//verify memory allocation
if (!bitmapImage)
{
free(bitmapImage);
fclose(filePtr);
return NULL;
}
//read in the bitmap image data
fread(bitmapImage,bitmapInfoHeader->biSizeImage,1,filePtr);
//make sure bitmap image data was read
if (bitmapImage == NULL)
{
fclose(filePtr);
return NULL;
}
//swap the r and b values to get RGB (bitmap is BGR)
for (imageIdx = 0;imageIdx < bitmapInfoHeader->biSizeImage;imageIdx+=3) // fixed semicolon
{
tempRGB = bitmapImage[imageIdx];
bitmapImage[imageIdx] = bitmapImage[imageIdx + 2];
bitmapImage[imageIdx + 2] = tempRGB;
}
//close file and return bitmap image data
fclose(filePtr);
return bitmapImage;
}
/*
*** end of external code
*/
// Host-side container for one decoded depth snapshot (one byte per pixel).
struct rawImageData{
int32_t width;
int32_t height;
unsigned char* image_data;
};
// data(n,dim): dim-th coordinate (0=x, 1=y, 2=z) of a kdNode pointer n --
// relies on x, y, z being consecutive float members.
#define data(n,dim) (*(&(n->x)+dim))
// NOTE(review): idx/idxa advance the *typed* pointer by (ix+dim) elements
// before casting, so for a point3D* argument idx(p,i,1) reads p[i+1].x, not
// p[i].y -- ((float*)(p2+ix))+dim was presumably intended. Flagged, not
// changed, because most kernels below depend on the current expansion.
#define idx(p2,ix,dim) *((float*)(p2+ix+dim))
#define idxa(p2,ix,dim) ((float*)(p2+ix+dim))
// idxn: appears to address the coordinates of the ix-th kdNode by stepping
// over its three pointer members -- unused in the visible code; verify
// before use.
#define idxn(n,ix,dim) *((float*)(((kdNode**) (&n+ix))+3)+dim)
// Row-major 2D indexing into a flat float array with row width w.
#define idxf(f,ix1,ix2,w) *(f+ix1*w+ix2)
#define idxfa(f,ix1,ix2,w) (f+ix1*w+ix2)
struct point3D{
float x;
float y;
float z;
};
// kd-tree node; the coordinates follow the three link pointers so data()
// can index them.
struct kdNode{
kdNode* parent;
kdNode* left;
kdNode* right;
float x;
float y;
float z;
};
// 3D float array with explicit dimensions; see allocSuperArray /
// indexSuperArray below.
struct superArray{
int length;
int width;
int height;
float* data;
};
#define indexSuperArray(a,i, j, k) *(a->data+i*a->width*a->height+j*a->height+k)
/*
 * Allocates a superArray header plus a length x width x height float buffer.
 * Fix: the header was allocated with the hand-computed size
 * 3*sizeof(int)+sizeof(float*), which ignores struct padding (the pointer
 * member is 8-byte aligned on 64-bit targets), so writing res->data
 * overflowed the allocation; sizeof(superArray) is used instead.
 * The caller must free both res->data and res.
 */
__host__ __device__ superArray* allocSuperArray(int length, int width, int height){
superArray* res = (superArray*)malloc(sizeof(superArray));
res->length = length;
res->width = width;
res->height = height;
res->data = (float*) malloc(sizeof(float)*length*width*height);
return res;
}
size_t rawImageDataSize(int, int);
/*
 * Loads a depth snapshot: decodes the BMP via LoadBitmapFile and keeps every
 * second byte of the pixel data as the depth channel in a freshly allocated
 * rawImageData (*d). On failure *d is set to NULL.
 * NOTE(review): taking temp[2*i] assumes two bytes per retained pixel --
 * confirm against the snapshot format.
 *
 * Fixes: allocation now uses sizeof(rawImageData) instead of a hand-computed
 * size that ignores padding; the buffer returned by LoadBitmapFile is freed
 * (it was leaked); a failed load no longer dereferences a NULL pointer.
 */
void loadImage(char* fname, rawImageData** d){
BITMAPINFOHEADER bitmapInfoHeader;
unsigned char* temp = LoadBitmapFile(fname,&bitmapInfoHeader);
if(temp == NULL){
*d = NULL;
return;
}
int width = bitmapInfoHeader.biWidth;
int height = bitmapInfoHeader.biHeight;
*d = (rawImageData*) malloc(sizeof(rawImageData));
(**d).width = width;
(**d).height = height;
(**d).image_data = (unsigned char*)malloc(width*height*sizeof(char));
for(int i = 0; i<width*height; i++){
(**d).image_data[i] = temp[2*i];
}
free(temp); // LoadBitmapFile's buffer was previously leaked
}
// Number of bytes needed to serialise a rawImageData holding a
// width x height single-channel image: the two int dimension fields plus
// one byte per pixel.
size_t rawImageDataSize(int width, int height){
return 2 * sizeof(int) + width * height * sizeof(unsigned char);
}
#define p3idx(i,dim) 3*i+dim
/*
 * Back-projects a depth image into a 3D point cloud. Each pixel's depth
 * value is scaled along the ray through that pixel from a pinhole camera at
 * the origin with its image plane at z = 0.01 m (hence the 0.0001f = 0.01^2
 * term and the 0.01f*scale z coordinate). One point is produced per pixel.
 *
 * Fixes: the column index was computed as i - row*(*height), which is only
 * correct for square images (row-major layout has *width pixels per row);
 * outputs are written through the point3D members instead of the idx()
 * macro, which mis-addresses components with dim > 0; float math uses
 * sqrtf/float literals; the unused `remainder` local was removed.
 */
__global__ void get3DPoints(int* width, int* height, unsigned char* image_data, point3D* p){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
int dims_prod = (*width)*(*height);
int elems_per_thread = dims_prod/(blockDim.x*gridDim.x);
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
for(int i = base; i<end; i+=1){
//use reverse raytracing to figure out the location of the point in 3D space
unsigned char h = image_data[i];
int x = i/(*width); // row
int y = i-x*(*width); // column (was i - x*(*height): wrong unless square)
float x_meters = x * CAMERA_PIXEL_SCALE;
float y_meters = y * CAMERA_PIXEL_SCALE;
// distance from the pinhole to the pixel on the 1 cm image plane
float h_camera = sqrtf(0.0001f+x_meters*x_meters+y_meters*y_meters);
float h_real = h * CAMERA_DEPTH_UNIT;
float scale = h_real/h_camera;
p[i].x = x_meters * scale;
p[i].y = y_meters * scale;
p[i].z = 0.01f * scale;
}
}
/*
 * Applies the 4x4 homogeneous transform m (row-major float[16]) to each of
 * the `length` points of a, writing results to b. a == b is safe: all
 * components of a point are read before any are written. The remainder when
 * `length` is not divisible by the thread count is spread over the first
 * threads.
 *
 * Fixes: the global thread id was threadIdx.x+blockIdx.x+blockDim.x, so
 * threads aliased each other's ranges and most points were never
 * transformed (get3DPoints already used the correct idiom); component
 * access goes through the point3D members instead of the mis-addressing
 * idx() macro.
 */
__global__ void doBatchTransformation(point3D* a, int length, float* m, point3D* b){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
int elems_per_thread =length/(blockDim.x*gridDim.x);
int base = tid*elems_per_thread;
int remainder = length%(blockDim.x*gridDim.x);
// the first `remainder` threads each take one extra element
if(tid<remainder){
elems_per_thread++;
base+=tid;
}
else{
base+=remainder;
}
int end = base+elems_per_thread;
for(int i = base; i<end; i++){
float x = a[i].x;
float y = a[i].y;
float z = a[i].z;
b[i].x = x*idxf(m,0,0,4)+y*idxf(m,0,1,4)+z*idxf(m,0,2,4)+idxf(m,0,3,4);
b[i].y = x*idxf(m,1,0,4)+y*idxf(m,1,1,4)+z*idxf(m,1,2,4)+idxf(m,1,3,4);
b[i].z = x*idxf(m,2,0,4)+y*idxf(m,2,1,4)+z*idxf(m,2,2,4)+idxf(m,2,3,4);
// homogeneous divide (w == 1 for rigid transforms, kept for generality)
float w = x*idxf(m,3,0,4)+y*idxf(m,3,1,4)+z*idxf(m,3,2,4)+idxf(m,3,3,4);
b[i].x /= w;
b[i].y /= w;
b[i].z /= w;
}
}
/*
 * Intended to compute, for each block, the mean (x,y,z) of that block's
 * segment of `a` (segment sizes in length[]) into sum[blockIdx.x].
 *
 * NOTE(review): as written this does not partition work per thread -- every
 * thread of a block sums the same first length/gridDim elements, and only
 * thread 0 publishes; the base-offset loop adds length[blockIdx.x] each
 * iteration instead of length[i]; `tid` is computed with +blockDim.x and is
 * then unused; the result is divided by the full segment length although
 * only a fraction of it was summed. Rework rather than patch.
 */
__global__ void getMean(point3D* a, int* length, float* sum){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int elems_per_thread = (length[blockIdx.x])/(gridDim.x);
if(elems_per_thread < 2){
return;
}
int base = 0;
// NOTE(review): length[blockIdx.x] should presumably be length[i]
for(int i = 0; i<blockIdx.x; i++){
base+=length[blockIdx.x];
}
int end = base+elems_per_thread;
float local_sum[3] = {0,0,0};
for(int i = base; i<end; i++){
for(int dim = 0; dim<3; dim++){
local_sum[dim] += idx(a,i,dim);
}
}
// thread 0 publishes its partial sum as the segment mean
if(threadIdx.x==0){
idx(sum,blockIdx.x,0) = 0;
idx(sum,blockIdx.x,1) = 0;
idx(sum,blockIdx.x,2) = 0;
for(int dim = 0; dim<3; dim++){
idx(sum,blockIdx.x,dim) += local_sum[dim];
}
for(int dim = 0; dim<3; dim++){
idx(sum,blockIdx.x,dim) /= length[blockIdx.x];
}
}
}
/*
 * One level of kd-tree construction: each block partitions its segment of
 * `a` around *med along dimension *dim into `b` (greater side packed from
 * the front, rest from the back), recording the split point in div[] and
 * the two child segment sizes back into length[].
 *
 * NOTE(review): there is no __syncthreads() after thread 0 allocates and
 * initialises the shared r/l counters (other threads may use stale
 * pointers) nor before thread 0 frees them; every thread iterates the same
 * [base,end) range, so each element is emitted blockDim.x times; only *med
 * is read although the caller passes a per-block means array; writes to
 * length[2*blockIdx.x(+1)] can alias segments other blocks are still
 * reading. Rework rather than patch.
 */
__global__ void doPartitionStep(point3D* a, int* length, int* dim, float* med, point3D* b, int *div){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int elems_per_thread = (int) (length[blockIdx.x])/(gridDim.x);
// segments too small to split are marked terminal
if(elems_per_thread<2){
div[blockIdx.x] = -1;
length[2*blockIdx.x] = 0;
length[2*blockIdx.x+1] = 0;
return;
}
int base = 0;
for(int i = 0; i<blockIdx.x; i++){
base+=length[i];
}
int end = base+elems_per_thread;
//__shared__ int* lsizes;
//__shared__ int* rsizes;
unsigned int curr;
__shared__ unsigned int* r;
__shared__ unsigned int* l;
if(threadIdx.x==0){
r = (unsigned int*)malloc(sizeof(int));
l = (unsigned int*)malloc(sizeof(int));
*r = end-1;
*l = base;
}
// two-sided packing: greater-than-median from the front, rest from the back
for(int i = base; i<end; i++){
if(idx(a,i,*dim)>*med){
curr = atomicAdd(l,1U);
}
else{
curr = atomicSub(r,1U);
}
b[curr] = a[i];
}
if(threadIdx.x==0){
div[blockIdx.x] = *r;
length[blockIdx.x*2+1] = *l;
length[blockIdx.x*2] = length[blockIdx.x]-(*l);
free(r);
free(l);
}
}
/*
 * Host-side pass that inserts the points of each remaining leaf bucket
 * (bucket sizes in lengths[0..llength)) into the kd-tree rooted at `root`
 * by the standard alternating-dimension descent. `tree` holds the nodes
 * laid out bucket after bucket.
 *
 * NOTE(review): the attachment looks crossed -- after descending left
 * (coordinate greater than the point's) the node is stored in curr->right,
 * and vice versa; also the inserted nodes' own parent/left/right fields are
 * never initialised here. Confirm against the traversal in doKNN before
 * changing.
 */
void finishKDTree( int* lengths, int llength, kdNode* root, kdNode* tree){
int loc = 0;
for(int i = 0; i<llength; i++){
// buckets of 0 or 1 points need no further insertion work
if(lengths[i]<=1){
loc+=lengths[i];
continue;
}
for(int j = 0; j<lengths[i]; j++){
kdNode* point = tree+loc+j;
kdNode* curr = root;
int dim = 0;
while(true){
if(data(curr,dim)>data(point,dim)){
if(curr->left!=NULL){
curr = curr->left;
}
else{
curr->right = point;
break;
}
}
else{
if(curr->right!=NULL){
curr = curr->right;
}
else{
curr->left = point;
break;
}
}
// cycle through x, y, z as we descend
dim++;
dim%=3;
}
}
loc+=lengths[i];
}
}
/*
 * Builds a kd-tree over the device point array `a` by repeated mean/median
 * partitioning on the GPU, then reconstructs the node links on the host and
 * finishes the leaf buckets with finishKDTree. *head receives the root.
 *
 * NOTE(review): several apparent defects, flagged for rework: the input `a`
 * is never copied into the working buffer `curr`, so the partition kernels
 * read uninitialised device memory; the output parameter `b` is never
 * written (the device->host copy into host_tree is commented out, and
 * nothing is uploaded back); host_tree / host_tree_struct / lengths are
 * leaked; `p` may be used before first assignment semantics are clear; the
 * host link loop indexes host_tree_struct at 2*(i+1)+1, which exceeds
 * `length` for large i; `tree_ptr` is unused.
 */
void buildKDTree(point3D* a, int length, kdNode* b, int nthreads, kdNode* head){
int blocks = 1;
int max_blocks = length/nthreads; //number of blocks to use before we have a thread for every block
point3D* curr;
point3D* next;
cudaMalloc((void**)&curr,length*sizeof(float)*3);
cudaMalloc((void**)&next,length*sizeof(float)*3);
point3D* temp;
int* tree_struct;
cudaMalloc((void**)&tree_struct,length*sizeof(int));
float* means;
cudaMalloc((void**)&means,max_blocks*sizeof(float));
int* dev_length;
cudaMalloc((void**)&dev_length,sizeof(int)*max_blocks);
cudaMemcpy(dev_length, &length, 1*sizeof(int), cudaMemcpyHostToDevice);
int dim = 0;
int tree_ptr;
int* dev_dim;
cudaMalloc((void**)&dev_dim, sizeof(int));
// each pass doubles the number of segments: mean, then partition about it
for(;blocks<max_blocks;blocks++){
cudaThreadSynchronize();
getMean<<<blocks,nthreads>>>(curr,dev_length,means);
cudaMemcpy(dev_dim,&dim,1*sizeof(int),cudaMemcpyHostToDevice);
cudaThreadSynchronize();
doPartitionStep<<<blocks,nthreads>>>(curr,dev_length,dev_dim,means,next,(tree_struct));
dim++;
dim%=3;
// ping-pong the working buffers
temp = curr;
curr = next;
next = temp;
}
kdNode* host_tree;
host_tree = (kdNode*) malloc(length*sizeof(kdNode));
// cudaMemcpy(host_tree, b, length,cudaMemcpyDeviceToHost);
int* host_tree_struct = (int*) malloc(length*sizeof(int));
cudaMemcpy(host_tree_struct, tree_struct, length*sizeof(int), cudaMemcpyDeviceToHost);
int c;
int p;
int r;
int l;
// rebuild parent/child links from the implicit heap-style tree_struct layout
for(int i = 0; i<length; i++){
c = host_tree_struct[i];
if(c<0||c>length)continue;
l = host_tree_struct[2*(i+1)];
r = host_tree_struct[2*(i+1)+1];
host_tree[c].right = host_tree+r;
host_tree[c].left = host_tree+l;
if(i==0){
host_tree[c].parent = NULL;
*head = host_tree[c];
}
else{
host_tree[c].parent = host_tree+p;
}
p = (i+1)/2-1;
}
int* lengths = (int*)malloc(max_blocks*sizeof(int));
cudaMemcpy(lengths, dev_length, max_blocks*sizeof(int), cudaMemcpyDeviceToHost);
finishKDTree(lengths,max_blocks,host_tree,head);
cudaFree(curr);
cudaFree(next);
cudaFree(tree_struct);
cudaFree(means);
cudaFree(dev_length);
cudaFree(dev_dim);
}
#define k 4
#define dist(f1, f2) ((f1.x-f2.x)*(f1.x-f2.x)+(f1.y-f2.y)*(f1.y-f2.y)+(f1.z-f2.z)*(f1.z-f2.z))
/*
 * For each node of kdt, attempts a k-nearest-neighbour search (k == 4, see
 * the macro above) in the tree rooted at ktRoot: greedy descent to a leaf,
 * then a backtracking walk driven by `dir` (which child we came from),
 * replacing the current worst candidate when a closer node is found. The k
 * candidates for point i are written to ktg[i].
 *
 * NOTE(review): flagged defects, left untouched pending a rework: the
 * global thread id adds blockDim.x instead of multiplying; dists[0] is
 * never initialised and dists[mi] is never updated when buffer[mi] is
 * replaced, so the "worst candidate" bookkeeping is ineffective; `dim--`
 * followed by `dim%=3` can leave dim negative (out-of-bounds coordinate
 * access via data()); termination of the backtracking loop is not obvious.
 */
__global__ void doKNN(kdNode* kdt, kdNode* ktRoot, int* size, kdNode** ktg){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int elems_per_thread = (*size)/(blockDim.x*gridDim.x);
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
kdNode buffer[k];
float dists[k];
int head = 0;
kdNode* curr;
kdNode* point;
int dim;
for(int i = base; i<end; i++){
curr = ktRoot;
point = kdt+i;
dim = 0;
// greedy descent to the leaf region containing `point`
while(true){
if(data(curr,dim)>data(point,dim)){
if(curr->left!=NULL){
curr = curr->left;
}
else{break;}
}
else{
if(curr->right!=NULL){
curr = curr->right;
}
else{break;}
}
dim++;
dim%=3;
}
buffer[head] = *curr;
for(int j = 1; j<k; j++){
dists[j] = INFINITY;
}
int mi = 1;
int dir = 0; //top right, top left, bottom left, bottom right
// backtracking walk; dir encodes how we arrived at `curr`
while(true){
if(dist((*curr),(*point))<dists[mi]){
buffer[mi] = *curr;
if(dir==2){
dir = 0;
curr = curr->left;
}
else if(dir==3){
dir = 1;
curr = curr->right;
}
else{
if(data(curr,dim)>data(point,dim)){
if(curr->left!=NULL){
dir = 0;
curr = curr->left;
dim++;
dim%=3;
}
else{
dir += 2;
curr = curr->parent;
dim--;
dim%=3;
}
}
else{
if(curr->right!=NULL){
dir = 1;
curr = curr->right;
dim++;
dim%=3;
}
else{
dir+=2;
curr = curr->parent;
dim--;
dim%=3;
}
}
}
}
else if( curr->parent != NULL){
if(curr->parent->left==curr){
dir = 2;
}
else{
dir = 3;
}
curr = curr->parent;
dim--;
dim%=3;
}
else{break;}
// pick the slot holding the current worst candidate
for(int j = 0; j<k; j++){
if(dists[j]>dists[mi]){
mi = j;
}
}
}
for(int j = 0; j<k; j++){
ktg[i][j] = buffer[j];
}
}
}
/*
 * Accumulates the centre of mass of pts[0..*length) into com[0..2].
 * Precondition: the caller must zero com before the launch -- the kernel
 * only atomicAdd's into it, which makes the grid-wide sum correct without a
 * grid barrier.
 *
 * Fixes vs. previous revision:
 *  - tid used blockIdx.x+blockDim.x instead of blockIdx.x*blockDim.x, so
 *    thread ranges aliased;
 *  - every thread re-assigned the __shared__ block_sum pointer from its own
 *    malloc (race + leak), and each thread of block 0 re-added the block
 *    sum into com;
 *  - dividing by the thread count produced a mean over threads, not points;
 *    each thread now contributes local_sum/(*length) so the grand total is
 *    the true mean;
 *  - the tail when *length is not divisible by the thread count was
 *    silently dropped; the last thread now covers it;
 *  - components are read via the point3D members instead of the
 *    mis-addressing idx() macro.
 */
__global__ void getCenterOfMass(point3D* pts, int* length, float* com){
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int threads = blockDim.x*gridDim.x;
int elems_per_thread = (*length)/threads;
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
if(tid == threads-1) end = *length; // pick up the remainder
float local_sum[3] = {0.0f, 0.0f, 0.0f};
for(int i = base; i<end; i++){
local_sum[0] += pts[i].x;
local_sum[1] += pts[i].y;
local_sum[2] += pts[i].z;
}
// contribute this thread's share of the mean directly
float inv_n = 1.0f/(float)(*length);
atomicAdd(com+0, local_sum[0]*inv_n);
atomicAdd(com+1, local_sum[1]*inv_n);
atomicAdd(com+2, local_sum[2]*inv_n);
}
#define dot3(a,b) (a[0]*b[0]+a[1]*b[1]+a[2]*b[2])
static inline void getQRDecomposition(float A[3][3], float Q[3][3]){
float u[3][3];
float sum;
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
u[i][j] = A[i][j];
}
}
float dpb;
for(int i = 0; i<3; i++){
sum = 0;
for(int j = 0; j<3; j++){
sum += u[i][j]*u[i][j];
}
for(int j = 0; j<3; j++){
Q[i][j] = u[i][j]/sqrt(sum);
}
for(int j = 3; j>3-i; j--){
dpb = dot3(A[i], Q[j]);
for(int jj = 0; jj<3; jj++){
u[i][jj] -= Q[j][jj]*dpb;
}
}
}
}
/*
 * RQ-style orthogonal factor: reflect-and-transpose A, orthonormalise with
 * getQRDecomposition, then reflect-and-transpose the result back into Q.
 *
 * Fix: the reflection used indices 3-i / 3-j, which run 1..3 and write/read
 * one past the end of the 3x3 arrays (stack corruption); the correct
 * reversal of a 0-based 3-element axis is 2-i / 2-j.
 */
static inline void getRQDecomposition(float A[3][3], float Q[3][3]){
float B[3][3];
float S[3][3];
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
B[2-j][2-i] = A[i][j];
}
}
getQRDecomposition(B,S);
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
Q[2-j][2-i] = S[i][j];
}
}
}
/*
 * Intended to accumulate the 3x3 cross-covariance of the correspondence
 * pairs (x[i], p[i]) into W (sum over i of outer products), via per-thread
 * partials reduced in a scratch superArray and a final atomicAdd.
 *
 * NOTE(review): flagged defects, left untouched pending a rework: the
 * global thread id adds blockDim.x instead of multiplying; *every* thread
 * calls allocSuperArray and overwrites the __shared__ sum_t pointer (race,
 * and all but one allocation -- plus every data buffer -- leak, since
 * free(sum_t) does not free sum_t->data); the inner accumulation uses `=`
 * instead of `+=`, so only the last pair's outer product survives; the
 * scratch is sized by gridDim.x but indexed by threadIdx.x; there is no
 * __syncthreads() between reduction steps; and after the partial reduction
 * every thread still adds its own slot into W, double-counting.
 */
__global__ void consolodate(point3D* x, point3D* p, int* length, float* W){
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int threads =(blockDim.x*gridDim.x);
int elems_per_thread = (*length)/threads;
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
__shared__ superArray* sum_t;
//change this to proper alloc
sum_t = allocSuperArray(gridDim.x,3,3);
for(int i = base; i<end; i++){
for(int m = 0; m<3; m++){
for(int n = 0; n<3; n++){
indexSuperArray(sum_t,threadIdx.x,m,n) = idx(x,i,m)*idx(p,i,n);
}
}
}
// tree-style pairwise reduction over thread slots (no barriers -- racy)
for(int d = 1; d<threads; d*=2){
if(threadIdx.x%d==0&&threadIdx.x!=0){
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
indexSuperArray(sum_t,threadIdx.x-d,i,j)+=indexSuperArray(sum_t,threadIdx.x,i,j);
}
}
}
}
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
atomicAdd(&idx(W,i,j),indexSuperArray(sum_t,threadIdx.x,i,j));
}
}
free(sum_t);
}
__device__ point3D* p2blockbuffer;
__device__ point3D* f2blockbuffer;
__device__ point3D* distsblockbuffer;
#undef k
/*
 * For every point of p, walks the kd-tree `pt` (rooted at pthead) with a
 * single greedy descent (no backtracking) to find an approximate nearest
 * neighbour, then attempts a distance-based reduction across threads and
 * blocks so that *newlength correspondence pairs end up in p2/f2 for the
 * ICP step.
 *
 * NOTE(review): several apparent defects are flagged but left untouched
 * because the kernel needs a rework rather than a patch: the global thread
 * id adds blockDim.x instead of multiplying by it; the __shared__ staging
 * pointers are written by thread 0 and consumed by all threads with no
 * __syncthreads() anywhere; `dists` (floats) is copied with point3D-sized
 * strides; f2buffer is read back but never written; and both branches of
 * each distance comparison copy from the same source.
 */
__global__ void selectSubset(kdNode* pt, kdNode* pthead, point3D* p, int* plength, int* flength, int* newlength, point3D* p2, point3D* f2){
// NOTE(review): should almost certainly be threadIdx.x + blockIdx.x*blockDim.x
int tid = threadIdx.x+blockIdx.x+blockDim.x;
int threads =(blockDim.x*gridDim.x);
int elems_per_thread = (*plength)/threads;
int base = tid*elems_per_thread;
int end = base+elems_per_thread;
// per-thread candidate lists: nearest tree node, original point, squared distance
point3D* buffer;
buffer = (point3D*) malloc(elems_per_thread*sizeof(point3D));
point3D* buffer2;
buffer2 = (point3D*) malloc(elems_per_thread*sizeof(point3D));
float* dists;
dists = (float*)malloc(elems_per_thread*sizeof(float));
for(int i = base; i<end; i++){
kdNode curr = *pthead;
int dim = 0;
//get closest point in pt
int stop = false;
while(!stop){
if(data((&curr),dim)>idx(p,i,dim)){
if(curr.left!=NULL){
curr = *curr.left;
}
else{stop = true;}
}
else{
if(curr.right!=NULL){
curr = *curr.right;
}
else{stop = true;}
}
dim++;
dim%=3;
}
// copy the leaf's (x,y,z) as the matched point
memcpy((float*)(buffer+i-base), &curr.x,3*sizeof(float));
buffer2[i-base] = p[i];
dists[i-base] = dist(curr,p[i]);
}
__shared__ point3D* p2buffer;
__shared__ point3D* f2buffer;
__shared__ point3D* distsbuffer;
point3D* temp;
point3D* temp2;
temp = (point3D*)malloc( (*newlength)*sizeof(point3D));
temp2 = (point3D*)malloc( (*newlength)*sizeof(point3D));
// NOTE(review): no barrier after this, so other threads may use the
// pointers before thread 0 has assigned them
if(threadIdx.x==0){
p2buffer = (point3D*)malloc(sizeof(point3D*)*gridDim.x);
f2buffer = (point3D*)malloc(sizeof(point3D*)*gridDim.x);
distsbuffer = (point3D*) malloc(gridDim.x*elems_per_thread*sizeof(point3D));
}
memcpy(distsbuffer+threadIdx.x, dists, (*newlength)*sizeof(point3D));
memcpy(temp, buffer, (*newlength)*sizeof(point3D));
memcpy(temp+(*newlength), buffer2, (*newlength)*sizeof(point3D));
memcpy(p2buffer+threadIdx.x ,temp, (*newlength)*2*sizeof(point3D));
//reduce by threads
for(int d = 1; d<threads; d*=2){
if(threadIdx.x%d==0&&threadIdx.x!=0){
int j = 0;
int k = 0;
for(int i = 0; i<(*newlength); i++){
// NOTE(review): both branches copy the same source, and j/k never advance
if(idx(distsbuffer,threadIdx.x,j)>idx(distsbuffer,threadIdx.x-d,k)){
memcpy(idxa(p2buffer,threadIdx.x,i), idxa(p2buffer,threadIdx.x,k), 3*sizeof(float));
}
else{
memcpy(idxa(p2buffer,threadIdx.x,i), idxa(p2buffer,threadIdx.x,k), 3*sizeof(float));
}
}
}
}
free(buffer);
if(threadIdx.x==0){
p2blockbuffer = (point3D*)malloc(sizeof(point3D*)*blockDim.x);
f2blockbuffer = (point3D*)malloc(sizeof(point3D*)*blockDim.x);
distsblockbuffer = (point3D*)malloc(blockDim.x*(*newlength)*sizeof(point3D));
memcpy(distsblockbuffer+blockIdx.x,distsbuffer,(*newlength)*sizeof(point3D));
memcpy(p2blockbuffer+blockIdx.x,p2buffer,(*newlength)*sizeof(point3D));
memcpy(f2blockbuffer+blockIdx.x,f2buffer,(*newlength)*sizeof(point3D));
}
// second-stage reduction across blocks (same caveats as above)
for(int d = 1; d<threads; d*=2){
if(blockIdx.x%d==0&&blockIdx.x!=0){
int j = 0;
int k = 0;
for(int i = 0; i<(*newlength); i++){
if(idx(distsblockbuffer,blockIdx.x,j)>idx(distsblockbuffer,blockIdx.x-d,k)){
memcpy(idxa(p2blockbuffer,blockIdx.x,i), idxa(p2blockbuffer,blockIdx.x,k), 3*sizeof(float));
memcpy(idxa(f2blockbuffer,blockIdx.x,i), idxa(f2blockbuffer,blockIdx.x,k), 3*sizeof(float));
}
else{
memcpy(idxa(p2blockbuffer,blockIdx.x,i), idxa(p2blockbuffer,blockIdx.x,k), 3*sizeof(float));
memcpy(idxa(f2blockbuffer,blockIdx.x,i), idxa(f2blockbuffer,blockIdx.x,k), 3*sizeof(float));
}
}
}
}
// block 0 publishes the (supposedly) reduced result
if(blockIdx.x == 0){
memcpy(p2, p2blockbuffer, (*newlength)*sizeof(point3D));
memcpy(f2, f2blockbuffer, (*newlength)*sizeof(point3D));
}
free(temp);
free(temp2);
if(threadIdx.x==0){
free(p2buffer);
free(f2buffer);
free(distsbuffer);
}
if(blockIdx.x==0){
free(p2blockbuffer);
free(f2blockbuffer);
free(distsblockbuffer);
}
free(buffer2);
free(dists);
}
#define fl2size 3*sizeof(float*)+3*3*sizeof(float)
/*
 * Runs 100 ICP iterations registering the moving cloud f against the fixed
 * cloud p (kd-trees ft/pt, ft rooted at ftHead). Each iteration:
 *   1. selectSubset picks *p2length correspondence pairs into (f2, p2);
 *   2. consolodate accumulates the 3x3 cross-covariance W on the device;
 *   3. the host factors W and forms an orthonormal rotation R = U*V;
 *   4. a 4x4 rigid transform T (rotation + centre-of-mass translation) is
 *      uploaded and applied to f in place.
 *
 * Fixed in this revision:
 *  - flength was copied into plength_dev (clobbering it), leaving
 *    flength_dev uninitialised;
 *  - W was read back with fl2size (60 bytes) from a 36-byte device buffer;
 *  - T was "uploaded" with cudaMemcpy(&T, TDev, ..., DeviceToHost), i.e. the
 *    wrong direction with swapped arguments and the wrong size;
 *  - WDev/com are built up by the kernels with atomicAdd, so they are now
 *    zeroed before each pass;
 *  - com_host was leaked;
 *  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
 */
void doICP(point3D* f, point3D* p, kdNode* ft, kdNode* pt, kdNode* ftHead, int plength, int flength){
float* WDev;
float* TDev;
cudaMalloc((void**)&WDev, 9*sizeof(float));
cudaMalloc((void**)&TDev, 16*sizeof(float));
point3D* p2;
cudaMalloc((void**)&p2, plength*sizeof(point3D));
point3D* f2;
cudaMalloc((void**)&f2, flength*sizeof(point3D));
float* com;
float* com_host;
cudaMalloc((void**)&com,sizeof(float)*3);
com_host = (float*)malloc(sizeof(float)*3);
float U[3][3];
float V[3][3];
float W[3][3];
float T[4][4];
float R[3][3];
int* p2length;
int p2length_host = flength/8; // keep the closest 1/8th of the correspondences
cudaMalloc((void**) &p2length, sizeof(int));
cudaMemcpy(p2length,&p2length_host, 1*sizeof(int), cudaMemcpyHostToDevice);
int* plength_dev;
cudaMalloc((void**) &plength_dev, sizeof(int));
cudaMemcpy(plength_dev,&plength, 1*sizeof(int), cudaMemcpyHostToDevice);
int* flength_dev;
cudaMalloc((void**) &flength_dev, sizeof(int));
// was: copied flength into plength_dev, clobbering it and leaving
// flength_dev uninitialised
cudaMemcpy(flength_dev,&flength, 1*sizeof(int), cudaMemcpyHostToDevice);
for(int n = 0; n<100; n++){
// the accumulation kernels add into these with atomicAdd: start from zero
cudaMemset(WDev, 0, 9*sizeof(float));
cudaMemset(com, 0, 3*sizeof(float));
cudaDeviceSynchronize();
selectSubset<<<128,128>>>(ft,ftHead,p,plength_dev,flength_dev,p2length,f2,p2);
cudaDeviceSynchronize();
consolodate<<<128,128>>>(f2,p2,p2length,WDev);
cudaDeviceSynchronize();
// W is a 3x3 of floats: 9 floats (fl2size over-read the buffer)
cudaMemcpy(W, WDev, 9*sizeof(float), cudaMemcpyDeviceToHost);
// orthonormal factors of the cross-covariance; R = U*V approximates the
// aligning rotation
getQRDecomposition(W,U);
getRQDecomposition(W,V);
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
R[i][j] = 0;
}
}
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
for(int k = 0; k<3; k++){
R[i][k] += U[i][j]*V[j][k];
}
}
}
cudaDeviceSynchronize();
getCenterOfMass<<<128,128>>>(f,flength_dev,com);
cudaDeviceSynchronize();
cudaMemcpy(com_host, com, 3*sizeof(float),cudaMemcpyDeviceToHost);
/*
** [R, R, R, c]
** [R, R, R, c]
** [R, R, R, c]
** [0, 0, 0, 1]
*/
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++){
T[i][j] = R[i][j];
}
}
for(int i = 0; i<3; i++){
T[i][3] = com_host[i];
}
for(int i = 0; i<3; i++){
T[3][i] = 0;
}
T[3][3] = 1;
// upload T to the device (previous code copied device->host with the
// arguments reversed and an incorrect byte count)
cudaMemcpy(TDev, T, 16*sizeof(float), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
doBatchTransformation<<<128,128>>>(f, flength, TDev, f);
}
cudaDeviceSynchronize();
free(com_host); // was leaked
cudaFree(WDev);
cudaFree(TDev);
cudaFree(p2);
cudaFree(f2);
cudaFree(com);
cudaFree(p2length);
cudaFree(plength_dev);
cudaFree(flength_dev);
}
/*
 * Pipeline entry point: loads five depth snapshots, back-projects each into
 * a 3D cloud on the device, registers each new cloud onto the running cloud
 * with ICP, and writes the merged cloud to result.bin.
 *
 * Fixed in this revision: the loop always reloaded filesToLoad[0] instead of
 * filesToLoad[i]; result.bin was opened unchecked, in text mode, and never
 * closed; host and device allocations were leaked on exit.
 * NOTE(review): all snapshots are assumed to have the dimensions of the
 * first one (`length` is computed once) -- confirm against the inputs.
 */
int main( void ) {
char* filesToLoad[5] = {"snapshot0.bmp","snapshot1.bmp","snapshot2.bmp","snapshot3.bmp","snapshot4.bmp"};
int numfiles = 5;
rawImageData* img;
int* width_dev;
int* height_dev;
unsigned char* data_dev;
point3D* p1_dev;
point3D* p2_dev;
point3D* temp;
kdNode* pt1;
kdNode* pt2;
loadImage(filesToLoad[0], &img);
int length = img->width*img->height;
int p1length = length;
cudaMalloc((void**)&width_dev, sizeof(int));
cudaMalloc((void**)&height_dev, sizeof(int));
cudaMalloc((void**)&data_dev, length*sizeof(char));
cudaMalloc((void**)&p1_dev, length*sizeof(point3D));
cudaMalloc((void**)&p2_dev, length*sizeof(point3D));
cudaMalloc((void**)&pt1, length*sizeof(kdNode));
cudaMalloc((void**)&pt2, length*sizeof(kdNode));
cudaMemcpy(data_dev,img->image_data,length*sizeof(char),cudaMemcpyHostToDevice);
cudaMemcpy(width_dev,&img->width,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(height_dev,&img->height,sizeof(int),cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
get3DPoints<<<128,128>>>(width_dev,height_dev,data_dev, p1_dev);
kdNode* head = (kdNode*)malloc(sizeof(kdNode));
kdNode* trash = (kdNode*)malloc(sizeof(kdNode));
buildKDTree(p1_dev, length, pt1, 300, head);
for(int i = 1; i<numfiles; i++){
free(img->image_data); // loadImage allocates a fresh buffer per call
free(img);
// was filesToLoad[0]: every iteration re-registered the first snapshot
loadImage(filesToLoad[i], &img);
cudaMemcpy(data_dev,img->image_data,length*sizeof(char),cudaMemcpyHostToDevice);
cudaMemcpy(width_dev,&img->width,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(height_dev,&img->height,sizeof(int),cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
get3DPoints<<<128,128>>>(width_dev,height_dev,data_dev, p2_dev);
buildKDTree(p2_dev, length, pt2, 300, trash);
doICP(p1_dev,p2_dev,pt1,pt2,head,p1length,length);
// append the newly-registered cloud to the accumulated cloud
cudaMalloc((void**)&temp, (p1length+length)*sizeof(point3D));
cudaMemcpy(temp,p1_dev, p1length*sizeof(point3D),cudaMemcpyDeviceToDevice);
cudaMemcpy(temp+p1length,p2_dev,length*sizeof(point3D), cudaMemcpyDeviceToDevice);
p1length += length;
cudaDeviceSynchronize();
cudaFree(p1_dev);
p1_dev = temp;
}
//read the data back and write it to a file
point3D* p1;
p1 = (point3D*)malloc(p1length*sizeof(point3D));
cudaMemcpy(p1,p1_dev,p1length*sizeof(point3D),cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
FILE* fp;
fp = fopen("result.bin","wb"); // binary data: was text mode "w"
if(fp != NULL){
fwrite(p1,sizeof(point3D),p1length,fp);
fclose(fp); // was never closed, so the tail of the file could be lost
}
printf("%d",p1length);
free(p1);
free(img->image_data);
free(img);
free(head);
free(trash);
cudaFree(width_dev);
cudaFree(height_dev);
cudaFree(data_dev);
cudaFree(p1_dev);
cudaFree(p2_dev);
cudaFree(pt1);
cudaFree(pt2);
return 0;
}
|
1c7f28b1cbc792c055a2a29098e7c6e71948a335.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
// includes, kernels
#include <scan_largearray_kernel.cu>
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 3
#define BLOCK_SIZE 1024
// if not divisible by 2, add another block to compute the remainder of the input
#define NUM_BLOCKS ((DEFAULT_NUM_ELEMENTS % 2 == 0) ? (DEFAULT_NUM_ELEMENTS / BLOCK_SIZE) : (DEFAULT_NUM_ELEMENTS / BLOCK_SIZE + 1))
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name, int size);
void WriteFile(float*, char* file_name, int size);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: all work is delegated to runTest().
int main(int argc, char** argv)
{
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
//! Runs the large-array scan (prefix sum) test: computes a CPU reference,
//! runs prescanArray on the device, times both, and compares the results.
//! Argument handling (argc-1):
//!   0 args: random input, compare against host result.
//!   1 arg : random input, write scan result to argv[1].
//!   2 args: argv[1] = size file, generate random input, write it to argv[2].
//!   3 args: argv[1] = size file, read input from argv[2], write scan
//!           output to argv[3].
void
runTest( int argc, char** argv)
{
    int errorM = 0;
    float device_time;
    float host_time;
    int* size = NULL;           // array length read from the parameter file (allocated by cutReadFilei)
    unsigned int data2read = 1;
    int num_elements = 0;       // Must support large, non-power-of-2 arrays

    // Input buffer. Every branch of the switch below allocates it, so nothing
    // is allocated up front (the original code malloc'd an empty buffer here
    // and leaked it when the branches allocated a fresh one).
    unsigned int mem_size = 0;
    float* h_data = NULL;

    switch(argc-1)
    {
        case 2:
            // Determine size of array from the parameter file
            cutReadFilei(argv[1], &size, &data2read, true);
            if(data2read != 1){
                printf("Error reading parameter file\n");
                exit(1);
            }
            num_elements = size[0];
            // allocate host memory to store the input data
            mem_size = sizeof( float) * num_elements;
            h_data = (float*) malloc( mem_size);
            for( int i = 0; i < num_elements; ++i)
            {
                h_data[i] = (int)(rand() % MAX_RAND);
            }
            // two-argument mode only generates input data for later runs
            WriteFile(h_data, argv[2], num_elements);
            break;
        case 3: // three arguments: read size, read input, scan, write output
            cutReadFilei(argv[1], &size, &data2read, true);
            if(data2read != 1){
                printf("Error reading parameter file\n");
                exit(1);
            }
            num_elements = size[0];
            // allocate host memory to store the input data
            mem_size = sizeof( float) * num_elements;
            h_data = (float*) malloc( mem_size);
            errorM = ReadFile(h_data, argv[2], size[0]);
            if(errorM != 1)
            {
                printf("Error reading input file!\n");
                exit(1);
            }
            break;
        default: // no arguments or one argument
            // initialize the input data on the host to small random integers
            // in [0, MAX_RAND); use DEFAULT_NUM_ELEMENTS elements
            num_elements = DEFAULT_NUM_ELEMENTS;
            // allocate host memory to store the input data
            mem_size = sizeof( float) * num_elements;
            h_data = (float*) malloc( mem_size);
            for( int i = 0; i < num_elements; ++i)
            {
                // h_data[i] = 1.0f;
                h_data[i] = (int)(rand() % MAX_RAND);
            }
            break;
    }

    unsigned int timer;
    CUT_SAFE_CALL(cutCreateTimer(&timer));

    // compute reference solution on the CPU and time it
    float* reference = (float*) malloc( mem_size);
    cutStartTimer(timer);
    computeGold( reference, h_data, num_elements);
    cutStopTimer(timer);
    printf("\n\n**===-------------------------------------------------===**\n");
    printf("Processing %d elements...\n", num_elements);
    printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer));
    host_time = cutGetTimerValue(timer);
    CUT_SAFE_CALL(cutDeleteTimer(timer));

    // **===-------- Lab4: Allocate data structure here -----------===**
    // allocate device memory input and output arrays
    float* d_idata = NULL;
    float* d_odata = NULL;
    // auxiliary arrays: one partial block sum per scan block
    float* h_aux = (float*) malloc(sizeof(float)*NUM_BLOCKS);
    float* d_aux = NULL;
    float* d_auxScanned = NULL;
    // fill auxiliary array with zeros
    for (int i = 0; i < NUM_BLOCKS; i++) {
        h_aux[i] = 0;
    }
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size));
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size));
    // allocate device auxiliary arrays
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_aux, sizeof(float)*NUM_BLOCKS));
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_auxScanned, sizeof(float)*NUM_BLOCKS));
    // copy host memory to device input array
    CUDA_SAFE_CALL( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
    // initialize all the other device arrays to be safe
    CUDA_SAFE_CALL( hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice) );
    // initialize auxArray as zeros
    CUDA_SAFE_CALL( hipMemcpy( d_aux, h_aux, sizeof(float)*NUM_BLOCKS, hipMemcpyHostToDevice) );
    // initialize auxArrayScanned as zeros
    CUDA_SAFE_CALL( hipMemcpy( d_auxScanned, h_aux, sizeof(float)*NUM_BLOCKS, hipMemcpyHostToDevice) );
    // **===-----------------------------------------------------------===**

    // Run just once to remove startup overhead for more accurate performance
    // measurement
    prescanArray(d_odata, d_idata, d_aux, d_auxScanned, 16, NUM_BLOCKS);
    // Make sure the device is done before starting the timer; otherwise the
    // measurement would include time from both prescanArray calls.
    hipDeviceSynchronize();

    // Run the prescan
    CUT_SAFE_CALL(cutCreateTimer(&timer));
    cutStartTimer(timer);
    // **===-------- Lab4: Modify the body of this function -----------===**
    prescanArray(d_odata, d_idata, d_aux, d_auxScanned, num_elements, NUM_BLOCKS);
    // **===-----------------------------------------------------------===**
    CUDA_SAFE_CALL( hipDeviceSynchronize() );
    cutStopTimer(timer);
    printf("CUDA Processing time: %f (ms)\n", cutGetTimerValue(timer));
    device_time = cutGetTimerValue(timer);
    printf("Speedup: %fX\n", host_time/device_time);

    // **===-------- Lab4: Deallocate data structure here -----------===**
    // deallocBlockSums();
    // **===-----------------------------------------------------------===**

    // copy result from device to host
    CUDA_SAFE_CALL(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements,
                              hipMemcpyDeviceToHost));

    if ((argc - 1) == 3) // three arguments, write result to file
    {
        WriteFile(h_data, argv[3], num_elements);
    }
    else if ((argc - 1) == 1) // one argument, write result to file
    {
        WriteFile(h_data, argv[1], num_elements);
    }

    // Check if the result is equivalent to the expected solution
    unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
    printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");

    // cleanup memory
    cutDeleteTimer(timer);
    free( h_data);
    free( reference);
    free( h_aux);
    // NOTE(review): `size` is allocated by cutReadFilei; it is deliberately
    // not free()d here because cutil may expect its own deallocator —
    // confirm against the cutil API before adding a free.
    hipFree( d_odata);
    hipFree( d_idata);
    hipFree( d_aux);
    hipFree( d_auxScanned);  // bug fix: d_auxScanned was never freed
}
// Reads `size` floats from file_name into the caller-allocated buffer M.
// Returns 1 on success and 0 on failure, mirroring cutReadFilef's result.
int ReadFile(float* M, char* file_name, int size)
{
    unsigned int elements_read = (unsigned int) size;
    return cutReadFilef(file_name, &M, &elements_read, true) ? 1 : 0;
}
// Writes `size` floats from M to file_name; 0.0001 is the epsilon cutil
// records alongside the data.
void WriteFile(float* M, char* file_name, int size)
{
    const float eps = 0.0001f;
    cutWriteFilef(file_name, M, size, eps);
}
| 1c7f28b1cbc792c055a2a29098e7c6e71948a335.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
// includes, kernels
#include <scan_largearray_kernel.cu>
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 3
#define BLOCK_SIZE 1024
// if not divisible by 2, add another block to compute the remainder of the input
#define NUM_BLOCKS ((DEFAULT_NUM_ELEMENTS % 2 == 0) ? (DEFAULT_NUM_ELEMENTS / BLOCK_SIZE) : (DEFAULT_NUM_ELEMENTS / BLOCK_SIZE + 1))
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name, int size);
void WriteFile(float*, char* file_name, int size);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: all work is delegated to runTest().
int main(int argc, char** argv)
{
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
//! Runs the large-array scan (prefix sum) test: computes a CPU reference,
//! runs prescanArray on the device, times both, and compares the results.
//! Argument handling (argc-1):
//!   0 args: random input, compare against host result.
//!   1 arg : random input, write scan result to argv[1].
//!   2 args: argv[1] = size file, generate random input, write it to argv[2].
//!   3 args: argv[1] = size file, read input from argv[2], write scan
//!           output to argv[3].
void
runTest( int argc, char** argv)
{
    int errorM = 0;
    float device_time;
    float host_time;
    int* size = NULL;           // array length read from the parameter file (allocated by cutReadFilei)
    unsigned int data2read = 1;
    int num_elements = 0;       // Must support large, non-power-of-2 arrays

    // Input buffer. Every branch of the switch below allocates it, so nothing
    // is allocated up front (the original code malloc'd an empty buffer here
    // and leaked it when the branches allocated a fresh one).
    unsigned int mem_size = 0;
    float* h_data = NULL;

    switch(argc-1)
    {
        case 2:
            // Determine size of array from the parameter file
            cutReadFilei(argv[1], &size, &data2read, true);
            if(data2read != 1){
                printf("Error reading parameter file\n");
                exit(1);
            }
            num_elements = size[0];
            // allocate host memory to store the input data
            mem_size = sizeof( float) * num_elements;
            h_data = (float*) malloc( mem_size);
            for( int i = 0; i < num_elements; ++i)
            {
                h_data[i] = (int)(rand() % MAX_RAND);
            }
            // two-argument mode only generates input data for later runs
            WriteFile(h_data, argv[2], num_elements);
            break;
        case 3: // three arguments: read size, read input, scan, write output
            cutReadFilei(argv[1], &size, &data2read, true);
            if(data2read != 1){
                printf("Error reading parameter file\n");
                exit(1);
            }
            num_elements = size[0];
            // allocate host memory to store the input data
            mem_size = sizeof( float) * num_elements;
            h_data = (float*) malloc( mem_size);
            errorM = ReadFile(h_data, argv[2], size[0]);
            if(errorM != 1)
            {
                printf("Error reading input file!\n");
                exit(1);
            }
            break;
        default: // no arguments or one argument
            // initialize the input data on the host to small random integers
            // in [0, MAX_RAND); use DEFAULT_NUM_ELEMENTS elements
            num_elements = DEFAULT_NUM_ELEMENTS;
            // allocate host memory to store the input data
            mem_size = sizeof( float) * num_elements;
            h_data = (float*) malloc( mem_size);
            for( int i = 0; i < num_elements; ++i)
            {
                // h_data[i] = 1.0f;
                h_data[i] = (int)(rand() % MAX_RAND);
            }
            break;
    }

    unsigned int timer;
    CUT_SAFE_CALL(cutCreateTimer(&timer));

    // compute reference solution on the CPU and time it
    float* reference = (float*) malloc( mem_size);
    cutStartTimer(timer);
    computeGold( reference, h_data, num_elements);
    cutStopTimer(timer);
    printf("\n\n**===-------------------------------------------------===**\n");
    printf("Processing %d elements...\n", num_elements);
    printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer));
    host_time = cutGetTimerValue(timer);
    CUT_SAFE_CALL(cutDeleteTimer(timer));

    // **===-------- Lab4: Allocate data structure here -----------===**
    // allocate device memory input and output arrays
    float* d_idata = NULL;
    float* d_odata = NULL;
    // auxiliary arrays: one partial block sum per scan block
    float* h_aux = (float*) malloc(sizeof(float)*NUM_BLOCKS);
    float* d_aux = NULL;
    float* d_auxScanned = NULL;
    // fill auxiliary array with zeros
    for (int i = 0; i < NUM_BLOCKS; i++) {
        h_aux[i] = 0;
    }
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size));
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size));
    // allocate device auxiliary arrays
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_aux, sizeof(float)*NUM_BLOCKS));
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_auxScanned, sizeof(float)*NUM_BLOCKS));
    // copy host memory to device input array
    CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
    // initialize all the other device arrays to be safe
    CUDA_SAFE_CALL( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) );
    // initialize auxArray as zeros
    CUDA_SAFE_CALL( cudaMemcpy( d_aux, h_aux, sizeof(float)*NUM_BLOCKS, cudaMemcpyHostToDevice) );
    // initialize auxArrayScanned as zeros
    CUDA_SAFE_CALL( cudaMemcpy( d_auxScanned, h_aux, sizeof(float)*NUM_BLOCKS, cudaMemcpyHostToDevice) );
    // **===-----------------------------------------------------------===**

    // Run just once to remove startup overhead for more accurate performance
    // measurement
    prescanArray(d_odata, d_idata, d_aux, d_auxScanned, 16, NUM_BLOCKS);
    // Make sure the device is done before starting the timer; otherwise the
    // measurement would include time from both prescanArray calls.
    cudaDeviceSynchronize();

    // Run the prescan
    CUT_SAFE_CALL(cutCreateTimer(&timer));
    cutStartTimer(timer);
    // **===-------- Lab4: Modify the body of this function -----------===**
    prescanArray(d_odata, d_idata, d_aux, d_auxScanned, num_elements, NUM_BLOCKS);
    // **===-----------------------------------------------------------===**
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // modern equivalent with identical behavior here.
    CUDA_SAFE_CALL( cudaDeviceSynchronize() );
    cutStopTimer(timer);
    printf("CUDA Processing time: %f (ms)\n", cutGetTimerValue(timer));
    device_time = cutGetTimerValue(timer);
    printf("Speedup: %fX\n", host_time/device_time);

    // **===-------- Lab4: Deallocate data structure here -----------===**
    // deallocBlockSums();
    // **===-----------------------------------------------------------===**

    // copy result from device to host
    CUDA_SAFE_CALL(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements,
                               cudaMemcpyDeviceToHost));

    if ((argc - 1) == 3) // three arguments, write result to file
    {
        WriteFile(h_data, argv[3], num_elements);
    }
    else if ((argc - 1) == 1) // one argument, write result to file
    {
        WriteFile(h_data, argv[1], num_elements);
    }

    // Check if the result is equivalent to the expected solution
    unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
    printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");

    // cleanup memory
    cutDeleteTimer(timer);
    free( h_data);
    free( reference);
    free( h_aux);
    // NOTE(review): `size` is allocated by cutReadFilei; it is deliberately
    // not free()d here because cutil may expect its own deallocator —
    // confirm against the cutil API before adding a free.
    cudaFree( d_odata);
    cudaFree( d_idata);
    cudaFree( d_aux);
    cudaFree( d_auxScanned);  // bug fix: d_auxScanned was never freed
}
// Reads `size` floats from file_name into the caller-allocated buffer M.
// Returns 1 on success and 0 on failure, mirroring cutReadFilef's result.
int ReadFile(float* M, char* file_name, int size)
{
    unsigned int elements_read = (unsigned int) size;
    return cutReadFilef(file_name, &M, &elements_read, true) ? 1 : 0;
}
// Writes `size` floats from M to file_name; 0.0001 is the epsilon cutil
// records alongside the data.
void WriteFile(float* M, char* file_name, int size)
{
    const float eps = 0.0001f;
    cutWriteFilef(file_name, M, size, eps);
}
|
1902dd05fcd9a7e03748336dd4ae24e88daf93ef.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=1024 --gridDim=1024 --no-inline
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math_functions.h>
#define DIM 2 //1024 in the future
#define N 2//DIM*DIM
// Each thread stores its flat global index into both arrays.  A is indexed
// with the signed __mul24 result, B with the unsigned __umul24 result; the
// two indices are numerically identical for in-range launch configurations.
__global__ void mul24_test (int* A, int* B)
{
	const int flat = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	const unsigned int uflat = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	A[flat] = flat;
	B[uflat] = flat;
}
// Host driver: initializes two N-element arrays, runs mul24_test so each
// thread writes its flat index, copies back, and asserts element i == i.
int main (){
	int *a, *b;
	int *dev_a, *dev_b;
	int size = N*sizeof(int);

	hipMalloc((void**)&dev_a, size);
	hipMalloc((void**)&dev_b, size);
	a = (int*)malloc(size);
	b = (int*)malloc(size);

	for (int i = 0; i < N; i++)
		a[i] = 1;
	for (int i = 0; i < N; i++)
		b[i] = 1;

	hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
	hipMemcpy(dev_b,b,size, hipMemcpyHostToDevice);

	// Bug fix: the original launch <<<DIM,DIM>>> spawns DIM*DIM = 4 threads,
	// which write past the end of the N = 2 element arrays.  Launch a single
	// block of N threads instead, matching the ESBMC harness below.
	hipLaunchKernelGGL(mul24_test, dim3(1), dim3(N), 0, 0, dev_a, dev_b);
	//ESBMC_verify_kernel(mul24_test,1,N,dev_a,dev_b);

	hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
	hipMemcpy(b,dev_b,size,hipMemcpyDeviceToHost);

	// the kernel writes each thread's flat index, so element i must equal i
	for (int i = 0; i < N; i++)
		assert (a[i] == i);
	for (int i = 0; i < N; i++)
		assert (b[i] == i);

	free(a); free(b);
	hipFree(dev_a);
	hipFree(dev_b);
	return 0;
}
| 1902dd05fcd9a7e03748336dd4ae24e88daf93ef.cu | //pass
//--blockDim=1024 --gridDim=1024 --no-inline
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math_functions.h>
#define DIM 2 //1024 in the future
#define N 2//DIM*DIM
// Each thread stores its flat global index into both arrays.  A is indexed
// with the signed __mul24 result, B with the unsigned __umul24 result; the
// two indices are numerically identical for in-range launch configurations.
__global__ void mul24_test (int* A, int* B)
{
	const int flat = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	const unsigned int uflat = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	A[flat] = flat;
	B[uflat] = flat;
}
// Host driver: initializes two N-element arrays, runs mul24_test so each
// thread writes its flat index, copies back, and asserts element i == i.
int main (){
	int *a, *b;
	int *dev_a, *dev_b;
	int size = N*sizeof(int);

	cudaMalloc((void**)&dev_a, size);
	cudaMalloc((void**)&dev_b, size);
	a = (int*)malloc(size);
	b = (int*)malloc(size);

	for (int i = 0; i < N; i++)
		a[i] = 1;
	for (int i = 0; i < N; i++)
		b[i] = 1;

	cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b,b,size, cudaMemcpyHostToDevice);

	// Bug fix: the original launch <<<DIM,DIM>>> spawns DIM*DIM = 4 threads,
	// which write past the end of the N = 2 element arrays.  Launch a single
	// block of N threads instead, matching the ESBMC harness below.
	mul24_test<<<1,N>>>(dev_a,dev_b);
	//ESBMC_verify_kernel(mul24_test,1,N,dev_a,dev_b);

	cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
	cudaMemcpy(b,dev_b,size,cudaMemcpyDeviceToHost);

	// the kernel writes each thread's flat index, so element i must equal i
	for (int i = 0; i < N; i++)
		assert (a[i] == i);
	for (int i = 0; i < N; i++)
		assert (b[i] == i);

	free(a); free(b);
	cudaFree(dev_a);
	cudaFree(dev_b);
	return 0;
}
|
32441a29176536033305ac6d3c86fbb7e107ed72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// Forward nearest-neighbour (Chamfer) distance kernel.
// Inputs are b batches: xyz holds n points, xyz2 holds m points, both stored
// as interleaved [x,y,z] floats.  For every point j of xyz the kernel writes
//   result[i*n+j]   = squared distance to its closest point in xyz2
//   result_i[i*n+j] = index of that closest point within xyz2
// xyz2 is streamed through shared memory in tiles of `batch` points, and the
// search loop is manually unrolled by 4 (with a branch-free fast path for
// full tiles).  Grid mapping: blockIdx.x strides over batches, while
// (blockIdx.y, threadIdx.x) stride over the n query points.
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
	const int batch=512;
	__shared__ float buf[batch*3];  // current tile of xyz2: 512 points x 3 coords
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		for (int k2=0;k2<m;k2+=batch){
			int end_k=min(m,k2+batch)-k2;  // number of valid points in this (possibly partial) tile
			// cooperative copy of the tile into shared memory
			for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
				buf[j]=xyz2[(i*m+k2)*3+j];
			}
			__syncthreads();
			for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
				// query point handled by this thread
				float x1=xyz[(i*n+j)*3+0];
				float y1=xyz[(i*n+j)*3+1];
				float z1=xyz[(i*n+j)*3+2];
				int best_i=0;
				float best=0;
				// largest multiple of 4 <= end_k: bound for the unrolled loop
				int end_ka=end_k-(end_k&3);
				if (end_ka==batch){
					// fast path: full tile, 4x unrolled, no bounds checks
					for (int k=0;k<batch;k+=4){
						{
							float x2=buf[k*3+0]-x1;
							float y2=buf[k*3+1]-y1;
							float z2=buf[k*3+2]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							// k==0 seeds `best` for this tile scan
							if (k==0 || d<best){
								best=d;
								best_i=k+k2;
							}
						}
						{
							float x2=buf[k*3+3]-x1;
							float y2=buf[k*3+4]-y1;
							float z2=buf[k*3+5]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+1;
							}
						}
						{
							float x2=buf[k*3+6]-x1;
							float y2=buf[k*3+7]-y1;
							float z2=buf[k*3+8]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+2;
							}
						}
						{
							float x2=buf[k*3+9]-x1;
							float y2=buf[k*3+10]-y1;
							float z2=buf[k*3+11]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+3;
							}
						}
					}
				}else{
					// partial tile: same 4x unrolled body over end_ka points
					for (int k=0;k<end_ka;k+=4){
						{
							float x2=buf[k*3+0]-x1;
							float y2=buf[k*3+1]-y1;
							float z2=buf[k*3+2]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (k==0 || d<best){
								best=d;
								best_i=k+k2;
							}
						}
						{
							float x2=buf[k*3+3]-x1;
							float y2=buf[k*3+4]-y1;
							float z2=buf[k*3+5]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+1;
							}
						}
						{
							float x2=buf[k*3+6]-x1;
							float y2=buf[k*3+7]-y1;
							float z2=buf[k*3+8]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+2;
							}
						}
						{
							float x2=buf[k*3+9]-x1;
							float y2=buf[k*3+10]-y1;
							float z2=buf[k*3+11]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+3;
							}
						}
					}
				}
				// remainder loop: the final end_k % 4 points of the tile
				for (int k=end_ka;k<end_k;k++){
					float x2=buf[k*3+0]-x1;
					float y2=buf[k*3+1]-y1;
					float z2=buf[k*3+2]-z1;
					float d=x2*x2+y2*y2+z2*z2;
					if (k==0 || d<best){
						best=d;
						best_i=k+k2;
					}
				}
				// merge this tile's minimum into the global result
				// (k2==0 seeds the output for the first tile)
				if (k2==0 || result[(i*n+j)]>best){
					result[(i*n+j)]=best;
					result_i[(i*n+j)]=best_i;
				}
			}
			// all threads must finish reading buf before the next tile load
			__syncthreads();
		}
	}
}
// Host wrapper: computes nearest-neighbour distances in both directions
// (xyz -> xyz2 and xyz2 -> xyz) and blocks until both kernels complete.
void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
	const dim3 grid(32, 16, 1);
	const dim3 block(512);
	hipLaunchKernelGGL(NmDistanceKernel, grid, block, 0, 0, b, n, xyz, m, xyz2, result, result_i);
	hipLaunchKernelGGL(NmDistanceKernel, grid, block, 0, 0, b, m, xyz2, n, xyz, result2, result2_i);
	hipDeviceSynchronize();
}
// Backward pass for the forward NN-distance.  For each point j of xyz1 with
// matched neighbour j2 = idx1[i*n+j], d|p1-p2|^2/dp1 = 2*(p1-p2); the matched
// xyz2 point receives the negated contribution.  atomicAdd is required
// because several xyz1 points may share the same nearest neighbour in xyz2,
// and because the launcher invokes this kernel twice, accumulating both
// matching directions into the same gradient buffers.
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
			float x1=xyz1[(i*n+j)*3+0];
			float y1=xyz1[(i*n+j)*3+1];
			float z1=xyz1[(i*n+j)*3+2];
			int j2=idx1[i*n+j];  // index of the matched (nearest) point in xyz2
			float x2=xyz2[(i*m+j2)*3+0];
			float y2=xyz2[(i*m+j2)*3+1];
			float z2=xyz2[(i*m+j2)*3+2];
			float g=grad_dist1[i*n+j]*2;  // chain rule factor: d(d^2) = 2*(p1-p2)
			atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
			atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
			atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
			atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
			atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
			atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
		}
	}
}
// Host wrapper for the backward pass: zeroes both gradient buffers, then
// accumulates contributions from both matching directions (dist1: xyz1->xyz2
// and dist2: xyz2->xyz1) and blocks until both kernels complete.
void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){
	// Fix: byte counts were hard-coded as b*n*3*4 (sizeof(float) spelled as
	// the literal 4); use sizeof and a size_t product to avoid int overflow
	// for large b*n.
	hipMemset(grad_xyz1, 0, (size_t)b * n * 3 * sizeof(float));
	hipMemset(grad_xyz2, 0, (size_t)b * m * 3 * sizeof(float));
	hipLaunchKernelGGL(NmDistanceGradKernel, dim3(1,16,1), dim3(256), 0, 0, b, n, xyz1, m, xyz2, grad_dist1, idx1, grad_xyz1, grad_xyz2);
	hipLaunchKernelGGL(NmDistanceGradKernel, dim3(1,16,1), dim3(256), 0, 0, b, m, xyz2, n, xyz1, grad_dist2, idx2, grad_xyz2, grad_xyz1);
	hipDeviceSynchronize();
}
#endif
| 32441a29176536033305ac6d3c86fbb7e107ed72.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// Forward nearest-neighbour (Chamfer) distance kernel.
// Inputs are b batches: xyz holds n points, xyz2 holds m points, both stored
// as interleaved [x,y,z] floats.  For every point j of xyz the kernel writes
//   result[i*n+j]   = squared distance to its closest point in xyz2
//   result_i[i*n+j] = index of that closest point within xyz2
// xyz2 is streamed through shared memory in tiles of `batch` points, and the
// search loop is manually unrolled by 4 (with a branch-free fast path for
// full tiles).  Grid mapping: blockIdx.x strides over batches, while
// (blockIdx.y, threadIdx.x) stride over the n query points.
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
	const int batch=512;
	__shared__ float buf[batch*3];  // current tile of xyz2: 512 points x 3 coords
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		for (int k2=0;k2<m;k2+=batch){
			int end_k=min(m,k2+batch)-k2;  // number of valid points in this (possibly partial) tile
			// cooperative copy of the tile into shared memory
			for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
				buf[j]=xyz2[(i*m+k2)*3+j];
			}
			__syncthreads();
			for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
				// query point handled by this thread
				float x1=xyz[(i*n+j)*3+0];
				float y1=xyz[(i*n+j)*3+1];
				float z1=xyz[(i*n+j)*3+2];
				int best_i=0;
				float best=0;
				// largest multiple of 4 <= end_k: bound for the unrolled loop
				int end_ka=end_k-(end_k&3);
				if (end_ka==batch){
					// fast path: full tile, 4x unrolled, no bounds checks
					for (int k=0;k<batch;k+=4){
						{
							float x2=buf[k*3+0]-x1;
							float y2=buf[k*3+1]-y1;
							float z2=buf[k*3+2]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							// k==0 seeds `best` for this tile scan
							if (k==0 || d<best){
								best=d;
								best_i=k+k2;
							}
						}
						{
							float x2=buf[k*3+3]-x1;
							float y2=buf[k*3+4]-y1;
							float z2=buf[k*3+5]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+1;
							}
						}
						{
							float x2=buf[k*3+6]-x1;
							float y2=buf[k*3+7]-y1;
							float z2=buf[k*3+8]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+2;
							}
						}
						{
							float x2=buf[k*3+9]-x1;
							float y2=buf[k*3+10]-y1;
							float z2=buf[k*3+11]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+3;
							}
						}
					}
				}else{
					// partial tile: same 4x unrolled body over end_ka points
					for (int k=0;k<end_ka;k+=4){
						{
							float x2=buf[k*3+0]-x1;
							float y2=buf[k*3+1]-y1;
							float z2=buf[k*3+2]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (k==0 || d<best){
								best=d;
								best_i=k+k2;
							}
						}
						{
							float x2=buf[k*3+3]-x1;
							float y2=buf[k*3+4]-y1;
							float z2=buf[k*3+5]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+1;
							}
						}
						{
							float x2=buf[k*3+6]-x1;
							float y2=buf[k*3+7]-y1;
							float z2=buf[k*3+8]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+2;
							}
						}
						{
							float x2=buf[k*3+9]-x1;
							float y2=buf[k*3+10]-y1;
							float z2=buf[k*3+11]-z1;
							float d=x2*x2+y2*y2+z2*z2;
							if (d<best){
								best=d;
								best_i=k+k2+3;
							}
						}
					}
				}
				// remainder loop: the final end_k % 4 points of the tile
				for (int k=end_ka;k<end_k;k++){
					float x2=buf[k*3+0]-x1;
					float y2=buf[k*3+1]-y1;
					float z2=buf[k*3+2]-z1;
					float d=x2*x2+y2*y2+z2*z2;
					if (k==0 || d<best){
						best=d;
						best_i=k+k2;
					}
				}
				// merge this tile's minimum into the global result
				// (k2==0 seeds the output for the first tile)
				if (k2==0 || result[(i*n+j)]>best){
					result[(i*n+j)]=best;
					result_i[(i*n+j)]=best_i;
				}
			}
			// all threads must finish reading buf before the next tile load
			__syncthreads();
		}
	}
}
// Host wrapper: computes nearest-neighbour distances in both directions
// (xyz -> xyz2 and xyz2 -> xyz) and blocks until both kernels complete.
void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
	const dim3 grid(32, 16, 1);
	const dim3 block(512);
	NmDistanceKernel<<<grid, block>>>(b, n, xyz, m, xyz2, result, result_i);
	NmDistanceKernel<<<grid, block>>>(b, m, xyz2, n, xyz, result2, result2_i);
	cudaDeviceSynchronize();
}
// Backward pass for the forward NN-distance.  For each point j of xyz1 with
// matched neighbour j2 = idx1[i*n+j], d|p1-p2|^2/dp1 = 2*(p1-p2); the matched
// xyz2 point receives the negated contribution.  atomicAdd is required
// because several xyz1 points may share the same nearest neighbour in xyz2,
// and because the launcher invokes this kernel twice, accumulating both
// matching directions into the same gradient buffers.
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
			float x1=xyz1[(i*n+j)*3+0];
			float y1=xyz1[(i*n+j)*3+1];
			float z1=xyz1[(i*n+j)*3+2];
			int j2=idx1[i*n+j];  // index of the matched (nearest) point in xyz2
			float x2=xyz2[(i*m+j2)*3+0];
			float y2=xyz2[(i*m+j2)*3+1];
			float z2=xyz2[(i*m+j2)*3+2];
			float g=grad_dist1[i*n+j]*2;  // chain rule factor: d(d^2) = 2*(p1-p2)
			atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
			atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
			atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
			atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
			atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
			atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
		}
	}
}
// Host wrapper for the backward pass: zeroes both gradient buffers, then
// accumulates contributions from both matching directions (dist1: xyz1->xyz2
// and dist2: xyz2->xyz1) and blocks until both kernels complete.
void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){
	// Fix: byte counts were hard-coded as b*n*3*4 (sizeof(float) spelled as
	// the literal 4); use sizeof and a size_t product to avoid int overflow
	// for large b*n.
	cudaMemset(grad_xyz1, 0, (size_t)b * n * 3 * sizeof(float));
	cudaMemset(grad_xyz2, 0, (size_t)b * m * 3 * sizeof(float));
	NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b, n, xyz1, m, xyz2, grad_dist1, idx1, grad_xyz1, grad_xyz2);
	NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b, m, xyz2, n, xyz1, grad_dist2, idx2, grad_xyz2, grad_xyz1);
	cudaDeviceSynchronize();
}
#endif
|
7edd0d7cf96ba11d3191cc2acd36e2294d1ffe80.hip | // !!! This is a file automatically generated by hipify!!!
/***
* Ashutosh Dhar
* Department of Electrical and Computer Engineeing
* University of Illinois, Urbana-Champaign
*
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdio>
#define THREADS_PER_SM 1
#define BLOCKS_PER_SM 1
int ITERATIONS;
int L2_CACHE_SIZE = 512*1024;
int DATA_SIZE;// (L2_CACHE_SIZE * ITERATIONS)
using namespace std;
// Measures the average latency (in clock cycles) of a chain of dependent
// global loads.  `data` must form a pointer-chasing chain of indices (the
// host fills data[i] = i), so each load's address depends on the previous
// result and loads cannot overlap.  Launched with a single thread in this
// benchmark (THREADS_PER_SM = BLOCKS_PER_SM = 1); latency[0] receives the
// mean per-load cycle count.
__global__ void cache_latency(double *latency, int *data, int DATA_SIZE)
{
	__shared__ long long int run_latency;
	unsigned int start_t, stop_t;
	int load=0;
	// Bug fix: run_latency was read-modify-written without ever being
	// initialized; __shared__ memory starts with an indeterminate value.
	if (threadIdx.x == 0) {
		run_latency = 0;
	}
	__syncthreads();
	for(int i=0; i<DATA_SIZE; i++)
	{
		start_t = clock();
		load = data[load];  // dependent load: serializes on the previous value
		stop_t = clock();
		__syncthreads();
		// NOTE(review): with more than one thread per block this += is a data
		// race; the benchmark launches exactly one thread, so it is benign here.
		run_latency += (stop_t - start_t);
		__syncthreads();
	}
	// NOTE(review): `load` is never consumed after the loop — an aggressive
	// compiler could dead-code-eliminate the load chain; confirm with the
	// generated ISA if timings look implausibly low.
	latency[0] = (double)(run_latency)/(DATA_SIZE);
}
// Host driver: builds a sequential pointer-chasing chain of
// L2_CACHE_SIZE * ITERATIONS ints, uploads it, runs cache_latency with a
// single thread, and prints the measured mean load latency.
int main(int argc, char **argv)
{
	if(argc <2)
	{
		cerr<<"Enter iterations!";
		return -1;
	}
	ITERATIONS = atoi(argv[1]);
	// NOTE(review): 512*1024*ITERATIONS overflows int for ITERATIONS > 4095.
	DATA_SIZE = L2_CACHE_SIZE * ITERATIONS;
	// Build the chain: data[i] = i, so the kernel walks the array sequentially.
	int *data;
	data = (int*) malloc(sizeof(int)*DATA_SIZE);
	srand(12);
	for(int i=0; i<DATA_SIZE; i++)
	{
		data[i] = i;//1.0*rand();
	}
	double *latency;
	latency = (double*) malloc((sizeof(double)) *1);
	double *d_latency;
	int *d_data;
	hipError_t errorFlag = hipSuccess;
	errorFlag = hipMalloc((void**) &d_latency, (sizeof(double)*1));
	if(errorFlag != hipSuccess)
	{
		fprintf(stderr, "Failed to alloc memory (error code %s)!\n", hipGetErrorString(errorFlag));
		exit(-1);
	}
	errorFlag = hipMalloc((void**) &d_data, (sizeof(int)*DATA_SIZE));
	if(errorFlag != hipSuccess)
	{
		fprintf(stderr, "Failed to alloc memory (error code %s)!\n", hipGetErrorString(errorFlag));
		exit(-1);
	}
	errorFlag = hipMemcpy(d_data, data, (sizeof(int)*DATA_SIZE), hipMemcpyHostToDevice);
	if(errorFlag != hipSuccess)
	{
		fprintf(stderr, "Failed to copyback (error code %s)!\n", hipGetErrorString(errorFlag));
		exit(-1);
	}
	// one block of one thread: a single thread walks the whole chain
	dim3 dimBlock(THREADS_PER_SM,1,1);
	dim3 dimGrid(BLOCKS_PER_SM,1,1);
	hipLaunchKernelGGL(cache_latency, dimGrid, dimBlock, 0, 0, d_latency, d_data, DATA_SIZE);
	hipDeviceSynchronize();
	errorFlag = hipGetLastError();
	if(errorFlag != hipSuccess)
	{
		fprintf(stderr, "Kernel launch error! (error code %s)!\n", hipGetErrorString(errorFlag));
		exit(-1);
	}
	errorFlag = hipMemcpy(latency, d_latency, (sizeof(double)*1), hipMemcpyDeviceToHost);
	if(errorFlag != hipSuccess)
	{
		fprintf(stderr, "Failed to copyback (error code %s)!\n", hipGetErrorString(errorFlag));
		exit(-1);
	}
	cout<<"\nLatency\n";
	cout<<": "<< latency[0]<<endl;
	cout<<endl;
	// Bug fix: device and host buffers were leaked in the original.
	hipFree(d_latency);
	hipFree(d_data);
	free(latency);
	free(data);
	return 0;
}
| 7edd0d7cf96ba11d3191cc2acd36e2294d1ffe80.cu | /***
* Ashutosh Dhar
* Department of Electrical and Computer Engineeing
* University of Illinois, Urbana-Champaign
*
*/
#include <cuda.h>
#include <iostream>
#include <cstdio>
#define THREADS_PER_SM 1
#define BLOCKS_PER_SM 1
int ITERATIONS;
int L2_CACHE_SIZE = 512*1024;
int DATA_SIZE;// (L2_CACHE_SIZE * ITERATIONS)
using namespace std;
// Measures the average latency (in clock cycles) of a chain of dependent
// global loads.  `data` must form a pointer-chasing chain of indices (the
// host fills data[i] = i), so each load's address depends on the previous
// result and loads cannot overlap.  Launched with a single thread in this
// benchmark (THREADS_PER_SM = BLOCKS_PER_SM = 1); latency[0] receives the
// mean per-load cycle count.
__global__ void cache_latency(double *latency, int *data, int DATA_SIZE)
{
	__shared__ long long int run_latency;
	unsigned int start_t, stop_t;
	int load=0;
	// Bug fix: run_latency was read-modify-written without ever being
	// initialized; __shared__ memory starts with an indeterminate value.
	if (threadIdx.x == 0) {
		run_latency = 0;
	}
	__syncthreads();
	for(int i=0; i<DATA_SIZE; i++)
	{
		start_t = clock();
		load = data[load];  // dependent load: serializes on the previous value
		stop_t = clock();
		__syncthreads();
		// NOTE(review): with more than one thread per block this += is a data
		// race; the benchmark launches exactly one thread, so it is benign here.
		run_latency += (stop_t - start_t);
		__syncthreads();
	}
	// NOTE(review): `load` is never consumed after the loop — an aggressive
	// compiler could dead-code-eliminate the load chain; confirm with the
	// generated ISA if timings look implausibly low.
	latency[0] = (double)(run_latency)/(DATA_SIZE);
}
// Host driver: builds a sequential pointer-chasing chain of
// L2_CACHE_SIZE * ITERATIONS ints, uploads it, runs cache_latency with a
// single thread, and prints the measured mean load latency.
int main(int argc, char **argv)
{
	if(argc <2)
	{
		cerr<<"Enter iterations!";
		return -1;
	}
	ITERATIONS = atoi(argv[1]);
	// NOTE(review): 512*1024*ITERATIONS overflows int for ITERATIONS > 4095.
	DATA_SIZE = L2_CACHE_SIZE * ITERATIONS;
	// Build the chain: data[i] = i, so the kernel walks the array sequentially.
	int *data;
	data = (int*) malloc(sizeof(int)*DATA_SIZE);
	srand(12);
	for(int i=0; i<DATA_SIZE; i++)
	{
		data[i] = i;//1.0*rand();
	}
	double *latency;
	latency = (double*) malloc((sizeof(double)) *1);
	double *d_latency;
	int *d_data;
	cudaError_t errorFlag = cudaSuccess;
	errorFlag = cudaMalloc((void**) &d_latency, (sizeof(double)*1));
	if(errorFlag != cudaSuccess)
	{
		fprintf(stderr, "Failed to alloc memory (error code %s)!\n", cudaGetErrorString(errorFlag));
		exit(-1);
	}
	errorFlag = cudaMalloc((void**) &d_data, (sizeof(int)*DATA_SIZE));
	if(errorFlag != cudaSuccess)
	{
		fprintf(stderr, "Failed to alloc memory (error code %s)!\n", cudaGetErrorString(errorFlag));
		exit(-1);
	}
	errorFlag = cudaMemcpy(d_data, data, (sizeof(int)*DATA_SIZE), cudaMemcpyHostToDevice);
	if(errorFlag != cudaSuccess)
	{
		fprintf(stderr, "Failed to copyback (error code %s)!\n", cudaGetErrorString(errorFlag));
		exit(-1);
	}
	// one block of one thread: a single thread walks the whole chain
	dim3 dimBlock(THREADS_PER_SM,1,1);
	dim3 dimGrid(BLOCKS_PER_SM,1,1);
	cache_latency<<<dimGrid,dimBlock>>>(d_latency,d_data,DATA_SIZE);
	cudaDeviceSynchronize();
	errorFlag = cudaGetLastError();
	if(errorFlag != cudaSuccess)
	{
		fprintf(stderr, "Kernel launch error! (error code %s)!\n", cudaGetErrorString(errorFlag));
		exit(-1);
	}
	errorFlag = cudaMemcpy(latency, d_latency, (sizeof(double)*1), cudaMemcpyDeviceToHost);
	if(errorFlag != cudaSuccess)
	{
		fprintf(stderr, "Failed to copyback (error code %s)!\n", cudaGetErrorString(errorFlag));
		exit(-1);
	}
	cout<<"\nLatency\n";
	cout<<": "<< latency[0]<<endl;
	cout<<endl;
	// Bug fix: device and host buffers were leaked in the original.
	cudaFree(d_latency);
	cudaFree(d_data);
	free(latency);
	free(data);
	return 0;
}
}
|
841548bd6225437b299bbbbb48ac24a4004fbc9f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <algorithm>
#include <cuda_utils.cuh>
#include <random/permute.cuh>
#include <random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Random {
template <typename T>
// Parameters for one permutation test case. Field order matters: the
// inputsf/inputsd tables below rely on aggregate initialization.
struct PermInputs {
  // N: number of rows; D: number of columns (len = N * D).
  int N, D;
  // needPerms: request the permutation vector; needShuffle: also shuffle the
  // data (forces needPerms); rowMajor: layout of the shuffled matrix.
  bool needPerms, needShuffle, rowMajor;
  // RNG seed for the uniform input data.
  unsigned long long int seed;
};
template <typename T>
// Stream-insertion stub; intentionally prints nothing (presumably present so
// gtest can print PermInputs values for parameterized tests — confirm).
::std::ostream &operator<<(::std::ostream &os, const PermInputs<T> &dims) {
  return os;
}
template <typename T>
// Fixture for MLCommon::Random::permute(): allocates device buffers, runs the
// permutation on a dedicated stream, and verifies/cleans up per test case.
class PermTest : public ::testing::TestWithParam<PermInputs<T>> {
 protected:
  void SetUp() override {
    // Create the stream used by SetUp, the permute call, and TearDown.
    CUDA_CHECK(hipStreamCreate(&stream));
    params = ::testing::TestWithParam<PermInputs<T>>::GetParam();
    // forcefully set needPerms, since we need it for unit-testing!
    if (params.needShuffle) {
      params.needPerms = true;
    }
    Random::Rng r(params.seed);
    int N = params.N;
    int D = params.D;
    int len = N * D;
    // BUG FIX: a local `hipStream_t stream` previously shadowed the member
    // here and a second stream was created; that stream leaked and the member
    // stream (the one destroyed in TearDown) was never used.
    if (params.needPerms)
      allocate(outPerms, N);
    else
      outPerms = nullptr;
    if (params.needShuffle) {
      allocate(in, len);
      allocate(out, len);
      r.uniform(in, len, T(-1.0), T(1.0), stream);
    } else {
      in = out = nullptr;
    }
    permute(outPerms, out, in, D, N, params.rowMajor, stream);
    CUDA_CHECK(hipStreamSynchronize(stream));
  }
  void TearDown() override {
    if (params.needPerms) CUDA_CHECK(hipFree(outPerms));
    if (params.needShuffle) {
      CUDA_CHECK(hipFree(in));
      CUDA_CHECK(hipFree(out));
    }
    CUDA_CHECK(hipStreamDestroy(stream));
  }

 protected:
  PermInputs<T> params;  // parameters of the current test case
  T *in, *out;           // device data buffers (nullptr unless needShuffle)
  int *outPerms;         // device permutation vector (nullptr unless needPerms)
  hipStream_t stream;    // stream all async work is issued on
};
// Verifies that a device array contains exactly the values
// start, start+1, ..., start+size-1. By default the host copy is sorted
// first, so the check is order-insensitive (i.e. a bijection check).
template <typename T, typename L>
::testing::AssertionResult devArrMatchRange(const T *actual, size_t size,
                                            T start, L eq_compare,
                                            bool doSort = true,
                                            hipStream_t stream = 0) {
  std::vector<T> host_copy(size);
  updateHost<T>(host_copy.data(), actual, size, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  if (doSort) {
    std::sort(host_copy.begin(), host_copy.end());
  }
  for (size_t idx = 0; idx < size; ++idx) {
    const auto act = host_copy[idx];
    const auto expected = start + idx;
    if (!eq_compare(expected, act)) {
      return ::testing::AssertionFailure()
             << "actual=" << act << " != expected=" << expected << " @" << idx;
    }
  }
  return ::testing::AssertionSuccess();
}
// Verifies that `out` equals `in` with its rows rearranged according to
// `perms`, for either row-major or column-major storage of the N x D matrix.
template <typename T, typename L>
::testing::AssertionResult devArrMatchShuffle(const int *perms, const T *out,
                                              const T *in, int D, int N,
                                              bool rowMajor, L eq_compare,
                                              hipStream_t stream = 0) {
  std::vector<int> h_perms(N);
  updateHost<int>(h_perms.data(), perms, N, stream);
  std::vector<T> h_out(N * D);
  std::vector<T> h_in(N * D);
  updateHost<T>(h_out.data(), out, N * D, stream);
  updateHost<T>(h_in.data(), in, N * D, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  for (int row = 0; row < N; ++row) {
    for (int col = 0; col < D; ++col) {
      // Output row `row` must equal input row h_perms[row].
      const int outPos = rowMajor ? row * D + col : col * N + row;
      const int inPos =
        rowMajor ? h_perms[row] * D + col : col * N + h_perms[row];
      const auto act = h_out[outPos];
      const auto expected = h_in[inPos];
      if (!eq_compare(expected, act)) {
        return ::testing::AssertionFailure()
               << "actual=" << act << " != expected=" << expected << " @" << row
               << ", " << col;
      }
    }
  }
  return ::testing::AssertionSuccess();
}
// Test matrix; each entry is {N, D, needPerms, needShuffle, rowMajor, seed}.
// Sizes deliberately cover small, power-of-two, off-by-500, and large cases.
const std::vector<PermInputs<float>> inputsf = {
  // only generate permutations
  {32, 8, true, false, true, 1234ULL},
  {32, 8, true, false, true, 1234567890ULL},
  {1024, 32, true, false, true, 1234ULL},
  {1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024, 32, true, false, true, 1234ULL},
  {2 * 1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234567890ULL},
  {100000, 32, true, false, true, 1234ULL},
  {100000, 32, true, false, true, 1234567890ULL},
  {100001, 33, true, false, true, 1234567890ULL},
  // permute and shuffle the data row major
  {32, 8, true, true, true, 1234ULL},
  {32, 8, true, true, true, 1234567890ULL},
  {1024, 32, true, true, true, 1234ULL},
  {1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024, 32, true, true, true, 1234ULL},
  {2 * 1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234567890ULL},
  {100000, 32, true, true, true, 1234ULL},
  {100000, 32, true, true, true, 1234567890ULL},
  {100001, 31, true, true, true, 1234567890ULL},
  // permute and shuffle the data column major
  {32, 8, true, true, false, 1234ULL},
  {32, 8, true, true, false, 1234567890ULL},
  {1024, 32, true, true, false, 1234ULL},
  {1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024, 32, true, true, false, 1234ULL},
  {2 * 1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234567890ULL},
  {100000, 32, true, true, false, 1234ULL},
  {100000, 32, true, true, false, 1234567890ULL},
  {100001, 33, true, true, false, 1234567890ULL}};
typedef PermTest<float> PermTestF;
TEST_P(PermTestF, Result) {
  // The permutation must be a bijection: sorted, it is exactly 0..N-1.
  if (params.needPerms) {
    ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
  }
  // The shuffled output must match the input rearranged by outPerms.
  if (params.needShuffle) {
    ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
                                   params.rowMajor, Compare<float>()));
  }
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestF, ::testing::ValuesIn(inputsf));
// Same test matrix as inputsf, for double precision data.
// Each entry is {N, D, needPerms, needShuffle, rowMajor, seed}.
const std::vector<PermInputs<double>> inputsd = {
  // only generate permutations
  {32, 8, true, false, true, 1234ULL},
  {32, 8, true, false, true, 1234567890ULL},
  {1024, 32, true, false, true, 1234ULL},
  {1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024, 32, true, false, true, 1234ULL},
  {2 * 1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234567890ULL},
  {100000, 32, true, false, true, 1234ULL},
  {100000, 32, true, false, true, 1234567890ULL},
  {100001, 33, true, false, true, 1234567890ULL},
  // permute and shuffle the data row major
  {32, 8, true, true, true, 1234ULL},
  {32, 8, true, true, true, 1234567890ULL},
  {1024, 32, true, true, true, 1234ULL},
  {1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024, 32, true, true, true, 1234ULL},
  {2 * 1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234567890ULL},
  {100000, 32, true, true, true, 1234ULL},
  {100000, 32, true, true, true, 1234567890ULL},
  {100001, 31, true, true, true, 1234567890ULL},
  // permute and shuffle the data column major
  {32, 8, true, true, false, 1234ULL},
  {32, 8, true, true, false, 1234567890ULL},
  {1024, 32, true, true, false, 1234ULL},
  {1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024, 32, true, true, false, 1234ULL},
  {2 * 1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234567890ULL},
  {100000, 32, true, true, false, 1234ULL},
  {100000, 32, true, true, false, 1234567890ULL},
  {100001, 33, true, true, false, 1234567890ULL}};
typedef PermTest<double> PermTestD;
TEST_P(PermTestD, Result) {
  // The permutation must be a bijection: sorted, it is exactly 0..N-1.
  if (params.needPerms) {
    ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
  }
  // The shuffled output must match the input rearranged by outPerms.
  if (params.needShuffle) {
    ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
                                   params.rowMajor, Compare<double>()));
  }
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestD, ::testing::ValuesIn(inputsd));
} // end namespace Random
} // end namespace MLCommon
| 841548bd6225437b299bbbbb48ac24a4004fbc9f.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <algorithm>
#include <cuda_utils.cuh>
#include <random/permute.cuh>
#include <random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Random {
template <typename T>
// Parameters for one permutation test case. Field order matters: the
// inputsf/inputsd tables below rely on aggregate initialization.
struct PermInputs {
  // N: number of rows; D: number of columns (len = N * D).
  int N, D;
  // needPerms: request the permutation vector; needShuffle: also shuffle the
  // data (forces needPerms); rowMajor: layout of the shuffled matrix.
  bool needPerms, needShuffle, rowMajor;
  // RNG seed for the uniform input data.
  unsigned long long int seed;
};
template <typename T>
// Stream-insertion stub; intentionally prints nothing (presumably present so
// gtest can print PermInputs values for parameterized tests — confirm).
::std::ostream &operator<<(::std::ostream &os, const PermInputs<T> &dims) {
  return os;
}
template <typename T>
// Fixture for MLCommon::Random::permute(): allocates device buffers, runs the
// permutation on a dedicated stream, and verifies/cleans up per test case.
class PermTest : public ::testing::TestWithParam<PermInputs<T>> {
 protected:
  void SetUp() override {
    // Create the stream used by SetUp, the permute call, and TearDown.
    CUDA_CHECK(cudaStreamCreate(&stream));
    params = ::testing::TestWithParam<PermInputs<T>>::GetParam();
    // forcefully set needPerms, since we need it for unit-testing!
    if (params.needShuffle) {
      params.needPerms = true;
    }
    Random::Rng r(params.seed);
    int N = params.N;
    int D = params.D;
    int len = N * D;
    // BUG FIX: a local `cudaStream_t stream` previously shadowed the member
    // here and a second stream was created; that stream leaked and the member
    // stream (the one destroyed in TearDown) was never used.
    if (params.needPerms)
      allocate(outPerms, N);
    else
      outPerms = nullptr;
    if (params.needShuffle) {
      allocate(in, len);
      allocate(out, len);
      r.uniform(in, len, T(-1.0), T(1.0), stream);
    } else {
      in = out = nullptr;
    }
    permute(outPerms, out, in, D, N, params.rowMajor, stream);
    CUDA_CHECK(cudaStreamSynchronize(stream));
  }
  void TearDown() override {
    if (params.needPerms) CUDA_CHECK(cudaFree(outPerms));
    if (params.needShuffle) {
      CUDA_CHECK(cudaFree(in));
      CUDA_CHECK(cudaFree(out));
    }
    CUDA_CHECK(cudaStreamDestroy(stream));
  }

 protected:
  PermInputs<T> params;  // parameters of the current test case
  T *in, *out;           // device data buffers (nullptr unless needShuffle)
  int *outPerms;         // device permutation vector (nullptr unless needPerms)
  cudaStream_t stream;   // stream all async work is issued on
};
// Verifies that a device array contains exactly the values
// start, start+1, ..., start+size-1. By default the host copy is sorted
// first, so the check is order-insensitive (i.e. a bijection check).
template <typename T, typename L>
::testing::AssertionResult devArrMatchRange(const T *actual, size_t size,
                                            T start, L eq_compare,
                                            bool doSort = true,
                                            cudaStream_t stream = 0) {
  std::vector<T> host_copy(size);
  updateHost<T>(host_copy.data(), actual, size, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  if (doSort) {
    std::sort(host_copy.begin(), host_copy.end());
  }
  for (size_t idx = 0; idx < size; ++idx) {
    const auto act = host_copy[idx];
    const auto expected = start + idx;
    if (!eq_compare(expected, act)) {
      return ::testing::AssertionFailure()
             << "actual=" << act << " != expected=" << expected << " @" << idx;
    }
  }
  return ::testing::AssertionSuccess();
}
// Verifies that `out` equals `in` with its rows rearranged according to
// `perms`, for either row-major or column-major storage of the N x D matrix.
template <typename T, typename L>
::testing::AssertionResult devArrMatchShuffle(const int *perms, const T *out,
                                              const T *in, int D, int N,
                                              bool rowMajor, L eq_compare,
                                              cudaStream_t stream = 0) {
  std::vector<int> h_perms(N);
  updateHost<int>(h_perms.data(), perms, N, stream);
  std::vector<T> h_out(N * D);
  std::vector<T> h_in(N * D);
  updateHost<T>(h_out.data(), out, N * D, stream);
  updateHost<T>(h_in.data(), in, N * D, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  for (int row = 0; row < N; ++row) {
    for (int col = 0; col < D; ++col) {
      // Output row `row` must equal input row h_perms[row].
      const int outPos = rowMajor ? row * D + col : col * N + row;
      const int inPos =
        rowMajor ? h_perms[row] * D + col : col * N + h_perms[row];
      const auto act = h_out[outPos];
      const auto expected = h_in[inPos];
      if (!eq_compare(expected, act)) {
        return ::testing::AssertionFailure()
               << "actual=" << act << " != expected=" << expected << " @" << row
               << ", " << col;
      }
    }
  }
  return ::testing::AssertionSuccess();
}
// Test matrix; each entry is {N, D, needPerms, needShuffle, rowMajor, seed}.
// Sizes deliberately cover small, power-of-two, off-by-500, and large cases.
const std::vector<PermInputs<float>> inputsf = {
  // only generate permutations
  {32, 8, true, false, true, 1234ULL},
  {32, 8, true, false, true, 1234567890ULL},
  {1024, 32, true, false, true, 1234ULL},
  {1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024, 32, true, false, true, 1234ULL},
  {2 * 1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234567890ULL},
  {100000, 32, true, false, true, 1234ULL},
  {100000, 32, true, false, true, 1234567890ULL},
  {100001, 33, true, false, true, 1234567890ULL},
  // permute and shuffle the data row major
  {32, 8, true, true, true, 1234ULL},
  {32, 8, true, true, true, 1234567890ULL},
  {1024, 32, true, true, true, 1234ULL},
  {1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024, 32, true, true, true, 1234ULL},
  {2 * 1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234567890ULL},
  {100000, 32, true, true, true, 1234ULL},
  {100000, 32, true, true, true, 1234567890ULL},
  {100001, 31, true, true, true, 1234567890ULL},
  // permute and shuffle the data column major
  {32, 8, true, true, false, 1234ULL},
  {32, 8, true, true, false, 1234567890ULL},
  {1024, 32, true, true, false, 1234ULL},
  {1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024, 32, true, true, false, 1234ULL},
  {2 * 1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234567890ULL},
  {100000, 32, true, true, false, 1234ULL},
  {100000, 32, true, true, false, 1234567890ULL},
  {100001, 33, true, true, false, 1234567890ULL}};
typedef PermTest<float> PermTestF;
TEST_P(PermTestF, Result) {
  // The permutation must be a bijection: sorted, it is exactly 0..N-1.
  if (params.needPerms) {
    ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
  }
  // The shuffled output must match the input rearranged by outPerms.
  if (params.needShuffle) {
    ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
                                   params.rowMajor, Compare<float>()));
  }
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestF, ::testing::ValuesIn(inputsf));
// Same test matrix as inputsf, for double precision data.
// Each entry is {N, D, needPerms, needShuffle, rowMajor, seed}.
const std::vector<PermInputs<double>> inputsd = {
  // only generate permutations
  {32, 8, true, false, true, 1234ULL},
  {32, 8, true, false, true, 1234567890ULL},
  {1024, 32, true, false, true, 1234ULL},
  {1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024, 32, true, false, true, 1234ULL},
  {2 * 1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234567890ULL},
  {100000, 32, true, false, true, 1234ULL},
  {100000, 32, true, false, true, 1234567890ULL},
  {100001, 33, true, false, true, 1234567890ULL},
  // permute and shuffle the data row major
  {32, 8, true, true, true, 1234ULL},
  {32, 8, true, true, true, 1234567890ULL},
  {1024, 32, true, true, true, 1234ULL},
  {1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024, 32, true, true, true, 1234ULL},
  {2 * 1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234567890ULL},
  {100000, 32, true, true, true, 1234ULL},
  {100000, 32, true, true, true, 1234567890ULL},
  {100001, 31, true, true, true, 1234567890ULL},
  // permute and shuffle the data column major
  {32, 8, true, true, false, 1234ULL},
  {32, 8, true, true, false, 1234567890ULL},
  {1024, 32, true, true, false, 1234ULL},
  {1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024, 32, true, true, false, 1234ULL},
  {2 * 1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234567890ULL},
  {100000, 32, true, true, false, 1234ULL},
  {100000, 32, true, true, false, 1234567890ULL},
  {100001, 33, true, true, false, 1234567890ULL}};
typedef PermTest<double> PermTestD;
TEST_P(PermTestD, Result) {
  // The permutation must be a bijection: sorted, it is exactly 0..N-1.
  if (params.needPerms) {
    ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, Compare<int>()));
  }
  // The shuffled output must match the input rearranged by outPerms.
  if (params.needShuffle) {
    ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N,
                                   params.rowMajor, Compare<double>()));
  }
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestD, ::testing::ValuesIn(inputsd));
} // end namespace Random
} // end namespace MLCommon
|
caedc4b5e5a31eb3b9a335a3c80267d5a143b6d8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "json_reader_impl.hpp"
#include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <nvstrings/NVStrings.h>
#include <cudf/cudf.h>
#include <utilities/cudf_utils.h>
#include <utilities/error_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <io/comp/io_uncomp.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <io/cuio_common.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/wrapper_utils.hpp>
namespace cudf {
namespace io {
namespace json {
using string_pair = std::pair<const char *, size_t>;
// Constructor: validates reader options and prepares serialized tries used to
// recognize boolean/null literals while parsing field values.
reader::Impl::Impl(reader_options const &args) : args_(args) {
  // Check if the passed arguments are supported
  CUDF_EXPECTS(args_.lines, "Only Json Lines format is currently supported.\n");
  // Build serialized tries for "true"/"false" (booleans) and "null" (missing
  // values); the d_ prefix and .data().get() suggest device-side buffers —
  // confirm against createSerializedTrie.
  d_true_trie_ = createSerializedTrie({"true"});
  opts_.trueValuesTrie = d_true_trie_.data().get();
  d_false_trie_ = createSerializedTrie({"false"});
  opts_.falseValuesTrie = d_false_trie_.data().get();
  d_na_trie_ = createSerializedTrie({"null"});
  opts_.naValuesTrie = d_na_trie_.data().get();
}
/**---------------------------------------------------------------------------*
* @brief Estimates the maximum expected length or a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the JSON file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept {
  // Flat estimate used when the column count is unknown.
  constexpr size_t flat_row_bytes = 16 * 1024;  // 16KB
  // Per-column contribution plus a fixed base when the count is known.
  constexpr size_t per_column_bytes = 64;
  constexpr size_t base_row_bytes = 1024;  // 1KB
  return (num_columns == 0)
           ? flat_row_bytes
           : base_row_bytes + num_columns * per_column_bytes;
}
// End-to-end read pipeline: ingest raw bytes, decompress, locate record
// starts, upload the relevant range to the GPU, infer column names and types,
// and materialize gdf columns. Each stage's postcondition is checked.
table reader::Impl::read() {
  ingestRawInput();
  CUDF_EXPECTS(buffer_ != nullptr, "Ingest failed: input data is null.\n");
  decompressInput();
  CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n");
  CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n");
  setRecordStarts();
  CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n");
  uploadDataToDevice();
  CUDF_EXPECTS(!d_data_.empty(), "Error uploading input data to the GPU.\n");
  setColumnNames();
  CUDF_EXPECTS(!column_names_.empty(), "Error determining column names.\n");
  setDataTypes();
  CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n");
  convertDataToColumns();
  CUDF_EXPECTS(!columns_.empty(), "Error converting json input into gdf columns.\n");
  // Transfer ownership to raw pointer output
  std::vector<gdf_column *> out_cols(columns_.size());
  for (size_t i = 0; i < columns_.size(); ++i) {
    out_cols[i] = columns_[i].release();
  }
  return table(out_cols.data(), out_cols.size());
}
// Reads only the records that start within [offset, offset + size) by
// recording the window and running the regular pipeline, which trims records
// to this range during ingest/upload.
table reader::Impl::read_byte_range(size_t offset, size_t size) {
  byte_range_offset_ = offset;
  byte_range_size_ = size;
  return read();
}
// Opens the input source (file or host buffer) and maps/reads the raw bytes,
// honoring the optional byte range set by read_byte_range().
void reader::Impl::ingestRawInput() {
  size_t range_size = 0;
  if (byte_range_size_ != 0) {
    // Pad the requested range so a record that straddles its end can still be
    // read in full.
    range_size = byte_range_size_ + calculateMaxRowSize(args_.dtype.size());
  }
  source_ = [&] {
    if (args_.source_type == FILE_PATH) {
      return datasource::create(args_.source, byte_range_offset_, range_size);
    } else if (args_.source_type == HOST_BUFFER) {
      return datasource::create(args_.source.c_str(), args_.source.size());
    } else {
      CUDF_FAIL("Invalid input type");
    }
  }();
  // Use std::max explicitly; the previous unqualified ::max was a hipify
  // translation artifact of std::max (<algorithm> is already included).
  buffer_ = source_->get_buffer(byte_range_offset_,
                                std::max(byte_range_size_, source_->size()));
}
// Decompresses the ingested buffer if needed, setting uncomp_data_ /
// uncomp_size_ to point at the plain-text JSON bytes.
void reader::Impl::decompressInput() {
  // Compression is taken from the user option, or inferred from the file
  // extension via the mapping below.
  const auto compression_type = inferCompressionType(
    args_.compression, args_.source_type, args_.source,
    {{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
  if (compression_type == "none") {
    // Do not use the owner vector here to avoid extra copy
    uncomp_data_ = reinterpret_cast<const char *>(buffer_->data());
    uncomp_size_ = buffer_->size();
  } else {
    // Decompress into the owning host vector, then alias it.
    CUDF_EXPECTS(getUncompressedHostData(
                   reinterpret_cast<const char *>(buffer_->data()),
                   buffer_->size(), compression_type,
                   uncomp_data_owner_) == GDF_SUCCESS,
                 "Input data decompression failed.\n");
    uncomp_data_ = uncomp_data_owner_.data();
    uncomp_size_ = uncomp_data_owner_.size();
  }
}
// Enumerates the byte offsets at which records start (positions after each
// newline, plus offset 0 for the first record), optionally filtering out
// newlines that fall inside quoted strings.
void reader::Impl::setRecordStarts() {
  std::vector<char> chars_to_count{'\n'};
  // Currently, ignoring lineterminations within quotes is handled by recording
  // the records of both, and then filtering out the records that is a
  // quotechar or a linetermination within a quotechar pair.
  if (allow_newlines_in_strings_) {
    chars_to_count.push_back('\"');
  }
  // If not starting at an offset, add an extra row to account for the first
  // row in the file
  const auto prefilter_count =
    countAllFromSet(uncomp_data_, uncomp_size_, chars_to_count) +
    ((byte_range_offset_ == 0) ? 1 : 0);
  rec_starts_ = device_buffer<uint64_t>(prefilter_count);
  auto *find_result_ptr = rec_starts_.data();
  // Manually adding an extra row to account for the first row in the file
  if (byte_range_offset_ == 0) {
    find_result_ptr++;
    CUDA_TRY(hipMemsetAsync(rec_starts_.data(), 0ull, sizeof(uint64_t)));
  }
  std::vector<char> chars_to_find{'\n'};
  if (allow_newlines_in_strings_) {
    chars_to_find.push_back('\"');
  }
  // Passing offset = 1 to return positions AFTER the found character
  findAllFromSet(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr);
  // Sort the record positions as subsequent processing may require filtering
  // certain rows or other processing on specific records
  thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(),
               rec_starts_.data() + prefilter_count);
  auto filtered_count = prefilter_count;
  if (allow_newlines_in_strings_) {
    std::vector<uint64_t> h_rec_starts(prefilter_count);
    CUDA_TRY(hipMemcpy(h_rec_starts.data(), rec_starts_.data(),
                       sizeof(uint64_t) * prefilter_count, hipMemcpyDefault));
    // Walk the candidates on the host; quote positions and newlines inside an
    // open quotation are pushed to uncomp_size_ so the sort below discards
    // them past the filtered range.
    bool quotation = false;
    for (gdf_size_type i = 1; i < prefilter_count; ++i) {
      if (uncomp_data_[h_rec_starts[i] - 1] == '\"') {
        quotation = !quotation;
        h_rec_starts[i] = uncomp_size_;
        filtered_count--;
      } else if (quotation) {
        h_rec_starts[i] = uncomp_size_;
        filtered_count--;
      }
    }
    // BUG FIX: the writeback previously passed `prefilter_count` as the byte
    // count instead of `sizeof(uint64_t) * prefilter_count`, copying back only
    // one eighth of the filtered offsets.
    CUDA_TRY(hipMemcpy(rec_starts_.data(), h_rec_starts.data(),
                       sizeof(uint64_t) * prefilter_count,
                       hipMemcpyHostToDevice));
    thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(),
                 rec_starts_.data() + prefilter_count);
  }
  // Exclude the ending newline as it does not precede a record start
  if (uncomp_data_[uncomp_size_ - 1] == '\n') {
    filtered_count--;
  }
  rec_starts_.resize(filtered_count);
}
// Copies the bytes covering the records of interest to the device, trimming
// rows outside the requested byte range and rebasing record-start offsets.
// NOTE(review): assumes rec_starts_ is non-empty when a byte range is set —
// h_rec_starts.front() on an empty vector would be UB; read() checks
// !rec_starts_.empty() only after this call, so confirm upstream guarantees.
void reader::Impl::uploadDataToDevice() {
  size_t start_offset = 0;
  size_t end_offset = uncomp_size_;
  // Trim lines that are outside range
  if (byte_range_size_ != 0 || byte_range_offset_ != 0) {
    std::vector<uint64_t> h_rec_starts(rec_starts_.size());
    CUDA_TRY(
      hipMemcpy(h_rec_starts.data(), rec_starts_.data(), sizeof(uint64_t) * h_rec_starts.size(), hipMemcpyDefault));
    if (byte_range_size_ != 0) {
      // Drop trailing records that begin past the requested range.
      auto it = h_rec_starts.end() - 1;
      while (it >= h_rec_starts.begin() && *it > byte_range_size_) {
        end_offset = *it;
        --it;
      }
      h_rec_starts.erase(it + 1, h_rec_starts.end());
    }
    // Resize to exclude rows outside of the range; adjust row start positions to account for the data subcopy
    start_offset = h_rec_starts.front();
    rec_starts_.resize(h_rec_starts.size());
    thrust::transform(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + rec_starts_.size(),
                      thrust::make_constant_iterator(start_offset), rec_starts_.data(), thrust::minus<uint64_t>());
  }
  const size_t bytes_to_upload = end_offset - start_offset;
  CUDF_EXPECTS(bytes_to_upload <= uncomp_size_, "Error finding the record within the specified byte range.\n");
  // Upload the raw data that is within the rows of interest
  d_data_ = device_buffer<char>(bytes_to_upload);
  CUDA_TRY(hipMemcpy(d_data_.data(), uncomp_data_ + start_offset, bytes_to_upload, hipMemcpyHostToDevice));
}
/**---------------------------------------------------------------------------*
* @brief Extract value names from a JSON object
*
* @param[in] json_obj Host vector containing the JSON object
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
*
* @return std::vector<std::string> names of JSON object values
*---------------------------------------------------------------------------**/
std::vector<std::string> getNamesFromJsonObject(const std::vector<char> &json_obj, const ParseOptions &opts) {
  // Small state machine over the characters of a single JSON object row.
  enum class ParseState { preColName, colName, postColName };
  std::vector<std::string> names;
  bool quotation = false;
  auto state = ParseState::preColName;
  int name_start = 0;
  for (size_t pos = 0; pos < json_obj.size(); ++pos) {
    const char ch = json_obj[pos];
    switch (state) {
      case ParseState::preColName:
        // A quote opens the next column name.
        if (ch == opts.quotechar) {
          name_start = pos + 1;
          state = ParseState::colName;
        }
        break;
      case ParseState::colName:
        // A non-escaped quote terminates the current column name.
        if (ch == opts.quotechar && json_obj[pos - 1] != '\\') {
          names.emplace_back(&json_obj[name_start], &json_obj[pos]);
          state = ParseState::postColName;
        }
        break;
      case ParseState::postColName:
        // TODO handle complex data types that might include unquoted commas
        if (!quotation && ch == opts.delimiter) {
          state = ParseState::preColName;
        } else if (ch == opts.quotechar) {
          quotation = !quotation;
        }
        break;
    }
  }
  return names;
}
// Determines column names from the first row: field names for JSON-object
// rows, or zero-based positional names for JSON-array rows.
void reader::Impl::setColumnNames() {
  // If file only contains one row, use the file size for the row size
  uint64_t first_row_len = d_data_.size() / sizeof(char);
  if (rec_starts_.size() > 1) {
    // Set first_row_len to the offset of the second row, if it exists
    CUDA_TRY(hipMemcpy(&first_row_len, rec_starts_.data() + 1, sizeof(uint64_t), hipMemcpyDefault));
  }
  std::vector<char> first_row(first_row_len);
  CUDA_TRY(hipMemcpy(first_row.data(), d_data_.data(), first_row_len * sizeof(char), hipMemcpyDefault));
  // Determine the row format between:
  //   JSON array - [val1, val2, ...] and
  //   JSON object - {"col1":val1, "col2":val2, ...}
  // based on the top level opening bracket
  const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '[');
  const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{');
  CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(),
               "Input data is not a valid JSON file.");
  // If the first opening bracket is '{', assume object format
  const bool is_object = first_curly_bracket < first_square_bracket;
  if (is_object) {
    column_names_ = getNamesFromJsonObject(first_row, opts_);
  } else {
    // Array format: count top-level fields and name them "0", "1", ...
    int cols_found = 0;
    bool quotation = false;
    for (size_t pos = 0; pos < first_row.size(); ++pos) {
      // Flip the quotation flag if current character is a quotechar
      if (first_row[pos] == opts_.quotechar) {
        quotation = !quotation;
      }
      // Check if end of a column/row
      else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) {
        column_names_.emplace_back(std::to_string(cols_found++));
      }
    }
  }
}
// Allocates one gdf column per detected dtype and launches the device-side
// conversion of the raw JSON text into typed column data.
void reader::Impl::convertDataToColumns() {
  const auto num_columns = dtypes_.size();
  for (size_t col = 0; col < num_columns; ++col) {
    columns_.emplace_back(rec_starts_.size(), dtypes_[col], gdf_dtype_extra_info{TIME_UNIT_NONE}, column_names_[col]);
    CUDF_EXPECTS(columns_.back().allocate() == GDF_SUCCESS, "Cannot allocate columns.\n");
  }
  // Mirror per-column metadata into device vectors for the conversion kernel.
  thrust::host_vector<gdf_dtype> h_dtypes(num_columns);
  thrust::host_vector<void *> h_data(num_columns);
  thrust::host_vector<gdf_valid_type *> h_valid(num_columns);
  for (size_t i = 0; i < num_columns; ++i) {
    h_dtypes[i] = columns_[i]->dtype;
    h_data[i] = columns_[i]->data;
    h_valid[i] = columns_[i]->valid;
  }
  rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes;
  rmm::device_vector<void *> d_data = h_data;
  rmm::device_vector<gdf_valid_type *> d_valid = h_valid;
  rmm::device_vector<gdf_size_type> d_valid_counts(num_columns, 0);
  convertJsonToColumns(d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(), d_valid_counts.data().get());
  // Synchronize so conversion errors surface here rather than later.
  CUDA_TRY(hipDeviceSynchronize());
  CUDA_TRY(hipGetLastError());
  // Null count = total rows minus rows the kernel marked valid.
  thrust::host_vector<gdf_size_type> h_valid_counts = d_valid_counts;
  for (size_t i = 0; i < num_columns; ++i) {
    columns_[i]->null_count = columns_[i]->size - h_valid_counts[i];
  }
  // Handle string columns: replace the (ptr, length) pair list produced by the
  // kernel with an NVStrings instance, freeing the old pair buffer.
  for (auto &column : columns_) {
    if (column->dtype == GDF_STRING) {
      auto str_list = static_cast<string_pair *>(column->data);
      auto str_data = NVStrings::create_from_index(str_list, column->size);
      RMM_FREE(std::exchange(column->data, str_data), 0);
    }
  }
}
/**---------------------------------------------------------------------------*
* @brief Functor for converting plain text data to cuDF data type value.
*---------------------------------------------------------------------------**/
struct ConvertFunctor {
  /**---------------------------------------------------------------------------*
   * @brief Template specialization for operator() for types whose values can be
   * convertible to a 0 or 1 to represent false/true. The converting is done by
   * checking against the default and user-specified true/false values list.
   *
   * It is handled here rather than within convertStrToValue() as that function
   * is used by other types (ex. timestamp) that aren't 'booleable'.
   *
   * @param[in] data Raw character buffer holding the field text
   * @param[out] gdf_columns Column data buffer, indexed by @p row
   * @param[in] row Destination row within the column
   * @param[in] start,end Inclusive character range of the field within @p data
   * @param[in] opts Parsing options (tries, delimiter, quote character)
   *---------------------------------------------------------------------------**/
  template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
  __host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
                                                      long end, const ParseOptions &opts) {
    T &value{static_cast<T *>(gdf_columns)[row]};
    // Check for user-specified true/false values first, where the output is
    // replaced with 1/0 respectively
    const size_t field_len = end - start + 1;
    if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) {
      value = 1;
    } else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
      value = 0;
    } else {
      // Not a recognized boolean literal: parse as a regular number.
      value = convertStrToValue<T>(data, start, end, opts);
    }
  }
  /**---------------------------------------------------------------------------*
   * @brief Default template operator() dispatch specialization all data types
   * (including wrapper types) that is not covered by above.
   *---------------------------------------------------------------------------**/
  template <typename T, typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr>
  __host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
                                                      long end, const ParseOptions &opts) {
    T &value{static_cast<T *>(gdf_columns)[row]};
    value = convertStrToValue<T>(data, start, end, opts);
  }
};
/**---------------------------------------------------------------------------*
* @brief CUDA Kernel that modifies the start and stop offsets to exclude
* the sections outside of the top level brackets.
*
* The top level brackets characters are excluded from the resulting range.
* Parameter stop has the same semantics as end() in STL containers
* (one past the last element)
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in,out] start Offset of the first character in the range
* @param[in,out] stop Offset of the first character after the range
*
* @return void
*---------------------------------------------------------------------------**/
__device__ void limitRangeToBrackets(const char *data, long &start, long &stop) {
  // Advance start to the first top-level opening bracket, then step past it
  for (; start < stop; ++start) {
    const char c = data[start];
    if (c == '[' || c == '{') break;
  }
  ++start;
  // Retreat stop until the character before it is a closing bracket,
  // then step back once more so the bracket itself is excluded
  for (; start < stop; --stop) {
    const char c = data[stop - 1];
    if (c == ']' || c == '}') break;
  }
  --stop;
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that finds the end position of the next field name,
* including the colon that separates the name from the field value.
*
* Returns the position after the colon that preceeds the value token.
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
* @param[in] start Offset of the first character in the range
* @param[in] stop Offset of the first character after the range
*
* @return long Position of the first character after the field name.
*---------------------------------------------------------------------------**/
__device__ long seekFieldNameEnd(const char *data, const ParseOptions opts, long start, long stop) {
  // NOTE(review): reads data[pos - 1] at pos == start; assumes start follows an
  // opening bracket or separator so the preceding byte is readable — confirm
  bool in_quotes = false;
  long pos = start;
  while (pos < stop) {
    const bool unescaped_quote = (data[pos] == opts.quotechar && data[pos - 1] != '\\');
    if (unescaped_quote) {
      in_quotes = !in_quotes;
    } else if (data[pos] == ':' && !in_quotes) {
      // Return the position right after the name/value separator
      return pos + 1;
    }
    ++pos;
  }
  // No separator found within the range
  return stop;
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts plain text data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] data The entire data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] rec_starts The start of each data record
* @param[in] num_records The number of lines/rows
* @param[in] dtypes The data type of each column
* @param[in] opts A set of parsing options
* @param[out] gdf_columns The output column data
* @param[in] num_columns The number of columns
* @param[out] valid_fields The bitmaps indicating whether column fields are valid
* @param[out] num_valid_fields The numbers of valid fields in columns
*
* @return void
*---------------------------------------------------------------------------**/
__global__ void convertJsonToGdf(const char *data, size_t data_size, const uint64_t *rec_starts,
                                 gdf_size_type num_records, const gdf_dtype *dtypes, ParseOptions opts,
                                 void *const *gdf_columns, int num_columns, gdf_valid_type *const *valid_fields,
                                 gdf_size_type *num_valid_fields) {
  // One thread per record/row
  const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
  if (rec_id >= num_records)
    return;
  long start = rec_starts[rec_id];
  // has the same semantics as end() in STL containers (one past last element)
  long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
  // Restrict [start, stop) to the span inside the record's top-level brackets
  limitRangeToBrackets(data, start, stop);
  // '{' => object row ("name":value pairs); otherwise array row (values only)
  const bool is_object = (data[start - 1] == '{');
  for (int col = 0; col < num_columns && start < stop; col++) {
    if (is_object) {
      // Skip the field name and the ':' separator before the value token
      start = seekFieldNameEnd(data, opts, start, stop);
    }
    // field_end is at the next delimiter/newline
    const long field_end = seekFieldEnd(data, opts, start, stop);
    long field_data_last = field_end - 1;
    // Modify start & end to ignore whitespace and quotechars
    adjustForWhitespaceAndQuotes(data, &start, &field_data_last, opts.quotechar);
    // Empty fields are not legal values
    // NOTE(review): the NA-trie lookup uses length (field_end - start), which can
    // include trailing whitespace/quote characters that field_data_last excludes;
    // verify this is the intended length for NA matching
    if (start <= field_data_last && !serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) {
      // Type dispatcher does not handle GDF_STRINGS
      if (dtypes[col] == gdf_dtype::GDF_STRING) {
        // Record a (pointer, length) pair into the raw data; converted to an
        // NVStrings instance by the host after the kernel completes
        auto str_list = static_cast<string_pair *>(gdf_columns[col]);
        str_list[rec_id].first = data + start;
        str_list[rec_id].second = field_data_last - start + 1;
      } else {
        // Dispatch on runtime dtype to the typed ConvertFunctor specialization
        cudf::type_dispatcher(dtypes[col], ConvertFunctor{}, data, gdf_columns[col], rec_id, start, field_data_last,
                              opts);
      }
      // set the valid bitmap - all bits were set to 0 to start
      setBitmapBit(valid_fields[col], rec_id);
      atomicAdd(&num_valid_fields[col], 1);
    } else if (dtypes[col] == gdf_dtype::GDF_STRING) {
      // Null/empty string fields are encoded as (nullptr, 0)
      auto str_list = static_cast<string_pair *>(gdf_columns[col]);
      str_list[rec_id].first = nullptr;
      str_list[rec_id].second = 0;
    }
    // Continue past this field's delimiter
    start = field_end + 1;
  }
}
void reader::Impl::convertJsonToColumns(gdf_dtype *const dtypes, void *const *gdf_columns,
                                        gdf_valid_type *const *valid_fields, gdf_size_type *num_valid_fields) {
  // Ask the runtime for an occupancy-friendly block size for this kernel
  int threads_per_block = 0;
  int unused_min_grid = 0;
  CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&unused_min_grid, &threads_per_block, convertJsonToGdf));
  // One thread per record, rounded up to whole blocks
  const int num_blocks = (rec_starts_.size() + threads_per_block - 1) / threads_per_block;
  hipLaunchKernelGGL(convertJsonToGdf, dim3(num_blocks), dim3(threads_per_block), 0, 0, d_data_.data(),
                     d_data_.size(), rec_starts_.data(), rec_starts_.size(), dtypes, opts_, gdf_columns,
                     columns_.size(), valid_fields, num_valid_fields);
  // Surface any launch-configuration errors immediately
  CUDA_TRY(hipGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts data into cuDF column data.
*
* Data is processed in one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param[in] data The entire plain text data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] opts A set of parsing options
* @param[in] num_columns The number of columns of input data
* @param[in] rec_starts The start the input data of interest
* @param[in] num_records The number of lines/rows of input data
* @param[out] column_infos The count for each column data type
*
* @returns void
*---------------------------------------------------------------------------**/
__global__ void detectJsonDataTypes(const char *data, size_t data_size, const ParseOptions opts, int num_columns,
                                    const uint64_t *rec_starts, gdf_size_type num_records, ColumnInfo *column_infos) {
  // One thread per record/row
  long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
  if (rec_id >= num_records)
    return;
  long start = rec_starts[rec_id];
  // has the same semantics as end() in STL containers (one past last element)
  long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
  // Restrict [start, stop) to the span inside the record's top-level brackets
  limitRangeToBrackets(data, start, stop);
  // '{' => object row ("name":value pairs); otherwise array row (values only)
  const bool is_object = (data[start - 1] == '{');
  for (int col = 0; col < num_columns; col++) {
    if (is_object) {
      // Skip the field name and ':' separator before the value token
      start = seekFieldNameEnd(data, opts, start, stop);
    }
    const long field_end = seekFieldEnd(data, opts, start, stop);
    long field_data_last = field_end - 1;
    adjustForWhitespaceAndQuotes(data, &start, &field_data_last);
    const int field_len = field_data_last - start + 1;
    // Checking if the field is empty
    if (start > field_data_last || serializedTrieContains(opts.naValuesTrie, data + start, field_len)) {
      atomicAdd(&column_infos[col].null_count, 1);
      start = field_end + 1;
      continue;
    }
    // Character-class counters used below to infer the field's type
    int digit_count = 0;
    int decimal_count = 0;
    int slash_count = 0;
    int dash_count = 0;
    int colon_count = 0;
    int exponent_count = 0;
    int other_count = 0;
    // Hexadecimal candidates start with "0x" or "-0x"
    const bool maybe_hex = ((field_len > 2 && data[start] == '0' && data[start + 1] == 'x') ||
                            (field_len > 3 && data[start] == '-' && data[start + 1] == '0' && data[start + 2] == 'x'));
    for (long pos = start; pos <= field_data_last; pos++) {
      if (isDigit(data[pos], maybe_hex)) {
        digit_count++;
        continue;
      }
      // Looking for unique characters that will help identify column types
      switch (data[pos]) {
        case '.':
          decimal_count++;
          break;
        case '-':
          dash_count++;
          break;
        case '/':
          slash_count++;
          break;
        case ':':
          colon_count++;
          break;
        case 'e':
        case 'E':
          // Only interior 'e'/'E' in non-hex fields can be a float exponent
          if (!maybe_hex && pos > start && pos < field_data_last)
            exponent_count++;
          break;
        default:
          other_count++;
          break;
      }
    }
    // Integers have to have the length of the string
    int int_req_number_cnt = field_len;
    // Off by one if they start with a minus sign
    if (data[start] == '-' && field_len > 1) {
      --int_req_number_cnt;
    }
    // Off by one if they are a hexadecimal number
    if (maybe_hex) {
      --int_req_number_cnt;
    }
    if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len) ||
        serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
      atomicAdd(&column_infos[col].bool_count, 1);
    } else if (digit_count == int_req_number_cnt) {
      // Every non-sign/non-prefix character is a digit => integer
      atomicAdd(&column_infos[col].int_count, 1);
    } else if (isLikeFloat(field_len, digit_count, decimal_count, dash_count, exponent_count)) {
      atomicAdd(&column_infos[col].float_count, 1);
    }
    // A date-time field cannot have more than 3 non-special characters
    // A number field cannot have more than one decimal point
    else if (other_count > 3 || decimal_count > 1) {
      atomicAdd(&column_infos[col].string_count, 1);
    } else {
      // A date field can have either one or two '-' or '\'; A legal combination will only have one of them
      // To simplify the process of auto column detection, we are not covering all the date-time formation permutations
      if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) ||
          (dash_count == 0 && slash_count > 0 && slash_count <= 2)) {
        // Up to two colons allows for an optional hh:mm:ss time component
        if (colon_count <= 2) {
          atomicAdd(&column_infos[col].datetime_count, 1);
        } else {
          atomicAdd(&column_infos[col].string_count, 1);
        }
      } else {
        // Default field type is string
        atomicAdd(&column_infos[col].string_count, 1);
      }
    }
    // Continue past this field's delimiter
    start = field_end + 1;
  }
}
void reader::Impl::detectDataTypes(ColumnInfo *column_infos) {
  // Ask the runtime for an occupancy-friendly block size for this kernel
  int threads_per_block = 0;
  int unused_min_grid = 0;
  CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&unused_min_grid, &threads_per_block, detectJsonDataTypes));
  // One thread per record, rounded up to whole blocks
  const int num_blocks = (rec_starts_.size() + threads_per_block - 1) / threads_per_block;
  hipLaunchKernelGGL(detectJsonDataTypes, dim3(num_blocks), dim3(threads_per_block), 0, 0, d_data_.data(),
                     d_data_.size(), opts_, column_names_.size(), rec_starts_.data(), rec_starts_.size(),
                     column_infos);
  // Surface any launch-configuration errors immediately
  CUDA_TRY(hipGetLastError());
}
/**---------------------------------------------------------------------------*
 * @brief Populates dtypes_ with one gdf_dtype per column.
 *
 * If the user supplied types (args_.dtype), they are used directly: either as
 * a "name:type" dictionary (when every entry contains a colon) or as a
 * positional list. Otherwise the types are inferred from the data via
 * detectDataTypes().
 *---------------------------------------------------------------------------**/
void reader::Impl::setDataTypes() {
  if (!args_.dtype.empty()) {
    // User-provided types must cover every detected column
    CUDF_EXPECTS(args_.dtype.size() == column_names_.size(), "Need to specify the type of each column.\n");
    // Assume that the dtype is in dictionary format only if all elements contain a colon
    const bool is_dict = std::all_of(args_.dtype.begin(), args_.dtype.end(),
                                     [](const std::string &s) { return s.find(':') != std::string::npos; });
    if (is_dict) {
      std::map<std::string, gdf_dtype> col_type_map;
      for (const auto &ts : args_.dtype) {
        const size_t colon_idx = ts.find(':');
        const std::string col_name = ts.substr(0, colon_idx);
        const std::string type_str = ts.substr(colon_idx + 1);
        col_type_map[col_name] = convertStringToDtype(type_str);
      }
      // Using the map here allows O(n log n) complexity
      for (size_t col = 0; col < args_.dtype.size(); ++col) {
        // Fail loudly if a detected column name is missing from the dictionary;
        // operator[] would silently insert a default (invalid) dtype instead
        const auto it = col_type_map.find(column_names_[col]);
        CUDF_EXPECTS(it != col_type_map.end(), "Must specify data type for all columns.\n");
        dtypes_.push_back(it->second);
      }
    } else {
      // Positional list: one type string per column, in column order
      for (size_t col = 0; col < args_.dtype.size(); ++col) {
        dtypes_.push_back(convertStringToDtype(args_.dtype[col]));
      }
    }
  } else {
    CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n");
    const auto num_columns = column_names_.size();
    // Run the detection kernel and copy the per-column counters back to host
    rmm::device_vector<ColumnInfo> d_column_infos(num_columns, ColumnInfo{});
    detectDataTypes(d_column_infos.data().get());
    thrust::host_vector<ColumnInfo> h_column_infos = d_column_infos;
    // Pick a dtype per column from the counters, in priority order
    for (const auto &cinfo : h_column_infos) {
      if (cinfo.null_count == static_cast<int>(rec_starts_.size())) {
        // Entire column is NULL; allocate the smallest amount of memory
        dtypes_.push_back(GDF_INT8);
      } else if (cinfo.string_count > 0) {
        dtypes_.push_back(GDF_STRING);
      } else if (cinfo.datetime_count > 0) {
        dtypes_.push_back(GDF_DATE64);
      } else if (cinfo.float_count > 0 || (cinfo.int_count > 0 && cinfo.null_count > 0)) {
        // Integer columns containing nulls fall back to 64-bit float
        dtypes_.push_back(GDF_FLOAT64);
      } else if (cinfo.int_count > 0) {
        dtypes_.push_back(GDF_INT64);
      } else if (cinfo.bool_count > 0) {
        dtypes_.push_back(GDF_BOOL8);
      } else {
        CUDF_FAIL("Data type detection failed.\n");
      }
    }
  }
}
// Construct the reader by forwarding the options to the pimpl implementation
reader::reader(reader_options const &args)
  : impl_(std::make_unique<Impl>(args)) {}

// Read the entire input and return it as a cudf table
table reader::read() { return impl_->read(); }

// Read only the given byte range of the input; forwarded to Impl
table reader::read_byte_range(size_t offset, size_t size) {
  return impl_->read_byte_range(offset, size);
}

// Defaulted here (not in the header) so Impl is a complete type at destruction
reader::~reader() = default;
} // namespace json
} // namespace io
} // namespace cudf
| caedc4b5e5a31eb3b9a335a3c80267d5a143b6d8.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "json_reader_impl.hpp"
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <nvstrings/NVStrings.h>
#include <cudf/cudf.h>
#include <utilities/cudf_utils.h>
#include <utilities/error_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <io/comp/io_uncomp.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <io/cuio_common.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/wrapper_utils.hpp>
namespace cudf {
namespace io {
namespace json {
using string_pair = std::pair<const char *, size_t>;
reader::Impl::Impl(reader_options const &args) : args_(args) {
// Check if the passed arguments are supported
CUDF_EXPECTS(args_.lines, "Only Json Lines format is currently supported.\n");
d_true_trie_ = createSerializedTrie({"true"});
opts_.trueValuesTrie = d_true_trie_.data().get();
d_false_trie_ = createSerializedTrie({"false"});
opts_.falseValuesTrie = d_false_trie_.data().get();
d_na_trie_ = createSerializedTrie({"null"});
opts_.naValuesTrie = d_na_trie_.data().get();
}
/**---------------------------------------------------------------------------*
* @brief Estimates the maximum expected length or a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the JSON file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept {
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
table reader::Impl::read() {
ingestRawInput();
CUDF_EXPECTS(buffer_ != nullptr, "Ingest failed: input data is null.\n");
decompressInput();
CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n");
CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n");
setRecordStarts();
CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n");
uploadDataToDevice();
CUDF_EXPECTS(!d_data_.empty(), "Error uploading input data to the GPU.\n");
setColumnNames();
CUDF_EXPECTS(!column_names_.empty(), "Error determining column names.\n");
setDataTypes();
CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n");
convertDataToColumns();
CUDF_EXPECTS(!columns_.empty(), "Error converting json input into gdf columns.\n");
// Transfer ownership to raw pointer output
std::vector<gdf_column *> out_cols(columns_.size());
for (size_t i = 0; i < columns_.size(); ++i) {
out_cols[i] = columns_[i].release();
}
return table(out_cols.data(), out_cols.size());
}
table reader::Impl::read_byte_range(size_t offset, size_t size) {
byte_range_offset_ = offset;
byte_range_size_ = size;
return read();
}
void reader::Impl::ingestRawInput() {
size_t range_size = 0;
if (byte_range_size_ != 0) {
range_size = byte_range_size_ + calculateMaxRowSize(args_.dtype.size());
}
source_ = [&] {
if (args_.source_type == FILE_PATH) {
return datasource::create(args_.source, byte_range_offset_, range_size);
} else if (args_.source_type == HOST_BUFFER) {
return datasource::create(args_.source.c_str(), args_.source.size());
} else {
CUDF_FAIL("Invalid input type");
}
}();
buffer_ = source_->get_buffer(byte_range_offset_,
std::max(byte_range_size_, source_->size()));
}
void reader::Impl::decompressInput() {
const auto compression_type = inferCompressionType(
args_.compression, args_.source_type, args_.source,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
if (compression_type == "none") {
// Do not use the owner vector here to avoid extra copy
uncomp_data_ = reinterpret_cast<const char *>(buffer_->data());
uncomp_size_ = buffer_->size();
} else {
CUDF_EXPECTS(getUncompressedHostData(
reinterpret_cast<const char *>(buffer_->data()),
buffer_->size(), compression_type,
uncomp_data_owner_) == GDF_SUCCESS,
"Input data decompression failed.\n");
uncomp_data_ = uncomp_data_owner_.data();
uncomp_size_ = uncomp_data_owner_.size();
}
}
void reader::Impl::setRecordStarts() {
std::vector<char> chars_to_count{'\n'};
// Currently, ignoring lineterminations within quotes is handled by recording the records of both,
// and then filtering out the records that is a quotechar or a linetermination within a quotechar pair.
if (allow_newlines_in_strings_) {
chars_to_count.push_back('\"');
}
// If not starting at an offset, add an extra row to account for the first row in the file
const auto prefilter_count =
countAllFromSet(uncomp_data_, uncomp_size_, chars_to_count) + ((byte_range_offset_ == 0) ? 1 : 0);
rec_starts_ = device_buffer<uint64_t>(prefilter_count);
auto *find_result_ptr = rec_starts_.data();
// Manually adding an extra row to account for the first row in the file
if (byte_range_offset_ == 0) {
find_result_ptr++;
CUDA_TRY(cudaMemsetAsync(rec_starts_.data(), 0ull, sizeof(uint64_t)));
}
std::vector<char> chars_to_find{'\n'};
if (allow_newlines_in_strings_) {
chars_to_find.push_back('\"');
}
// Passing offset = 1 to return positions AFTER the found character
findAllFromSet(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr);
// Previous call stores the record pinput_file.typeositions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + prefilter_count);
auto filtered_count = prefilter_count;
if (allow_newlines_in_strings_) {
std::vector<uint64_t> h_rec_starts(prefilter_count);
CUDA_TRY(
cudaMemcpy(h_rec_starts.data(), rec_starts_.data(), sizeof(uint64_t) * prefilter_count, cudaMemcpyDefault));
bool quotation = false;
for (gdf_size_type i = 1; i < prefilter_count; ++i) {
if (uncomp_data_[h_rec_starts[i] - 1] == '\"') {
quotation = !quotation;
h_rec_starts[i] = uncomp_size_;
filtered_count--;
} else if (quotation) {
h_rec_starts[i] = uncomp_size_;
filtered_count--;
}
}
CUDA_TRY(cudaMemcpy(rec_starts_.data(), h_rec_starts.data(), prefilter_count, cudaMemcpyHostToDevice));
thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + prefilter_count);
}
// Exclude the ending newline as it does not precede a record start
if (uncomp_data_[uncomp_size_ - 1] == '\n') {
filtered_count--;
}
rec_starts_.resize(filtered_count);
}
void reader::Impl::uploadDataToDevice() {
size_t start_offset = 0;
size_t end_offset = uncomp_size_;
// Trim lines that are outside range
if (byte_range_size_ != 0 || byte_range_offset_ != 0) {
std::vector<uint64_t> h_rec_starts(rec_starts_.size());
CUDA_TRY(
cudaMemcpy(h_rec_starts.data(), rec_starts_.data(), sizeof(uint64_t) * h_rec_starts.size(), cudaMemcpyDefault));
if (byte_range_size_ != 0) {
auto it = h_rec_starts.end() - 1;
while (it >= h_rec_starts.begin() && *it > byte_range_size_) {
end_offset = *it;
--it;
}
h_rec_starts.erase(it + 1, h_rec_starts.end());
}
// Resize to exclude rows outside of the range; adjust row start positions to account for the data subcopy
start_offset = h_rec_starts.front();
rec_starts_.resize(h_rec_starts.size());
thrust::transform(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + rec_starts_.size(),
thrust::make_constant_iterator(start_offset), rec_starts_.data(), thrust::minus<uint64_t>());
}
const size_t bytes_to_upload = end_offset - start_offset;
CUDF_EXPECTS(bytes_to_upload <= uncomp_size_, "Error finding the record within the specified byte range.\n");
// Upload the raw data that is within the rows of interest
d_data_ = device_buffer<char>(bytes_to_upload);
CUDA_TRY(cudaMemcpy(d_data_.data(), uncomp_data_ + start_offset, bytes_to_upload, cudaMemcpyHostToDevice));
}
/**---------------------------------------------------------------------------*
* @brief Extract value names from a JSON object
*
* @param[in] json_obj Host vector containing the JSON object
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
*
* @return std::vector<std::string> names of JSON object values
*---------------------------------------------------------------------------**/
std::vector<std::string> getNamesFromJsonObject(const std::vector<char> &json_obj, const ParseOptions &opts) {
enum class ParseState { preColName, colName, postColName };
std::vector<std::string> names;
bool quotation = false;
auto state = ParseState::preColName;
int name_start = 0;
for (size_t pos = 0; pos < json_obj.size(); ++pos) {
if (state == ParseState::preColName) {
if (json_obj[pos] == opts.quotechar) {
name_start = pos + 1;
state = ParseState::colName;
continue;
}
} else if (state == ParseState::colName) {
if (json_obj[pos] == opts.quotechar && json_obj[pos - 1] != '\\') {
// if found a non-escaped quote character, it's the end of the column name
names.emplace_back(&json_obj[name_start], &json_obj[pos]);
state = ParseState::postColName;
continue;
}
} else if (state == ParseState::postColName) {
// TODO handle complex data types that might include unquoted commas
if (!quotation && json_obj[pos] == opts.delimiter) {
state = ParseState::preColName;
continue;
} else if (json_obj[pos] == opts.quotechar) {
quotation = !quotation;
}
}
}
return names;
}
void reader::Impl::setColumnNames() {
// If file only contains one row, use the file size for the row size
uint64_t first_row_len = d_data_.size() / sizeof(char);
if (rec_starts_.size() > 1) {
// Set first_row_len to the offset of the second row, if it exists
CUDA_TRY(cudaMemcpy(&first_row_len, rec_starts_.data() + 1, sizeof(uint64_t), cudaMemcpyDefault));
}
std::vector<char> first_row(first_row_len);
CUDA_TRY(cudaMemcpy(first_row.data(), d_data_.data(), first_row_len * sizeof(char), cudaMemcpyDefault));
// Determine the row format between:
// JSON array - [val1, val2, ...] and
// JSON object - {"col1":val1, "col2":val2, ...}
// based on the top level opening bracket
const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '[');
const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{');
CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(),
"Input data is not a valid JSON file.");
// If the first opening bracket is '{', assume object format
const bool is_object = first_curly_bracket < first_square_bracket;
if (is_object) {
column_names_ = getNamesFromJsonObject(first_row, opts_);
} else {
int cols_found = 0;
bool quotation = false;
for (size_t pos = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts_.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) {
column_names_.emplace_back(std::to_string(cols_found++));
}
}
}
}
void reader::Impl::convertDataToColumns() {
const auto num_columns = dtypes_.size();
for (size_t col = 0; col < num_columns; ++col) {
columns_.emplace_back(rec_starts_.size(), dtypes_[col], gdf_dtype_extra_info{TIME_UNIT_NONE}, column_names_[col]);
CUDF_EXPECTS(columns_.back().allocate() == GDF_SUCCESS, "Cannot allocate columns.\n");
}
thrust::host_vector<gdf_dtype> h_dtypes(num_columns);
thrust::host_vector<void *> h_data(num_columns);
thrust::host_vector<gdf_valid_type *> h_valid(num_columns);
for (size_t i = 0; i < num_columns; ++i) {
h_dtypes[i] = columns_[i]->dtype;
h_data[i] = columns_[i]->data;
h_valid[i] = columns_[i]->valid;
}
rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes;
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<gdf_valid_type *> d_valid = h_valid;
rmm::device_vector<gdf_size_type> d_valid_counts(num_columns, 0);
convertJsonToColumns(d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(), d_valid_counts.data().get());
CUDA_TRY(cudaDeviceSynchronize());
CUDA_TRY(cudaGetLastError());
thrust::host_vector<gdf_size_type> h_valid_counts = d_valid_counts;
for (size_t i = 0; i < num_columns; ++i) {
columns_[i]->null_count = columns_[i]->size - h_valid_counts[i];
}
// Handle string columns
for (auto &column : columns_) {
if (column->dtype == GDF_STRING) {
auto str_list = static_cast<string_pair *>(column->data);
auto str_data = NVStrings::create_from_index(str_list, column->size);
RMM_FREE(std::exchange(column->data, str_data), 0);
}
}
}
/**---------------------------------------------------------------------------*
* @brief Functor for converting plain text data to cuDF data type value.
*---------------------------------------------------------------------------**/
struct ConvertFunctor {
/**---------------------------------------------------------------------------*
* @brief Template specialization for operator() for types whose values can be
* convertible to a 0 or 1 to represent false/true. The converting is done by
* checking against the default and user-specified true/false values list.
*
* It is handled here rather than within convertStrToValue() as that function
* is used by other types (ex. timestamp) that aren't 'booleable'.
*---------------------------------------------------------------------------**/
template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdf_columns)[row]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - start + 1;
if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) {
value = 1;
} else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
value = 0;
} else {
value = convertStrToValue<T>(data, start, end, opts);
}
}
/**---------------------------------------------------------------------------*
* @brief Default template operator() dispatch specialization all data types
* (including wrapper types) that is not covered by above.
*---------------------------------------------------------------------------**/
template <typename T, typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdf_columns)[row]};
value = convertStrToValue<T>(data, start, end, opts);
}
};
/**---------------------------------------------------------------------------*
* @brief CUDA Kernel that modifies the start and stop offsets to exclude
* the sections outside of the top level brackets.
*
* The top level brackets characters are excluded from the resulting range.
* Parameter stop has the same semantics as end() in STL containers
* (one past the last element)
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in,out] start Offset of the first character in the range
* @param[in,out] stop Offset of the first character after the range
*
* @return void
*---------------------------------------------------------------------------**/
__device__ void limitRangeToBrackets(const char *data, long &start, long &stop) {
while (start < stop && data[start] != '[' && data[start] != '{') {
start++;
}
start++;
while (start < stop && data[stop - 1] != ']' && data[stop - 1] != '}') {
stop--;
}
stop--;
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that finds the end position of the next field name,
* including the colon that separates the name from the field value.
*
* Returns the position after the colon that preceeds the value token.
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
* @param[in] start Offset of the first character in the range
* @param[in] stop Offset of the first character after the range
*
* @return long Position of the first character after the field name.
*---------------------------------------------------------------------------**/
__device__ long seekFieldNameEnd(const char *data, const ParseOptions opts, long start, long stop) {
bool quotation = false;
for (auto pos = start; pos < stop; ++pos) {
// Ignore escaped quotes
if (data[pos] == opts.quotechar && data[pos - 1] != '\\') {
quotation = !quotation;
} else if (!quotation && data[pos] == ':') {
return pos + 1;
}
}
return stop;
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts plain text data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] data The entire data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] rec_starts The start of each data record
* @param[in] num_records The number of lines/rows
* @param[in] dtypes The data type of each column
* @param[in] opts A set of parsing options
* @param[out] gdf_columns The output column data
* @param[in] num_columns The number of columns
* @param[out] valid_fields The bitmaps indicating whether column fields are valid
* @param[out] num_valid_fields The numbers of valid fields in columns
*
* @return void
*---------------------------------------------------------------------------**/
__global__ void convertJsonToGdf(const char *data, size_t data_size, const uint64_t *rec_starts,
                                 gdf_size_type num_records, const gdf_dtype *dtypes, ParseOptions opts,
                                 void *const *gdf_columns, int num_columns, gdf_valid_type *const *valid_fields,
                                 gdf_size_type *num_valid_fields) {
// One thread handles one record (row); out-of-range threads exit immediately
const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= num_records)
return;
long start = rec_starts[rec_id];
// has the same semantics as end() in STL containers (one past last element)
long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
limitRangeToBrackets(data, start, stop);
// NOTE(review): assumes limitRangeToBrackets leaves `start` one past the opening
// bracket so data[start - 1] is '{' for objects, '[' for arrays — confirm
const bool is_object = (data[start - 1] == '{');
for (int col = 0; col < num_columns && start < stop; col++) {
if (is_object) {
// Object records carry "name": prefixes; skip past the name to the value
start = seekFieldNameEnd(data, opts, start, stop);
}
// field_end is at the next delimiter/newline
const long field_end = seekFieldEnd(data, opts, start, stop);
long field_data_last = field_end - 1;
// Modify start & end to ignore whitespace and quotechars
adjustForWhitespaceAndQuotes(data, &start, &field_data_last, opts.quotechar);
// Empty fields are not legal values
if (start <= field_data_last && !serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) {
// Type dispatcher does not handle GDF_STRINGS
if (dtypes[col] == gdf_dtype::GDF_STRING) {
// Strings are stored as (pointer, length) pairs referencing the raw buffer
auto str_list = static_cast<string_pair *>(gdf_columns[col]);
str_list[rec_id].first = data + start;
str_list[rec_id].second = field_data_last - start + 1;
} else {
cudf::type_dispatcher(dtypes[col], ConvertFunctor{}, data, gdf_columns[col], rec_id, start, field_data_last,
                      opts);
}
// set the valid bitmap - all bits were set to 0 to start
setBitmapBit(valid_fields[col], rec_id);
// Per-column valid counter is shared across all threads, hence the atomic
atomicAdd(&num_valid_fields[col], 1);
} else if (dtypes[col] == gdf_dtype::GDF_STRING) {
// Null/empty string fields get an explicit empty entry
auto str_list = static_cast<string_pair *>(gdf_columns[col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
// Advance past the field delimiter to the start of the next field
start = field_end + 1;
}
}
/**---------------------------------------------------------------------------*
 * @brief Launches the convertJsonToGdf kernel with one thread per record.
 *
 * The block size is chosen with the CUDA occupancy API and the grid is sized
 * to cover all records (rounding up to whole blocks). Returns without
 * launching when there are no records, since a zero-sized grid is an invalid
 * launch configuration.
 *---------------------------------------------------------------------------**/
void reader::Impl::convertJsonToColumns(gdf_dtype *const dtypes, void *const *gdf_columns,
gdf_valid_type *const *valid_fields, gdf_size_type *num_valid_fields) {
// Nothing to convert; avoid an invalid <<<0, ...>>> launch
if (rec_starts_.size() == 0) {
return;
}
int block_size;
int min_grid_size;
CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, convertJsonToGdf));
// One thread per record, rounded up to cover the tail
const int grid_size = (rec_starts_.size() + block_size - 1) / block_size;
convertJsonToGdf<<<grid_size, block_size>>>(d_data_.data(), d_data_.size(), rec_starts_.data(), rec_starts_.size(),
dtypes, opts_, gdf_columns, columns_.size(), valid_fields,
num_valid_fields);
// Surface launch-configuration errors immediately (execution errors surface
// at the next synchronizing call)
CUDA_TRY(cudaGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts data into cuDF column data.
*
* Data is processed in one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param[in] data The entire plain text data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] opts A set of parsing options
* @param[in] num_columns The number of columns of input data
* @param[in] rec_starts The start the input data of interest
* @param[in] num_records The number of lines/rows of input data
* @param[out] column_infos The count for each column data type
*
* @returns void
*---------------------------------------------------------------------------**/
__global__ void detectJsonDataTypes(const char *data, size_t data_size, const ParseOptions opts, int num_columns,
                                    const uint64_t *rec_starts, gdf_size_type num_records, ColumnInfo *column_infos) {
// One thread handles one record (row); out-of-range threads exit immediately
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= num_records)
return;
long start = rec_starts[rec_id];
// has the same semantics as end() in STL containers (one past last element)
long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
limitRangeToBrackets(data, start, stop);
// NOTE(review): assumes limitRangeToBrackets leaves `start` one past the opening
// bracket so data[start - 1] is '{' for objects — confirm
const bool is_object = (data[start - 1] == '{');
// NOTE(review): unlike convertJsonToGdf, this loop has no `start < stop` guard;
// verify behavior for records with fewer fields than num_columns
for (int col = 0; col < num_columns; col++) {
if (is_object) {
// Object records carry "name": prefixes; skip past the name to the value
start = seekFieldNameEnd(data, opts, start, stop);
}
const long field_end = seekFieldEnd(data, opts, start, stop);
long field_data_last = field_end - 1;
adjustForWhitespaceAndQuotes(data, &start, &field_data_last);
const int field_len = field_data_last - start + 1;
// Checking if the field is empty
if (start > field_data_last || serializedTrieContains(opts.naValuesTrie, data + start, field_len)) {
atomicAdd(&column_infos[col].null_count, 1);
start = field_end + 1;
continue;
}
// Character-class counters used below to classify the field's type
int digit_count = 0;
int decimal_count = 0;
int slash_count = 0;
int dash_count = 0;
int colon_count = 0;
int exponent_count = 0;
int other_count = 0;
// Hexadecimal candidates start with "0x" or "-0x"
const bool maybe_hex = ((field_len > 2 && data[start] == '0' && data[start + 1] == 'x') ||
                        (field_len > 3 && data[start] == '-' && data[start + 1] == '0' && data[start + 2] == 'x'));
for (long pos = start; pos <= field_data_last; pos++) {
if (isDigit(data[pos], maybe_hex)) {
digit_count++;
continue;
}
// Looking for unique characters that will help identify column types
switch (data[pos]) {
case '.':
decimal_count++;
break;
case '-':
dash_count++;
break;
case '/':
slash_count++;
break;
case ':':
colon_count++;
break;
case 'e':
case 'E':
// Only an interior 'e'/'E' in a non-hex field counts as an exponent marker
if (!maybe_hex && pos > start && pos < field_data_last)
exponent_count++;
break;
default:
other_count++;
break;
}
}
// Integers have to have the length of the string
int int_req_number_cnt = field_len;
// Off by one if they start with a minus sign
if (data[start] == '-' && field_len > 1) {
--int_req_number_cnt;
}
// Off by one if they are a hexadecimal number
if (maybe_hex) {
--int_req_number_cnt;
}
// Classification: bool > int > float > string/date-time, by decreasing strictness
if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len) ||
    serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
atomicAdd(&column_infos[col].bool_count, 1);
} else if (digit_count == int_req_number_cnt) {
atomicAdd(&column_infos[col].int_count, 1);
} else if (isLikeFloat(field_len, digit_count, decimal_count, dash_count, exponent_count)) {
atomicAdd(&column_infos[col].float_count, 1);
}
// A date-time field cannot have more than 3 non-special characters
// A number field cannot have more than one decimal point
else if (other_count > 3 || decimal_count > 1) {
atomicAdd(&column_infos[col].string_count, 1);
} else {
// A date field can have either one or two '-' or '\'; A legal combination will only have one of them
// To simplify the process of auto column detection, we are not covering all the date-time formation permutations
if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) ||
    (dash_count == 0 && slash_count > 0 && slash_count <= 2)) {
if (colon_count <= 2) {
atomicAdd(&column_infos[col].datetime_count, 1);
} else {
atomicAdd(&column_infos[col].string_count, 1);
}
} else {
// Default field type is string
atomicAdd(&column_infos[col].string_count, 1);
}
}
// Advance past the field delimiter to the start of the next field
start = field_end + 1;
}
}
void reader::Impl::detectDataTypes(ColumnInfo *column_infos) {
// Let the runtime suggest an occupancy-maximizing block size for this kernel
int suggested_block_size = 0;
int unused_min_grid_size = 0;
CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&unused_min_grid_size, &suggested_block_size, detectJsonDataTypes));
// One thread per record; round up so the last partial block is covered
const int num_blocks = (rec_starts_.size() + suggested_block_size - 1) / suggested_block_size;
detectJsonDataTypes<<<num_blocks, suggested_block_size>>>(d_data_.data(), d_data_.size(), opts_, column_names_.size(),
rec_starts_.data(), rec_starts_.size(), column_infos);
// Report launch-configuration errors right away
CUDA_TRY(cudaGetLastError());
}
/**---------------------------------------------------------------------------*
 * @brief Populates dtypes_, either from the user-specified args_.dtype list
 * (plain or "name:type" dictionary form) or by inferring types from the data
 * via the detection kernel.
 *---------------------------------------------------------------------------**/
void reader::Impl::setDataTypes() {
if (!args_.dtype.empty()) {
CUDF_EXPECTS(args_.dtype.size() == column_names_.size(), "Need to specify the type of each column.\n");
// Assume that the dtype is in dictionary format only if all elements contain a colon
const bool is_dict = std::all_of(args_.dtype.begin(), args_.dtype.end(), [](const std::string &s) {
return std::find(s.begin(), s.end(), ':') != s.end();
});
if (is_dict) {
// Build a name -> type map; lookups below are then O(n log n) overall
std::map<std::string, gdf_dtype> col_type_map;
for (const auto &ts : args_.dtype) {
const size_t colon_idx = ts.find(":");
const std::string col_name(ts.begin(), ts.begin() + colon_idx);
const std::string type_str(ts.begin() + colon_idx + 1, ts.end());
col_type_map[col_name] = convertStringToDtype(type_str);
}
for (size_t col = 0; col < args_.dtype.size(); ++col) {
// Use find() instead of operator[]: a missing column name must fail
// loudly rather than silently default-insert an invalid dtype
const auto it = col_type_map.find(column_names_[col]);
CUDF_EXPECTS(it != col_type_map.end(), "Must specify data types for all columns.\n");
dtypes_.push_back(it->second);
}
} else {
// Plain list of type names, one per column, in column order
for (size_t col = 0; col < args_.dtype.size(); ++col) {
dtypes_.push_back(convertStringToDtype(args_.dtype[col]));
}
}
} else {
// No types given; infer from the data itself
CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n");
const auto num_columns = column_names_.size();
rmm::device_vector<ColumnInfo> d_column_infos(num_columns, ColumnInfo{});
detectDataTypes(d_column_infos.data().get());
thrust::host_vector<ColumnInfo> h_column_infos = d_column_infos;
// Pick the widest/safest type implied by the per-column counts
for (const auto &cinfo : h_column_infos) {
if (cinfo.null_count == static_cast<int>(rec_starts_.size())) {
// Entire column is NULL; allocate the smallest amount of memory
dtypes_.push_back(GDF_INT8);
} else if (cinfo.string_count > 0) {
dtypes_.push_back(GDF_STRING);
} else if (cinfo.datetime_count > 0) {
dtypes_.push_back(GDF_DATE64);
} else if (cinfo.float_count > 0 || (cinfo.int_count > 0 && cinfo.null_count > 0)) {
// Integer columns with nulls are promoted to float so nulls can be NaN
dtypes_.push_back(GDF_FLOAT64);
} else if (cinfo.int_count > 0) {
dtypes_.push_back(GDF_INT64);
} else if (cinfo.bool_count > 0) {
dtypes_.push_back(GDF_BOOL8);
} else {
CUDF_FAIL("Data type detection failed.\n");
}
}
}
}
// Public reader API: thin forwarders to the pimpl (Impl) implementation.
reader::reader(reader_options const &args)
    : impl_(std::make_unique<Impl>(args)) {}
table reader::read() { return impl_->read(); }
table reader::read_byte_range(size_t offset, size_t size) {
return impl_->read_byte_range(offset, size);
}
// Defaulted here (not in the header) so unique_ptr<Impl> is destroyed where
// Impl is a complete type.
reader::~reader() = default;
} // namespace json
} // namespace io
} // namespace cudf
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.